filename | text
---|---
the-stack_0_24182 | import numpy as np
import properties
from ....survey import BaseTimeSurvey
from . import sources
from . import receivers
from .. import resistivity as dc
class Survey(BaseTimeSurvey):
"""
Spectral induced polarization survey
"""
n_pulse = 2
T = 8.0
source_list = properties.List(
"A list of sources for the survey",
properties.Instance("A SimPEG source", sources.BaseSrc),
default=[],
)
def __init__(self, source_list=None, **kwargs):
super().__init__(source_list, **kwargs)
@property
def n_locations(self):
return int(self.nD / self.unique_times.size)
def from_dc_to_sip_survey(survey_dc, times):
"""
Generate sip survey from dc survey
"""
source_list = survey_dc.source_list
source_list_sip = []
for src in source_list:
receiver_list_sip = []
for rx in src.receiver_list:
if isinstance(rx, dc.receivers.Pole):
rx_sip = receivers.Pole(rx.locations, times=times)
elif isinstance(rx, dc.receivers.Dipole):
rx_sip = receivers.Dipole(rx.locations[0], rx.locations[1], times=times)
else:
print(rx)
raise NotImplementedError()
receiver_list_sip.append(rx_sip)
if isinstance(src, dc.sources.Pole):
src_sip = sources.Pole(receiver_list_sip, src.loc)
elif isinstance(src, dc.sources.Dipole):
src_sip = sources.Dipole(receiver_list_sip, src.loc[0], src.loc[1])
else:
print(src)
raise NotImplementedError()
source_list_sip.append(src_sip)
survey_sip = Survey(source_list_sip)
return survey_sip
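# Minimal usage sketch (hedged: `dc_survey` is assumed to be an existing SimPEG
# DC resistivity survey object and `times` any 1D array of measurement times;
# the names below are illustrative only):
#
# times = np.logspace(-3, 0, 20)
# sip_survey = from_dc_to_sip_survey(dc_survey, times)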
|
the-stack_0_24183 | #!/usr/bin/env python3
# Copyright (c) 2015-2017 The StarChain Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test StarChaind with different proxy configuration.
Test plan:
- Start StarChaind's with different proxy configurations
- Use addnode to initiate connections
- Verify that proxies are connected to, and the right connection command is given
- Proxy configurations to test on StarChaind side:
- `-proxy` (proxy everything)
- `-onion` (proxy just onions)
- `-proxyrandomize` Circuit randomization
- Proxy configurations to test on proxy side,
- support no authentication (other proxy)
- support no authentication + user/pass authentication (Tor)
- proxy on IPv6
- Create various proxies (as threads)
- Create StarChainds that connect to them
- Manipulate the StarChainds using addnode (onetry) and observe effects
addnode connect to IPv4
addnode connect to IPv6
addnode connect to onion
addnode connect to generic DNS name
"""
import socket
import os
from test_framework.socks5 import Socks5Configuration, Socks5Command, Socks5Server, AddressType
from test_framework.test_framework import StarChainTestFramework
from test_framework.util import (
PORT_MIN,
PORT_RANGE,
assert_equal,
)
from test_framework.netutil import test_ipv6_local
RANGE_BEGIN = PORT_MIN + 2 * PORT_RANGE # Start after p2p and rpc ports
class ProxyTest(StarChainTestFramework):
def set_test_params(self):
self.num_nodes = 4
def setup_nodes(self):
self.have_ipv6 = test_ipv6_local()
# Create two proxies on different ports
# ... one unauthenticated
self.conf1 = Socks5Configuration()
self.conf1.addr = ('127.0.0.1', RANGE_BEGIN + (os.getpid() % 1000))
self.conf1.unauth = True
self.conf1.auth = False
# ... one supporting authenticated and unauthenticated (Tor)
self.conf2 = Socks5Configuration()
self.conf2.addr = ('127.0.0.1', RANGE_BEGIN + 1000 + (os.getpid() % 1000))
self.conf2.unauth = True
self.conf2.auth = True
if self.have_ipv6:
# ... one on IPv6 with similar configuration
self.conf3 = Socks5Configuration()
self.conf3.af = socket.AF_INET6
self.conf3.addr = ('::1', RANGE_BEGIN + 2000 + (os.getpid() % 1000))
self.conf3.unauth = True
self.conf3.auth = True
else:
self.log.warning("Testing without local IPv6 support")
self.serv1 = Socks5Server(self.conf1)
self.serv1.start()
self.serv2 = Socks5Server(self.conf2)
self.serv2.start()
if self.have_ipv6:
self.serv3 = Socks5Server(self.conf3)
self.serv3.start()
# Note: proxies are not used to connect to local nodes
# this is because the proxy to use is based on CService.GetNetwork(), which returns NET_UNROUTABLE for localhost
args = [
['-listen', '-proxy=%s:%i' % (self.conf1.addr),'-proxyrandomize=1'],
['-listen', '-proxy=%s:%i' % (self.conf1.addr),'-onion=%s:%i' % (self.conf2.addr),'-proxyrandomize=0'],
['-listen', '-proxy=%s:%i' % (self.conf2.addr),'-proxyrandomize=1'],
[]
]
if self.have_ipv6:
args[3] = ['-listen', '-proxy=[%s]:%i' % (self.conf3.addr),'-proxyrandomize=0', '-noonion']
self.add_nodes(self.num_nodes, extra_args=args)
self.start_nodes()
def node_test(self, node, proxies, auth, test_onion=True):
rv = []
# Test: outgoing IPv4 connection through node
node.addnode("15.61.23.23:1234", "onetry")
cmd = proxies[0].queue.get()
assert(isinstance(cmd, Socks5Command))
# Note: StarChaind's SOCKS5 implementation only sends atyp DOMAINNAME, even if connecting directly to IPv4/IPv6
assert_equal(cmd.atyp, AddressType.DOMAINNAME)
assert_equal(cmd.addr, b"15.61.23.23")
assert_equal(cmd.port, 1234)
if not auth:
assert_equal(cmd.username, None)
assert_equal(cmd.password, None)
rv.append(cmd)
if self.have_ipv6:
# Test: outgoing IPv6 connection through node
node.addnode("[1233:3432:2434:2343:3234:2345:6546:4534]:5443", "onetry")
cmd = proxies[1].queue.get()
assert(isinstance(cmd, Socks5Command))
# Note: StarChaind's SOCKS5 implementation only sends atyp DOMAINNAME, even if connecting directly to IPv4/IPv6
assert_equal(cmd.atyp, AddressType.DOMAINNAME)
assert_equal(cmd.addr, b"1233:3432:2434:2343:3234:2345:6546:4534")
assert_equal(cmd.port, 5443)
if not auth:
assert_equal(cmd.username, None)
assert_equal(cmd.password, None)
rv.append(cmd)
if test_onion:
# Test: outgoing onion connection through node
node.addnode("StarChainostk4e4re.onion:8333", "onetry")
cmd = proxies[2].queue.get()
assert(isinstance(cmd, Socks5Command))
assert_equal(cmd.atyp, AddressType.DOMAINNAME)
assert_equal(cmd.addr, b"StarChainostk4e4re.onion")
assert_equal(cmd.port, 8333)
if not auth:
assert_equal(cmd.username, None)
assert_equal(cmd.password, None)
rv.append(cmd)
# Test: outgoing DNS name connection through node
node.addnode("node.noumenon:8333", "onetry")
cmd = proxies[3].queue.get()
assert(isinstance(cmd, Socks5Command))
assert_equal(cmd.atyp, AddressType.DOMAINNAME)
assert_equal(cmd.addr, b"node.noumenon")
assert_equal(cmd.port, 8333)
if not auth:
assert_equal(cmd.username, None)
assert_equal(cmd.password, None)
rv.append(cmd)
return rv
def run_test(self):
# basic -proxy
self.node_test(self.nodes[0], [self.serv1, self.serv1, self.serv1, self.serv1], False)
# -proxy plus -onion
self.node_test(self.nodes[1], [self.serv1, self.serv1, self.serv2, self.serv1], False)
# -proxy plus -onion, -proxyrandomize
rv = self.node_test(self.nodes[2], [self.serv2, self.serv2, self.serv2, self.serv2], True)
# Check that credentials as used for -proxyrandomize connections are unique
credentials = set((x.username,x.password) for x in rv)
assert_equal(len(credentials), len(rv))
if self.have_ipv6:
# proxy on IPv6 localhost
self.node_test(self.nodes[3], [self.serv3, self.serv3, self.serv3, self.serv3], False, False)
def networks_dict(d):
r = {}
for x in d['networks']:
r[x['name']] = x
return r
# test RPC getnetworkinfo
n0 = networks_dict(self.nodes[0].getnetworkinfo())
for net in ['ipv4','ipv6','onion']:
assert_equal(n0[net]['proxy'], '%s:%i' % (self.conf1.addr))
assert_equal(n0[net]['proxy_randomize_credentials'], True)
assert_equal(n0['onion']['reachable'], True)
n1 = networks_dict(self.nodes[1].getnetworkinfo())
for net in ['ipv4','ipv6']:
assert_equal(n1[net]['proxy'], '%s:%i' % (self.conf1.addr))
assert_equal(n1[net]['proxy_randomize_credentials'], False)
assert_equal(n1['onion']['proxy'], '%s:%i' % (self.conf2.addr))
assert_equal(n1['onion']['proxy_randomize_credentials'], False)
assert_equal(n1['onion']['reachable'], True)
n2 = networks_dict(self.nodes[2].getnetworkinfo())
for net in ['ipv4','ipv6','onion']:
assert_equal(n2[net]['proxy'], '%s:%i' % (self.conf2.addr))
assert_equal(n2[net]['proxy_randomize_credentials'], True)
assert_equal(n2['onion']['reachable'], True)
if self.have_ipv6:
n3 = networks_dict(self.nodes[3].getnetworkinfo())
for net in ['ipv4','ipv6']:
assert_equal(n3[net]['proxy'], '[%s]:%i' % (self.conf3.addr))
assert_equal(n3[net]['proxy_randomize_credentials'], False)
assert_equal(n3['onion']['reachable'], False)
if __name__ == '__main__':
ProxyTest().main()
|
the-stack_0_24184 | # Copyright 2016 Intel Corporation
# Copyright 2013,2014 Cray Inc
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from ironic.common.i18n import _
opts = [
cfg.IntOpt('power_timeout',
default=10,
help=_('Seconds to wait for power action to be completed')),
# NOTE(yuriyz): some of SNMP-enabled hardware have own options for pause
# between off and on. This option guarantees minimal value.
cfg.IntOpt('reboot_delay',
default=0,
min=0,
help=_('Time (in seconds) to sleep between when rebooting '
'(powering off and on again)')),
cfg.FloatOpt('udp_transport_timeout',
default=1.0,
min=0.0,
help=_('Response timeout in seconds used for UDP transport. '
'Timeout should be a multiple of 0.5 seconds and '
'is applicable to each retry.')),
cfg.IntOpt('udp_transport_retries',
default=5,
min=0,
help=_('Maximum number of UDP request retries, '
'0 means no retries.')),
]
def register_opts(conf):
conf.register_opts(opts, group='snmp')
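# For reference, the options registered above live in the [snmp] group of
# ironic.conf; an explicit configuration using the defaults would look roughly
# like this (illustrative, not generated output):
#
# [snmp]
# power_timeout = 10
# reboot_delay = 0
# udp_transport_timeout = 1.0
# udp_transport_retries = 5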
|
the-stack_0_24185 | # -*- coding: utf-8 -*-
import logging
from box import Box
from PySide6 import QtCore, QtWidgets, QtGui
from fastflix.encoders.common.setting_panel import SettingPanel
from fastflix.language import t
from fastflix.models.encode import VCEEncCAVCSettings
from fastflix.models.fastflix_app import FastFlixApp
from fastflix.shared import link
from fastflix.resources import get_icon
logger = logging.getLogger("fastflix")
presets = ["balanced", "fast", "slow"]
recommended_bitrates = [
"200k (320x240p @ 30fps)",
"300k (640x360p @ 30fps)",
"1000k (640x480p @ 30fps)",
"1750k (1280x720p @ 30fps)",
"2500k (1280x720p @ 60fps)",
"4000k (1920x1080p @ 30fps)",
"5000k (1920x1080p @ 60fps)",
"7000k (2560x1440p @ 30fps)",
"10000k (2560x1440p @ 60fps)",
"15000k (3840x2160p @ 30fps)",
"20000k (3840x2160p @ 60fps)",
"Custom",
]
recommended_crfs = [
"28",
"27",
"26",
"25",
"24",
"23",
"22",
"21",
"20",
"19",
"18",
"17",
"16",
"15",
"14",
"Custom",
]
def get_breaker():
breaker_line = QtWidgets.QWidget()
breaker_line.setMaximumHeight(2)
breaker_line.setStyleSheet("background-color: #ccc; margin: auto 0; padding: auto 0;")
return breaker_line
class VCEENCCAVC(SettingPanel):
profile_name = "vceencc_avc"
hdr10plus_signal = QtCore.Signal(str)
hdr10plus_ffmpeg_signal = QtCore.Signal(str)
def __init__(self, parent, main, app: FastFlixApp):
super().__init__(parent, main, app)
self.main = main
self.app = app
grid = QtWidgets.QGridLayout()
self.widgets = Box(mode=None)
self.mode = "Bitrate"
self.updating_settings = False
grid.addLayout(self.init_modes(), 0, 2, 4, 4)
grid.addLayout(self._add_custom(title="Custom VCEEncC options", disable_both_passes=True), 10, 0, 1, 6)
grid.addLayout(self.init_preset(), 0, 0, 1, 2)
grid.addLayout(self.init_profile(), 1, 0, 1, 2)
grid.addLayout(self.init_mv_precision(), 2, 0, 1, 2)
grid.addLayout(self.init_pre(), 3, 0, 1, 2)
breaker = QtWidgets.QHBoxLayout()
breaker_label = QtWidgets.QLabel(t("Advanced"))
breaker_label.setFont(QtGui.QFont("helvetica", 8, weight=55))
breaker.addWidget(get_breaker(), stretch=1)
breaker.addWidget(breaker_label, alignment=QtCore.Qt.AlignHCenter)
breaker.addWidget(get_breaker(), stretch=1)
grid.addLayout(breaker, 4, 0, 1, 6)
qp_line = QtWidgets.QHBoxLayout()
qp_line.addLayout(self.init_min_q())
qp_line.addStretch(1)
qp_line.addLayout(self.init_max_q())
qp_line.addStretch(1)
qp_line.addLayout(self.init_ref())
qp_line.addStretch(1)
qp_line.addLayout(self.init_b_frames())
qp_line.addStretch(1)
qp_line.addLayout(self.init_level())
qp_line.addStretch(1)
qp_line.addLayout(self.init_decoder())
qp_line.addStretch(1)
qp_line.addLayout(self.init_metrics())
grid.addLayout(qp_line, 5, 0, 1, 6)
self.ffmpeg_level = QtWidgets.QLabel()
grid.addWidget(self.ffmpeg_level, 8, 2, 1, 4)
grid.setRowStretch(9, 1)
guide_label = QtWidgets.QLabel(
link(
"https://github.com/rigaya/VCEEnc/blob/master/VCEEncC_Options.en.md",
t("VCEEncC Options"),
app.fastflix.config.theme,
)
)
warning_label = QtWidgets.QLabel()
warning_label.setPixmap(QtGui.QIcon(get_icon("onyx-warning", self.app.fastflix.config.theme)).pixmap(22))
guide_label.setAlignment(QtCore.Qt.AlignBottom)
guide_label.setOpenExternalLinks(True)
grid.addWidget(guide_label, 11, 0, 1, 4)
grid.addWidget(warning_label, 11, 4, 1, 1, alignment=QtCore.Qt.AlignRight)
grid.addWidget(QtWidgets.QLabel(t("VCEEncC Encoder support is still experimental!")), 11, 5, 1, 1)
self.setLayout(grid)
self.hide()
self.hdr10plus_signal.connect(self.done_hdr10plus_extract)
self.hdr10plus_ffmpeg_signal.connect(lambda x: self.ffmpeg_level.setText(x))
def init_preset(self):
return self._add_combo_box(
label="Preset",
widget_name="preset",
options=presets,
tooltip="preset: The slower the preset, the better the compression and quality",
connect="default",
opt="preset",
)
def init_profile(self):
return self._add_combo_box(
label="Profile",
widget_name="profile",
options=["Baseline", "Main", "High"],
connect="default",
opt="profile",
)
def init_mv_precision(self):
return self._add_combo_box(
label="Motion vector accuracy",
tooltip="Q-pel is highest precision",
widget_name="mv_precision",
options=["q-pel", "half-pel", "full-pel"],
opt="mv_precision",
)
def init_lookahead(self):
return self._add_combo_box(
label="Lookahead",
tooltip="",
widget_name="lookahead",
opt="lookahead",
options=["off"] + [str(x) for x in range(1, 33)],
)
def init_pre(self):
layout = QtWidgets.QHBoxLayout()
layout.addLayout(self._add_check_box(label="VBAQ", widget_name="vbaq", opt="vbaq"))
layout.addLayout(self._add_check_box(label="Pre Encode", widget_name="pre_encode", opt="pre_encode"))
layout.addLayout(self._add_check_box(label="Pre Analysis", widget_name="pre_analysis", opt="pre_analysis"))
return layout
def init_level(self):
layout = self._add_combo_box(
label="Level",
tooltip="Set the encoding level restriction",
widget_name="level",
options=[
t("Auto"),
"1.0",
"2.0",
"2.1",
"3.0",
"3.1",
"4.0",
"4.1",
"5.0",
"5.1",
"5.2",
],
opt="level",
)
self.widgets.level.setMinimumWidth(60)
return layout
@staticmethod
def _qp_range():
return [str(x) for x in range(0, 52)]
def init_min_q(self):
layout = QtWidgets.QHBoxLayout()
layout.addWidget(QtWidgets.QLabel(t("Min Q")))
layout.addWidget(
self._add_combo_box(widget_name="min_q", options=["I"] + self._qp_range(), min_width=45, opt="min_q")
)
return layout
def init_max_q(self):
layout = QtWidgets.QHBoxLayout()
layout.addWidget(QtWidgets.QLabel(t("Max Q")))
layout.addWidget(
self._add_combo_box(widget_name="max_q", options=["I"] + self._qp_range(), min_width=45, opt="max_q")
)
return layout
def init_b_frames(self):
return self._add_combo_box(
widget_name="b_frames",
label="B Frames",
options=[t("Auto")] + [str(x) for x in range(3)],
opt="b_frames",
min_width=60,
)
def init_ref(self):
return self._add_combo_box(
widget_name="ref",
label="Ref Frames",
options=[t("Auto")] + [str(x) for x in range(17)],
opt="ref",
min_width=60,
)
def init_decoder(self):
return self._add_combo_box(
widget_name="decoder",
label="Decoder",
options=["Hardware", "Software"],
opt="decoder",
tooltip="Hardware: use libavformat + hardware decoder for input\nSoftware: use avcodec + software decoder",
min_width=80,
)
def init_metrics(self):
return self._add_check_box(
widget_name="metrics",
opt="metrics",
label="Metrics",
tooltip="Calculate PSNR and SSIM and show in the encoder output",
)
def init_modes(self):
layout = self._add_modes(recommended_bitrates, recommended_crfs, qp_name="cqp")
return layout
def mode_update(self):
self.widgets.custom_cqp.setDisabled(self.widgets.cqp.currentText() != "Custom")
self.widgets.custom_bitrate.setDisabled(self.widgets.bitrate.currentText() != "Custom")
self.main.build_commands()
def setting_change(self, update=True):
if self.updating_settings:
return
self.updating_settings = True
if update:
self.main.page_update()
self.updating_settings = False
def update_video_encoder_settings(self):
settings = VCEEncCAVCSettings(
preset=self.widgets.preset.currentText().split("-")[0].strip(),
mv_precision=self.widgets.mv_precision.currentText(),
max_q=self.widgets.max_q.currentText() if self.widgets.max_q.currentIndex() != 0 else None,
min_q=self.widgets.min_q.currentText() if self.widgets.min_q.currentIndex() != 0 else None,
extra=self.ffmpeg_extras,
metrics=self.widgets.metrics.isChecked(),
level=self.widgets.level.currentText() if self.widgets.level.currentIndex() != 0 else None,
b_frames=self.widgets.b_frames.currentText() if self.widgets.b_frames.currentIndex() != 0 else None,
ref=self.widgets.ref.currentText() if self.widgets.ref.currentIndex() != 0 else None,
pre_encode=self.widgets.pre_encode.isChecked(),
pre_analysis=self.widgets.pre_analysis.isChecked(),
vbaq=self.widgets.vbaq.isChecked(),
decoder=self.widgets.decoder.currentText(),
)
encode_type, q_value = self.get_mode_settings()
settings.cqp = q_value if encode_type == "qp" else None
settings.bitrate = q_value if encode_type == "bitrate" else None
self.app.fastflix.current_video.video_settings.video_encoder_settings = settings
def set_mode(self, x):
self.mode = x.text()
self.widgets.min_q.setEnabled(self.mode.lower() == "bitrate")
self.widgets.max_q.setEnabled(self.mode.lower() == "bitrate")
self.main.build_commands()
def new_source(self):
if not self.app.fastflix.current_video:
return
super().new_source()
|
the-stack_0_24187 | "Template support for Cheetah"
import sys, os, imp
from Cheetah import Compiler
import pkg_resources
def _recompile_template(package, basename, tfile, classname):
tmpl = pkg_resources.resource_string(package, "%s.tmpl" % basename)
c = Compiler.Compiler(source=tmpl, mainClassName='GenTemplate')
code = str(c)
mod = imp.new_module(classname)
ns = dict()
exec(code, ns)
tempclass = ns.get("GenTemplate",
ns.get('DynamicallyCompiledCheetahTemplate'))
assert tempclass
tempclass.__name__ = basename
setattr(mod, basename, tempclass)
sys.modules[classname] = mod
return mod
class TurboCheetah:
extension = "tmpl"
def __init__(self, extra_vars_func=None, options=None):
if options is None:
options = dict()
self.get_extra_vars = extra_vars_func
self.options = options
self.compiledTemplates = {}
self.search_path = []
def load_template(self, template=None,
template_string=None, template_file=None,
loadingSite=False):
"""Searches for a template along the Python path.
Template files must end in ".tmpl" and be in legitimate packages.
"""
given = len([_f for _f in (template, template_string, template_file) if _f])
if given > 1:
raise TypeError(
"You may give only one of template, template_string, and "
"template_file")
if not given:
raise TypeError(
"You must give one of template, template_string, or "
"template_file")
if template:
return self.load_template_module(template)
elif template_string:
return self.load_template_string(template_string)
elif template_file:
return self.load_template_file(template_file)
def load_template_module(self, classname):
ct = self.compiledTemplates
divider = classname.rfind(".")
if divider > -1:
package = classname[0:divider]
basename = classname[divider+1:]
else:
raise ValueError("All templates must be in a package")
if not self.options.get("cheetah.precompiled", False):
tfile = pkg_resources.resource_filename(package,
"%s.%s" %
(basename,
self.extension))
if classname in ct:
mtime = os.stat(tfile).st_mtime
if ct[classname] != mtime:
ct[classname] = mtime
del sys.modules[classname]
mod = _recompile_template(package, basename,
tfile, classname)
else:
mod = __import__(classname, dict(), dict(), [basename])
else:
ct[classname] = os.stat(tfile).st_mtime
mod = _recompile_template(package, basename,
tfile, classname)
else:
mod = __import__(classname, dict(), dict(), [basename])
tempclass = getattr(mod, basename)
return tempclass
def load_template_string(self, content):
raise NotImplementedError
def load_template_file(self, filename):
raise NotImplementedError
def render(self, info, format="html", fragment=False, template=None,
template_string=None, template_file=None):
tclass = self.load_template(
template=template, template_string=template_string,
template_file=template_file)
if self.get_extra_vars:
extra = self.get_extra_vars()
else:
extra = {}
tempobj = tclass(searchList=[info, extra])
if fragment:
return tempobj.fragment()
else:
return tempobj.respond()
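# Minimal usage sketch (the package and template name below are hypothetical;
# any importable package containing a Cheetah `page.tmpl` would work the same):
#
# engine = TurboCheetah(options={"cheetah.precompiled": False})
# html = engine.render({"title": "Hello"}, template="mypkg.templates.page")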
|
the-stack_0_24188 | import heapq
from dataclasses import dataclass, field
from operator import lt
from typing import Dict, List, Optional, Tuple
# default collection name if none is specified.
DEFAULT_COLLECTION_NAME = "default_collection"
"""
Time Taken By Me -> 33 mins 18 secs
Atlassian LLD Round -:
Design the following -:
Given a list of [FileName, FileSize, [Collection]]
- A collection can have 1 or more files.
- Same file can be a part of more than 1 collection.
How would you design a system
- To calculate total size of files processed.
- To calculate Top-K collections based on size.
Example:
file1.txt(size: 100)
file2.txt(size: 200) in collection "collection1"
file3.txt(size: 200) in collection "collection1"
file4.txt(size: 300) in collection "collection2"
file5.txt(size: 100)
Output:
Total size of files processed: 900
Top 2 collections:
- collection1 : 400
- collection2 : 300
"""
@dataclass
class Attributes:
# dummy base class which can store some common attributes b/w File and Directory.
pass
@dataclass()
class File(Attributes):
# This represents a file in our file system.
name: str
size: float
dir_name: str
@dataclass
class Directory(Attributes):
# This represents a directory in our file system.
name: str
size: float = 0
files: List[File] = field(default_factory=list)
class DirectoryWithSize(object):
def __init__(self, dir_name:str, dir_size:float) -> None:
self.dir_name = dir_name
self.dir_size = dir_size
def __lt__(self, other):
return lt(self.dir_size, other.dir_size)
@dataclass
class FileSystem:
# This is the file system that we are trying to model here
_total_file_system_size: float = 0
all_files: Dict[str, float] = field(default_factory=dict)
directory_mapping: Dict[str, Directory] = field(default_factory=dict)
directory_present_in_system: set = field(default_factory=set)
def get_total_file_system_size(self) -> float:
return self._total_file_system_size
def add_file_to_directory(
self, file_name: str, file_size: float, file_directory: Optional[str]
) -> None:
# Normalize the collection name, then register the directory if it doesn't exist yet.
# (Normalizing before the membership check avoids re-creating, and thereby wiping,
# an already-registered default collection when several files arrive without one.)
file_directory = file_directory or DEFAULT_COLLECTION_NAME
if file_directory not in self.directory_present_in_system:
self.directory_present_in_system.add(file_directory)
self.directory_mapping[file_directory] = Directory(name=file_directory)
# create the file object and update the respective collections accordingly.
current_file = File(
name=file_name,
size=file_size,
dir_name=file_directory,
)
current_directory = self.directory_mapping.get(file_directory)
current_directory.files.append(current_file)
current_directory.size += file_size
# increment the global file system size
self._total_file_system_size += file_size
self.all_files[current_file.dir_name] = current_directory.size
print(
f"File named {file_name} and size {file_size} was successfully added to our file_system under {file_directory}."
)
def get_top_k_directory(self, top_k: int) -> List[Tuple[str, float]]:
# let's make a heap from the lsit of <dir_name, dir_size> and then get the top_k basically.
# it can actually be moved out and we can maintain a fixed heap in global space as well.
_max_heap = []
for dir_name, dir_size in self.all_files.items():
heapq.heappush(_max_heap, DirectoryWithSize(dir_name, -1 * dir_size))
_results = []
for _ in range(0, top_k):
dir_obj = heapq.heappop(_max_heap)
dir_name, dir_size = dir_obj.dir_name, -1 * dir_obj.dir_size
_results.append((dir_name, dir_size))
return _results
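# Design note: rebuilding the heap on every call costs O(n log n) pushes plus
# O(k log n) pops. An equivalent approach without the sign-flipping wrapper is
# heapq.nlargest(top_k, self.all_files.items(), key=lambda kv: kv[1]);
# the explicit max-heap is kept here to mirror the original interview-style solution.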
if __name__ == "__main__":
files = [
["file_1.txt", 10000, ""],
["file_2.txt", 1000, "collection_1"],
["file_3.txt", 1210, "collection_2"],
["file_4.txt", 300, "collection_1"],
["file_5.txt", 600, "collection_2"],
["file_6.txt", 500, "collection_5"],
]
top_k = 2
fp = FileSystem()
for (file_name, file_size, file_directory) in files:
fp.add_file_to_directory(file_name, file_size, file_directory)
print(fp.all_files)
print("\n")
print("Total Processed -: \n\t", fp.get_total_file_system_size())
print(f"Top-{top_k} collections are -: \n\t ", fp.get_top_k_directory(top_k=top_k))
|
the-stack_0_24190 | from collections import OrderedDict
from itertools import islice
from cached_property import cached_property
from devito.dimension import Dimension
from devito.ir.equations import ClusterizedEq
from devito.symbolics import (as_symbol, retrieve_indexed, retrieve_terminals,
q_indirect, q_timedimension)
from devito.tools import DefaultOrderedDict, flatten, filter_ordered
from devito.types import Symbol
__all__ = ['FlowGraph']
class Node(ClusterizedEq):
"""
A special :class:`ClusterizedEq` which keeps track of: ::
- :class:`sympy.Eq` writing to ``self``
- :class:`sympy.Eq` reading from ``self``
"""
_state = ClusterizedEq._state + ('reads', 'readby')
@property
def function(self):
return self.lhs.function
@property
def reads(self):
return self._reads
@property
def readby(self):
return self._readby
@property
def is_unbound_temporary(self):
return self.function.is_Array and not self.reads and not self.readby
def __repr__(self):
reads = '[%s%s]' % (', '.join([str(i) for i in self.reads][:2]), '%s')
reads = reads % ('' if len(self.reads) <= 2 else ', ...')
readby = '[%s%s]' % (', '.join([str(i) for i in self.readby][:2]), '%s')
readby = readby % ('' if len(self.readby) <= 2 else ', ...')
return "Node(key=%s, reads=%s, readby=%s)" % (self.lhs, reads, readby)
class FlowGraph(OrderedDict):
"""
A FlowGraph represents an ordered sequence of operations. Operations,
of type :class:`Node`, are the nodes of the graph. An edge from ``n0`` to
``n1`` indicates that ``n1`` reads from ``n0``. For example, the sequence: ::
temp0 = a*b
temp1 = temp0*c
temp2 = temp0*d
temp3 = temp1 + temp2
is represented by the following FlowGraph: ::
temp0 ---> temp1
| |
| |
v v
temp2 ---> temp3
The input and output edges of a node ``n`` are encoded in ``n.reads`` and
``n.readby``, respectively.
Operations may involve scalars and indexed objects (arrays). The indices
of the indexed objects represent either "space" or "time" dimensions.
"""
def __init__(self, exprs, **kwargs):
# Always convert to SSA
exprs = makeit_ssa(exprs)
mapper = OrderedDict([(i.lhs, i) for i in exprs])
assert len(set(mapper)) == len(exprs), "not SSA Cluster?"
# Construct the Nodes, tracking reads and readby
tensor_map = DefaultOrderedDict(list)
for i in mapper:
tensor_map[as_symbol(i)].append(i)
reads = DefaultOrderedDict(set)
readby = DefaultOrderedDict(set)
for k, v in mapper.items():
handle = retrieve_terminals(v.rhs)
for i in list(handle):
if i.is_Indexed:
for idx in i.indices:
handle |= retrieve_terminals(idx)
reads[k].update(set(flatten([tensor_map.get(as_symbol(i), [])
for i in handle])))
for i in reads[k]:
readby[i].add(k)
# Make sure read-after-writes are honored for scalar nodes
processed = [i for i in mapper if i.is_Indexed]
queue = [i for i in mapper if i not in processed]
while queue:
k = queue.pop(0)
if not readby[k] or k in readby[k]:
processed.insert(0, k)
elif all(i in processed for i in readby[k]):
index = min(processed.index(i) for i in readby[k])
processed.insert(index, k)
else:
queue.append(k)
# Build up the FlowGraph
nodes = [(i, Node(mapper[i], reads=reads[i], readby=readby[i]))
for i in processed]
super(FlowGraph, self).__init__(nodes, **kwargs)
# Determine indices along the space and time dimensions
terms = [v for k, v in self.items() if v.is_Tensor and not q_indirect(k)]
indices = filter_ordered(flatten([i.function.indices for i in terms]))
self.space_indices = tuple(i for i in indices if i.is_Space)
self.time_indices = tuple(i for i in indices if i.is_Time)
def trace(self, key, readby=False, strict=False):
"""
Return the sequence of operations required to compute the node ``key``.
If ``readby = True``, then return the sequence of operations that will
depend on ``key``, instead. With ``strict = True``, drop ``key`` from the
result.
"""
if key not in self:
return []
# OrderedDicts, besides preserving the scheduling order, also prevent
# scheduling the same node more than once
found = OrderedDict()
queue = OrderedDict([(key, self[key])])
while queue:
k, v = queue.popitem(last=False)
reads = self.extract(k, readby=readby)
if set(reads).issubset(set(found.values())):
# All dependencies satisfied, schedulable
found[k] = v
else:
# Tensors belong to other traces, so they can be scheduled straight away
tensors = [i for i in reads if i.is_Tensor]
found = OrderedDict(list(found.items()) + [(i.lhs, i) for i in tensors])
# Postpone the rest until all dependening nodes got scheduled
scalars = [i for i in reads if i.is_Scalar]
queue = OrderedDict([(i.lhs, i) for i in scalars] +
[(k, v)] + list(queue.items()))
if strict is True:
found.pop(key)
return tuple(found.values())
def time_invariant(self, expr=None):
"""
Check if ``expr`` is time invariant. ``expr`` may be an expression ``e``
explicitly tracked by the FlowGraph or even a generic subexpression
of ``e``. If no ``expr`` is provided, then time invariance of the entire
FlowGraph is assessed.
"""
if expr is None:
return all(self.time_invariant(v) for v in self.values())
if any(q_timedimension(i) for i in expr.free_symbols):
return False
queue = [expr.rhs if expr.is_Equality else expr]
seen = set()
while queue:
item = queue.pop()
nodes = set()
for i in retrieve_terminals(item):
if i in seen:
# Already inspected, nothing more can be inferred
continue
elif any(isinstance(j, Dimension) and j.is_Time for j in i.free_symbols):
# Definitely not time-invariant
return False
elif i in self:
# Go on with the search
nodes.add(i)
elif isinstance(i, Dimension):
# Go on with the search, as /i/ is not a time dimension
pass
elif not i.base.function.is_TensorFunction:
# It didn't come from the outside and it's not in self, so
# cannot determine if time-invariant; assume time-varying
return False
seen.add(i)
queue.extend([self[i].rhs for i in nodes])
return True
def is_index(self, key):
"""
Return True if ``key`` is used as array index in an expression of the
FlowGraph, False otherwise.
"""
if key not in self:
return False
match = key.base.label if self[key].is_Tensor else key
for i in self.extract(key, readby=True):
for e in retrieve_indexed(i):
if any(match in idx.free_symbols for idx in e.indices):
return True
return False
def extract(self, key, readby=False):
"""
Return the list of nodes appearing in ``key.reads``, in program order
(ie, based on the order in which the nodes appear in ``self``). If
``readby is True``, then return instead the list of nodes appearing
``key.readby``, in program order.
Examples
========
Given the following sequence of operations: ::
t1 = ...
t0 = ...
u[i, j] = ... v ...
u[3, j] = ...
v = t0 + t1 + u[z, k]
t2 = ...
Assuming ``key == v`` and ``readby is False`` (as by default), return
the following list of :class:`Node` objects: ::
[t1, t0, u[i, j], u[3, j]]
If ``readby is True``, return: ::
[v, t2]
"""
if key not in self:
return []
match = self[key].reads if readby is False else self[key].readby
found = []
for k, v in self.items():
if k in match:
found.append(v)
return found
def __getitem__(self, key):
if not isinstance(key, slice):
return super(FlowGraph, self).__getitem__(key)
offset = key.step or 0
try:
start = list(self.keys()).index(key.start) + offset
except ValueError:
start = 0
try:
stop = list(self.keys()).index(key.stop) + offset
except ValueError:
stop = None
return FlowGraph(islice(list(self.items()), start, stop))
@cached_property
def unknown(self):
"""
Return all symbols appearing in self for which a node is not available.
"""
known = {v.function for v in self.values()}
reads = set([i.base.function for i in
flatten(retrieve_terminals(v.rhs) for v in self.values())])
return reads - known
@cached_property
def tensors(self):
"""
Return all occurrences of the tensors in ``self`` keyed by function.
"""
mapper = {}
for v in self.values():
handle = retrieve_indexed(v)
for i in handle:
found = mapper.setdefault(i.base.function, [])
if i not in found:
# Not using sets to preserve order
found.append(i)
return mapper
def makeit_ssa(exprs):
"""
Convert an iterable of :class:`Eq`s into Static Single Assignment (SSA) form.
"""
# Identify recurring LHSs
seen = {}
for i, e in enumerate(exprs):
seen.setdefault(e.lhs, []).append(i)
# Optimization: don't waste time reconstructing stuff if already in SSA form
if all(len(i) == 1 for i in seen.values()):
return exprs
# SSA conversion
c = 0
mapper = {}
processed = []
for i, e in enumerate(exprs):
where = seen[e.lhs]
rhs = e.rhs.xreplace(mapper)
if len(where) > 1:
needssa = e.is_Scalar or where[-1] != i
lhs = Symbol(name='ssa%d' % c, dtype=e.dtype) if needssa else e.lhs
if e.is_Increment:
# Turn AugmentedAssignment into Assignment
processed.append(e.func(lhs, mapper[e.lhs] + rhs, is_Increment=False))
else:
processed.append(e.func(lhs, rhs))
mapper[e.lhs] = lhs
c += 1
else:
processed.append(e.func(e.lhs, rhs))
return processed
|
the-stack_0_24192 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# --------------------------------------------
import logging
class NullHandler(logging.Handler):
def emit(self, record):
pass
logger = logging.getLogger('pyagentx.network')
logger.addHandler(NullHandler())
# --------------------------------------------
import socket
import time
import threading
import queue as Queue
import pyagentx
from pyagentx.pdu import PDU
class Network(threading.Thread):
def __init__(self, queue, oid_list, sethandlers):
threading.Thread.__init__(self)
self.stop = threading.Event()
self._queue = queue
self._oid_list = oid_list
self._sethandlers = sethandlers
self.session_id = 0
self.transaction_id = 0
self.debug = 1
# Data Related Variables
self.data = {}
self.data_idx = []
def _connect(self):
while True:
try:
self.socket = socket.socket( socket.AF_UNIX, socket.SOCK_STREAM )
self.socket.connect(pyagentx.SOCKET_PATH)
self.socket.settimeout(0.1)
return
except socket.error:
logger.error("Failed to connect, sleeping and retrying later")
time.sleep(2)
def new_pdu(self, type):
pdu = PDU(type)
pdu.session_id = self.session_id
pdu.transaction_id = self.transaction_id
self.transaction_id += 1
return pdu
def response_pdu(self, org_pdu):
pdu = PDU(pyagentx.AGENTX_RESPONSE_PDU)
pdu.session_id = org_pdu.session_id
pdu.transaction_id = org_pdu.transaction_id
pdu.packet_id = org_pdu.packet_id
return pdu
def send_pdu(self, pdu):
if self.debug: pdu.dump()
self.socket.send(pdu.encode())
def recv_pdu(self):
buf = self.socket.recv(1024)
if not buf: return None
pdu = PDU()
pdu.decode(buf)
if self.debug: pdu.dump()
return pdu
# =========================================
def _get_updates(self):
while True:
try:
item = self._queue.get_nowait()
logger.debug('New update')
update_oid = item['oid']
update_data = item['data']
# clear values with prefix oid
for oid in list(self.data.keys()):
if oid.startswith(update_oid):
del(self.data[oid])
# insert updated value
for row in list(update_data.values()):
oid = "%s.%s" % (update_oid, row['name'])
self.data[oid] = {'name': oid, 'type':row['type'],
'value':row['value']}
# recalculate reverse index if data changed
self.data_idx = sorted(list(self.data.keys()), key=lambda k: tuple(int(part) for part in k.split('.')))
except Queue.Empty:
break
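# For reference, the updater threads are expected to enqueue items shaped like
# the following (key names taken from the reads above, values illustrative):
# {'oid': '1.3.6.1.4.1.8072.9999.9999',
# 'data': {0: {'name': '1.0', 'type': pyagentx.TYPE_INTEGER, 'value': 42}}}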
def _get_next_oid(self, oid, endoid):
if oid in self.data:
# Exact match found
#logger.debug('get_next_oid, exact match of %s' % oid)
idx = self.data_idx.index(oid)
if idx == (len(self.data_idx)-1):
# Last Item in MIB, No match!
return None
return self.data_idx[idx+1]
else:
# No exact match, find prefix
#logger.debug('get_next_oid, no exact match of %s' % oid)
slist = oid.split('.')
elist = endoid.split('.')
for tmp_oid in self.data_idx:
tlist = tmp_oid.split('.')
for i in range(len(tlist)):
try:
sok = int(slist[i]) <= int(tlist[i])
eok = int(elist[i]) >= int(tlist[i])
if not ( sok and eok ):
break
except IndexError:
pass
if sok and eok:
return tmp_oid
return None # No match!
def start(self):
while True:
try:
self._start_network()
except socket.error:
logger.error("Network error, master disconnect?!")
def _start_network(self):
self._connect()
logger.info("==== Open PDU ====")
pdu = self.new_pdu(pyagentx.AGENTX_OPEN_PDU)
self.send_pdu(pdu)
pdu = self.recv_pdu()
self.session_id = pdu.session_id
logger.info("==== Ping PDU ====")
pdu = self.new_pdu(pyagentx.AGENTX_PING_PDU)
self.send_pdu(pdu)
pdu = self.recv_pdu()
logger.info("==== Register PDU ====")
for oid in self._oid_list:
logger.info("Registering: %s" % (oid))
pdu = self.new_pdu(pyagentx.AGENTX_REGISTER_PDU)
pdu.oid = oid
self.send_pdu(pdu)
pdu = self.recv_pdu()
logger.info("==== Waiting for PDU ====")
while True:
try:
self._get_updates()
request = self.recv_pdu()
except socket.timeout:
continue
if not request:
logger.error("Empty PDU, connection closed!")
raise socket.error
response = self.response_pdu(request)
if request.type == pyagentx.AGENTX_GET_PDU:
logger.info("Received GET PDU")
for rvalue in request.range_list:
oid = rvalue[0]
logger.debug("OID: %s" % (oid))
if oid in self.data:
logger.debug("OID Found")
response.values.append(self.data[oid])
else:
logger.debug("OID Not Found!")
response.values.append({'type':pyagentx.TYPE_NOSUCHOBJECT, 'name':rvalue[0], 'value':0})
elif request.type == pyagentx.AGENTX_GETNEXT_PDU:
logger.info("Received GET_NEXT PDU")
for rvalue in request.range_list:
oid = self._get_next_oid(rvalue[0],rvalue[1])
logger.debug("GET_NEXT: %s => %s" % (rvalue[0], oid))
if oid:
response.values.append(self.data[oid])
else:
response.values.append({'type':pyagentx.TYPE_ENDOFMIBVIEW, 'name':rvalue[0], 'value':0})
elif request.type == pyagentx.AGENTX_TESTSET_PDU:
logger.info("Received TESTSET PDU")
idx = 0
for row in request.values:
idx += 1
oid = row['name']
type_ = pyagentx.TYPE_NAME.get(row['type'], 'Unknown type')
value = row['data']
logger.info("Name: [%s] Type: [%s] Value: [%s]" % (oid, type_, value))
# Find matching sethandler
matching_oid = ''
for target_oid in self._sethandlers:
if oid.startswith(target_oid):
matching_oid = target_oid
break
if matching_oid == '':
logger.debug('TestSet request failed: not writeable #%s' % idx)
response.error = pyagentx.ERROR_NOTWRITABLE
response.error_index = idx
break
try:
self._sethandlers[matching_oid].network_test(request.session_id, request.transaction_id, oid, row['data'])
except pyagentx.SetHandlerError:
logger.debug('TestSet request failed: wrong value #%s' % idx)
response.error = pyagentx.ERROR_WRONGVALUE
response.error_index = idx
break
logger.debug('TestSet request passed')
elif request.type == pyagentx.AGENTX_COMMITSET_PDU:
for handler in list(self._sethandlers.values()):
handler.network_commit(request.session_id, request.transaction_id)
logger.info("Received COMMITSET PDU")
elif request.type == pyagentx.AGENTX_UNDOSET_PDU:
for handler in list(self._sethandlers.values()):
handler.network_undo(request.session_id, request.transaction_id)
logger.info("Received UNDOSET PDU")
elif request.type == pyagentx.AGENTX_CLEANUPSET_PDU:
for handler in list(self._sethandlers.values()):
handler.network_cleanup(request.session_id, request.transaction_id)
logger.info("Received CLEANUP PDU")
self.send_pdu(response)
|
the-stack_0_24193 | import json
import re
from pathlib import Path
from typing import Iterable, List
import jupytext
import pytest
import toml
import yaml
from nbformat import NotebookNode
from implectus.config import IMPLECTUS_CONFIG_FILES
@pytest.fixture
def tmpdir_cd(tmpdir):
"""Create a temporary directory and change to it for the duration of the test."""
with tmpdir.as_cwd():
yield tmpdir
def write_config(config_file_path, config: dict):
"""Write an Implectus config file in toml, yaml, json, or py format."""
path = Path(config_file_path)
path.parent.mkdir(exist_ok=True, parents=True)
if path.suffix == ".py":
config_src = [f'c.{k} = "{v}"' for (k, v) in config.items()]
path.write_text("\n".join(config_src))
else:
with path.open("w") as f:
if path.suffix == ".yaml" or path.suffix == ".yml":
yaml.dump(config, f)
elif path.suffix == ".json":
json.dump(config, f)
else:
toml.dump(config, f)
def nb_to_py(path, strip_metadata=True):
"""Return the contents of a notebook (or script) as py:light."""
nb = jupytext.read(path)
if strip_metadata:
nb.metadata.setdefault("jupytext", {})["notebook_metadata_filter"] = "-all"
return jupytext.writes(nb, fmt="py:light")
def _strip_trailing_slash(pattern_or_patterns):
if pattern_or_patterns is None:
return []
elif type(pattern_or_patterns) in (list, tuple):
return [_strip_trailing_slash(pattern) for pattern in pattern_or_patterns]
else:
pattern = str(pattern_or_patterns)
if pattern.endswith("/"):
pattern = pattern[:-1]
return pattern
def _path_expected(path: Path, expected):
"""Check if the given path or one of its parent dirs is in `expected`.
Also ignore config files and .ipynb_checkpoints.
"""
if _strip_trailing_slash(path) in expected:
return True
for parent in path.parents:
if str(parent) in expected:
return True
# Ignore these as well to save typing
if path.name in IMPLECTUS_CONFIG_FILES or ".ipynb_checkpoints" in path.parts:
return True
return False
def no_extra_files(path=".", expected: Iterable = None):
"""Check if the given path contains only files and dirs from `expected`.
Also ignore config files and .ipynb_checkpoints.
"""
expected = _strip_trailing_slash(expected)
unexpected_dirs = [] # type: List[str]
for file in Path(path).glob("**/*"):
if _path_expected(file, expected):
# Drop parent dir from unexpected_dirs list
for parent in file.parents:
if str(parent) in unexpected_dirs:
unexpected_dirs.remove(str(parent))
continue
if file.is_dir():
unexpected_dirs.append(str(file))
continue
print("Unexpected file", file)
return False
return len(unexpected_dirs) == 0
def _resolve_code(code_or_file, _load=lambda f: Path(f).read_text()):
"""If `code_or_file` is a filename then load it, otherwise return it as-is."""
if type(code_or_file) is str and ("\n" in code_or_file or len(code_or_file) > 256):
# Probably code
return code_or_file
if type(code_or_file) is str and code_or_file.endswith(".py"):
# Probably filename
assert Path(code_or_file).is_file()
return _load(code_or_file)
if Path(code_or_file).is_file():
# Probably filename
return _load(code_or_file)
return code_or_file
def _split_header(code):
"""Extract the Implectus header from a generated code string."""
lines = code.splitlines()
assert len(lines) > 3
assert lines[0].startswith("# # Autogenerated"), "Missing header"
assert lines[1].startswith("# This file was"), "Missing header"
assert lines[2].startswith("# edit `"), "Missing header"
assert not lines[3], "Missing gap"
return "\n".join(lines[:3]), "\n".join(lines[4:])
def _remove_header_version(header):
"""Remove the version number from an Implectus header."""
return re.sub(r"Implectus [\d\.]+?\. ", "Implectus. ", header)
def code_equal(actual, expected, assert_=True, _resolve=_resolve_code):
"""Check whether generated code files or strings are equivalent.
Only works when both are py:light for now.
"""
actual = _resolve(actual)
expected = _resolve(expected)
actual_header, actual_code = _split_header(actual)
expected_header, expected_code = _split_header(expected)
actual_header = _remove_header_version(actual_header)
expected_header = _remove_header_version(expected_header)
if assert_ is True:
assert actual_header == expected_header
assert actual_code == expected_code
if assert_ is False:
assert actual_header != expected_header or actual_code != expected_code
return actual_header == expected_header and actual_code == expected_code
def _resolve_doc(doc_or_file):
"""If `code_or_file` is a filename then load it as py:light, otherwise return it."""
return _resolve_code(doc_or_file, nb_to_py)
def doc_equal(*args, **kwargs):
"""Check whether generated code files or strings are equivalent.
Arguments must be files or py:light strings.
"""
return code_equal(*args, **kwargs, _resolve=_resolve_doc) # type: ignore
def create_nb(cells=None, metadata=None, nbformat=4, nbformat_minor=4):
return NotebookNode(
metadata=metadata or {},
nbformat=nbformat,
nbformat_minor=nbformat_minor,
cells=cells or [],
)
|
the-stack_0_24196 | import math
class BevelGearAttributes:
def __init__(self, teeth, module, shaftAngle, faceWidth):
self.teeth = teeth
self.module = module
self.shaftAngle = shaftAngle
self.faceWidth = faceWidth
@classmethod
def createGearPair(cls, gearTeeth, pinionTeeth, module, shaftAngle, faceWidth):
gear = cls(gearTeeth, module, shaftAngle, faceWidth)
pinion = cls(pinionTeeth, module, shaftAngle, faceWidth)
referenceDiameterPinion = pinionTeeth * module
referenceDiameterGear = gearTeeth * module
referenceConeAnglePinion = math.atan(math.sin(math.radians(shaftAngle)) / (gearTeeth/pinionTeeth + math.cos(math.radians(shaftAngle))))
referenceConeAngleGear = shaftAngle - math.degrees(referenceConeAnglePinion)
# Outer cone distance R = d_gear / (2 * sin(delta_gear))
coneDistance = referenceDiameterGear / (2 * math.sin(math.radians(referenceConeAngleGear)))
if (faceWidth > coneDistance / 3):
print("Warning: face width exceeds one third of the cone distance, outside the usual design limit.")
# Ratio z_gear*cos(delta_pinion) / (z_pinion*cos(delta_gear)) used to split the addendum
_temp = (gearTeeth * math.cos(math.radians(referenceConeAnglePinion))) / (pinionTeeth * math.cos(math.radians(referenceConeAngleGear)))
addendumGear = 0.54*module + 0.46*module / _temp
addendumPinion = 2*module - addendumGear
dedendumGear = 2.188 * module - addendumGear
dedendumPinion = 2.188 * module - addendumPinion
return (gear, pinion)
|
the-stack_0_24197 | #!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import glob
import os
import shutil
from os import path
from setuptools import find_packages, setup
from typing import List
import torch
from torch.utils.cpp_extension import CUDA_HOME, CppExtension, CUDAExtension
torch_ver = [int(x) for x in torch.__version__.split(".")[:2]]
assert torch_ver >= [1, 3], "Requires PyTorch >= 1.3"
def get_version():
init_py_path = path.join(path.abspath(path.dirname(__file__)), "mydl", "__init__.py")
init_py = open(init_py_path, "r").readlines()
version_line = [l.strip() for l in init_py if l.startswith("__version__")][0]
version = version_line.split("=")[-1].strip().strip("'\"")
# The following is used to build release packages.
# Users should never use it.
suffix = os.getenv("D2_VERSION_SUFFIX", "")
version = version + suffix
if os.getenv("BUILD_NIGHTLY", "0") == "1":
from datetime import datetime
date_str = datetime.today().strftime("%y%m%d")
version = version + ".dev" + date_str
new_init_py = [l for l in init_py if not l.startswith("__version__")]
new_init_py.append('__version__ = "{}"\n'.format(version))
with open(init_py_path, "w") as f:
f.write("".join(new_init_py))
return version
def get_extensions():
this_dir = path.dirname(path.abspath(__file__))
extensions_dir = path.join(this_dir, "mydl", "layers", "csrc")
main_source = path.join(extensions_dir, "vision.cpp")
sources = glob.glob(path.join(extensions_dir, "**", "*.cpp"))
source_cuda = glob.glob(path.join(extensions_dir, "**", "*.cu")) + glob.glob(
path.join(extensions_dir, "*.cu")
)
sources = [main_source] + sources
extension = CppExtension
extra_compile_args = {"cxx": []}
define_macros = []
if (
torch.cuda.is_available() and CUDA_HOME is not None and os.path.isdir(CUDA_HOME)
) or os.getenv("FORCE_CUDA", "0") == "1":
extension = CUDAExtension
sources += source_cuda
define_macros += [("WITH_CUDA", None)]
extra_compile_args["nvcc"] = [
"-DCUDA_HAS_FP16=1",
"-D__CUDA_NO_HALF_OPERATORS__",
"-D__CUDA_NO_HALF_CONVERSIONS__",
"-D__CUDA_NO_HALF2_OPERATORS__",
]
# It's better if pytorch can do this by default ..
CC = os.environ.get("CC", None)
if CC is not None:
extra_compile_args["nvcc"].append("-ccbin={}".format(CC))
include_dirs = [extensions_dir]
ext_modules = [
extension(
"mydl._C",
sources,
include_dirs=include_dirs,
define_macros=define_macros,
extra_compile_args=extra_compile_args,
)
]
return ext_modules
def get_model_zoo_configs() -> List[str]:
"""
Return a list of configs to include in package for model zoo. Copy over these configs inside
mydl/model_zoo.
"""
# Use absolute paths while symlinking.
source_configs_dir = path.join(path.dirname(path.realpath(__file__)), "configs")
destination = path.join(
path.dirname(path.realpath(__file__)), "mydl", "model_zoo", "configs"
)
# Symlink the config directory inside package to have a cleaner pip install.
# Remove stale symlink/directory from a previous build.
if path.exists(source_configs_dir):
if path.islink(destination):
os.unlink(destination)
elif path.isdir(destination):
shutil.rmtree(destination)
if not path.exists(destination):
try:
os.symlink(source_configs_dir, destination)
except OSError:
# Fall back to copying if symlink fails: ex. on Windows.
shutil.copytree(source_configs_dir, destination)
config_paths = glob.glob("configs/**/*.yaml", recursive=True)
return config_paths
setup(
name="mydl",
version=get_version(),
author="FAIR",
url="https://github.com/facebookresearch/mydl",
description="Detectron2 is FAIR's next-generation research "
"platform for object detection and segmentation.",
packages=find_packages(exclude=("configs", "tests")),
package_data={"mydl.model_zoo": get_model_zoo_configs()},
python_requires=">=3.6",
install_requires=[
"termcolor>=1.1",
"Pillow", # you can also use pillow-simd for better performance
"yacs>=0.1.6",
"tabulate",
"cloudpickle",
"matplotlib",
"tqdm>4.29.0",
"tensorboard",
"fvcore",
"future", # used by caffe2
"pydot", # used to save caffe2 SVGs
],
extras_require={
"all": ["shapely", "psutil"],
"dev": ["flake8", "isort", "black==19.3b0", "flake8-bugbear", "flake8-comprehensions"],
},
ext_modules=get_extensions(),
cmdclass={"build_ext": torch.utils.cpp_extension.BuildExtension},
)
|
the-stack_0_24198 | from grafana_import.constants import (PKG_NAME, PKG_VERSION)
from setuptools import setup, find_packages
# Global variables
name = PKG_NAME
version = PKG_VERSION
requires = [
'grafana-api',
'jinja2'
]
setup(
name=name,
version=version,
description='A Python-based application to import Grafana dashboards using the Grafana API and grafana-api python interface',
long_description_content_type='text/markdown',
long_description=open('README.md', 'r').read(),
author="author",
author_email="[email protected]",
url="https://github.com/peekjef72/grafana-import-tool",
entry_points={
'console_scripts': [
'grafana-import = grafana_import.cli:main'
]
},
packages=find_packages(),
install_requires=requires,
package_data={'': ['conf/*']},
)
|
the-stack_0_24199 | """
Histogram Equalization Class
"""
import numpy
import math
import copy
import matplotlib.pyplot as plt
class HistogramEqualization:
"""Implements Histogram Equalization"""
imgName = "IMG" #ImageName
colorDepth = 8 #Intensity represented by 8 bits
def __init__(self, pixels, colorDepth=8, imgName="IMG"):
self.pixels = pixels
self.colorDepth = colorDepth
self.imgName = imgName
def evaluate(self):
# Assert pixels is a matrix
assert(type(self.pixels) == numpy.ndarray)
height, width, _ = self.pixels.shape
img = self.pixels.reshape(height*width)
L = 2**self.colorDepth
# Assert color depth is coherent
assert(L > numpy.amax(img))
# Calculation of intensity frequencies
frequency = numpy.zeros(L)
for pixel in img:
frequency[pixel] += 1/(width*height)
# Print histogram of original image
fig_name = self.imgName + "_hist"
self.printHistogram(frequency,fig_name)
# Creation of intensity transformation function
eq_transformation_func = numpy.zeros(L)
# s_k = (L-1) * sum_{j<=k} p_j: scaled cumulative histogram (CDF), rounded below
cumulative = 0.0
for intensity in range(L):
cumulative += (L - 1) * frequency[intensity]
eq_transformation_func[intensity] = cumulative
eq_transformation_func = numpy.around(eq_transformation_func) # Round new intensity values
eq_transformation_func = eq_transformation_func.astype(int) # Transform to integer
# Generation of equalized image from the transformation function
eq_img = eq_transformation_func[img]
# Calculation of equalized intesity frequencies
frequency_eq = numpy.zeros(L)
for pixel in eq_img:
frequency_eq[pixel] += 1/(width*height)
# Print histogram of equalized image
fig_name = self.imgName + "_hist_eq"
self.printHistogram(frequency_eq,fig_name)
result = numpy.array(eq_img).reshape((height, width, 1))
return result
@staticmethod
def printHistogram(frequency, figName):
f = plt.figure()
plt.bar(range(len(frequency)),frequency)
plt.xlabel("Intensity")
plt.ylabel("Frequency")
figName = figName + ".pdf"
f.savefig(figName, bbox_inches='tight')
|
the-stack_0_24202 | import os
import sys
import argparse
import pickle
import week4.retrieval as retrieval
import week4.evaluation as evaluation
import week4.utils as utils
def parse_args(args=sys.argv[2:]):
parser = argparse.ArgumentParser(description='CBIR: Content Based Image Retrieval. MCV-M1-Project, Team 3')
parser.add_argument('bbdd_path', type=str,
help='absolute/relative path to the bbdd dataset')
parser.add_argument('query_path', type=str,
help='absolute/relative path to the query dataset')
parser.add_argument('--test', action='store_true',
help='using a test dataset, so no groundtruth is provided')
parser.add_argument('--map_k', type=lambda s: [int(item) for item in s.split(',')], default=[5],
help='retrieve K number/s of images')
parser.add_argument('--remove_bg', action='store_true',
help='remove background from images in order to extract paintings')
parser.add_argument('--max_paintings', type=int, default=3,
help='maximum number of paintings to extract from an image after removing the background')
parser.add_argument('--remove_text', action='store_true',
help='remove text bounding boxes from images')
parser.add_argument('--remove_noise', action='store_true',
help='remove noise from noisy images')
parser.add_argument('--use_color', action='store_true',
help='use color descriptor')
parser.add_argument('--use_texture', action='store_true',
help='use texture descriptor/s')
parser.add_argument('--use_text', action='store_true',
help='use text descriptor')
parser.add_argument('--color_descriptor', type=str, default='rgb_3d_blocks',
choices=['rgb_3d_blocks', 'rgb_3d_multiresolution'],
help='color descriptor used')
parser.add_argument('--texture_descriptor', type=lambda s: [item for item in s.split(',')], default='dct_blocks',
# choices=['dct_blocks', 'dct_multiresolution', 'lbp_blocks', 'lbp_multiresolution',
# 'hog', 'wavelet', 'hog_blocks', 'hog_multiresolution', 'wavelet_blocks', 'wavelet_multiresolution'],
help='texture descriptor used')
parser.add_argument('--color_weight', type=float, default=0.33,
help='weight for the color descriptor')
parser.add_argument('--texture_weight', type=lambda s: [float(item) for item in s.split(',')], default=[0.33],
help='weight for the texture descriptor')
parser.add_argument('--text_weight', type=float, default=0.0,
help='weight for the text descriptor')
parser.add_argument('--color_metric', type=str, default='hellinger',
choices=['hellinger', 'intersection', 'chi-squared', 'correlation'],
help='distance metric to compare images')
parser.add_argument('--texture_metric', type=lambda s: [item for item in s.split(',')], default=['correlation'],
# choices=['hellinger', 'intersection', 'chi-squared', 'correlation'],
help='distance metric to compare images')
parser.add_argument('--text_metric', type=str, default='Levensthein',
choices=['Levensthein', 'Hamming', 'Damerau'],
help='distance metric to compare images')
parser.add_argument('--number_blocks', type=int, default=16,
help='number of blocks in which the image is divided if using the block-based histograms')
parser.add_argument('--multiresolution_blocks', type=lambda s: [int(item) for item in s.split(',')], default=[1,4,8,16],
help='list of numbers of blocks in which the image is divided if using the multiresolution histograms')
parser.add_argument('--verbose', action='store_true',
help='increase output verbosity: show k similar images per each query image')
parser.add_argument('--use_sift', action='store_true',
help='use SIFT to predict images')
parser.add_argument('--use_orb', action='store_true',
help='use ORB to predict images')
parser.add_argument('--use_surf', action='store_true',
help='use SURF to predict images')
parser.add_argument('--cluster_images', action='store_true',
help='Cluster Images using diff Knn')
args = parser.parse_args(args)
return args
def args_to_params(args):
results_path = os.path.join(args.query_path, 'results')
if not os.path.exists(results_path):
os.makedirs(results_path)
params = {
'lists': None, 'paths': None, 'features': None, 'color': None, 'texture': None, 'text': None, 'remove': None
}
params['paths'] = {
'bbdd': args.bbdd_path,
'query': args.query_path,
'results': results_path
}
if args.use_color:
params['color'] = {
'descriptor': args.color_descriptor,
'weight': args.color_weight,
'metric': args.color_metric
}
if args.use_texture:
params['texture'] = {
'descriptor': args.texture_descriptor,
'weight': args.texture_weight,
'metric': args.texture_metric
}
if args.use_text:
params['text'] = {
'weight': args.text_weight,
'metric': args.text_metric
}
if True in (args.remove_bg, args.remove_text, args.remove_noise):
params['remove'] = {
'bg': args.remove_bg,
'max_paintings': args.max_paintings,
'text': args.remove_text,
'noise': args.remove_noise
}
if True in (args.use_sift, args.use_orb, args.use_surf):
params['features'] = {
'sift': args.use_sift,
'orb': args.use_orb,
'surf': args.use_surf
}
if not True in (args.use_color, args.use_texture, args.use_text,
args.use_sift, args.use_orb, args.use_surf):
sys.exit('[ERROR] No descriptor method specified')
return params
def lists_to_params(params, bbdd_list, query_list):
params['lists'] = {
'bbdd': bbdd_list,
'query': query_list
}
return params
def run():
args = parse_args()
params = args_to_params(args)
print(params)
k = args.map_k
bbdd_list = utils.path_to_list(params['paths']['bbdd'], extension='jpg')
query_list = utils.path_to_list(params['paths']['query'], extension='jpg')
params = lists_to_params(params, bbdd_list, query_list)
paintings_predicted_list = retrieval.get_k_images(params, k=max(k))
utils.save_pickle(os.path.join(params['paths']['results'], 'result.pkl'), paintings_predicted_list)
if not args.test:
evaluation.evaluate(params, k, verbose=args.verbose)
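
# Hypothetical invocation sketch (paths and option values are placeholders, not from the original
# project). Note that parse_args() reads sys.argv[2:], so a dispatcher is assumed to consume
# sys.argv[1] before run() is called, e.g.:
#
#   python main.py w5 ../data/bbdd ../data/qsd1 --map_k 1,5 --use_color --use_texture \
#       --texture_descriptor dct_blocks --remove_bg --remove_text --remove_noise --verbose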
|
the-stack_0_24203 | """
Implements means of exchanging user ID token with temporary access and secret key.
"""
import xml.etree.ElementTree as ET
import requests
from ..exceptions import *
from ..interfaces.providers import *
class Authorize(IProvider):
action = "AssumeRoleWithWebIdentity"
version = "2011-06-15"
namespace = '{https://sts.amazonaws.com/doc/2011-06-15/}'
def __init__(self, config):
self.identity_token = None
self.role_arn = None
self.token_ttl = None
self.role_session_name = None
super(Authorize, self).__init__(config)
def __parse_error(self, response):
"""
Parses the AWS STS xml-based error response, and throws appropriate exception.
:type response: string
:param response: error xml
:rtype : CloudAuthzBaseException (or any of its derived classes)
:return: a CloudAuthz exception w.r.t. AWS STS error code.
"""
root = ET.fromstring(response)
error = root.find('{}Error'.format(self.namespace))
code = error.find('{}Code'.format(self.namespace)).text
message = error.find('{}Message'.format(self.namespace)).text
if code == 'ExpiredTokenException':
return ExpiredTokenException(message)
elif code == 'AccessDenied':
return AccessDeniedException(message)
elif code == 'InvalidIdentityToken':
return InvalidTokenException(message)
else:
return CloudAuthzBaseException(message)
def expand_config(self, config):
"""
Asserts if the config dictionary contains all the
keys necessary for the AWS authorization flow; and
sets all the necessary and optional parameters.
:type config: dict
:param config: a dictionary containing all the
necessary and optional parameters for AWS authorization
        flow. The expected keys are the following:
        - id_token: an OpenID Connect identity
        token represented in JSON Web Token (JWT) format.
        - role_arn: an Amazon Resource Name (ARN)
        of a role to be assumed.
        - token_ttl: an integer specifying the
        session duration in seconds; credentials will
        expire after this period. Valid values range
        from 900 seconds to 3600 seconds.
- role_session_name: a name assigned to the
session, consisting of lower and upper-case
letters with no space.
:return: all the optional and required parameters.
"""
if 'id_token' not in config:
raise KeyError("`id_token` is not provided.")
self.identity_token = config['id_token']
if 'role_arn' not in config:
raise KeyError("`role_arn` is not provided.")
self.role_arn = config['role_arn']
self.token_ttl = config.get('token_ttl', 900)
self.role_session_name = config.get('role_session_name', 'cloudauthz')
def get_credentials(self):
"""
Assumes an AWS Role and returns credentials accordingly.
:rtype : dict
:return: a dictionary containing credentials to access the resources
available to the assumed role. Credentials are:
- Access Key ID
- Secret Access Key
- Session Token
"""
url = "https://sts.amazonaws.com/?" \
"DurationSeconds={}&" \
"Action={}&Version={}&" \
"RoleSessionName={}&" \
"RoleArn={}&" \
"WebIdentityToken={}"\
.format(self.token_ttl,
self.action,
self.version,
self.role_session_name,
self.role_arn,
self.identity_token)
response = requests.get(url)
if response.ok:
root = ET.fromstring(response.content)
rtv = {}
role_assume_result = root.find('{}AssumeRoleWithWebIdentityResult'.format(self.namespace))
credentials = role_assume_result.find('{}Credentials'.format(self.namespace))
for attribute in credentials:
rtv[attribute.tag.replace(self.namespace, '')] = attribute.text
return rtv
else:
raise self.__parse_error(response.content)
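
# Illustrative usage sketch, assuming the IProvider base class routes `config` through
# expand_config(); the ARN and token values below are placeholders, not real resources.
#
#   config = {
#       "id_token": "<OIDC JWT>",
#       "role_arn": "arn:aws:iam::123456789012:role/example-role",
#       "token_ttl": 900,
#       "role_session_name": "cloudauthz",
#   }
#   credentials = Authorize(config).get_credentials()
#   # credentials is a dict with keys such as AccessKeyId, SecretAccessKey and SessionToken.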
|
the-stack_0_24204 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import wagtail.wagtailcore.fields
import wagtail.wagtailcore.blocks
import wagtail.wagtailimages.blocks
class Migration(migrations.Migration):
dependencies = [
('wagtailcore', '0023_alter_page_revision_on_delete_behaviour'),
('home', '0008_auto_20171121_0042'),
]
operations = [
migrations.CreateModel(
name='HomeStream',
fields=[
('page_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
('home_body', wagtail.wagtailcore.fields.StreamField([(b'heading', wagtail.wagtailcore.blocks.CharBlock(classname=b'full title')), (b'paragraph', wagtail.wagtailcore.blocks.RichTextBlock()), (b'image', wagtail.wagtailimages.blocks.ImageChooserBlock())])),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
]
|
the-stack_0_24205 | import os
import re
import string
import urllib.parse
from collections import defaultdict
import matplotlib.pyplot as plt
import numpy as np
import requests
from bs4 import BeautifulSoup
import gensim
from keras.layers import (Activation, Conv1D, Dense, Dropout, Embedding,
GlobalMaxPooling1D)
from keras.models import Sequential
from keras.preprocessing import sequence
class WebProcessor:
def __init__(self, query, tags=None, model=None, sim_threshold=0.4):
self.query = query
if tags:
self.tags = tags
else:
self.tags = [
'title', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'p', 'ul', 'ol',
'table'
]
if model:
self.model = model
else:
            path = r'D:\Word2Vec\GoogleNews-vectors-negative300.bin'
if os.path.isfile(path):
self.model = gensim.models.KeyedVectors.load_word2vec_format(
path, binary=True)
print('Model loaded.')
else:
raise FileNotFoundError(
f"No such file: '{path}'\n"
"Pre-trained word and phrase vectors not found. "
"You can download the file at "
"https://code.google.com/archive/p/word2vec/.")
self.sim_threshold = sim_threshold
def crawl_website(self, url):
self.url = url
try:
r = requests.get(url)
r.raise_for_status()
except Exception:
return None
self.soup = BeautifulSoup(r.text, 'html.parser')
print('Web page loaded..')
def extract_by_tags(self):
tag_text = defaultdict(str)
for tag in self.tags:
for elem in self.soup.find_all(tag):
                stripped = elem.text.translate(
                    str.maketrans(dict.fromkeys(string.punctuation)))
stripped = re.sub(r'\s+', ' ', stripped).strip()
if stripped:
tag_text[tag] += f' {stripped.lower()}'.strip()
return dict(tag_text)
def get_hyperlinks(self):
self.links = {}
for link in self.soup.find_all('a'):
url = urllib.parse.urljoin(self.url, link.get('href'))
self.links[url] = ' '.join(
[self.links.get(url, '') + link.text.strip()])
return self.links
def get_tf(self, text):
if not text:
return 0
words = text.split()
num_rel_words = np.zeros(len(self.query))
for word in words:
for idx, topic in enumerate(self.query):
try:
sim = self.model.similarity(topic, word)
# sim = np.random.random()
if sim >= self.sim_threshold:
num_rel_words[idx] += 1
except KeyError:
pass
return np.max(num_rel_words) / len(text)
def get_tfidf(self, tag_text):
tf = np.empty(len(self.tags))
for idx, tag in enumerate(self.tags):
try:
tf[idx] = self.get_tf(tag_text[tag])
except KeyError:
tf[idx] = 0
return tf
def main():
wp = WebProcessor(query=['education', 'university', 'college'])
wp.crawl_website(url='https://illinois.edu/')
links = wp.get_hyperlinks()
tag_text = wp.extract_by_tags()
tf = wp.get_tfidf(tag_text)
print(tf)
if __name__ == '__main__':
main()
|
the-stack_0_24206 | # coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import multiprocessing
import os
import random
import time
import collections
import h5py
import utils
from tokenization import ElectraTokenizer
features = collections.namedtuple("features", ["input_ids", "input_mask", "segment_ids"])
class ExampleBuilder(object):
"""Given a stream of input text, creates pretraining examples."""
def __init__(self, tokenizer, max_length):
self._tokenizer = tokenizer
self._current_sentences = []
self._current_length = 0
self._max_length = max_length
self._target_length = max_length
def add_line(self, line):
"""Adds a line of text to the current example being built."""
line = line.strip().replace("\n", " ")
if (not line) and self._current_length != 0: # empty lines separate docs
return self._create_example()
bert_tokens = self._tokenizer.tokenize(line)
bert_tokids = self._tokenizer.convert_tokens_to_ids(bert_tokens)
self._current_sentences.append(bert_tokids)
self._current_length += len(bert_tokids)
if self._current_length >= self._target_length:
return self._create_example()
return None
def _create_example(self):
"""Creates a pre-training example from the current list of sentences."""
# small chance to only have one segment as in classification tasks
if random.random() < 0.1:
first_segment_target_length = 100000
else:
# -3 due to not yet having [CLS]/[SEP] tokens in the input text
first_segment_target_length = (self._target_length - 3) // 2
first_segment = []
second_segment = []
for sentence in self._current_sentences:
# the sentence goes to the first segment if (1) the first segment is
# empty, (2) the sentence doesn't put the first segment over length or
# (3) 50% of the time when it does put the first segment over length
if (len(first_segment) == 0 or
len(first_segment) + len(sentence) < first_segment_target_length or
(len(second_segment) == 0 and
len(first_segment) < first_segment_target_length and
random.random() < 0.5)):
first_segment += sentence
else:
second_segment += sentence
# trim to max_length while accounting for not-yet-added [CLS]/[SEP] tokens
first_segment = first_segment[:self._max_length - 2]
second_segment = second_segment[:max(0, self._max_length -
len(first_segment) - 3)]
# prepare to start building the next example
self._current_sentences = []
self._current_length = 0
# small chance for random-length instead of max_length-length example
if random.random() < 0.05:
self._target_length = random.randint(5, self._max_length)
else:
self._target_length = self._max_length
return self._make_example(first_segment, second_segment)
def _make_example(self, first_segment, second_segment):
vocab = self._tokenizer.vocab
input_ids = [vocab["[CLS]"]] + first_segment + [vocab["[SEP]"]]
segment_ids = [0] * len(input_ids)
if second_segment:
input_ids += second_segment + [vocab["[SEP]"]]
segment_ids += [1] * (len(second_segment) + 1)
input_mask = [1] * len(input_ids)
input_ids += [0] * (self._max_length - len(input_ids))
input_mask += [0] * (self._max_length - len(input_mask))
segment_ids += [0] * (self._max_length - len(segment_ids))
example = features(
input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids
)
return example
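
# Illustrative sketch of how ExampleBuilder is driven (the vocab and corpus paths are
# placeholders; the same pattern is used by ExampleWriter.write_examples below):
#
#   tokenizer = ElectraTokenizer(vocab_file="vocab.txt", do_lower_case=True)
#   builder = ExampleBuilder(tokenizer, max_length=128)
#   for line in open("corpus.txt"):
#       example = builder.add_line(line)
#       if example is not None:
#           # example.input_ids / input_mask / segment_ids are already padded to max_length
#           ...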
def features_append(examples, example):
examples["input_ids"].append(example.input_ids)
examples["input_mask"].append(example.input_mask)
examples["segment_ids"].append(example.segment_ids)
class ExampleWriter(object):
"""Writes pre-training examples to disk."""
def __init__(self, job_id, vocab_file, output_dir, max_seq_length,
num_jobs, blanks_separate_docs, do_lower_case,
num_out_files=1000, chunk_size=10000):
self._blanks_separate_docs = blanks_separate_docs
tokenizer = ElectraTokenizer(
vocab_file=vocab_file,
do_lower_case=do_lower_case)
self._example_builder = ExampleBuilder(tokenizer, max_seq_length)
self.examples = []
self._writers = []
for i in range(num_out_files):
if i % num_jobs == job_id:
output_fname = os.path.join(
output_dir, "pretrain_data-{:}-of-{:}.hdf5".format(
i, num_out_files))
self.examples.append({"input_ids": [], "input_mask": [], "segment_ids": []})
self._writers.append(h5py.File(output_fname, "w"))
self.chunk_size = chunk_size
self.max_seq_length = max_seq_length
self.n_written = 0
def write_examples(self, input_file):
"""Writes out examples from the provided input file."""
with open(input_file, "r") as f:
for line in f:
line = line.strip()
if line or self._blanks_separate_docs:
example = self._example_builder.add_line(line)
if example:
features_append(self.examples[self.n_written % len(self._writers)], example)
write_hdf5(
self._writers[self.n_written % len(self._writers)],
self.examples[self.n_written % len(self._writers)],
chunk_size=self.chunk_size, max_seq_length=self.max_seq_length)
self.n_written += 1
example = self._example_builder.add_line("")
if example:
features_append(self.examples[self.n_written % len(self._writers)], example)
write_hdf5(
self._writers[self.n_written % len(self._writers)],
self.examples[self.n_written % len(self._writers)],
chunk_size=0, max_seq_length=self.max_seq_length)
self.n_written += 1
def finish(self):
for examples, writer in zip(self.examples, self._writers):
write_hdf5(writer, examples, chunk_size=0, max_seq_length=self.max_seq_length)
writer.flush()
writer.close()
def write_hdf5(writer, examples, chunk_size, max_seq_length):
if len(examples["input_ids"]) < chunk_size:
return
if "input_ids" not in writer:
writer.create_dataset(
"input_ids", data=examples["input_ids"], dtype='i4', compression='gzip',
maxshape=(None, max_seq_length))
writer.create_dataset(
"input_mask", data=examples["input_mask"], dtype='i1', compression='gzip',
maxshape=(None, max_seq_length))
writer.create_dataset(
"segment_ids", data=examples["segment_ids"], dtype='i1', compression='gzip',
maxshape=(None, max_seq_length))
else:
for key in ["input_ids", "input_mask", "segment_ids"]:
writer[key].resize(writer[key].shape[0] + len(examples[key]), axis=0)
writer[key][-len(examples[key]):] = examples[key]
writer.flush()
for key in ["input_ids", "input_mask", "segment_ids"]:
examples[key] = []
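
# Note on the buffering scheme above: write_hdf5 is a no-op until `chunk_size` examples have been
# queued; it then either creates the resizable gzip datasets or extends them along axis 0, and
# finally clears the in-memory buffers. Calling it with chunk_size=0 (as ExampleWriter.finish
# does) flushes whatever remains.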
def write_examples(job_id, args):
"""A single process creating and writing out pre-processed examples."""
def log(*args):
msg = " ".join(map(str, args))
print("Job {}:".format(job_id), msg)
log("Creating example writer")
example_writer = ExampleWriter(
job_id=job_id,
vocab_file=args.vocab_file,
output_dir=args.output_dir,
max_seq_length=args.max_seq_length,
num_jobs=args.num_processes,
blanks_separate_docs=args.blanks_separate_docs,
do_lower_case=args.do_lower_case,
num_out_files=args.num_out_files,
chunk_size=args.chunk_size
)
log("Writing hdf5 examples")
fnames = sorted(os.listdir(args.corpus_dir))
fnames = [f for (i, f) in enumerate(fnames)
if i % args.num_processes == job_id]
random.shuffle(fnames)
start_time = time.time()
for file_no, fname in enumerate(fnames):
if file_no > 0:
elapsed = time.time() - start_time
log("processed {:}/{:} files ({:.1f}%), ELAPSED: {:}s, ETA: {:}s, "
"{:} examples written".format(
file_no, len(fnames), 100.0 * file_no / len(fnames), int(elapsed),
int((len(fnames) - file_no) / (file_no / elapsed)),
example_writer.n_written))
example_writer.write_examples(os.path.join(args.corpus_dir, fname))
example_writer.finish()
log("Done!")
# python build_pretraining_dataset --corpus-dir
def main():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("--corpus-dir", required=True,
help="Location of pre-training text files.")
parser.add_argument("--vocab-file", required=True,
help="Location of vocabulary file.")
parser.add_argument("--output-dir", required=True,
help="Where to write out the hdf5 files.")
parser.add_argument("--max-seq-length", default=128, type=int,
help="Number of tokens per example.")
parser.add_argument("--num-processes", default=1, type=int,
help="Parallelize across multiple processes.")
parser.add_argument("--blanks-separate-docs", default=True, type=bool,
help="Whether blank lines indicate document boundaries.")
parser.add_argument("--do-lower-case", dest='do_lower_case',
action='store_true', help="Lower case input text.")
parser.add_argument("--no-lower-case", dest='do_lower_case',
action='store_false', help="Don't lower case input text.")
parser.add_argument("--num-out-files", default=1000, type=int,
help="Number of output files.")
parser.add_argument("--seed", default=1314, type=int)
parser.add_argument("--chunk_size", default=1000, type=int)
parser.set_defaults(do_lower_case=True)
args = parser.parse_args()
random.seed(args.seed)
utils.rmkdir(args.output_dir)
if args.num_processes == 1:
write_examples(0, args)
else:
jobs = []
for i in range(args.num_processes):
job = multiprocessing.Process(target=write_examples, args=(i, args))
jobs.append(job)
job.start()
for job in jobs:
job.join()
if __name__ == "__main__":
main()
|
the-stack_0_24208 | from gensim.models import KeyedVectors
import numpy as np
import pandas as pd
import keras
import csv
from sklearn.model_selection import train_test_split
# neural network with keras
from numpy import loadtxt
from keras.models import Sequential
from keras.layers import Dense,BatchNormalization,Dropout
import pickle
from sklearn.metrics import accuracy_score
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import f1_score
from sklearn.metrics import roc_auc_score,roc_curve
from sklearn.metrics import confusion_matrix
import matplotlib.pyplot as plt
train_pos_u = np.load('dummy_data/train_pos_u.npy')
train_neg_u = np.load('dummy_data/train_neg_u.npy')
test_pos_u = np.load('dummy_data/test_pos_u.npy')
test_neg_u = np.load('dummy_data/test_neg_u.npy')
train_pos_v = np.load('dummy_data/train_pos_v.npy')
train_neg_v = np.load('dummy_data/train_neg_v.npy')
test_pos_v = np.load('dummy_data/test_pos_v.npy')
test_pos_v=test_pos_v.squeeze()
test_neg_v = np.load('dummy_data/test_neg_v.npy')
node_num2str=pd.read_csv('dummy_data/node_dict.csv')
dict_num2str=dict(zip(node_num2str.int_names, node_num2str.nodes))
sdne_file=open("dummy_data/embed_trainonly_2kepochs.pickle","rb")
sdne_embed=pickle.load(sdne_file)
sdne_file.close()
dim_emb=list(sdne_embed.values())[0].shape[0]
def get_samples(pos_u,pos_v,sdne_embed,dict_num2str):
samples=np.zeros((pos_u.shape[0],dim_emb))
i=0
for x in range(pos_u.shape[0]):
try:
samples[i] = np.multiply(sdne_embed[str(int(pos_u[x]))],sdne_embed[str(int(pos_v[x]))])
i+=1
        except:
            print("One of the nodes in the connection doesn't have an embedding")
return samples
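
# Note: get_samples represents each candidate edge as the element-wise (Hadamard) product of the
# two endpoint embeddings, e.g. np.multiply(sdne_embed[str(int(u))], sdne_embed[str(int(v))]);
# pairs whose endpoints lack an embedding are left as zero rows.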
train_input_pos = get_samples(train_pos_u,train_pos_v,sdne_embed,dict_num2str)
train_pos_y = np.ones((train_pos_u.shape[0],1))
train_input_neg = get_samples(train_neg_u,train_neg_v,sdne_embed,dict_num2str)
train_neg_y = np.zeros((train_neg_u.shape[0],1))
test_input_pos = get_samples(test_pos_u,test_pos_v,sdne_embed,dict_num2str)
test_pos_y = np.ones((test_pos_u.shape[0],1))
test_input_neg = get_samples(test_neg_u,test_neg_v,sdne_embed,dict_num2str)
test_neg_y = np.zeros((test_neg_u.shape[0],1))
X_final = np.concatenate((test_input_pos,test_input_neg))
y_final=np.concatenate((test_pos_y,test_neg_y))
X = np.concatenate((train_input_pos,train_input_neg))
y=np.concatenate((train_pos_y,train_neg_y))
X_train,X_test,y_train,y_test=train_test_split(X,y,test_size=0.15, random_state=3)
model = keras.Sequential(
[
Dense(32, input_dim=128, activation="relu", name="layer1"),
BatchNormalization(),
Dropout(0.2),
Dense(8, activation="relu", name="layer2"),
BatchNormalization(),
Dropout(0.1),
Dense(1, activation="sigmoid", name="output")
]
)
opt = keras.optimizers.Adam(lr=0.001)
model.compile(loss='binary_crossentropy', optimizer=opt, metrics=['accuracy'])
#model_checkpoint=keras.callbacks.ModelCheckpoint(model_name,monitor='val_loss',verbose=1,save_best_only=True,min_delta=0.001)
early_stopping = keras.callbacks.EarlyStopping(monitor='val_loss',patience=10,mode='min',min_delta=0.001) # saves only the best ones
red_lr=keras.callbacks.ReduceLROnPlateau(monitor='val_loss',factor=0.1,patience=5, verbose=1, mode='auto',min_lr=1e-7,min_delta=0.001)
history=model.fit(X_train, y_train,validation_split=0.2 ,epochs=50, batch_size=500,callbacks=[early_stopping,red_lr])
y_pred=model.predict_classes(X_test)
_, test_acc = model.evaluate(X_test, y_test, verbose=0)
y_pred_final=model.predict_classes(X_final)
y_prob_final = model.predict_proba(X_final)
_, test_acc_final = model.evaluate(X_final, y_final, verbose=0)
# accuracy: (tp + tn) / (p + n)
accuracy = accuracy_score(y_final, y_pred_final)
print('Accuracy: %f' % accuracy)
# precision tp / (tp + fp)
precision = precision_score(y_final, y_pred_final)
print('Precision: %f' % precision)
# recall: tp / (tp + fn)
recall = recall_score(y_final, y_pred_final)
print('Recall: %f' % recall)
# f1: 2 tp / (2 tp + fp + fn)
f1 = f1_score(y_final, y_pred_final)
print('F1 score: %f' % f1)
# ROC AUC
auc = roc_auc_score(y_final, y_prob_final)
print('ROC AUC: %f' % auc)
# confusion matrix
matrix = confusion_matrix(y_final, y_pred_final)
print(matrix)
fpr, tpr, thresholds = roc_curve(y_final, y_prob_final)
plt.scatter(fpr, tpr, marker='.', label='SDNE embeddings')
# axis labels
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
# show the legend
plt.legend()
# show the plot
plt.show()
|
the-stack_0_24209 |
import math

import cv2
import numpy as np
from Crypto.Cipher import AES  # legacy PyCrypto-style import; AES.new(key) below assumes its API

image = cv2.imread('file location')
def encrypt(mess,key):
if len(key)%16 != 0:
a = 16 - len(key)%16
key = key.ljust(len(key)+a)
if len(mess)%16 != 0:
a = 16 - len(mess)%16
mess = mess.ljust(len(mess)+a)
cipher = AES.new(key)
encrypted_data = cipher.encrypt(mess)
return encrypted_data
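
# Illustrative note (an assumption about the crypto backend): with the legacy PyCrypto API,
# AES.new(key) defaults to ECB mode, which is why encrypt() only space-pads the key and message
# to multiples of 16 bytes, e.g.:
#   ciphertext = encrypt("Message to encrypt", "key to encrypt the message")
#   assert len(ciphertext) % 16 == 0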
def correlation(M1,M2):
li,wi = M1.shape
ls,ws = M2.shape
lo = li + ls -1
wo = wi + ws -1
g = np.zeros([li+ls*2-2,wi+ws*2-2])
g[ls-1:li+ls-1,ws-1:wi+ws-1] = M1
out = np.zeros([lo,wo])
for x in range(lo):
for y in range(wo):
C = np.multiply(g[x:x+ls,y:y+ws],M2)
out[x,y] = np.sum(C)
return out
def gaussian_filter(dim):
sigma = math.sqrt(dim)
fil = np.zeros((dim,dim))
k=dim
m = int((dim/2)+1)
for i in range(int((dim/2)+1)):
for j in range(int((dim/2)+1)):
fil[i,j]=np.exp(-((((m-i-1)*(m-i-1))+((m-j-1)*(m-j-1)))/(2*sigma*sigma)))/(2*np.pi*sigma**2)
fil[i,k-j-1]=fil[k-i-1,j]=fil[k-i-1,k-j-1]=fil[i,j]
s = np.sum(fil)
fil = fil/s
return fil
def sobelfilter(s):
filter1 = gaussian_filter(3)
s = correlation(s,filter1)
sobelxy = cv2.Sobel(src=s, ddepth=cv2.CV_8U, dx=1, dy=1, ksize=3)
return sobelxy
def image_steg(img,mess,key):
img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
edge_img = sobelfilter(img_gray)
indices = np.where(edge_img != 0)
f_points = np.column_stack((indices[0],indices[1]))
no_edge , r = f_points.shape
en_mess = str(encrypt(mess,key))
f = np.array([])
le = np.array([])
n = 0
for i in en_mess:
arr = " ".join(f"{ord(x):08b}" for x in i)
for j in arr:
f = np.append(f,int(j))
l = str(len(en_mess)*8)
l = l + "/"
arr = " ".join(f"{ord(x):08b}" for x in l)
for j in arr:
if j!=' ':
le = np.append(le,int(j))
for i in le:
x,y = f_points[n]
pix_val = img_gray[x,y]
if (pix_val%2 == i):
img_gray[x,y] = img_gray[x,y]
else:
img_gray[x,y] = img_gray[x,y]-1
#img_gray = pix_encode(f_points[n],img_gray,i)
n = n+1
n = 82
for i in f:
x,y = f_points[n]
pix_val = img_gray[x,y]
if (pix_val%2 == i):
img_gray[x,y] = img_gray[x,y]
else:
img_gray[x,y] = img_gray[x,y]-1
#img_gray = pix_encode(f_points[n],img_gray,i)
n = n+1
print(n)
return img_gray
message = "Message to encrypt"
key = "key to encrypt the message"
test_image=image_steg(image,message,key)
|
the-stack_0_24210 |
# parameters.py
"""
Exp 10 - {'Initial_genes': '5000', 'Host_mutation_rate': '0.30', 'TE_progeny': '0.15, 0, 0.55, 1, 0.30, 2', 'TE_Insertion_Distribution': 'Flat()', 'Carrying_capacity': '30', 'TE_excision_rate': '0.1', 'Junk_BP': '1.4', 'Gene_Insertion_Distribution': 'Flat()', 'mutation_effect': '0.01', 'TE_death_rate': '0.005'}
"""
from TEUtil import *;
# note that "#" indicates a comment
# set the following to True if you want messages printed to the screen
# while the program runs - search for these keywords in TESim.py to see
# what each one prints out
output = {
"SPLAT": False,
"SPLAT FITNESS": False,
"INITIALIZATION": False,
"GENERATION": True,
"HOST EXTINCTION": True,
"TE EXTINCTION": True,
"TRIAL NO": True,
"GENE INIT": False,
"TE INIT": False,
};
TE_Insertion_Distribution = Flat();
Gene_Insertion_Distribution = Flat();
# Triangle( pmax, pzero ) generates values between pmax and pzero with
# a triangular probability distribution, where pmax is the point of highest
# probability, and pzero is the point of lowest probability
# - you can change the orientation of the triangle by reversing the values
# of pmax and pzero
# Flat() generates values between 0 and 1 with uniform probability
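# (Assumption inferred from the calls below: ProbabilityTable takes alternating
# probability/outcome pairs, e.g. ProbabilityTable(0.15, 0, 0.55, 1, 0.30, 2) yields 0 with
# probability 0.15, 1 with probability 0.55 and 2 with probability 0.30.)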
Gene_length = 1000; # use 1000?
TE_length = 1000; # use 1000?
TE_death_rate = 0.005;
TE_excision_rate = 0.1; # set this to zero for retro transposons
# for retro transposons this is the probability of the given number of progeny
# for dna transposons this is the probability of the given number of progeny
# ___PLUS___ the original re-inserting
TE_progeny = ProbabilityTable( 0.15, 0, 0.55, 1, 0.30, 2 );
Initial_genes = 5000;
Append_gene = True; # True: when the initialization routine tries to place
# a gene inside another gene, it instead appends it
# at the end of the original gene (use this with small
# amounts of Junk_BP).
                    # False: when the initialization routine tries to place
                    # a gene inside another gene, try to place it somewhere
                    # else again (don't use this option with small amounts
# of Junk_BP).
Initial_TEs = 1;
MILLION = 1000000;
Junk_BP = 1.4 * MILLION;
Host_start_fitness = 1.0;
Host_mutation_rate = 0.30;
Host_mutation = ProbabilityTable( 0.40, lambda fit: 0.0,
0.30, lambda fit: fit - random.random()*0.01,
0.15, lambda fit: fit,
0.15, lambda fit: fit + random.random()*0.01
);
# what happens when a TA hits a gene
Insertion_effect = ProbabilityTable(0.30, lambda fit: 0.0,
0.20, lambda fit: fit - random.random()*0.01,
0.30, lambda fit: fit,
0.20, lambda fit: fit + random.random()*0.01
);
Carrying_capacity = 30;
Host_reproduction_rate = 1; # how many offspring each host has
Host_survival_rate = lambda propfit: min( Carrying_capacity * propfit, 0.95 );
# propfit = proportion of fitness owned by this individual
Maximum_generations = 1500;
Terminate_no_TEs = True; # end simulation if there are no TEs left
# seed = 0;
seed = None; # if seed = None, the random number generator's initial state is
# set "randomly"
save_frequency = 50; # Frequency with which to save state of experiment
saved = None; # if saved = None then we start a new simulation from scratch
              # if saved = string, then we open that file and resume a simulation
|
the-stack_0_24211 | import os
import subprocess
import sys
from time import sleep
from typing import List
from telegram import Bot, Update, TelegramError
from telegram.ext import CommandHandler, run_async
from Manager import dispatcher
from Manager.modules.helper_funcs.chat_status import dev_plus
@run_async
@dev_plus
def leave(bot: Bot, update: Update, args: List[str]):
if args:
chat_id = str(args[0])
try:
bot.leave_chat(int(chat_id))
update.effective_message.reply_text("Beep boop, I left that soup!.")
except TelegramError:
update.effective_message.reply_text("Beep boop, I could not leave that group(dunno why tho).")
else:
update.effective_message.reply_text("Send a valid chat ID")
@run_async
@dev_plus
def gitpull(bot: Bot, update: Update):
sent_msg = update.effective_message.reply_text("Pulling all changes from remote and then attempting to restart.")
subprocess.Popen('git pull', stdout=subprocess.PIPE, shell=True)
sent_msg_text = sent_msg.text + "\n\nChanges pulled...I guess.. Restarting in "
for i in reversed(range(5)):
sent_msg.edit_text(sent_msg_text + str(i + 1))
sleep(1)
sent_msg.edit_text("Restarted.")
os.system('restart.bat')
os.execv('start.bat', sys.argv)
@run_async
@dev_plus
def restart(bot: Bot, update: Update):
update.effective_message.reply_text("Starting a new instance and shutting down this one")
os.system('restart.bat')
os.execv('start.bat', sys.argv)
LEAVE_HANDLER = CommandHandler("leave", leave, pass_args=True)
GITPULL_HANDLER = CommandHandler("gitpull", gitpull)
RESTART_HANDLER = CommandHandler("reboot", restart)
dispatcher.add_handler(LEAVE_HANDLER)
dispatcher.add_handler(GITPULL_HANDLER)
dispatcher.add_handler(RESTART_HANDLER)
__mod_name__ = "Dev"
__handlers__ = [LEAVE_HANDLER, GITPULL_HANDLER, RESTART_HANDLER]
|
the-stack_0_24213 | import cgi
import functools
import pathlib
import re
import sys
import traceback
import warnings
from contextlib import contextmanager
from copy import copy, deepcopy
from inspect import getfullargspec
from json import JSONDecodeError
from typing import (
Any,
Callable,
Dict,
Generator,
Generic,
List,
NoReturn,
Optional,
Set,
Tuple,
Type,
TypeVar,
Union,
overload,
)
import pytest
import requests
import yaml
import yarl
from hypothesis.core import is_invalid_test
from hypothesis.reporting import with_reporter
from hypothesis.strategies import SearchStrategy
from requests.auth import HTTPDigestAuth
from requests.exceptions import InvalidHeader # type: ignore
from requests.utils import check_header_validity # type: ignore
from werkzeug.wrappers import Response as BaseResponse
from ._compat import InferType, JSONMixin
from .constants import USER_AGENT, DataGenerationMethod
from .exceptions import UsageError
from .types import DataGenerationMethodInput, Filter, GenericTest, NotSet, RawAuth
try:
from yaml import CSafeLoader as SafeLoader
except ImportError:
# pylint: disable=unused-import
from yaml import SafeLoader # type: ignore
NOT_SET = NotSet()
def file_exists(path: str) -> bool:
try:
return pathlib.Path(path).is_file()
except OSError:
# For example, path could be too long
return False
def is_latin_1_encodable(value: str) -> bool:
"""Header values are encoded to latin-1 before sending."""
try:
value.encode("latin-1")
return True
except UnicodeEncodeError:
return False
# Adapted from http.client._is_illegal_header_value
INVALID_HEADER_RE = re.compile(r"\n(?![ \t])|\r(?![ \t\n])") # pragma: no mutate
def has_invalid_characters(name: str, value: str) -> bool:
try:
check_header_validity((name, value))
return bool(INVALID_HEADER_RE.search(value))
except InvalidHeader:
return True
def is_schemathesis_test(func: Callable) -> bool:
"""Check whether test is parametrized with schemathesis."""
try:
from .schemas import BaseSchema # pylint: disable=import-outside-toplevel
item = getattr(func, PARAMETRIZE_MARKER, None)
# Comparison is needed to avoid false-positives when mocks are collected by pytest
return isinstance(item, BaseSchema)
except Exception:
return False
def fail_on_no_matches(node_id: str) -> NoReturn: # type: ignore
pytest.fail(f"Test function {node_id} does not match any API operations and therefore has no effect")
def force_tuple(item: Filter) -> Union[List, Set, Tuple]:
if not isinstance(item, (list, set, tuple)):
return (item,)
return item
def dict_true_values(**kwargs: Any) -> Dict[str, Any]:
"""Create a dict with given kwargs while skipping items where bool(value) evaluates to False."""
return {key: value for key, value in kwargs.items() if bool(value)}
def dict_not_none_values(**kwargs: Any) -> Dict[str, Any]:
return {key: value for key, value in kwargs.items() if value is not None}
IGNORED_PATTERNS = (
"Falsifying example: ",
"Falsifying explicit example: ",
"You can add @seed",
"Failed to reproduce exception. Expected:",
"Flaky example!",
"Traceback (most recent call last):",
"You can reproduce this example by temporarily",
"Unreliable test timings",
)
@contextmanager
def capture_hypothesis_output() -> Generator[List[str], None, None]:
"""Capture all output of Hypothesis into a list of strings.
It allows us to have more granular control over Schemathesis output.
Usage::
@given(i=st.integers())
def test(i):
assert 0
with capture_hypothesis_output() as output:
test() # hypothesis test
# output == ["Falsifying example: test(i=0)"]
"""
output = []
def get_output(value: str) -> None:
# Drop messages that could be confusing in the Schemathesis context
if value.startswith(IGNORED_PATTERNS):
return
output.append(value)
# the following context manager is untyped
with with_reporter(get_output): # type: ignore
yield output
def format_exception(error: Exception, include_traceback: bool = False) -> str:
"""Format exception as text."""
error_type = type(error)
if include_traceback:
lines = traceback.format_exception(error_type, error, error.__traceback__)
else:
lines = traceback.format_exception_only(error_type, error)
return "".join(lines)
def parse_content_type(content_type: str) -> Tuple[str, str]:
"""Parse Content Type and return main type and subtype."""
try:
content_type, _ = cgi.parse_header(content_type)
main_type, sub_type = content_type.split("/", 1)
except ValueError as exc:
raise ValueError(f"Malformed media type: `{content_type}`") from exc
return main_type.lower(), sub_type.lower()
def is_json_media_type(value: str) -> bool:
"""Detect whether the content type is JSON-compatible.
For example - ``application/problem+json`` matches.
"""
main, sub = parse_content_type(value)
return main == "application" and (sub == "json" or sub.endswith("+json"))
def is_plain_text_media_type(value: str) -> bool:
"""Detect variations of the ``text/plain`` media type."""
return parse_content_type(value) == ("text", "plain")
def make_loader(*tags_to_remove: str) -> Type[yaml.SafeLoader]:
"""Create a YAML loader, that doesn't parse specific tokens into Python objects."""
cls: Type[yaml.SafeLoader] = type("YAMLLoader", (SafeLoader,), {})
cls.yaml_implicit_resolvers = {
key: [(tag, regexp) for tag, regexp in mapping if tag not in tags_to_remove]
for key, mapping in cls.yaml_implicit_resolvers.copy().items()
}
# Fix pyyaml scientific notation parse bug
# See PR: https://github.com/yaml/pyyaml/pull/174 for upstream fix
cls.add_implicit_resolver( # type: ignore
"tag:yaml.org,2002:float",
re.compile(
r"""^(?:[-+]?(?:[0-9][0-9_]*)\.[0-9_]*(?:[eE][-+]?[0-9]+)?
|[-+]?(?:[0-9][0-9_]*)(?:[eE][-+]?[0-9]+)
|\.[0-9_]+(?:[eE][-+]?[0-9]+)?
|[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+\.[0-9_]*
|[-+]?\.(?:inf|Inf|INF)
|\.(?:nan|NaN|NAN))$""",
re.X,
),
list("-+0123456789."),
)
return cls
StringDatesYAMLLoader = make_loader("tag:yaml.org,2002:timestamp")
class WSGIResponse(BaseResponse, JSONMixin): # pylint: disable=too-many-ancestors
# We store "requests" request to build a reproduction code
request: requests.PreparedRequest
def on_json_loading_failed(self, e: JSONDecodeError) -> NoReturn:
# We don't need a werkzeug-specific exception when JSON parsing error happens
raise e
def get_requests_auth(auth: Optional[RawAuth], auth_type: Optional[str]) -> Optional[Union[HTTPDigestAuth, RawAuth]]:
if auth and auth_type == "digest":
return HTTPDigestAuth(*auth)
return auth
GenericResponse = Union[requests.Response, WSGIResponse] # pragma: no mutate
def copy_response(response: GenericResponse) -> GenericResponse:
"""Create a copy of the given response as far as it makes sense."""
if isinstance(response, requests.Response):
copied_response = deepcopy(response)
setattr(copied_response, "raw", response.raw)
return copied_response
# Can't deepcopy WSGI response due to generators inside (`response.freeze` doesn't completely help)
response.freeze()
copied_response = copy(response)
copied_response.request = deepcopy(response.request)
return copied_response
def get_response_payload(response: GenericResponse) -> str:
if isinstance(response, requests.Response):
return response.text
return response.get_data(as_text=True)
def import_app(path: str) -> Any:
"""Import an application from a string."""
path, name = (re.split(r":(?![\\/])", path, 1) + [""])[:2]
__import__(path)
# accessing the module from sys.modules returns a proper module, while `__import__`
# may return a parent module (system dependent)
module = sys.modules[path]
return getattr(module, name)
Schema = Union[Dict[str, Any], List, str, float, int]
@overload
def traverse_schema(schema: Dict[str, Any], callback: Callable, *args: Any, **kwargs: Any) -> Dict[str, Any]:
pass
@overload
def traverse_schema(schema: List, callback: Callable, *args: Any, **kwargs: Any) -> List:
pass
@overload
def traverse_schema(schema: str, callback: Callable, *args: Any, **kwargs: Any) -> str:
pass
@overload
def traverse_schema(schema: float, callback: Callable, *args: Any, **kwargs: Any) -> float:
pass
def traverse_schema(schema: Schema, callback: Callable[..., Dict[str, Any]], *args: Any, **kwargs: Any) -> Schema:
"""Apply callback recursively to the given schema."""
if isinstance(schema, dict):
schema = callback(schema, *args, **kwargs)
for key, sub_item in schema.items():
schema[key] = traverse_schema(sub_item, callback, *args, **kwargs)
elif isinstance(schema, list):
schema = [traverse_schema(sub_item, callback, *args, **kwargs) for sub_item in schema]
return schema
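
# Usage sketch (the callback below is illustrative, not part of the module): traverse_schema
# applies the callback to every dict it meets, recursing into values and list items.
#
#   def drop_examples(schema: Dict[str, Any]) -> Dict[str, Any]:
#       schema.pop("example", None)
#       return schema
#
#   cleaned = traverse_schema(
#       {"type": "object", "properties": {"id": {"type": "integer", "example": 1}}},
#       drop_examples,
#   )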
def _warn_deprecation(*, thing: str, removed_in: str, replacement: str) -> None:
warnings.warn(
f"Property `{thing}` is deprecated and will be removed in Schemathesis {removed_in}. "
f"Use `{replacement}` instead.",
DeprecationWarning,
)
def deprecated_property(*, removed_in: str, replacement: str) -> Callable:
def wrapper(prop: Callable) -> Callable:
@property # type: ignore
def inner(self: Any) -> Any:
_warn_deprecation(thing=prop.__name__, removed_in=removed_in, replacement=replacement)
return prop(self)
return inner
return wrapper
def deprecated(*, removed_in: str, replacement: str) -> Callable:
def wrapper(func: Callable) -> Callable:
def inner(*args: Any, **kwargs: Any) -> Any:
_warn_deprecation(thing=func.__name__, removed_in=removed_in, replacement=replacement)
return func(*args, **kwargs)
return inner
return wrapper
def setup_headers(kwargs: Dict[str, Any]) -> None:
headers = kwargs.setdefault("headers", {})
if "user-agent" not in {header.lower() for header in headers}:
kwargs["headers"]["User-Agent"] = USER_AGENT
def require_relative_url(url: str) -> None:
"""Raise an error if the URL is not relative."""
if yarl.URL(url).is_absolute():
raise ValueError("Schema path should be relative for WSGI/ASGI loaders")
T = TypeVar("T")
E = TypeVar("E", bound=Exception)
class Ok(Generic[T]):
__slots__ = ("_value",)
def __init__(self, value: T):
self._value = value
def ok(self) -> T:
return self._value
class Err(Generic[E]):
__slots__ = ("_error",)
def __init__(self, error: E):
self._error = error
def err(self) -> E:
return self._error
Result = Union[Ok[T], Err[E]]
GivenInput = Union[SearchStrategy, InferType]
PARAMETRIZE_MARKER = "_schemathesis_test"
GIVEN_ARGS_MARKER = "_schemathesis_given_args"
GIVEN_KWARGS_MARKER = "_schemathesis_given_kwargs"
def get_given_args(func: GenericTest) -> Tuple:
return getattr(func, GIVEN_ARGS_MARKER, ())
def get_given_kwargs(func: GenericTest) -> Dict[str, Any]:
return getattr(func, GIVEN_KWARGS_MARKER, {})
def is_given_applied(func: GenericTest) -> bool:
return hasattr(func, GIVEN_ARGS_MARKER) or hasattr(func, GIVEN_KWARGS_MARKER)
def given_proxy(*args: GivenInput, **kwargs: GivenInput) -> Callable[[GenericTest], GenericTest]:
"""Proxy Hypothesis strategies to ``hypothesis.given``."""
def wrapper(func: GenericTest) -> GenericTest:
if hasattr(func, GIVEN_ARGS_MARKER):
def wrapped_test(*_: Any, **__: Any) -> NoReturn:
raise UsageError(
f"You have applied `given` to the `{func.__name__}` test more than once, which "
"overrides the previous decorator. You need to pass all arguments to the same `given` call."
)
return wrapped_test
setattr(func, GIVEN_ARGS_MARKER, args)
setattr(func, GIVEN_KWARGS_MARKER, kwargs)
return func
return wrapper
def merge_given_args(func: GenericTest, args: Tuple, kwargs: Dict[str, Any]) -> Dict[str, Any]:
"""Merge positional arguments to ``@schema.given`` into a dictionary with keyword arguments.
Kwargs are modified inplace.
"""
if args:
argspec = getfullargspec(func)
for name, strategy in zip(reversed([arg for arg in argspec.args if arg != "case"]), reversed(args)):
kwargs[name] = strategy
return kwargs
def validate_given_args(func: GenericTest, args: Tuple, kwargs: Dict[str, Any]) -> Optional[Callable]:
argspec = getfullargspec(func)
return is_invalid_test(func, argspec, args, kwargs) # type: ignore
def compose(*functions: Callable) -> Callable:
"""Compose multiple functions into a single one."""
def noop(x: Any) -> Any:
return x
return functools.reduce(lambda f, g: lambda x: f(g(x)), functions, noop)
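
# Example (illustrative): compose applies functions right-to-left, so compose(f, g)(x) == f(g(x)).
#   add_one = lambda x: x + 1
#   double = lambda x: x * 2
#   compose(add_one, double)(3)  # -> 7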
def maybe_set_assertion_message(exc: AssertionError, check_name: str) -> str:
message = str(exc)
if not message:
message = f"Check '{check_name}' failed"
exc.args = (message,)
return message
def prepare_data_generation_methods(data_generation_methods: DataGenerationMethodInput) -> List[DataGenerationMethod]:
if isinstance(data_generation_methods, DataGenerationMethod):
return [data_generation_methods]
return list(data_generation_methods)
def merge(a: Dict[str, Any], b: Dict[str, Any]) -> Dict[str, Any]:
"""Merge two dictionaries recursively."""
for key in b:
if key in a:
if isinstance(a[key], dict) and isinstance(b[key], dict):
merge(a[key], b[key])
else:
a[key] = b[key]
else:
a[key] = b[key]
return a
|
the-stack_0_24214 | from sympy.core.numbers import igcd
from primetest import isprime
def totient_(n):
"""returns the number of integers less than n
and relatively prime to n"""
if n < 1:
raise ValueError("n must be a positive integer")
tot=0
    for x in range(1, n):
if igcd(x,n)==1:
tot+=1
return tot
def n_order(a,n):
""" returns the order of a modulo n
Order of a modulo n is the smallest integer
k such that a^k leaves a remainder of 1 with n.
"""
assert igcd(a,n)==1
if a>n : a=a%n
    for x in range(1, totient_(n) + 1):
if (a**x)%n==1:
return x
def is_primitive_root(a,p):
"""
returns True if a is a primitive root of p
"""
assert igcd(a,p) == 1,"The two numbers should be relatively prime"
if a>p:
a=a%p
if n_order(a,p)==totient_(p):
return True
else:
return False
def is_quad_residue(a,p):
"""
returns True if a is a quadratic residue of p
p should be a prime and a should be relatively
prime to p
"""
assert isprime(p) and p!=2,"p should be an odd prime"
assert igcd(a,p)==1,"The two numbers should be relatively prime"
if a>p:
a=a%p
    rem = (a**((p - 1)//2)) % p  # a^((p-1)/2) % p, i.e. Euler's criterion
    return rem == 1
def legendre_symbol(a,p):
"""
return 1 if a is a quadratic residue of p
else return -1
p should be an odd prime by definition
"""
assert isprime(p) and p!=2,"p should be an odd prime"
assert igcd(a,p)==1,"The two numbers should be relatively prime"
if a>p:
a=a%p
    return 1 if is_quad_residue(a, p) else -1
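
# Small usage sketch (illustrative):
#   totient_(10)            # -> 4, since 1, 3, 7, 9 are coprime to 10
#   n_order(3, 7)           # -> 6, so 3 is a primitive root mod 7
#   is_primitive_root(3, 7) # -> True
#   legendre_symbol(2, 7)   # -> 1  (3**2 = 9 == 2 mod 7)
#   legendre_symbol(3, 7)   # -> -1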
|
the-stack_0_24216 | # Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
Primitive operator classes.
A collection of operators to build neural networks or to compute functions.
"""
from .image_ops import (CropAndResize)
from .array_ops import (Argmax, Argmin, Cast, Concat, Pack, Stack, Unpack, Unstack,
Diag, DiagPart, DType, ExpandDims, Eye,
Fill, Ones, Zeros, GatherNd, GatherV2, Gather, SparseGatherV2, InvertPermutation,
IsInstance, IsSubClass, ArgMaxWithValue, OnesLike, ZerosLike,
Rank, Reshape, ResizeNearestNeighbor, ArgMinWithValue, Meshgrid,
SameTypeShape, ScatterAdd, ScatterSub, ScatterMul, ScatterDiv, ScatterMax, ScatterMin,
ScatterUpdate, ScalarToArray, ScalarToTensor, ScatterNd, ScatterNdUpdate, Select,
Shape, DynamicShape, Size, Slice, Split, TransShape, ParallelConcat, Padding, UniqueWithPad,
ScatterNdAdd, ScatterNdSub, ScatterNonAliasingAdd, ReverseV2, Rint,
Squeeze, StridedSlice, Tile, TensorScatterUpdate, TensorScatterAdd, EditDistance, Sort,
Transpose, TruncatedNormal, TupleToArray, UnsortedSegmentMin, UnsortedSegmentMax,
UnsortedSegmentProd, UnsortedSegmentSum, SpaceToDepth, DepthToSpace, SpaceToBatch,
BatchToSpace, SpaceToBatchND, BatchToSpaceND, BroadcastTo, InplaceUpdate, ReverseSequence,
EmbeddingLookup, Unique, GatherD, Identity, Range, MaskedSelect)
from .comm_ops import (AllGather, AllReduce, _AlltoAll, AllSwap, ReduceScatter, Broadcast,
_MirrorOperator, _MirrorMiniStepOperator, _MiniStepAllGather, ReduceOp, _VirtualDataset,
_VirtualOutput, _VirtualDiv, _GetTensorSlice, _VirtualAdd,
_HostAllGather, _HostReduceScatter)
from .debug_ops import (ImageSummary, InsertGradientOf, HookBackward, ScalarSummary,
TensorSummary, HistogramSummary, Print, Assert)
from .control_ops import GeSwitch, Merge
from .inner_ops import (ScalarCast, Randperm, NoRepeatNGram, LambApplyOptimizerAssign, LambApplyWeightAssign,
MakeRefKey,
FusedWeightScaleApplyMomentum, AdamWeightDecay)
from .math_ops import (Abs, ACos, Asin, Asinh, AddN, AccumulateNV2, AssignAdd, AssignSub, Atan2, BatchMatMul,
BitwiseAnd, BitwiseOr,
BitwiseXor, Inv, Invert, ApproximateEqual, InplaceAdd, InplaceSub,
ReduceMax, ReduceMin, ReduceMean, ReduceSum, ReduceAll, ReduceProd, CumProd, ReduceAny,
Cos, Div, DivNoNan, Equal, EqualCount, Exp, Expm1, Erf, Erfc, Floor, FloorDiv, FloorMod, Ceil,
Acosh, Greater, GreaterEqual, Less, LessEqual, Log, Log1p, LogicalAnd, Mod,
LogicalNot, LogicalOr, MatMul, Maximum, MulNoNan,
Minimum, Mul, Neg, NMSWithMask, NotEqual,
NPUAllocFloatStatus, NPUClearFloatStatus, LinSpace,
NPUGetFloatStatus, Pow, RealDiv, IsNan, IsInf, IsFinite, FloatStatus,
Reciprocal, CumSum, HistogramFixedWidth, SquaredDifference, Xdivy, Xlogy,
Sin, Sqrt, Rsqrt, BesselI0e, BesselI1e, TruncateDiv, TruncateMod,
Square, Sub, TensorAdd, Add, Sign, Round, SquareSumAll, Atan, Atanh, Cosh, Sinh, Eps, Tan,
MatrixInverse, IndexAdd)
from .random_ops import (RandomChoiceWithMask, StandardNormal, Gamma, Poisson, UniformInt, UniformReal,
RandomCategorical, StandardLaplace, Multinomial, UniformCandidateSampler,
LogUniformCandidateSampler)
from .nn_ops import (LSTM, SGD, Adam, FusedSparseAdam, FusedSparseLazyAdam, AdamNoUpdateParam, ApplyMomentum,
BatchNorm, BiasAdd, Conv2D, Conv3D, Conv2DTranspose, Conv3DTranspose,
DepthwiseConv2dNative,
DropoutDoMask, Dropout, Dropout2D, Dropout3D, DropoutGenMask, Flatten,
InstanceNorm, BNTrainingReduce, BNTrainingUpdate,
GeLU, Gelu, FastGeLU, FastGelu, Elu,
GetNext, L2Normalize, LayerNorm, L2Loss, CTCLoss, CTCGreedyDecoder,
LogSoftmax, MaxPool3D, AvgPool3D,
MaxPool, DataFormatDimMap,
AvgPool, Conv2DBackpropInput, ComputeAccidentalHits,
MaxPoolWithArgmax, OneHot, Pad, MirrorPad, Mish, PReLU, ReLU, ReLU6, ReLUV2, HSwish, HSigmoid,
ResizeBilinear, Sigmoid, SeLU,
SigmoidCrossEntropyWithLogits, NLLLoss, BCEWithLogitsLoss,
SmoothL1Loss, Softmax, Softsign, Softplus, LRN, RNNTLoss, DynamicRNN, DynamicGRUV2,
SoftmaxCrossEntropyWithLogits, ROIAlign,
SparseSoftmaxCrossEntropyWithLogits, Tanh,
TopK, BinaryCrossEntropy, KLDivLoss, SparseApplyAdagrad, LARSUpdate, ApplyFtrl, SparseApplyFtrl,
ApplyProximalAdagrad, SparseApplyProximalAdagrad, SparseApplyAdagradV2, SparseApplyFtrlV2,
FusedSparseFtrl, FusedSparseProximalAdagrad,
ApplyAdaMax, ApplyAdadelta, ApplyAdagrad, ApplyAdagradV2,
ApplyAddSign, ApplyPowerSign, ApplyGradientDescent, ApplyProximalGradientDescent,
ApplyRMSProp, ApplyCenteredRMSProp, BasicLSTMCell, InTopK, AdaptiveAvgPool2D)
from . import _quant_ops
from ._quant_ops import *
from .other_ops import (Assign, InplaceAssign, IOU, BoundingBoxDecode, BoundingBoxEncode,
ConfusionMatrix, PopulationCount, UpdateState, Load,
CheckValid, Partial, Depend, identity, CheckBprop, Push, Pull, PullWeight, PushWeight)
from ._thor_ops import (CusBatchMatMul, CusCholeskyTrsm, CusFusedAbsMax1, CusImg2Col, CusMatMulCubeDenseLeft,
CusMatMulCubeFraczRightMul, CusMatMulCube, CusMatrixCombine, CusTranspose02314,
CusMatMulCubeDenseRight,
CusMatMulCubeFraczLeftCast, Im2Col, UpdateThorGradient, Cholesky, CholeskyTrsm, DetTriangle,
ProdForceSeA)
from .sparse_ops import (SparseToDense, SparseTensorDenseMatmul)
from ._embedding_cache_ops import (CacheSwapTable, UpdateCache, MapCacheIdx, SubAndFilter,
MapUniform, DynamicAssign, PadAndShift)
from .quantum_ops import PQC, Evolution
from .sponge_ops import (BondForce, BondEnergy, BondAtomEnergy, BondForceWithAtomEnergy, BondForceWithAtomVirial,
DihedralForce, DihedralEnergy, DihedralAtomEnergy, DihedralForceWithAtomEnergy, AngleForce,
AngleEnergy, AngleAtomEnergy, AngleForceWithAtomEnergy, PMEReciprocalForce,
LJForce, LJEnergy, LJForceWithPMEDirectForce, PMEExcludedForce, PMEEnergy, Dihedral14LJForce,
Dihedral14LJForceWithDirectCF, Dihedral14LJEnergy, Dihedral14LJCFForceWithAtomEnergy,
Dihedral14LJAtomEnergy, Dihedral14CFEnergy, Dihedral14CFAtomEnergy, MDIterationLeapFrog,
GetCenterOfGeometry, MDTemperature, NeighborListUpdate, MDIterationLeapFrogLiujian,
CrdToUintCrd, MDIterationSetupRandState, TransferCrd)
__all__ = [
'Unique',
'ReverseSequence',
'Sort',
'EditDistance',
'CropAndResize',
'Add',
'TensorAdd',
'Argmax',
'Argmin',
'MaxPool3D',
'AvgPool3D',
'ArgMaxWithValue',
'ArgMinWithValue',
'AddN',
'AccumulateNV2',
'Sub',
'CumSum',
'MatMul',
'BatchMatMul',
'Mul',
'Meshgrid',
'Pow',
'Exp',
'Expm1',
'Rsqrt',
'Sqrt',
'Square',
'DynamicGRUV2',
'SquaredDifference',
'Xdivy',
'Xlogy',
'Conv2D',
'Conv3D',
'Conv2DTranspose',
'Conv3DTranspose',
'Flatten',
'MaxPoolWithArgmax',
'BNTrainingReduce',
'BNTrainingUpdate',
'BatchNorm',
'MaxPool',
'TopK',
'LinSpace',
'Adam',
'AdamWeightDecay',
'FusedSparseAdam',
'FusedSparseLazyAdam',
'AdamNoUpdateParam',
'Softplus',
'Softmax',
'Softsign',
'LogSoftmax',
'SoftmaxCrossEntropyWithLogits',
'BCEWithLogitsLoss',
'ROIAlign',
'SparseSoftmaxCrossEntropyWithLogits',
'NLLLoss',
'SGD',
'ApplyMomentum',
'FusedWeightScaleApplyMomentum',
'ExpandDims',
'Cast',
'IsSubClass',
'IsInstance',
'Reshape',
'Squeeze',
'Transpose',
'OneHot',
'GatherV2',
'Gather',
'SparseGatherV2',
'EmbeddingLookup',
'Padding',
'GatherD',
'Identity',
'UniqueWithPad',
'Concat',
'Pack',
'Stack',
'Unpack',
'Unstack',
'Tile',
'BiasAdd',
'GeLU',
'Gelu',
'FastGeLU',
'FastGelu',
'Minimum',
'Maximum',
'StridedSlice',
'ReduceSum',
'ReduceMean',
'LayerNorm',
'Rank',
'Less',
'LessEqual',
'RealDiv',
'Div',
'DivNoNan',
'Inv',
'Invert',
'TruncatedNormal',
'Fill',
'Ones',
'Zeros',
'OnesLike',
'ZerosLike',
'Select',
'Split',
'Mish',
'SeLU',
'MulNoNan',
'ReLU',
'ReLU6',
'Elu',
'Erf',
'Erfc',
'Sigmoid',
'HSwish',
'HSigmoid',
'Tanh',
'NoRepeatNGram',
'Randperm',
'RandomChoiceWithMask',
'StandardNormal',
'Multinomial',
'Gamma',
'Poisson',
'UniformInt',
'UniformReal',
'StandardLaplace',
'RandomCategorical',
'ResizeBilinear',
'ScalarSummary',
'ImageSummary',
'TensorSummary',
'HistogramSummary',
"Print",
"Assert",
'InsertGradientOf',
'HookBackward',
'InvertPermutation',
'Shape',
'DynamicShape',
'DropoutDoMask',
'DropoutGenMask',
'Dropout',
'Dropout2D',
'Dropout3D',
'Neg',
'InplaceAdd',
'InplaceSub',
'Slice',
'DType',
'NPUAllocFloatStatus',
'NPUGetFloatStatus',
'NPUClearFloatStatus',
'IsNan',
'IsFinite',
'IsInf',
'FloatStatus',
'Reciprocal',
'SmoothL1Loss',
'L2Loss',
'CTCLoss',
'CTCGreedyDecoder',
'RNNTLoss',
'DynamicRNN',
'ReduceAll',
'ReduceAny',
'ScalarToArray',
'ScalarToTensor',
'TupleToArray',
'GeSwitch',
'Merge',
'SameTypeShape',
'CheckBprop',
'CheckValid',
'BoundingBoxEncode',
'BoundingBoxDecode',
'L2Normalize',
'ScatterAdd',
'ScatterSub',
'ScatterMul',
'ScatterDiv',
'ScatterNd',
'ScatterMax',
'ScatterMin',
'ScatterNdAdd',
'ScatterNdSub',
'ScatterNonAliasingAdd',
'ReverseV2',
'Rint',
'ResizeNearestNeighbor',
'HistogramFixedWidth',
'Pad',
'MirrorPad',
'GatherNd',
'TensorScatterUpdate',
'TensorScatterAdd',
'ScatterUpdate',
'ScatterNdUpdate',
'Floor',
'NMSWithMask',
'IOU',
'Partial',
'MakeRefKey',
'Depend',
'UpdateState',
'identity',
'AvgPool',
# Back Primitive
'Equal',
'EqualCount',
'NotEqual',
'Greater',
'GreaterEqual',
'LogicalNot',
'LogicalAnd',
'LogicalOr',
'Size',
'DepthwiseConv2dNative',
'UnsortedSegmentSum',
'UnsortedSegmentMin',
'UnsortedSegmentMax',
'UnsortedSegmentProd',
"AllGather",
"AllReduce",
"AllSwap",
"ReduceScatter",
"Broadcast",
"ReduceOp",
'ScalarCast',
'GetNext',
'ReduceMax',
'ReduceMin',
'ReduceProd',
'CumProd',
'Log',
'Log1p',
'SigmoidCrossEntropyWithLogits',
'FloorDiv',
'FloorMod',
'TruncateDiv',
'TruncateMod',
'Ceil',
'Acosh',
'Asinh',
"PReLU",
"Cos",
"Cosh",
"ACos",
"Diag",
"DiagPart",
'Eye',
'Assign',
'AssignAdd',
'AssignSub',
"Sin",
"Sinh",
"Asin",
"LSTM",
"Abs",
"BinaryCrossEntropy",
"KLDivLoss",
"SparseApplyAdagrad",
"SparseApplyAdagradV2",
"SpaceToDepth",
"DepthToSpace",
"Conv2DBackpropInput",
"ComputeAccidentalHits",
"Sign",
"LARSUpdate",
"Round",
"Eps",
"ApplyFtrl",
"SpaceToBatch",
"SparseApplyFtrl",
"SparseApplyFtrlV2",
"FusedSparseFtrl",
"ApplyProximalAdagrad",
"SparseApplyProximalAdagrad",
"FusedSparseProximalAdagrad",
"ApplyAdaMax",
"ApplyAdadelta",
"ApplyAdagrad",
"ApplyAdagradV2",
"ApplyAddSign",
"ApplyPowerSign",
"ApplyGradientDescent",
"ApplyProximalGradientDescent",
"BatchToSpace",
"Atan2",
"ApplyRMSProp",
"ApplyCenteredRMSProp",
"SpaceToBatchND",
"BatchToSpaceND",
"SquareSumAll",
"BitwiseAnd",
"BitwiseOr",
"BitwiseXor",
"BesselI0e",
"BesselI1e",
"Atan",
"Atanh",
"Tan",
"BasicLSTMCell",
"BroadcastTo",
"DataFormatDimMap",
"ApproximateEqual",
"InplaceUpdate",
"InTopK",
"UniformCandidateSampler",
"LogUniformCandidateSampler",
"LRN",
"Mod",
"ConfusionMatrix",
"PopulationCount",
"ParallelConcat",
"Push",
"Pull",
"PullWeight",
"PushWeight",
"ReLUV2",
"SparseToDense",
"SparseTensorDenseMatmul",
"MatrixInverse",
"Range",
"IndexAdd",
"PQC",
"Evolution",
"BondForce",
"BondEnergy",
"BondAtomEnergy",
"BondForceWithAtomEnergy",
"BondForceWithAtomVirial",
"DihedralForce",
"DihedralEnergy",
"DihedralAtomEnergy",
"DihedralForceWithAtomEnergy",
"AngleForce",
"AngleEnergy",
"AngleAtomEnergy",
"AngleForceWithAtomEnergy",
'PMEReciprocalForce',
'LJForce',
'LJForceWithPMEDirectForce',
'LJEnergy',
'PMEExcludedForce',
'PMEEnergy',
"Dihedral14LJForce",
"Dihedral14LJEnergy",
"Dihedral14LJForceWithDirectCF",
"Dihedral14LJCFForceWithAtomEnergy",
"Dihedral14LJAtomEnergy",
"Dihedral14CFEnergy",
"MDIterationLeapFrog",
"Dihedral14CFAtomEnergy",
"GetCenterOfGeometry",
"MDTemperature",
"NeighborListUpdate",
"MDIterationLeapFrogLiujian",
"CrdToUintCrd",
"MDIterationSetupRandState",
"TransferCrd",
"AdaptiveAvgPool2D"
]
__all__.sort()
|
the-stack_0_24217 | import os
import shutil
from conans import ConanFile, tools
from conan.tools.cmake import CMake, CMakeToolchain
from conan.tools.layout import cmake_layout
class TestPackage(ConanFile):
settings = "os", "compiler", "build_type", "arch"
generators = "CMakeDeps", "CMakeToolchain", "VirtualBuildEnv", "VirtualRunEnv"
@property
def _package(self):
return self.deps_cpp_info["tensorflow"]
def layout(self):
cmake_layout(self)
def generate(self):
cmake_tc = CMakeToolchain(self)
cmake_tc.variables["_package_name"] = self._package.name
cmake_tc.variables["_package_version"] = self._package.version
cmake_tc.preprocessor_definitions["_package_name"] = self._package.name
cmake_tc.preprocessor_definitions["_package_version"] = self._package.version
cmake_tc.generate()
def build(self):
cmake = CMake(self)
cmake.configure()
cmake.build()
def test(self):
if not tools.cross_building(self):
test = os.path.join(self.cpp.build.bindirs[0], "test_package")
self.run(test, env="conanrun")
self._remove_artifacts()
#####################################################################################
def _remove_artifacts(self):
for file in ["conan.lock", "conanbuildinfo.txt", "conaninfo.txt", "graph_info.json"]:
file_path = os.path.join(self.folders.source_folder, file)
if os.path.isfile(file_path):
os.remove(file_path)
shutil.rmtree(self.folders.build_folder, ignore_errors=True)
|
the-stack_0_24218 | """Base class for nodes for writing C-code to create a vxXXXNode objects.
Done by parsing the graph parameters of the xml description of the node.
"""
from xml.dom import minidom
import logging
from graphml_parser import parse_common
from graphml_parser import graphml_parser
class BaseNode:
"""Class for parsing node with the given class name.
"""
# Class-global attributes for all function node instances
border_mode_count = 0
def __init__(self):
"""Specified according to the OpenVX standard
Note that the first index is not counted
(i.e. the graph index in the node creation function)
"""
self.node_has_errors = False
self.dry_run = False
self.node_info = minidom.Node()
def reset_parameters(self, graphparser, current_node, dry_run):
"""Used to reset internal parameters before parsing is done in the subclasses"""
self.node_has_errors = False
self.dry_run = dry_run
self.node_info = graphparser.get_function_node_info(current_node)
def parse(self, graphparser, current_node, assignment_string, dry_run=False):
"""Parsing of xml and creation of dummy function node.
Note that this function should only be called
if the graph function node class was not found.
This might mean that the node name was not found,
or that the parse function of the subclass has not been implemented.
"""
self.reset_parameters(graphparser, current_node, dry_run)
parsed_string = ""
#===============
# ERROR CHECKING
#===============
# Dummy node only used if something with the naming of a function node went wrong
self.set_graph_has_errors(graphparser, current_node, "ERROR: Node implementation not found.\nMaybe node name is wrong?\n")
#=================
# PARSE PARAMETERS
#=================
parsed_string += "ERROR: Node implementation not found. Maybe node name is wrong?"
parsed_string += self.append_to_io_arrays(graphparser, current_node)
return parsed_string
def parse_border_mode(self, graphparser, current_node, node_ref_string, dry_run=False):
"""Parsing of border mode parameters from xml.
        This functionality lives in base_node because a border mode can be set on
        any node, even though a given node implementation might not support it.
        The function node C-name is given by node_ref_string.
Returns C code for setting the border mode on the function node."""
parsed_string = ""
border_mode = parse_common.parse_parameter('vx_border_mode_e', current_node)
if border_mode and not dry_run:
logging.debug('border_mode: ' + border_mode)
if BaseNode.border_mode_count == 0:
parsed_string += " {} border_mode;\n".format(get_border_mode_type(graphparser.vx_version))
BaseNode.border_mode_count += 1
if border_mode == "VX_BORDER_MODE_CONSTANT" or "VX_BORDER_MODE_REPLICATE" or "VX_BORDER_MODE_UNDEFINED":
parsed_string += " border_mode.mode = " + get_border_mode(graphparser.vx_version, border_mode) + ";\n"
if border_mode == "VX_BORDER_MODE_CONSTANT":
constant_value = parse_common.parse_parameter('constant_value', current_node)
logging.debug('constant_value: ' + constant_value)
if constant_value:
parsed_string += " border_mode" + get_border_constant(graphparser.vx_version, constant_value) + ";\n"
else:
self.set_graph_has_errors(graphparser, current_node,
"ERROR: VX_BORDER_MODE_CONSTANT requires a constant_value parameter\n")
else:
self.set_graph_has_errors(graphparser, current_node,
"ERROR: Border mode {} not found.\nMaybe spelling is wrong?\n".format(border_mode))
parsed_string += " vxSetNodeAttribute({}, {}, &border_mode, " \
"sizeof({}));\n".format(node_ref_string,
get_border_mode_attribute(graphparser.vx_version),
get_border_mode_type(graphparser.vx_version))
return parsed_string
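    # Illustrative sketch of the C code the method above emits (assuming OpenVX
    # 1.0.1, border mode "VX_BORDER_MODE_CONSTANT", constant_value "0", and using
    # "function_node" as a stand-in for the node_ref_string argument):
    #
    #     vx_border_mode_t border_mode;
    #     border_mode.mode = VX_BORDER_MODE_CONSTANT;
    #     border_mode.constant_value = 0;
    #     vxSetNodeAttribute(function_node, VX_NODE_ATTRIBUTE_BORDER_MODE,
    #                        &border_mode, sizeof(vx_border_mode_t));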
def set_graph_has_errors(self, graphparser, current_node, errorstring):
"""Sets an error text on the given function node."""
self.node_has_errors = True
graphparser.graph_has_errors = True
parse_common.set_text_on_node(graphparser.validation_output_graph, current_node, errorstring, 'Red', True)
def require_nbr_input_edges(self, graphparser, current_node, nbr_edges):
if (len(self.node_info.input_image_node_ids) != nbr_edges):
logging.warning('WARNING: Wrong number of input edges')
self.set_graph_has_errors(graphparser, current_node, "ERROR: Wrong number of input edges\n")
def require_nbr_output_edges(self, graphparser, current_node, nbr_edges):
if (len(self.node_info.output_image_node_ids) != nbr_edges):
logging.warning('WARNING: Wrong number of output edges')
self.set_graph_has_errors(graphparser, current_node, "ERROR: Wrong number of output edges\n")
def require_2_input_edges_labeled(self, graphparser, current_node):
if not (((self.node_info.input_edge_labels[0] == "in1") and (self.node_info.input_edge_labels[1] == "in2"))
or ((self.node_info.input_edge_labels[0] == "in2") and (self.node_info.input_edge_labels[1] == "in1"))):
logging.warning('WARNING: Incorrect naming of input edge labels (in1 and in2 required)')
self.set_graph_has_errors(graphparser, current_node, "ERROR: Incorrect naming of input edge labels (in1 and in2 required)\n")
def require_2_output_edges_labeled(self, graphparser, current_node):
if not (((self.node_info.output_edge_labels[0] == "out1") and (self.node_info.output_edge_labels[1] == "out2"))
or ((self.node_info.output_edge_labels[0] == "out2") and (self.node_info.output_edge_labels[1] == "out1"))):
logging.warning('WARNING: Incorrect naming of output edge labels (out1 and out2 required)')
self.set_graph_has_errors(graphparser, current_node, "ERROR: Incorrect naming of output edge labels (out1 and out2 required)\n")
def append_to_io_arrays_strip(self, graphparser, current_node):
"""Special version of append_to_io_arrays for strip mode.
Here we don't use refcounted nodes type, simply save the dynamic nodes to list.
This function assumes the node lists that current_node is compared against
can not contain duplicates of nodes (node ids).
"""
parsed_string = ""
if graphparser.strip_io: # Also set I/O images
if any(e in self.node_info.input_image_node_ids for e in
graphparser.get_indexed_names('input_image_nodes')):
parsed_string += " (io_nodes->input_nodes)[" + \
str(graphparser.get_index_for_function_node_in_list('input',
current_node)) + "] = function_node;\n"
if any(e in self.node_info.output_image_node_ids for e in
graphparser.get_indexed_names('output_image_nodes')):
parsed_string += " (io_nodes->output_nodes)[" + \
str(graphparser.get_index_for_function_node_in_list('output',
current_node)) + "] = function_node;\n"
for idx, itemlist in enumerate(graphparser.get_dynamic_function_nodes_info()):
if itemlist[0] == current_node:
parsed_string += " dynamic_nodes[" + str(idx) + "] = function_node;\n"
return parsed_string
def append_to_io_arrays(self, graphparser, current_node):
"""Appends reference to the current node in the corresp. I/O C-array if it is a I/O function node
This function assumes the node lists that current_node is compared against
can not contain duplicates of nodes (node ids).
"""
# Special function if strip_mode
if graphparser.strip_mode:
return self.append_to_io_arrays_strip(graphparser, current_node)
parsed_string = ""
if graphparser.using_refcounted_assignment_string(current_node):
parsed_string += " function_node_rc = node_rc_create(function_node);\n"
for idx, itemlist in enumerate(graphparser.get_dynamic_function_nodes_info()):
if itemlist[0] == current_node:
parsed_string += " dynamic_nodes[" + str(idx) + "] = node_rc_copy_ref(function_node_rc);\n"
if any(e in self.node_info.output_image_node_ids for e in graphparser.get_indexed_names('debug_image_nodes')):
parsed_string += " (nodes->debug_input_nodes)[" + \
str(graphparser.get_index_for_function_node_in_list('debug_input', current_node)) + "] = node_rc_copy_ref(function_node_rc);\n"
if any(e in self.node_info.input_image_node_ids for e in graphparser.get_indexed_names('debug_image_nodes')):
parsed_string += " (nodes->debug_output_nodes)[" + \
str(graphparser.get_index_for_function_node_in_list('debug_output', current_node)) + "] = node_rc_copy_ref(function_node_rc);\n"
return parsed_string
def parse_input_parameter(self, graphparser, index, node_info):
"""Creates C-code node input image parameter for a given index in node_info.input_data_node_ids[]."""
if node_info.input_image_node_ids[index] in graphparser.get_indexed_names('virtual_image_nodes'):
return ", internal_images[" + str(graphparser.get_indexed_names('virtual_image_nodes').index(node_info.input_image_node_ids[index])) + "]"
elif node_info.input_image_node_ids[index] in graphparser.get_indexed_names('input_image_nodes'):
return ", input_images[" + str(graphparser.get_indexed_names('input_image_nodes').index(node_info.input_image_node_ids[index])) + "]"
elif node_info.input_image_node_ids[index] in graphparser.get_indexed_names('output_image_nodes'):
return ", output_images[" + str(graphparser.get_indexed_names('output_image_nodes').index(node_info.input_image_node_ids[index])) + "]"
else:
return "ERROR: Input parameter missing!!!\n\n"
def parse_output_parameter(self, graphparser, index, node_info):
"""Creates C-code node output image parameter for a given index in node_info.output_data_node_ids[]."""
if node_info.output_image_node_ids[index] in graphparser.get_indexed_names('virtual_image_nodes'):
return ", internal_images[" + str(graphparser.get_indexed_names('virtual_image_nodes').index(node_info.output_image_node_ids[index])) + "]"
elif node_info.output_image_node_ids[index] in graphparser.get_indexed_names('input_image_nodes'):
return ", input_images[" + str(graphparser.get_indexed_names('input_image_nodes').index(node_info.output_image_node_ids[index])) + "]"
elif node_info.output_image_node_ids[index] in graphparser.get_indexed_names('output_image_nodes'):
return ", output_images[" + str(graphparser.get_indexed_names('output_image_nodes').index(node_info.output_image_node_ids[index])) + "]"
else:
return "ERROR: Output parameter missing!!!\n\n"
def parse_input_parameters(self, graphparser, current_node):
"""Creates the C-code node input image parameters."""
parsed_string = ""
if (len(self.node_info.input_image_node_ids) == 1):
parsed_string += self.parse_input_parameter(graphparser, 0, self.node_info)
elif (len(self.node_info.input_image_node_ids) == 2):
if 'in1' in self.node_info.input_edge_labels and 'in2' in self.node_info.input_edge_labels:
parsed_string += self.parse_input_parameter(graphparser, self.node_info.input_edge_labels.index('in1'), self.node_info)
parsed_string += self.parse_input_parameter(graphparser, self.node_info.input_edge_labels.index('in2'), self.node_info)
else:
logging.warning('WARNING: missing label on input node!\n')
return parsed_string
def parse_output_parameters(self, graphparser, current_node):
"""Creates the C-code node input image parameters."""
parsed_string = ""
if (len(self.node_info.output_image_node_ids) == 1):
parsed_string += self.parse_output_parameter(graphparser, 0, self.node_info)
elif (len(self.node_info.output_image_node_ids) == 2):
if 'out1' in self.node_info.output_edge_labels and 'out2' in self.node_info.output_edge_labels:
parsed_string += self.parse_output_parameter(graphparser, self.node_info.output_edge_labels.index('out1'), self.node_info)
parsed_string += self.parse_output_parameter(graphparser, self.node_info.output_edge_labels.index('out2'), self.node_info)
else:
logging.warning('WARNING: missing label on output node!\n')
return parsed_string
def parse_parameter(self, graphparser, parameter, current_node):
"""Creates a C-code function node parameter
Searches for a parameter that matches the given type in parameter.
The first occurrence found will be used. If the function node creation function has multiple
parameters of the given type, a more general function must be called.
"""
parameter_value = parse_common.parse_parameter(parameter, current_node)
if parameter_value != "":
parameter_value = ", " + parameter_value
return parameter_value
def parse_single_parameter(self, graphparser, parameter, current_node):
"""Creates a C-code function node parameter
Searches for a parameter that matches the given type in parameter.
The first occurrence found will be used. If the function node creation function has multiple
parameters of the given type, a more general function must be called.
"""
return parse_common.parse_parameter(parameter, current_node)
# Helper functions to generate code for different OpenVX versions
def get_border_mode_type(vx_version):
if vx_version == graphml_parser.VX_VERSION_1_0_1:
return "vx_border_mode_t"
else:
return "vx_border_t"
def get_border_mode(vx_version, border_mode):
if vx_version == graphml_parser.VX_VERSION_1_0_1:
border_modes = {"VX_BORDER_MODE_CONSTANT" : "VX_BORDER_MODE_CONSTANT",
"VX_BORDER_MODE_REPLICATE" : "VX_BORDER_MODE_REPLICATE",
"VX_BORDER_MODE_UNDEFINED" : "VX_BORDER_MODE_UNDEFINED"}
else:
border_modes = {"VX_BORDER_MODE_CONSTANT" : "VX_BORDER_CONSTANT",
"VX_BORDER_MODE_REPLICATE" : "VX_BORDER_REPLICATE",
"VX_BORDER_MODE_UNDEFINED" : "VX_BORDER_UNDEFINED"}
return border_modes[border_mode]
def get_border_constant(vx_version, constant_value):
    # TODO: This function needs to parse the node if we need border modes for non-U8 operations!
    if vx_version == graphml_parser.VX_VERSION_1_0_1:
return ".constant_value = " + constant_value
else:
return ".constant_value.U8 = " + constant_value
def get_border_mode_attribute(vx_version):
if vx_version == graphml_parser.VX_VERSION_1_0_1:
return "VX_NODE_ATTRIBUTE_BORDER_MODE"
else:
return "VX_NODE_BORDER"
|
the-stack_0_24220 | # -*- coding: utf-8 -*-
import json
from flask import Blueprint, request, redirect, url_for
from common.auth.session import Session
from common.worker.command import Command
from uuid import uuid4
bp_worker_client = Blueprint("worker_client", __name__, template_folder="/templates")
@bp_worker_client.route("/authKey", methods = ['GET'])
def worker_client_authkey():
    # TODO: add client IP validation later
if 'HTTP_X_FORWARDED_FOR' in request.environ:
ip = request.environ['HTTP_X_FORWARDED_FOR'].split(',')[0]
elif 'REMOTE_ADDR' in request.environ:
ip = request.list_storage_class([request.environ['REMOTE_ADDR']])[0]
    token = str(uuid4())
    result = {
        "ip": ip,
        # include the generated token so the /authKey endpoint actually returns a key
        "token": token
    }
print(result)
return json.dumps(result)
@bp_worker_client.route("/req/sync", methods =['POST'])
def client_command_sync_clients():
if not Session().checkSession():
return redirect(url_for('index_sign.signin_view'))
command = Command()
command.syncClinetCount()
result = {"result": True}
return json.dumps(result)
@bp_worker_client.route("/req/sync/device", methods =['POST'])
def client_command_sync_clients_device():
if not Session().checkSession():
return redirect(url_for('index_sign.signin_view'))
command = Command()
command.syncClinetCount()
result = {"result": True}
return json.dumps(result)
@bp_worker_client.route("/get/count", methods =['POST'])
def client_command_get_command_to_client_count():
if not Session().checkSession():
return redirect(url_for('index_sign.signin_view'))
command = Command()
clients = command.getClientList()
commands = command.getCommandList()
connectToClient = {}
for server in commands:
connectToClient[server] = 0
for client in clients:
if clients[client] in list(connectToClient.keys()):
connectToClient[clients[client]] = connectToClient[clients[client]] + 1
result = {"result": True, "data":connectToClient}
return json.dumps(result)
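# Example of the JSON returned above (server names and counts are illustrative only):
#   {"result": true, "data": {"command-server-1": 3, "command-server-2": 0}}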
@bp_worker_client.route("/get/list", methods =['POST'])
def client_command_get_command_to_client_list():
if not Session().checkSession():
return redirect(url_for('index_sign.signin_view'))
command = Command()
clients = command.getClientList()
data = []
for client in clients:
data.append(client)
result = {"result": True, "data":data}
return json.dumps(result) |
the-stack_0_24222 | import simpleReading as sr
import biHorarioReading as br
import triHorarioReading as tr
from datetime import date
class powerSpot:
def __init__(self, name, cpe, nif, typeOfMeter):
self.name = name
self.cpe = cpe
self.nif = nif
self.typeOfMeter = typeOfMeter
readingObject = None
if typeOfMeter == 1:
readingObject = sr.makeSimpleReading(0)
elif typeOfMeter == 2:
readingObject = br.makeBiHorarioReading(0,0)
elif typeOfMeter == 3:
readingObject = tr.makeTriHorarioReading(0,0,0)
self.lastReading = readingObject
self.lastReadingDate = date.today().strftime("%d/%m/%Y")
def updateReading(self, *argv):
try:
self.lastReading.updateReading(*argv)
self.lastReadingDate = date.today().strftime("%d/%m/%Y")
return True
except ValueError:
return False
def getLastReading(self):
reading = self.lastReading.getLastReading()
reading["data"] = self.lastReadingDate
return reading
def getReadingArgs(self):
if type(self.lastReading) is sr.simpleReading:
return ["Simples"]
elif type(self.lastReading) is br.biHorarioReading:
return ["Fora Vazio", "Vazio"]
elif type(self.lastReading) is tr.triHorarioReading:
return ["Ponta", "Vazio", "Cheias"] |
the-stack_0_24225 | #
# Copyright (c) 2019-2021, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
###############################################################################
# How these tests work #
###############################################################################
#
# This test file contains some unit tests and an integration test.
#
# The units tests use the same parameters with cuML and the reference
# implementation to compare strict parity of specific components.
#
# The integration tests compare that, when fitting and forecasting separately,
# our implementation performs better or approximately as good as the reference
# (it mostly serves to test that we don't have any regression)
#
# Note that there are significant differences between our implementation and
# the reference, and perfect parity cannot be expected for integration tests.
import pytest
import numpy as np
import os
import warnings
import pandas as pd
from scipy.optimize.optimize import approx_fprime
from sklearn.model_selection import train_test_split
import statsmodels.api as sm
import cudf
import cuml.tsa.arima as arima
from cuml.common.input_utils import input_to_host_array
from cuml.test.utils import stress_param
###############################################################################
# Test data #
###############################################################################
class ARIMAData:
"""Contains a dataset name and associated metadata
"""
def __init__(self, batch_size, n_obs, n_test, dataset,
tolerance_integration):
self.batch_size = batch_size
self.n_obs = n_obs
self.n_test = n_test
self.dataset = dataset
self.tolerance_integration = tolerance_integration
self.n_train = n_obs - n_test
# ARIMA(1,0,1) with intercept
test_101c = ARIMAData(
batch_size=8,
n_obs=15,
n_test=2,
dataset="long_term_arrivals_by_citizenship",
tolerance_integration=0.01
)
# ARIMA(0,0,2) with intercept
test_002c = ARIMAData(
batch_size=7,
n_obs=20,
n_test=2,
dataset="net_migrations_auckland_by_age",
tolerance_integration=0.01
)
# ARIMA(0,1,0) with intercept
test_010c = ARIMAData(
batch_size=4,
n_obs=17,
n_test=2,
dataset="cattle",
tolerance_integration=0.01
)
# ARIMA(1,1,0)
test_110 = ARIMAData(
batch_size=1,
n_obs=137,
n_test=5,
dataset="police_recorded_crime",
tolerance_integration=0.01
)
# ARIMA(0,1,1) with intercept
test_011c = ARIMAData(
batch_size=16,
n_obs=28,
n_test=2,
dataset="deaths_by_region",
tolerance_integration=0.05
)
# ARIMA(1,2,1) with intercept
test_121c = ARIMAData(
batch_size=2,
n_obs=137,
n_test=10,
dataset="population_estimate",
tolerance_integration=0.01
)
# ARIMA(1,1,1) with intercept (missing observations)
test_111c_missing = ARIMAData(
batch_size=2,
n_obs=137,
n_test=10,
dataset="population_estimate_missing",
tolerance_integration=0.01
)
# ARIMA(1,0,1)(1,1,1)_4
test_101_111_4 = ARIMAData(
batch_size=3,
n_obs=101,
n_test=10,
dataset="alcohol",
tolerance_integration=0.01
)
# ARIMA(5,1,0)
test_510 = ARIMAData(
batch_size=3,
n_obs=101,
n_test=10,
dataset="alcohol",
tolerance_integration=0.02
)
# ARIMA(1,1,1)(2,0,0)_4 with intercept
test_111_200_4c = ARIMAData(
batch_size=14,
n_obs=123,
n_test=10,
dataset="hourly_earnings_by_industry",
tolerance_integration=0.01
)
# ARIMA(1,1,1)(2,0,0)_4 with intercept (missing observations)
test_111_200_4c_missing = ARIMAData(
batch_size=14,
n_obs=123,
n_test=10,
dataset="hourly_earnings_by_industry_missing",
tolerance_integration=0.01
)
# ARIMA(1,1,2)(0,1,2)_4
test_112_012_4 = ARIMAData(
batch_size=2,
n_obs=179,
n_test=10,
dataset="passenger_movements",
tolerance_integration=0.001
)
# ARIMA(1,1,1)(1,1,1)_12
test_111_111_12 = ARIMAData(
batch_size=12,
n_obs=279,
n_test=20,
dataset="guest_nights_by_region",
tolerance_integration=0.001
)
# ARIMA(1,1,1)(1,1,1)_12 (missing observations)
test_111_111_12_missing = ARIMAData(
batch_size=12,
n_obs=279,
n_test=20,
dataset="guest_nights_by_region_missing",
tolerance_integration=0.001
)
# List pairing a tuple of model parameters with a test case
# (a test case could be used with different models)
# (p, d, q, P, D, Q, s, k) -> ARIMAData
test_data = [
# ((1, 0, 1, 0, 0, 0, 0, 1), test_101c),
((0, 0, 2, 0, 0, 0, 0, 1), test_002c),
((0, 1, 0, 0, 0, 0, 0, 1), test_010c),
((1, 1, 0, 0, 0, 0, 0, 0), test_110),
((0, 1, 1, 0, 0, 0, 0, 1), test_011c),
((1, 2, 1, 0, 0, 0, 0, 1), test_121c),
((1, 1, 1, 0, 0, 0, 0, 1), test_111c_missing),
((1, 0, 1, 1, 1, 1, 4, 0), test_101_111_4),
((5, 1, 0, 0, 0, 0, 0, 0), test_510),
((1, 1, 1, 2, 0, 0, 4, 1), test_111_200_4c),
((1, 1, 1, 2, 0, 0, 4, 1), test_111_200_4c_missing),
((1, 1, 2, 0, 1, 2, 4, 0), test_112_012_4),
stress_param((1, 1, 1, 1, 1, 1, 12, 0), test_111_111_12),
stress_param((1, 1, 1, 1, 1, 1, 12, 0), test_111_111_12_missing),
]
# Dictionary for lazy-loading of datasets
# (name, dtype) -> (pandas dataframe, cuDF dataframe)
lazy_data = {}
# Dictionary for lazy-evaluation of reference fits
# (p, d, q, P, D, Q, s, k, name, dtype) -> SARIMAXResults
lazy_ref_fit = {}
def extract_order(tup):
"""Extract the order from a tuple of parameters"""
p, d, q, P, D, Q, s, k = tup
return (p, d, q), (P, D, Q, s), k
data_path = os.path.join(os.path.dirname(
os.path.abspath(__file__)), 'ts_datasets')
def get_dataset(data, dtype):
"""Load a dataset with a given dtype or return a previously loaded dataset
"""
key = (data.dataset, np.dtype(dtype).name)
if key not in lazy_data:
y = pd.read_csv(
os.path.join(data_path, "{}.csv".format(data.dataset)),
usecols=range(1, data.batch_size + 1), dtype=dtype)
y_train, y_test = train_test_split(y, test_size=data.n_test,
shuffle=False)
y_train_cudf = cudf.from_pandas(y_train).fillna(np.nan)
y_test_cudf = cudf.from_pandas(y_test)
lazy_data[key] = (y_train, y_train_cudf, y_test, y_test_cudf)
return lazy_data[key]
def get_ref_fit(data, order, seasonal_order, intercept, dtype):
"""Compute a reference fit of a dataset with the given parameters and dtype
or return a previously computed fit
"""
y_train, *_ = get_dataset(data, dtype)
key = order + seasonal_order + \
(intercept, data.dataset, np.dtype(dtype).name)
if key not in lazy_ref_fit:
ref_model = [sm.tsa.SARIMAX(y_train[col], order=order,
seasonal_order=seasonal_order,
trend='c' if intercept else 'n')
for col in y_train.columns]
with warnings.catch_warnings():
warnings.filterwarnings("ignore")
lazy_ref_fit[key] = [model.fit(disp=0) for model in ref_model]
return lazy_ref_fit[key]
###############################################################################
# Utility functions #
###############################################################################
def mase(y_train, y_test, y_fc, s):
y_train_np = input_to_host_array(y_train).array
y_test_np = input_to_host_array(y_test).array
y_fc_np = input_to_host_array(y_fc).array
diff = np.abs(y_train_np[s:] - y_train_np[:-s])
scale = np.zeros(y_train_np.shape[1])
    for ib in range(y_train_np.shape[1]):
        # per-series scale: mean absolute seasonal difference, ignoring NaN gaps
        scale[ib] = diff[~np.isnan(diff[:, ib]), ib].mean()
error = np.abs(y_fc_np - y_test_np).mean(axis=0)
return np.mean(error / scale)
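# Conceptual single-series example of the MASE computation above (illustrative
# numbers, ignoring the 2-D batching): with s = 1, y_train = [1, 2, 4],
# y_test = [5] and y_fc = [6], the naive scale is mean(|2-1|, |4-2|) = 1.5,
# the forecast error is |6-5| = 1, so MASE = 1 / 1.5 ~= 0.67.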
def fill_interpolation(df_in):
np_arr = df_in.to_numpy()
for ib in range(np_arr.shape[1]):
n = len(np_arr)
start, end = -1, 0
while start < n - 1:
if not np.isnan(np_arr[start+1, ib]):
start += 1
end = start + 1
elif end < n and np.isnan(np_arr[end, ib]):
end += 1
else:
if start == -1:
np_arr[:end, ib] = np_arr[end, ib]
elif end == n:
np_arr[start+1:, ib] = np_arr[start, ib]
else:
for j in range(start+1, end):
coef = (j - start) / (end - start)
np_arr[j, ib] = (
(1. - coef) * np_arr[start, ib]
+ coef * np_arr[end, ib]
)
start = end
end = start + 1
return pd.DataFrame(np_arr, columns=df_in.columns)
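# Example of the gap filling above on a single column:
#   [nan, 1, nan, 3, nan] -> [1., 1., 2., 3., 3.]
# leading/trailing NaNs copy the nearest observation, interior NaNs are linearly
# interpolated between the surrounding observed values.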
###############################################################################
# Tests #
###############################################################################
@pytest.mark.parametrize('key, data', test_data)
@pytest.mark.parametrize('dtype', [np.float64])
def test_integration(key, data, dtype):
"""Full integration test: estimate, fit, forecast
"""
order, seasonal_order, intercept = extract_order(key)
s = max(1, seasonal_order[3])
y_train, y_train_cudf, y_test, _ = get_dataset(data, dtype)
# Get fit reference model
ref_fits = get_ref_fit(data, order, seasonal_order, intercept, dtype)
# Create and fit cuML model
cuml_model = arima.ARIMA(y_train_cudf,
order=order,
seasonal_order=seasonal_order,
fit_intercept=intercept,
output_type='numpy')
cuml_model.fit()
# Predict
y_fc_cuml = cuml_model.forecast(data.n_test)
y_fc_ref = np.zeros((data.n_test, data.batch_size))
for i in range(data.batch_size):
y_fc_ref[:, i] = ref_fits[i].get_prediction(
data.n_train, data.n_obs - 1).predicted_mean
# Compare results: MASE must be better or within the tolerance margin
mase_ref = mase(y_train, y_test, y_fc_ref, s)
mase_cuml = mase(y_train, y_test, y_fc_cuml, s)
assert mase_cuml < mase_ref * (1. + data.tolerance_integration)
def _statsmodels_to_cuml(ref_fits, cuml_model, order, seasonal_order,
intercept, dtype):
"""Utility function to transfer the parameters from a statsmodels'
SARIMAXResults object to a cuML ARIMA object.
.. note:: be cautious with the intercept, it is not always equivalent
in statsmodels and cuML models (it depends on the order).
"""
nb = cuml_model.batch_size
N = cuml_model.complexity
x = np.zeros(nb * N, dtype=np.float64)
for ib in range(nb):
x[ib*N:(ib+1)*N] = ref_fits[ib].params[:N]
cuml_model.unpack(x)
def _predict_common(key, data, dtype, start, end, num_steps=None, level=None,
simple_differencing=True):
"""Utility function used by test_predict and test_forecast to avoid
code duplication.
"""
order, seasonal_order, intercept = extract_order(key)
_, y_train_cudf, *_ = get_dataset(data, dtype)
# Get fit reference model
ref_fits = get_ref_fit(data, order, seasonal_order, intercept, dtype)
# Create cuML model
cuml_model = arima.ARIMA(y_train_cudf,
order=order,
seasonal_order=seasonal_order,
fit_intercept=intercept,
output_type='numpy',
simple_differencing=simple_differencing)
# Feed the parameters to the cuML model
_statsmodels_to_cuml(ref_fits, cuml_model, order, seasonal_order,
intercept, dtype)
# Predict or forecast
# Reference (statsmodels)
ref_preds = np.zeros((end - start, data.batch_size))
for i in range(data.batch_size):
ref_preds[:, i] = ref_fits[i].get_prediction(
start, end - 1).predicted_mean
if level is not None:
ref_lower = np.zeros((end - start, data.batch_size))
ref_upper = np.zeros((end - start, data.batch_size))
for i in range(data.batch_size):
temp_pred = ref_fits[i].get_forecast(num_steps)
ci = temp_pred.summary_frame(alpha=1-level)
ref_lower[:, i] = ci["mean_ci_lower"].to_numpy()
ref_upper[:, i] = ci["mean_ci_upper"].to_numpy()
# cuML
if num_steps is None:
cuml_pred = cuml_model.predict(start, end)
elif level is not None:
cuml_pred, cuml_lower, cuml_upper = \
cuml_model.forecast(num_steps, level)
else:
cuml_pred = cuml_model.forecast(num_steps)
# Compare results
np.testing.assert_allclose(cuml_pred, ref_preds, rtol=0.001, atol=0.01)
if level is not None:
np.testing.assert_allclose(
cuml_lower, ref_lower, rtol=0.005, atol=0.01)
np.testing.assert_allclose(
cuml_upper, ref_upper, rtol=0.005, atol=0.01)
@pytest.mark.parametrize('key, data', test_data)
@pytest.mark.parametrize('dtype', [np.float64])
@pytest.mark.parametrize('simple_differencing', [True, False])
def test_predict_in(key, data, dtype, simple_differencing):
"""Test in-sample prediction against statsmodels (with the same values
for the model parameters)
"""
_predict_common(key, data, dtype, data.n_train // 2, data.n_obs,
simple_differencing=simple_differencing)
@pytest.mark.parametrize('key, data', test_data)
@pytest.mark.parametrize('dtype', [np.float64])
@pytest.mark.parametrize('simple_differencing', [True, False])
def test_predict_inout(key, data, dtype, simple_differencing):
"""Test in- and ouf-of-sample prediction against statsmodels (with the
same values for the model parameters)
"""
_predict_common(key, data, dtype, data.n_train // 2, data.n_train,
simple_differencing=simple_differencing)
@pytest.mark.parametrize('key, data', test_data)
@pytest.mark.parametrize('dtype', [np.float64])
@pytest.mark.parametrize('simple_differencing', [True, False])
def test_forecast(key, data, dtype, simple_differencing):
"""Test out-of-sample forecasting against statsmodels (with the same
values for the model parameters)
"""
_predict_common(key, data, dtype, data.n_train, data.n_obs, data.n_test,
simple_differencing=simple_differencing)
@pytest.mark.parametrize('key, data', test_data)
@pytest.mark.parametrize('dtype', [np.float64])
@pytest.mark.parametrize('level', [0.5, 0.95])
def test_intervals(key, data, dtype, level):
"""Test forecast confidence intervals against statsmodels (with the same
values for the model parameters)
"""
_predict_common(key, data, dtype, data.n_train, data.n_obs, data.n_test,
level)
@pytest.mark.parametrize('key, data', test_data)
@pytest.mark.parametrize('dtype', [np.float64])
@pytest.mark.parametrize('simple_differencing', [True, False])
def test_loglikelihood(key, data, dtype, simple_differencing):
"""Test loglikelihood against statsmodels (with the same values for the
model parameters)
"""
order, seasonal_order, intercept = extract_order(key)
_, y_train_cudf, *_ = get_dataset(data, dtype)
# Get fit reference model
ref_fits = get_ref_fit(data, order, seasonal_order, intercept, dtype)
# Create cuML model
cuml_model = arima.ARIMA(y_train_cudf,
order=order,
seasonal_order=seasonal_order,
fit_intercept=intercept,
simple_differencing=simple_differencing)
# Feed the parameters to the cuML model
_statsmodels_to_cuml(ref_fits, cuml_model, order, seasonal_order,
intercept, dtype)
# Compute loglikelihood
cuml_llf = cuml_model.llf
ref_llf = np.array([ref_fit.llf for ref_fit in ref_fits])
# Compare results
np.testing.assert_allclose(cuml_llf, ref_llf, rtol=0.01, atol=0.01)
@pytest.mark.parametrize('key, data', test_data)
@pytest.mark.parametrize('dtype', [np.float64])
def test_gradient(key, data, dtype):
"""
Test batched gradient implementation against scipy non-batched
gradient.
.. note:: it doesn't test that the loglikelihood is correct!
"""
order, seasonal_order, intercept = extract_order(key)
p, _, q = order
P, _, Q, _ = seasonal_order
N = p + P + q + Q + intercept + 1
h = 1e-8
_, y_train_cudf, *_ = get_dataset(data, dtype)
# Create cuML model
cuml_model = arima.ARIMA(y_train_cudf,
order=order,
seasonal_order=seasonal_order,
fit_intercept=intercept)
# Get an estimate of the parameters and pack them into a vector
cuml_model._estimate_x0()
x = cuml_model.pack()
# Compute the batched loglikelihood gradient
batched_grad = cuml_model._loglike_grad(x, h)
# Iterate over the batch to compute a reference gradient
scipy_grad = np.zeros(N * data.batch_size)
for i in range(data.batch_size):
# Create a model with only the current series
model_i = arima.ARIMA(y_train_cudf[y_train_cudf.columns[i]],
order=order,
seasonal_order=seasonal_order,
fit_intercept=intercept)
def f(x):
return model_i._loglike(x)
scipy_grad[N * i: N * (i + 1)] = \
approx_fprime(x[N * i: N * (i + 1)], f, h)
# Compare
np.testing.assert_allclose(batched_grad, scipy_grad, rtol=0.001, atol=0.01)
@pytest.mark.parametrize('key, data', test_data)
@pytest.mark.parametrize('dtype', [np.float64])
def test_start_params(key, data, dtype):
"""Test starting parameters against statsmodels
"""
order, seasonal_order, intercept = extract_order(key)
y_train, y_train_cudf, *_ = get_dataset(data, dtype)
# fillna for reference to match cuML initial estimation strategy
y_train_nona = fill_interpolation(y_train)
# Create models
cuml_model = arima.ARIMA(y_train_cudf,
order=order,
seasonal_order=seasonal_order,
fit_intercept=intercept)
ref_model = [sm.tsa.SARIMAX(y_train_nona[col], order=order,
seasonal_order=seasonal_order,
trend='c' if intercept else 'n')
for col in y_train_nona.columns]
# Estimate reference starting parameters
N = cuml_model.complexity
nb = data.batch_size
x_ref = np.zeros(N * nb, dtype=dtype)
for ib in range(nb):
with warnings.catch_warnings():
warnings.filterwarnings("ignore")
x_ref[ib*N:(ib+1)*N] = ref_model[ib].start_params[:N]
# Estimate cuML starting parameters
cuml_model._estimate_x0()
x_cuml = cuml_model.pack()
# Compare results
np.testing.assert_allclose(x_cuml, x_ref, rtol=0.001, atol=0.01)
|
the-stack_0_24226 | class Solution:
def longestCommonSub(self, a, n, b, m):
dp = [[0]*(m+1) for i in range(n+1)]
for i in range(1, n+1):
for j in range(1, m+1):
if a[i-1] == b[j-1]:
dp[i][j] = dp[i-1][j-1] + 1
else:
dp[i][j] = max(dp[i-1][j], dp[i][j-1])
return dp[n][m]
def sequencePatternMatching(self, a:str, b:str)->bool:
n = len(a)
m = len(b)
        lcs = self.longestCommonSub(a, n, b, m)
        # `a` is a subsequence of `b` exactly when the LCS covers all of `a`
        return lcs == n
if __name__ == '__main__':
a = "AXY"
b = "ADXCPY"
print(Solution().sequencePatternMatching(a, b)) |
the-stack_0_24228 | fp16 = dict(loss_scale=512.)
# model settings
model = dict(
type='CascadeRCNN',
num_stages=3,
pretrained=None,
backbone=dict(
type='ResNeXt',
depth=101,
groups=64,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
style='pytorch'),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
num_outs=5),
rpn_head=dict(
type='RPNHead',
in_channels=256,
feat_channels=256,
anchor_scales=[8],
anchor_ratios=[0.5, 1.0, 2.0],
anchor_strides=[4, 8, 16, 32, 64],
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0],
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)),
bbox_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', out_size=7, sample_num=2),
out_channels=256,
featmap_strides=[4, 8, 16, 32]),
bbox_head=[
dict(
type='SharedFCBBoxHead',
num_fcs=2,
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=5,
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2],
reg_class_agnostic=True,
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)),
dict(
type='SharedFCBBoxHead',
num_fcs=2,
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=5,
target_means=[0., 0., 0., 0.],
target_stds=[0.05, 0.05, 0.1, 0.1],
reg_class_agnostic=True,
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)),
dict(
type='SharedFCBBoxHead',
num_fcs=2,
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=5,
target_means=[0., 0., 0., 0.],
target_stds=[0.033, 0.033, 0.067, 0.067],
reg_class_agnostic=True,
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))
])
# model training and testing settings
train_cfg = dict(
rpn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
allowed_border=0,
pos_weight=-1,
debug=False),
rpn_proposal=dict(
nms_across_levels=False,
nms_pre=2000,
nms_post=2000,
max_num=2000,
nms_thr=0.7,
min_bbox_size=0),
rcnn=[
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0.5,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
pos_weight=-1,
debug=False),
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.6,
neg_iou_thr=0.6,
min_pos_iou=0.6,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
pos_weight=-1,
debug=False),
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.7,
min_pos_iou=0.7,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
pos_weight=-1,
debug=False)
],
stage_loss_weights=[1, 0.5, 0.25])
test_cfg = dict(
rpn=dict(
nms_across_levels=False,
nms_pre=1000,
nms_post=1000,
max_num=1000,
nms_thr=0.7,
min_bbox_size=0),
rcnn=dict(
score_thr=0.0001, nms=dict(type='soft_nms', iou_thr=0.5, min_score=0.0001), max_per_img=200))
# dataset settings
dataset_type = 'Underwater'
data_root = 'data/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
albu_train_transforms = [
dict(
type='OneOf',
transforms=[
dict(type='MedianBlur'),
],
p=1),
]
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', img_scale=(4096, 800), keep_ratio=True),
#dict(type='Resize', img_scale=[(4096, 600), (4096, 1000)],multiscale_mode='range', keep_ratio=True),
#dict(type='Pad', size_divisor=32),
#dict(type='RandomFlip', flip_ratio=0.5),
dict(
type='Albu',
transforms=albu_train_transforms,
bbox_params=dict(
type='BboxParams',
format='pascal_voc',
label_fields=['gt_labels'],
min_visibility=0.0,
filter_lost_elements=True),
keymap={
'img': 'image',
'gt_bboxes': 'bboxes'
},
update_pad_shape=False,
skip_img_without_anno=True),
#dict(type='Normalize', **img_norm_cfg),
dict(type='DefaultFormatBundle'),
dict(
type='Collect',
keys=['img', 'gt_bboxes', 'gt_labels'],
meta_keys=('filename', 'ori_shape', 'img_shape',
'pad_shape', 'scale_factor'))
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=[(3072, 450), (3072, 600), (3072, 750),(4096, 600), (4096, 800), (4096, 1000),(5120, 750), (5120, 1000), (5120, 1250), (6144, 900), (6144, 1200), (6144, 1500)],
flip=True,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
imgs_per_gpu=1,
workers_per_gpu=2,
train=dict(
type=dataset_type,
ann_file='data/train/annotations/train.json',
img_prefix=data_root + 'train/image/',
pipeline=train_pipeline),
test=dict(
type=dataset_type,
ann_file='data/train/annotations/testA.json',
img_prefix=data_root + 'test-A-image/',
pipeline=test_pipeline))
# optimizer
optimizer = dict(type='SGD', lr=0.005, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=1.0 / 3,
step=[8, 11])
checkpoint_config = dict(interval=12)
# yapf:disable
log_config = dict(
interval=50,
hooks=[
dict(type='TextLoggerHook'),
# dict(type='TensorboardLoggerHook')
])
# yapf:enable
# runtime settings
total_epochs = 12
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/cas_x101_64x4d_fpn_htc_reTrain'
load_from = 'data/pretrained/htc_dconv_c3-c5_mstrain_400_1400_x101_64x4d_fpn_20e_20190408-0e50669c.pth'
resume_from = None
workflow = [('train', 1)]
|
the-stack_0_24229 | #!/usr/bin/env python3
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Accept or Reject VPC Peering request
"""
import openstack
openstack.enable_logging(True)
conn = openstack.connect(cloud='otc')
peering = conn.vpc.get_peering("peering_id")
# accept VPC peering request
set_status = 'accept'
peering = conn.vpc.set_peering(peering=peering, set_status=set_status)
print(peering)
# Reject VPC peering request
set_status = 'reject'
peering = conn.vpc.set_peering(peering=peering, set_status=set_status)
print(peering)
|
the-stack_0_24230 | from __future__ import annotations
import math
from itertools import product
from typing import Any
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.patches import PathPatch, Wedge
from matplotlib.path import Path
from pymatgen.core import Structure
from pymatviz.utils import NumArray, covalent_radii, jmol_colors
# plot_structure_2d() and its helpers get_rot_matrix() and unit_cell_to_lines() were
# inspired by ASE https://wiki.fysik.dtu.dk/ase/ase/visualize/visualize.html#matplotlib
def get_rot_matrix(angles: str, rotation: NumArray = np.eye(3)) -> NumArray:
"""Convert Euler angles to a rotation matrix.
Note the order of angles matters. 50x,40z != 40z,50x.
Args:
angles (str): Euler angles (in degrees) formatted as '-10y,50x,120z'
rotation (NumArray, optional): Initial rotation matrix. Defaults to identity
matrix.
Returns:
NumArray: 3d rotation matrix.
"""
if angles == "":
return rotation.copy() # return initial rotation matrix if no angles
for i, a in [
("xyz".index(s[-1]), math.radians(float(s[:-1]))) for s in angles.split(",")
]:
s = math.sin(a)
c = math.cos(a)
if i == 0:
rotation = np.dot(rotation, [(1, 0, 0), (0, c, s), (0, -s, c)])
elif i == 1:
rotation = np.dot(rotation, [(c, 0, -s), (0, 1, 0), (s, 0, c)])
else:
rotation = np.dot(rotation, [(c, s, 0), (-s, c, 0), (0, 0, 1)])
return rotation
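# Small usage sketch: positions are rotated elsewhere in this module with
# `np.dot(positions, rot_matrix)`, so a 90 degree rotation about z maps x onto y:
#   np.allclose(np.dot([1., 0., 0.], get_rot_matrix("90z")), [0., 1., 0.])  # True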
def unit_cell_to_lines(cell: NumArray) -> tuple[NumArray, NumArray, NumArray]:
"""Convert lattice vectors to plot lines.
Args:
cell (NumArray): Lattice vectors.
Returns:
tuple[NumArray, NumArray, NumArray]:
- Lines
- z-indices that sort plot elements into out-of-plane layers
- lines used to plot the unit cell
"""
n_lines = 0
segments = []
for c in range(3):
norm = math.sqrt(sum(cell[c] ** 2))
segment = max(2, int(norm / 0.3))
segments.append(segment)
n_lines += 4 * segment
lines = np.empty((n_lines, 3))
z_indices = np.empty(n_lines, int)
unit_cell_lines = np.zeros((3, 3))
n1 = 0
for c in range(3):
segment = segments[c]
dd = cell[c] / (4 * segment - 2)
unit_cell_lines[c] = dd
P = np.arange(1, 4 * segment + 1, 4)[:, None] * dd
z_indices[n1:] = c
for i, j in [(0, 0), (0, 1), (1, 0), (1, 1)]:
n2 = n1 + segment
lines[n1:n2] = P + i * cell[c - 2] + j * cell[c - 1]
n1 = n2
return lines, z_indices, unit_cell_lines
def plot_structure_2d(
struct: Structure,
ax: plt.Axes = None,
rotation: str = "10x,10y,0z",
atomic_radii: float | dict[str, float] | None = None,
colors: dict[str, str | list[float]] = None,
scale: float = 1,
show_unit_cell: bool = True,
site_labels: bool | dict[str, str | float] | list[str | float] = True,
label_kwargs: dict[str, Any] = {"fontsize": 14},
) -> plt.Axes:
"""Plot pymatgen structure object in 2d. Uses matplotlib.
Inspired by ASE's ase.visualize.plot.plot_atoms()
https://wiki.fysik.dtu.dk/ase/ase/visualize/visualize.html#matplotlib
pymatviz aims to give similar output to ASE but supports disordered structures and
avoids the conversion hassle of AseAtomsAdaptor.get_atoms(pmg_struct).
For example, these two snippets should give very similar output:
```py
from pymatgen.ext.matproj import MPRester
mp_19017 = MPRester().get_structure_by_material_id("mp-19017")
# ASE
from ase.visualize.plot import plot_atoms
from pymatgen.io.ase import AseAtomsAdaptor
plot_atoms(AseAtomsAdaptor().get_atoms(mp_19017), rotation="10x,10y,0z", radii=0.5)
# pymatviz
from pymatviz import plot_structure_2d
plot_structure_2d(mp_19017)
```
Args:
struct (Structure): Must be pymatgen instance. ax (plt.Axes, optional):
Matplotlib axes on which to plot. Defaults to None. rotation (str, optional):
Euler angles in degrees in the form '10x,20y,30z'
describing angle at which to view structure. Defaults to "".
atomic_radii (float | dict[str, float], optional): Either a scaling factor for
default radii or map from element symbol to atomic radii. Defaults to
covalent radii.
colors (dict[str, str | list[float]], optional): Map from element symbols to
colors, either a named color (str) or rgb(a) values like (0.2, 0.3, 0.6).
Defaults to JMol colors.
scale (float, optional): Scaling of the plotted atoms and lines. Defaults to 1.
show_unit_cell (bool, optional): Whether to draw unit cell. Defaults to True.
site_labels (bool | dict[str, str | float] | list[str | float]): How to annotate
lattice sites. If True, labels are element symbols. If a dict, should map
element symbols to labels. If a list, must be same length as the number of
sites in the crystal. Defaults to True.
label_kwargs (dict, optional): Keyword arguments for matplotlib.text.Text.
Defaults to {"fontsize": 14}.
Returns:
plt.Axes: matplotlib Axes instance with plotted structure.
"""
if ax is None:
ax = plt.gca()
elems = [str(site.species.elements[0]) for site in struct]
if isinstance(site_labels, list):
assert len(site_labels) == len(
struct
), "Length mismatch between site_labels and struct"
if colors is None:
colors = jmol_colors
if atomic_radii is None or isinstance(atomic_radii, float):
atomic_radii = 0.7 * covalent_radii * (atomic_radii or 1)
else:
assert isinstance(atomic_radii, dict)
# make sure all present elements are assigned a radius
missing = {el for el in elems if el not in atomic_radii}
assert not missing, f"atomic_radii is missing keys: {missing}"
radii = np.array([atomic_radii[el] for el in elems]) # type: ignore
n_atoms = len(struct)
rot_matrix = get_rot_matrix(rotation)
unit_cell = struct.lattice.matrix
if show_unit_cell:
lines, z_indices, unit_cell_lines = unit_cell_to_lines(unit_cell)
corners = np.array(list(product((0, 1), (0, 1), (0, 1))))
cell_vertices = np.dot(corners, unit_cell)
cell_vertices = np.dot(cell_vertices, rot_matrix)
else:
lines = np.empty((0, 3))
z_indices = None
unit_cell_lines = None
cell_vertices = None
n_lines = len(lines)
positions = np.empty((n_atoms + n_lines, 3))
site_coords = np.array([site.coords for site in struct])
positions[:n_atoms] = site_coords
positions[n_atoms:] = lines
# determine which lines should be hidden behind other objects
for idx in range(n_lines):
this_layer = unit_cell_lines[z_indices[idx]]
occlu_top = ((site_coords - lines[idx] + this_layer) ** 2).sum(1) < radii**2
occlu_bot = ((site_coords - lines[idx] - this_layer) ** 2).sum(1) < radii**2
if any(occlu_top & occlu_bot):
z_indices[idx] = -1
positions = np.dot(positions, rot_matrix)
site_coords = positions[:n_atoms]
min_coords = (site_coords - radii[:, None]).min(0)
max_coords = (site_coords + radii[:, None]).max(0)
if show_unit_cell:
min_coords = np.minimum(min_coords, cell_vertices.min(0))
max_coords = np.maximum(max_coords, cell_vertices.max(0))
means = (min_coords + max_coords) / 2
coord_ranges = 1.05 * (max_coords - min_coords)
offset = scale * (means - coord_ranges / 2)
positions *= scale
positions -= offset
if n_lines > 0:
unit_cell_lines = np.dot(unit_cell_lines, rot_matrix)[:, :2] * scale
# sort so we draw from back to front along out-of-plane (z-)axis
for idx in positions[:, 2].argsort():
xy = positions[idx, :2]
start = 0
if idx < n_atoms:
# loop over all species on a site (usually just 1 for ordered sites)
for elem, occupancy in struct[idx].species.items():
elem = str(elem)
radius = atomic_radii[elem] * scale # type: ignore
wedge = Wedge(
xy,
radius,
360 * start,
360 * (start + occupancy),
facecolor=colors[elem],
edgecolor="black",
)
ax.add_patch(wedge)
txt = elem
if isinstance(site_labels, dict) and elem in site_labels:
txt = site_labels.get(elem, "")
if isinstance(site_labels, list):
txt = site_labels[idx]
if site_labels:
# place element symbol half way along outer wedge edge for
# disordered sites
half_way = 2 * np.pi * (start + occupancy / 2)
direction = np.array([math.cos(half_way), math.sin(half_way)])
text_offset = (
(0.5 * radius) * direction if occupancy < 1 else (0, 0)
)
txt_kwds = dict(ha="center", va="center", **label_kwargs)
ax.text(*(xy + text_offset), txt, **txt_kwds)
start += occupancy
else: # draw unit cell
idx -= n_atoms
# only draw line if not obstructed by an atom
if z_indices[idx] != -1:
hxy = unit_cell_lines[z_indices[idx]]
path = PathPatch(Path((xy + hxy, xy - hxy)))
ax.add_patch(path)
width, height, _ = scale * coord_ranges
ax.set(xlim=[0, width], ylim=[0, height], aspect="equal")
ax.axis("off")
return ax
|
the-stack_0_24231 | """
End slide.
"""
from mplslide import FONT, new_slide, slide_heading
def slides():
"""
Create end slide.
"""
fig = new_slide()
slide_heading(fig, 'Thank You!')
props = dict(fontproperties=FONT, fontsize=56, alpha=0.7,
horizontalalignment='center')
fig.text(0.5, 0.5, 'This entire presentation was made in Matplotlib:',
**props)
t = fig.text(0.5, 0.4, '\nhttps://github.com/QuLogic/scipy2021-mpl-update',
**props)
t.set_url('https://github.com/QuLogic/scipy2021-mpl-update')
return fig
|
the-stack_0_24232 | import Bio.SeqUtils.ProtParam
import os
import ASAP.FeatureExtraction as extract
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
# Chothia numbering definition for CDR regions
CHOTHIA_CDR = {'L': {'1': [24, 34], '2': [50, 56], '3': [89, 97]}, 'H':{'1': [26, 32], '2': [52, 56], '3': [95, 102]}}
canonical_direct = '../data/pigs_canonical.txt'
SET_NAME = 'IGHV'
IF_ONLY_HEAVY = True
CNT_DB = 1
CNT_TARGET = 1
REFERENCE_PATH_TESTCASE = '../testCase/IGHV/reference-IGHV/'
TARGETING_PATH_TESTCASE = '../testCase/IGHV/targeting-MMP-IGHV/'
TARGET_DESIRE_SIZE = 134 #44 #IGHV
targeting_direct = TARGETING_PATH_TESTCASE
reference_direct = REFERENCE_PATH_TESTCASE
Amino, Num, Germ, DatasetName, DatasetSize = extract.ReadAminoNumGerm(targeting_direct, reference_direct)
seq_id = []
for i, name in enumerate(DatasetName):
# if i<2:
# continue
tmp= [[] for j in range(int(DatasetSize[i]))]
# for every seq in that dataset
for j in range(int(DatasetSize[i])):
seq_name = name + '_' + str(j)
seq_id.append(seq_name)
# raw sequence
def sequence_raw():
def getSequenceHL(sname):
SH = ''.join(Amino['H'][sname])
SL = ''
if not IF_ONLY_HEAVY:
SL = ''.join(Amino['L'][sname])
return SL, SH
else:
return [SH]
with open('../results/'+SET_NAME +'_Sequence.csv','w') as fi:
fi.write('sequence name, ')
if not IF_ONLY_HEAVY:
fi.write('light chain, ')
fi.write('heavy chain\n')
for sname in seq_id:
fi.write(sname + ',' + ','.join(getSequenceHL(sname))+ '\n')
# sequence with numbering
def sequence_num():
def getSequenceHL_num(sname):
NH = ','.join(Num['H'][sname])
SH = ','.join(Amino['H'][sname])
NL = ','.join(Num['L'][sname])
SL = ','.join(Amino['L'][sname])
return NH, SH, NL, SL
with open('./Sequence_numbered.csv','w') as fi:
for sname in seq_id:
NH, SH, NL, SL = getSequenceHL_num(sname)
fi.write(sname + ' light num,' + NL + '\n')
fi.write(sname + ' light seq,' + SL + '\n')
fi.write(sname + ' heavy num,' + NH + '\n')
fi.write(sname + ' heavy seq,' + SH + '\n')
# sequence with region
def sequence_region():
def getSequenceHL_region(sname):
NH = Num['H'][sname]
HFW1, HCDR1, HFW2, HCDR2, HFW3, HCDR3, HFW4 = '', '', '', '', '', '', ''
for i, number in enumerate(NH):
if number[-1] >= 'A' and number[-1] <= 'Z':
num_i = int(number[:-1])
else:
num_i = int(number)
if num_i < CHOTHIA_CDR['H']['1'][0]:
HFW1 += Amino['H'][sname][i]
elif num_i <= CHOTHIA_CDR['H']['1'][1]:
HCDR1+= Amino['H'][sname][i]
elif num_i < CHOTHIA_CDR['H']['2'][0]:
HFW2 += Amino['H'][sname][i]
elif num_i <= CHOTHIA_CDR['H']['2'][1]:
HCDR2 += Amino['H'][sname][i]
elif num_i < CHOTHIA_CDR['H']['3'][0]:
HFW3 += Amino['H'][sname][i]
elif num_i <= CHOTHIA_CDR['H']['3'][1]:
HCDR3 += Amino['H'][sname][i]
else:
HFW4 += Amino['H'][sname][i]
if IF_ONLY_HEAVY:
return ''.join(HFW1), ''.join(HCDR1), ''.join(HFW2), ''.join(HCDR2), ''.join(HFW3), ''.join(HCDR3), ''.join(
HFW4)
else:
NL = Num['L'][sname]
LFW1, LCDR1, LFW2, LCDR2, LFW3, LCDR3, LFW4 = '', '', '', '', '', '', ''
for i, number in enumerate(NL):
if number[-1] >= 'A' and number[-1] <= 'Z':
num_i = int(number[:-1])
else:
num_i = int(number)
if num_i < CHOTHIA_CDR['L']['1'][0]:
LFW1 += Amino['L'][sname][i]
elif num_i <= CHOTHIA_CDR['L']['1'][1]:
LCDR1 += Amino['L'][sname][i]
elif num_i < CHOTHIA_CDR['L']['2'][0]:
LFW2 += Amino['L'][sname][i]
elif num_i <= CHOTHIA_CDR['L']['2'][1]:
LCDR2 += Amino['L'][sname][i]
elif num_i < CHOTHIA_CDR['L']['3'][0]:
LFW3 += Amino['L'][sname][i]
elif num_i <= CHOTHIA_CDR['L']['3'][1]:
LCDR3 += Amino['L'][sname][i]
else:
LFW4 += Amino['L'][sname][i]
return ''.join(LFW1), ''.join(LCDR1), ''.join(LFW2), ''.join(LCDR2), ''.join(LFW3), ''.join(LCDR3), ''.join(LFW4),\
''.join(HFW1), ''.join(HCDR1), ''.join(HFW2), ''.join(HCDR2), ''.join(HFW3), ''.join(HCDR3), ''.join(HFW4)
with open('../results/'+SET_NAME +'_Sequence_region.csv','w') as fi:
if IF_ONLY_HEAVY:
fi.write(
'sequence id, heavy chain FW1, heavy chain CDR1, heavy chain FW2, heavy chain CDR2, heavy chain FW3, heavy chain CDR3, heavy chain FW4\n')
else:
fi.write('sequence id, light chain FW1, light chain CDR1, light chain FW2, light chain CDR2, light chain FW3, light chain CDR3, light chain FW4, '+
'heavy chain FW1, heavy chain CDR1, heavy chain FW2, heavy chain CDR2, heavy chain FW3, heavy chain CDR3, heavy chain FW4\n')
for sname in seq_id:
fi.write(sname + ',' + ','.join(getSequenceHL_region(sname)) + '\n')
def feature_distribution():
from collections import Counter
write_out = [[] for i in range(len(seq_id))]
for fi in range(1,12):
feat = []
for item in write_out:
feat.append(item[fi])
feat_count = Counter(feat)
sorted_count = sorted(feat_count.items(), key=lambda kv: kv[1], reverse=True)
if fi==11:
feat_type = sorted_count[0][0].split('_')[0]
else:
feat_type = sorted_count[0][0].split('_')[0] + sorted_count[0][0].split('_')[1]
with open('./Features_distribution_'+feat_type+'.csv','w') as fi:
for i in range(len(sorted_count)):
fi.write(sorted_count[i][0]+','+str(sorted_count[i][1])+'\n')
def feature():
write_out = [[] for i in range(len(seq_id))]
for i in range(len(seq_id)):
write_out[i].append(seq_id[i])
for idx, f in enumerate(AllFeatureVectors[i]):
if f == 1:
write_out[i].append(AllFeatureNames[idx])
with open('../results/'+SET_NAME +'_Features.csv', 'w') as fi:
fi.write('sequence id, ')
if not IF_ONLY_HEAVY:
fi.write('light chain V region, light chain J region, ')
fi.write('heavy chain V region, heavy chain J region, ')
if not IF_ONLY_HEAVY:
fi.write('Canonical L1, Canonical L2, Canonical L3, ')
fi.write('Canonical H1, Canonical H2, Canonical H3, ' )
fi.write('PI, frequent positional motif\n')
for i in range(len(write_out)):
fi.write(','.join(write_out[i]) + '\n')
def correlation_feature():
###### plot correlation matrix
data = pd.DataFrame(AllFeatureVectors, columns=AllFeatureNames)
# print(AllFeatureVectors.shape)
corr = data.corr()
import numpy as np
corr = np.array(corr)
with open('../results/Pearson_feature_correlation.csv', 'w') as fi:
fi.write('Feature value 1, Feature value 2, Pearson coefficient\n')
for i in range(len(AllFeatureNames)):
for j in range(i+1, len(AllFeatureNames)):
# if str(corr[i][j])=='nan':
# print('nan', AllFeatureNames[i], AllFeatureNames[j])
fi.write(AllFeatureNames[i]+ ','+AllFeatureNames[j]+','+ str(corr[i][j])+'\n')
# data.to_csv(r'../results/Feature_test.csv', header=True)
# fig = plt.figure(figsize=(100, 70))
# ax = fig.add_subplot(111)
# cax = ax.matshow(corr, cmap='seismic', vmin=-1, vmax =1)
# fig.colorbar(cax)
# ticks = np.arange(0, len(data.columns),1)
# ax.set_xticks(ticks)
# plt.xticks(rotation=90)
# ax.set_yticks(ticks)
# ax.set_xticklabels(data.columns)
# ax.set_yticklabels(data.columns)
# plt.savefig('../results/feature_correlation.png')
# corr = pd.DataFrame(corr, index=AllFeatureNames, columns=AllFeatureNames)
###### display pairwise correlation value
# au_corr = corr.where(np.triu(np.ones(corr.shape), k=1).astype(np.bool))
# au_corr = au_corr.stack().sort_values(ascending=False)
# au_corr = corr.unstack()
# au_corr.columns = [' 1', 'Feature 2', 'Pearson Correlation Value']
# au_corr = pd.DataFrame(au_corr.values, columns = ['Feature 1, Feature 2, Pearson Correlation Value'])
# au_corr.to_csv(r'../results/Pearson_feature_correlation.csv', header=True)
# print(len(au_corr))
# print(AllFeatureVectors[:, AllFeatureNames.index('Germ_LJ_IGKJ3*01')])
# print(AllFeatureVectors[:, AllFeatureNames.index('Canonical_L2_0')])
# def JaccardCoefficientAnalysis():
# df = pd.DataFrame(AllFeatureVectors, columns=AllFeatureNames)
#
# interest_feature=['Germ_HV_IGHV3-23*01', 'Canonical_H2_6', 'Germ_HJ_IGHJ4*02', 'Germ_HJ_IGHJ6*01', 'Germ_LV_IGKV1D-39*01',
# 'Canonical_H2_5', 'Germ_HJ_IGHJ4*01']
# jac_sim = np.eye(len(AllFeatureNames))
# for i in range(len(AllFeatureNames)):
# for j in range(i+1, len(AllFeatureNames)):
# if AllFeatureNames[i].startswith('Motif') or AllFeatureNames[j].startswith('Motif'):
# continue
# a = AllFeatureVectors[:, i]
# b = AllFeatureVectors[:, j]
# aandb =0
# aorb = 0
# for k in range(len(a)):
# if a[k]==b[k] and a[k]==1:
# aandb +=1
# if a[k]==1 or b[k]==1:
# aorb +=1
# if aorb==0:
# jac_tmp=0
# else:
# jac_tmp = float(aandb)/aorb
# if AllFeatureNames[i] in interest_feature and AllFeatureNames[j] in interest_feature:
# print(AllFeatureNames[i], AllFeatureNames[j], jac_tmp)
#
# jac_sim[i][j]=jac_tmp
# jac_sim[j][i]=jac_tmp
#
#
# with open('../results/Jaccard_feature_coefficient.csv', 'w') as fi:
# fi.write('Feature value 1, Feature value 2, Jaccard coefficient\n')
# for i in range(len(AllFeatureNames)):
# for j in range(i+1, len(AllFeatureNames)):
# if AllFeatureNames[i].startswith('Motif') or AllFeatureNames[j].startswith('Motif'):
# continue
# fi.write(AllFeatureNames[i]+ ','+AllFeatureNames[j]+','+ str(jac_sim[i][j])+'\n')
#
#
# fig = plt.figure(figsize=(100, 70))
# ax = fig.add_subplot(111)
# cax = ax.matshow(jac_sim, cmap='Blues', vmin=0, vmax =1)
# fig.colorbar(cax)
# ticks = np.arange(0, len(df.columns),1)
# ax.set_xticks(ticks)
# plt.xticks(rotation=90)
# ax.set_yticks(ticks)
# ax.set_xticklabels(df.columns)
# ax.set_yticklabels(df.columns)
# plt.savefig('../results/feature_coefficient.png')
#
# # print(AllFeatureVectors[:,AllFeatureNames.index('Germ_LJ_IGKJ3*01')])
# # print(AllFeatureVectors[:,AllFeatureNames.index('Canonical_L2_0*01')])
# # where(np.triu(np.ones(jac_sim.shape), k=1).astype(np.bool))
# # au_jac = jac_sim.where(np.triu(np.ones(jac_sim.shape), k=0).astype(np.bool))
# # au_jac = au_jac.stack().sort_values(ascending=False)
# # au_jac = jac_sim.unstack()
# # print(len(au_jac))
# # au_jac.to_csv(r'../results/Jaccard_feature_coefficient.csv', header=True)
def JaccardCoefficientAnalysis():
PDB_size = DatasetSize[0]
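    # Rows [0:PDB_size] of AllFeatureVectors hold the reference (PDB) set and the
    # remaining rows the MMP-targeting set (matching the CSV header written below).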
jac_sim_PDB = np.eye(len(AllFeatureNames))
for i in range(len(AllFeatureNames)):
for j in range(i+1, len(AllFeatureNames)):
if AllFeatureNames[i].startswith('Motif') or AllFeatureNames[j].startswith('Motif'):
continue
a = AllFeatureVectors[:PDB_size, i]
b = AllFeatureVectors[:PDB_size, j]
aandb =0
aorb = 0
for k in range(len(a)):
if a[k]==b[k] and a[k]==1:
aandb +=1
if a[k]==1 or b[k]==1:
aorb +=1
if aorb==0:
jac_tmp=0
else:
jac_tmp = float(aandb)/aorb
# if AllFeatureNames[i] == 'Germ_HV_IGHV3-23*01' and AllFeatureNames[j] =='Canonical_H2_6':
# print(a, b, jac_tmp)
# if AllFeatureNames[i] in interest_feature and AllFeatureNames[j] in interest_feature:
# print(AllFeatureNames[i], AllFeatureNames[j], jac_tmp)
jac_sim_PDB[i][j]=jac_tmp
jac_sim_PDB[j][i]=jac_tmp
jac_sim_MMP = np.eye(len(AllFeatureNames))
for i in range(len(AllFeatureNames)):
for j in range(i+1, len(AllFeatureNames)):
if AllFeatureNames[i].startswith('Motif') or AllFeatureNames[j].startswith('Motif'):
continue
a = AllFeatureVectors[PDB_size:, i]
b = AllFeatureVectors[PDB_size:, j]
aandb =0
aorb = 0
for k in range(len(a)):
if a[k]==b[k] and a[k]==1:
aandb +=1
if a[k]==1 or b[k]==1:
aorb +=1
if aorb==0:
jac_tmp=0
else:
jac_tmp = float(aandb)/aorb
# if AllFeatureNames[i] in interest_feature and AllFeatureNames[j] in interest_feature:
# print(AllFeatureNames[i], AllFeatureNames[j], jac_tmp)
jac_sim_MMP[i][j]=jac_tmp
jac_sim_MMP[j][i]=jac_tmp
with open('../results/'+SET_NAME+'_Jaccard Feature Coefficient.csv', 'w') as fi:
fi.write('Feature value 1, Feature value 2, Jaccard coefficient for reference set, Jaccard coefficient for MMP-targeting set\n')
for i in range(len(AllFeatureNames)):
for j in range(i+1, len(AllFeatureNames)):
if AllFeatureNames[i].startswith('Motif') or AllFeatureNames[j].startswith('Motif'):
continue
fi.write(AllFeatureNames[i]+ ','+AllFeatureNames[j]+','+ str(jac_sim_PDB[i][j])+','+ str(jac_sim_MMP[i][j])+'\n')
if __name__=='__main__':
sequence_raw()
sequence_region()
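    # Assemble the feature matrix: one-hot germline and canonical-class features,
    # one-hot PI and multi-hot CDR-H3 positional-motif features per sequence.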
OneHotGerm, GermFeatureNames = extract.GetOneHotGerm(Germ, DatasetSize, DatasetName)
OneHotCanon, CanonFeatureNames = extract.GetOneHotCanon(canonical_direct, Amino, Num, DatasetSize, DatasetName)
CDRH3 = extract.GetCDRH3(Amino, Num)
OneHotPI, PIFeatureNames = extract.GetOneHotPI(CDRH3, DatasetSize, DatasetName)
MultiHotMotif, MotifFeatureNames = extract.MultiHotMotif(CDRH3, DatasetSize, DatasetName)
AllFeatureVectors, AllFeatureNames, _, _ = extract.GetFeatureVectors(OneHotGerm, GermFeatureNames, OneHotCanon, CanonFeatureNames, OneHotPI, PIFeatureNames, MultiHotMotif, MotifFeatureNames)
feature()
# correlation_feature()
JaccardCoefficientAnalysis()
|
the-stack_0_24233 | from textwrap import dedent
from typing import List, Set
import pytest
from brain_brew.representation.yaml.notes import Note, NoteGrouping, Notes, \
NOTES, NOTE_GROUPINGS, FIELDS, GUID, NOTE_MODEL, TAGS, FLAGS
working_notes = {
"test1": {FIELDS: ['first'], GUID: "12345", NOTE_MODEL: "model_name", TAGS: ['noun', 'other']},
"test2": {FIELDS: ['english', 'german'], GUID: "sdfhfghsvsdv", NOTE_MODEL: "LL Test", TAGS: ['marked']},
"no_note_model": {FIELDS: ['first'], GUID: "12345", TAGS: ['noun', 'other']},
"no_note_model2": {FIELDS: ['second'], GUID: "67890", TAGS: ['noun', 'other']},
"no_tags1": {FIELDS: ['first'], GUID: "12345", NOTE_MODEL: "model_name"},
"no_tags2": {FIELDS: ['first'], GUID: "12345", NOTE_MODEL: "model_name", TAGS: []},
"no_model_or_tags": {FIELDS: ['first'], GUID: "12345"},
"test1_with_default_flags": {FIELDS: ['first'], GUID: "12345", NOTE_MODEL: "model_name", TAGS: ['noun', 'other'], FLAGS: 0},
"test1_with_flags": {FIELDS: ['first'], GUID: "12345", NOTE_MODEL: "model_name", TAGS: ['noun', 'other'], FLAGS: 1},
}
working_note_groupings = {
"nothing_grouped": {NOTES: [working_notes["test1"], working_notes["test2"]]},
"note_model_grouped": {NOTES: [working_notes["no_note_model"], working_notes["no_note_model2"]], NOTE_MODEL: "model_name"},
"note_model_grouped2": {NOTES: [working_notes["no_note_model"], working_notes["no_note_model2"]], NOTE_MODEL: "different_model"},
"tags_grouped": {NOTES: [working_notes["no_tags1"], working_notes["no_tags2"]], TAGS: ["noun", "other"]},
"tags_grouped_as_addition": {NOTES: [working_notes["test1"], working_notes["test2"]], TAGS: ["test", "recent"]},
"model_and_tags_grouped": {NOTES: [working_notes["no_model_or_tags"], working_notes["no_model_or_tags"]], NOTE_MODEL: "model_name", TAGS: ["noun", "other"]}
}
working_dpns = {
"one_group": {NOTE_GROUPINGS: [working_note_groupings["nothing_grouped"]]},
"two_groups_two_models": {NOTE_GROUPINGS: [working_note_groupings["nothing_grouped"], working_note_groupings["note_model_grouped"]]},
"two_groups_three_models": {NOTE_GROUPINGS: [working_note_groupings["nothing_grouped"], working_note_groupings["note_model_grouped2"]]},
}
@pytest.fixture(params=working_notes.values())
def note_fixtures(request):
return Note.from_dict(request.param)
@pytest.fixture(params=working_note_groupings.values())
def note_grouping_fixtures(request):
return NoteGrouping.from_dict(request.param)
class TestConstructor:
class TestNote:
@pytest.mark.parametrize("fields, guid, note_model, tags, flags, media", [
([], "", "", [], 0, {}),
(None, None, None, None, None, None),
(["test", "blah", "whatever"], "1234567890x", "model_name", ["noun"], 1, {}),
(["test", "blah", "<img src=\"animal.jpg\">"], "1234567890x", "model_name", ["noun"], 2, {"animal.jpg"}),
])
def test_constructor(self, fields: List[str], guid: str, note_model: str, tags: List[str], flags: int, media: Set[str]):
note = Note(fields=fields, guid=guid, note_model=note_model, tags=tags, flags=flags)
assert isinstance(note, Note)
assert note.fields == fields
assert note.guid == guid
assert note.note_model == note_model
assert note.tags == tags
assert note.flags == flags
# assert note.media_references == media
def test_from_dict(self, note_fixtures):
assert isinstance(note_fixtures, Note)
class TestNoteGrouping:
def test_constructor(self):
note_grouping = NoteGrouping(notes=[Note.from_dict(working_notes["test1"])], note_model=None, tags=None)
assert isinstance(note_grouping, NoteGrouping)
assert isinstance(note_grouping.notes, List)
def test_from_dict(self, note_grouping_fixtures):
assert isinstance(note_grouping_fixtures, NoteGrouping)
class TestDeckPartNote:
def test_constructor(self):
dpn = Notes(note_groupings=[NoteGrouping.from_dict(working_note_groupings["nothing_grouped"])])
assert isinstance(dpn, Notes)
def test_from_dict(self):
dpn = Notes.from_dict({NOTE_GROUPINGS: [working_note_groupings["nothing_grouped"]]})
assert isinstance(dpn, Notes)
class TestDumpToYaml:
@staticmethod
def _make_temp_file(tmpdir):
folder = tmpdir.mkdir("yaml_files")
file = folder.join("test.yaml")
file.write("test")
return file
class TestNote:
@staticmethod
def _assert_dump_to_yaml(tmpdir, ystring, note_name):
file = TestDumpToYaml._make_temp_file(tmpdir)
note = Note.from_dict(working_notes[note_name])
note.dump_to_yaml(str(file))
assert file.read() == ystring
def test_all1(self, tmpdir):
ystring = dedent(f'''\
{FIELDS}:
- first
{GUID}: '12345'
{NOTE_MODEL}: model_name
{TAGS}:
- noun
- other
''')
self._assert_dump_to_yaml(tmpdir, ystring, "test1")
def test_all2(self, tmpdir):
ystring = dedent(f'''\
{FIELDS}:
- english
- german
{GUID}: sdfhfghsvsdv
{NOTE_MODEL}: LL Test
{TAGS}:
- marked
''')
self._assert_dump_to_yaml(tmpdir, ystring, "test2")
def test_no_note_model(self, tmpdir):
ystring = dedent(f'''\
{FIELDS}:
- first
{GUID}: '12345'
{TAGS}:
- noun
- other
''')
self._assert_dump_to_yaml(tmpdir, ystring, "no_note_model")
def test_no_tags(self, tmpdir):
for num, note in enumerate(["no_tags1", "no_tags2"]):
ystring = dedent(f'''\
{FIELDS}:
- first
{GUID}: '12345'
{NOTE_MODEL}: model_name
''')
self._assert_dump_to_yaml(tmpdir.mkdir(str(num)), ystring, note)
def test_with_flags(self, tmpdir):
ystring = dedent(f'''\
{FIELDS}:
- first
{GUID}: '12345'
{FLAGS}: 1
{NOTE_MODEL}: model_name
{TAGS}:
- noun
- other
''')
self._assert_dump_to_yaml(tmpdir, ystring, "test1_with_flags")
def test_with_default_flags(self, tmpdir):
ystring = dedent(f'''\
{FIELDS}:
- first
{GUID}: '12345'
{NOTE_MODEL}: model_name
{TAGS}:
- noun
- other
''')
self._assert_dump_to_yaml(tmpdir, ystring, "test1_with_default_flags")
class TestNoteGrouping:
@staticmethod
def _assert_dump_to_yaml(tmpdir, ystring, note_grouping_name):
file = TestDumpToYaml._make_temp_file(tmpdir)
note = NoteGrouping.from_dict(working_note_groupings[note_grouping_name])
note.dump_to_yaml(str(file))
assert file.read() == ystring
def test_nothing_grouped(self, tmpdir):
ystring = dedent(f'''\
{NOTES}:
- {FIELDS}:
- first
{GUID}: '12345'
{NOTE_MODEL}: model_name
{TAGS}:
- noun
- other
- {FIELDS}:
- english
- german
{GUID}: sdfhfghsvsdv
{NOTE_MODEL}: LL Test
{TAGS}:
- marked
''')
self._assert_dump_to_yaml(tmpdir, ystring, "nothing_grouped")
def test_note_model_grouped(self, tmpdir):
ystring = dedent(f'''\
{NOTE_MODEL}: model_name
{NOTES}:
- {FIELDS}:
- first
{GUID}: '12345'
{TAGS}:
- noun
- other
- {FIELDS}:
- second
{GUID}: '67890'
{TAGS}:
- noun
- other
''')
self._assert_dump_to_yaml(tmpdir, ystring, "note_model_grouped")
def test_note_tags_grouped(self, tmpdir):
ystring = dedent(f'''\
{TAGS}:
- noun
- other
{NOTES}:
- {FIELDS}:
- first
{GUID}: '12345'
{NOTE_MODEL}: model_name
- {FIELDS}:
- first
{GUID}: '12345'
{NOTE_MODEL}: model_name
''')
self._assert_dump_to_yaml(tmpdir, ystring, "tags_grouped")
def test_note_model_and_tags_grouped(self, tmpdir):
ystring = dedent(f'''\
{NOTE_MODEL}: model_name
{TAGS}:
- noun
- other
{NOTES}:
- {FIELDS}:
- first
{GUID}: '12345'
- {FIELDS}:
- first
{GUID}: '12345'
''')
self._assert_dump_to_yaml(tmpdir, ystring, "model_and_tags_grouped")
class TestDeckPartNotes:
@staticmethod
def _assert_dump_to_yaml(tmpdir, ystring, groups: list):
file = TestDumpToYaml._make_temp_file(tmpdir)
note = Notes.from_dict({NOTE_GROUPINGS: [working_note_groupings[name] for name in groups]})
note.dump_to_yaml(str(file))
assert file.read() == ystring
def test_two_groupings(self, tmpdir):
ystring = dedent(f'''\
{NOTE_GROUPINGS}:
- {NOTE_MODEL}: model_name
{TAGS}:
- noun
- other
{NOTES}:
- {FIELDS}:
- first
{GUID}: '12345'
- {FIELDS}:
- first
{GUID}: '12345'
- {NOTES}:
- {FIELDS}:
- first
{GUID}: '12345'
{NOTE_MODEL}: model_name
{TAGS}:
- noun
- other
- {FIELDS}:
- english
- german
{GUID}: sdfhfghsvsdv
{NOTE_MODEL}: LL Test
{TAGS}:
- marked
''')
self._assert_dump_to_yaml(tmpdir, ystring, ["model_and_tags_grouped", "nothing_grouped"])
class TestFunctionality:
class TestGetMediaReferences:
class TestNote:
@pytest.mark.parametrize("fields, expected_count", [
([], 0),
(["nothing", "empty", "can't find nothing here"], 0),
(["<img src=\"animal.jpg\">", "empty", "can't find nothing here"], 1),
(["<img src=\"animal.jpg\">", "<img src=\"animal.jpg\">", "<img src=\"animal.jpg\">"], 1),
(["<img src=\"animal.jpg\">", "<img src=\"food.jpg\">", "<img src=\"object.jpg\">"], 3),
(["<img src=\"animal.jpg\">", "[sound:test.mp3]", "[sound:test.mp3]"], 2),
])
def test_all(self, fields, expected_count):
note = Note(fields=fields, note_model=None, guid="", tags=None, flags=0)
media_found = note.get_all_media_references()
assert isinstance(media_found, Set)
assert len(media_found) == expected_count
class TestGetAllNoteModels:
class TestNoteGrouping:
def test_nothing_grouped(self):
group = NoteGrouping.from_dict(working_note_groupings["nothing_grouped"])
models = group.get_all_known_note_model_names()
assert models == {'LL Test', 'model_name'}
def test_grouped(self):
group = NoteGrouping.from_dict(working_note_groupings["note_model_grouped"])
models = group.get_all_known_note_model_names()
assert models == {'model_name'}
class TestDeckPartNotes:
def test_two_groups_two_models(self):
dpn = Notes.from_dict(working_dpns["two_groups_two_models"])
models = dpn.get_all_known_note_model_names()
assert models == {'LL Test', 'model_name'}
def test_two_groups_three_models(self):
dpn = Notes.from_dict(working_dpns["two_groups_three_models"])
models = dpn.get_all_known_note_model_names()
assert models == {'LL Test', 'model_name', 'different_model'}
# class TestGetAllNotes:
# class TestNoteGrouping:
# def test_nothing_grouped(self):
# group = NoteGrouping.from_dict(working_note_groupings["nothing_grouped"])
# notes = group.get_all_notes_copy([], False)
# assert len(notes) == 2
#
# def test_model_grouped(self):
# group = NoteGrouping.from_dict(working_note_groupings["note_model_grouped"])
# assert group.note_model == "model_name"
# assert all([note.note_model is None for note in group.notes])
#
# notes = group.get_all_notes_copy()
# assert {note.note_model for note in notes} == {"model_name"}
#
# def test_tags_grouped(self):
# group = NoteGrouping.from_dict(working_note_groupings["tags_grouped"])
# assert group.tags == ["noun", "other"]
# assert all([note.tags is None or note.tags == [] for note in group.notes])
#
# notes = group.get_all_notes_copy()
# assert all([note.tags == ["noun", "other"] for note in notes])
#
# def test_tags_grouped_as_addition(self):
# group = NoteGrouping.from_dict(working_note_groupings["tags_grouped_as_addition"])
# assert group.tags == ["test", "recent"]
# assert all([note.tags is not None for note in group.notes])
#
# notes = group.get_all_notes_copy()
# assert notes[0].tags == ['noun', 'other', "test", "recent"]
# assert notes[1].tags == ['marked', "test", "recent"]
#
# def test_no_tags(self):
# group = NoteGrouping.from_dict(working_note_groupings["tags_grouped"])
# group.tags = None
# assert all([note.tags is None or note.tags == [] for note in group.notes])
#
# notes = group.get_all_notes_copy()
# assert all([note.tags == [] for note in notes])
#
|
the-stack_0_24234 | import numpy as np
def set_balance(row, ratio):
if row['ratio 3/(3+5)'] >= ratio:
return '3p'
elif row['ratio 5/(3+5)'] >= ratio:
return '5p'
elif np.isnan(row['reads_3p']) and np.isnan(row['reads_5p']):
return 'unknown'
elif np.isnan(row['reads_3p']):
return '5p'
elif np.isnan(row['reads_5p']):
return '3p'
else:
return 'both'
def find_in_mirna(row, df_loc):
    # Evaluate the overlap mask once and reuse it, instead of filtering df_loc twice.
    matches = df_loc[
        (df_loc['chrom'] == row['chrom']) &
        (df_loc['start'] <= row['pos']) &
        (df_loc['orientation'] == row['orient_loc']) &
        (df_loc['stop'] >= row['pos'])]
    if matches.shape[0] != 0:
        temp = matches.values[0]
        if row['orient_loc'] == '+':
            start = row['pos'] - temp[2] + 1
            stop = row['pos'] - temp[3] - 1
        else:
            start = -(row['pos'] - temp[3] - 1)
            stop = -(row['pos'] - temp[2] + 1)
        localiz = [start, stop]
    else:
        localiz = [np.nan, np.nan]
    return localiz
def find_arm(row):
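    # Strand-aware arm call: the mature miRNA lying closer to the 5' end of the
    # precursor is the 5p arm, the one closer to the 3' end is the 3p arm.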
if row['-/+'] == '+':
if row['start'] - row['start_pre'] < row['stop_pre'] - row['stop']:
return '5p'
else:
return '3p'
if row['-/+'] == '-':
if row['start'] - row['start_pre'] < row['stop_pre'] - row['stop']:
return '3p'
else:
return '5p'
def from_start(row, column_start, column_stop):
if row['orientation'] == '+':
return row['pos'] - row[column_start] + 1
else:
return row[column_stop] - row['pos'] + 1
def from_end(row, column_stop, column_start):
if row['orientation'] == '+':
return row['pos'] - row[column_stop] - 1
else:
return row[column_start] - row['pos'] - 1
def find_localization(row, df_loc):
print(row)
localizations = df_loc[(df_loc['chrom'] == row['chrom']) &
(df_loc['start'] <= row['pos']) &
(df_loc['stop'] >= row['pos'])].values
print(localizations)
    if len(localizations) > 1:
        raise ValueError('more than one record overlaps %s:%s' % (row['chrom'], row['pos']))
else:
return localizations[0]
def if_complex(row, complex_df):
    # Evaluate the five-way match once and reuse it for the existence check and
    # the 'complex' lookup.
    matches = complex_df[(complex_df['chrom'] == row['chrom']) &
                         (complex_df['pre_name'] == row['pre_name']) &
                         (complex_df['id'] == row['id']) &
                         (complex_df['start_pre'] == row['start_pre']) &
                         (complex_df['seq_type'] == row['seq_type'])]
    if matches.shape[0] != 0:
        values = matches['complex'].unique()
        if 1 in values:
            return 1
        else:
            return 0
    else:
        return 0
def concat_ints(col):
row = list(col.values)
new_row = []
for x in row:
new_row.append(str(x))
return '"' + ':'.join(new_row) + '"'
def concat_alg(col):
row = list(col.values)
new_row = []
for x in row:
new_row.append(str(x))
new_row = sorted(set(new_row))
return '"' + ':'.join(new_row) + '"'
def type_of_mutation(row):
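    # Classify by allele length: a longer ref is a deletion; equal lengths, or a
    # multi-allelic alt containing ',', count as substitution; otherwise insertion.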
if len(row['ref']) > len(row['alt']):
return 'del'
elif len(row['ref']) == len(row['alt']):
return 'subst'
elif ',' in row['alt']:
return 'subst'
else:
return 'ins'
def take_from_coord(coordinates, column_name, row):
return coordinates[(coordinates['chr'] == row['chrom']) &
(coordinates['start'] < int(row['pos'])) &
(coordinates['stop'] > int(row['pos']))][column_name].values[0]
def seq_type(value):
if 'hsa-' in value:
return 'mirna'
else:
return 'not_defined'
def subst_type(row):
if row['mutation_type'] == 'subst':
if (((row['ref'] in ['A', 'G']) and (row['alt'] in ['A', 'G'])) or
((row['ref'] in ['C', 'T']) and (row['alt'] in ['C', 'T']))):
return 'transition'
else:
return 'transversion'
else:
return 'n.a.'
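def _example_annotation(df):
    """Illustrative sketch only (not part of the original pipeline): apply the
    row-wise helpers above to a variant DataFrame whose columns match the names
    they expect (ref, alt, reads_3p, reads_5p, 'ratio 3/(3+5)', ...). The 0.8
    balance threshold is an arbitrary example value."""
    df['mutation_type'] = df.apply(type_of_mutation, axis=1)
    df['subst_type'] = df.apply(subst_type, axis=1)
    df['balance'] = df.apply(set_balance, axis=1, args=(0.8,))
    return df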
|
the-stack_0_24235 | # Copyright 2013 Google Inc. All Rights Reserved.
# Use of this source code is governed by a BSD-style license that can
# be found in the LICENSE file.
from itertools import izip
import logging
import re
from net import bsonrpc
from net import gorpc
from vtdb import cursor
from vtdb import dbexceptions
from vtdb import field_types
# This is the shard name for when the keyrange covers the entire space
# for unsharded database.
SHARD_ZERO = "0"
_errno_pattern = re.compile(r'\(errno (\d+)\)')
# Map specific errors to specific classes.
_errno_map = {
1062: dbexceptions.IntegrityError,
}
def convert_exception(exc, *args):
new_args = exc.args + args
if isinstance(exc, gorpc.TimeoutError):
return dbexceptions.TimeoutError(new_args)
elif isinstance(exc, gorpc.AppError):
msg = str(exc[0]).lower()
match = _errno_pattern.search(msg)
if match:
mysql_errno = int(match.group(1))
return _errno_map.get(mysql_errno, dbexceptions.DatabaseError)(new_args)
return dbexceptions.DatabaseError(new_args)
elif isinstance(exc, gorpc.ProgrammingError):
return dbexceptions.ProgrammingError(new_args)
elif isinstance(exc, gorpc.GoRpcError):
return dbexceptions.FatalError(new_args)
return exc
# A simple, direct connection to the vttablet query server.
# This is shard-unaware and only handles the most basic communication.
# If something goes wrong, this object should be thrown away and a new one instantiated.
class VtgateConnection(object):
session = None
tablet_type = None
cursorclass = cursor.TabletCursor
_stream_fields = None
_stream_conversions = None
_stream_result = None
_stream_result_index = None
def __init__(self, addr, tablet_type, keyspace, shard, timeout, user=None, password=None, encrypted=False, keyfile=None, certfile=None):
self.addr = addr
self.tablet_type = tablet_type
self.keyspace = keyspace
self.shard = shard
self.timeout = timeout
self.client = bsonrpc.BsonRpcClient(addr, timeout, user, password, encrypted=encrypted, keyfile=keyfile, certfile=certfile)
def __str__(self):
return '<VtgateConnection %s %s %s/%s>' % (self.addr, self.tablet_type, self.keyspace, self.shard)
def dial(self):
try:
if not self.is_closed():
self.close()
self.client.dial()
except gorpc.GoRpcError as e:
raise convert_exception(e, str(self))
def close(self):
if self.session:
self.rollback()
self.client.close()
def is_closed(self):
return self.client.is_closed()
def begin(self):
try:
response = self.client.call('VTGate.Begin', None)
self.session = response.reply
except gorpc.GoRpcError as e:
raise convert_exception(e, str(self))
def commit(self):
try:
session = self.session
self.session = None
self.client.call('VTGate.Commit', session)
except gorpc.GoRpcError as e:
raise convert_exception(e, str(self))
def rollback(self):
try:
session = self.session
self.session = None
self.client.call('VTGate.Rollback', session)
except gorpc.GoRpcError as e:
raise convert_exception(e, str(self))
def cursor(self, cursorclass=None, **kargs):
return (cursorclass or self.cursorclass)(self, **kargs)
def _add_session(self, req):
if self.session:
req['Session'] = self.session
def _update_session(self, response):
if 'Session' in response.reply and response.reply['Session']:
self.session = response.reply['Session']
def _execute(self, sql, bind_variables):
new_binds = field_types.convert_bind_vars(bind_variables)
req = {
'Sql': sql,
'BindVariables': new_binds,
'Keyspace': self.keyspace,
'TabletType': self.tablet_type,
'Shards': [self.shard],
}
self._add_session(req)
fields = []
conversions = []
results = []
rowcount = 0
lastrowid = 0
try:
response = self.client.call('VTGate.ExecuteShard', req)
self._update_session(response)
reply = response.reply
# TODO(sougou): Simplify this check after all servers are deployed
if 'Error' in response.reply and response.reply['Error']:
raise gorpc.AppError(response.reply['Error'], 'VTGate.ExecuteShard')
if 'Result' in reply:
res = reply['Result']
for field in res['Fields']:
fields.append((field['Name'], field['Type']))
conversions.append(field_types.conversions.get(field['Type']))
for row in res['Rows']:
results.append(tuple(_make_row(row, conversions)))
rowcount = res['RowsAffected']
lastrowid = res['InsertId']
except gorpc.GoRpcError as e:
raise convert_exception(e, str(self), sql, bind_variables)
except:
logging.exception('gorpc low-level error')
raise
return results, rowcount, lastrowid, fields
def _execute_batch(self, sql_list, bind_variables_list):
query_list = []
for sql, bind_vars in zip(sql_list, bind_variables_list):
query = {}
query['Sql'] = sql
query['BindVariables'] = field_types.convert_bind_vars(bind_vars)
query_list.append(query)
rowsets = []
try:
req = {
'Queries': query_list,
'Keyspace': self.keyspace,
'TabletType': self.tablet_type,
'Shards': [self.shard],
}
self._add_session(req)
response = self.client.call('VTGate.ExecuteBatchShard', req)
self._update_session(response)
if 'Error' in response.reply and response.reply['Error']:
raise gorpc.AppError(response.reply['Error'], 'VTGate.ExecuteBatchShard')
for reply in response.reply['List']:
fields = []
conversions = []
results = []
rowcount = 0
for field in reply['Fields']:
fields.append((field['Name'], field['Type']))
conversions.append(field_types.conversions.get(field['Type']))
for row in reply['Rows']:
results.append(tuple(_make_row(row, conversions)))
rowcount = reply['RowsAffected']
lastrowid = reply['InsertId']
rowsets.append((results, rowcount, lastrowid, fields))
except gorpc.GoRpcError as e:
raise convert_exception(e, str(self), sql_list, bind_variables_list)
except:
logging.exception('gorpc low-level error')
raise
return rowsets
# we return the fields for the response, and the column conversions
# the conversions will need to be passed back to _stream_next
# (that way we avoid using a member variable here for such a corner case)
def _stream_execute(self, sql, bind_variables):
new_binds = field_types.convert_bind_vars(bind_variables)
req = {
'Sql': sql,
'BindVariables': new_binds,
'Keyspace': self.keyspace,
'TabletType': self.tablet_type,
'Shards': [self.shard],
}
self._add_session(req)
self._stream_fields = []
self._stream_conversions = []
self._stream_result = None
self._stream_result_index = 0
try:
self.client.stream_call('VTGate.StreamExecuteShard', req)
first_response = self.client.stream_next()
reply = first_response.reply['Result']
for field in reply['Fields']:
self._stream_fields.append((field['Name'], field['Type']))
self._stream_conversions.append(field_types.conversions.get(field['Type']))
except gorpc.GoRpcError as e:
raise convert_exception(e, str(self), sql, bind_variables)
except:
logging.exception('gorpc low-level error')
raise
return None, 0, 0, self._stream_fields
def _stream_next(self):
# Terminating condition
if self._stream_result_index is None:
return None
# See if we need to read more or whether we just pop the next row.
while self._stream_result is None:
try:
self._stream_result = self.client.stream_next()
if self._stream_result is None:
self._stream_result_index = None
return None
# A session message, if any comes separately with no rows
# TODO(sougou) get rid of this check. After all the server
# changes, there will always be a 'Session' in the reply.
if 'Session' in self._stream_result.reply and self._stream_result.reply['Session']:
self.session = self._stream_result.reply['Session']
self._stream_result = None
continue
except gorpc.GoRpcError as e:
raise convert_exception(e, str(self))
except:
logging.exception('gorpc low-level error')
raise
row = tuple(_make_row(self._stream_result.reply['Result']['Rows'][self._stream_result_index], self._stream_conversions))
# If we are reading the last row, set us up to read more data.
self._stream_result_index += 1
if self._stream_result_index == len(self._stream_result.reply['Result']['Rows']):
self._stream_result = None
self._stream_result_index = 0
return row
def _make_row(row, conversions):
converted_row = []
for conversion_func, field_data in izip(conversions, row):
if field_data is None:
v = None
elif conversion_func:
v = conversion_func(field_data)
else:
v = field_data
converted_row.append(v)
return converted_row
def connect(*pargs, **kargs):
conn = VtgateConnection(*pargs, **kargs)
conn.dial()
return conn
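def _example_usage():
  """Illustrative sketch only: the address, keyspace, shard and query below are
  placeholders rather than values from a real deployment."""
  conn = connect('localhost:15991', 'master', 'test_keyspace', SHARD_ZERO, 30.0)
  conn.begin()
  results, rowcount, lastrowid, fields = conn._execute('select 1', {})
  conn.commit()
  conn.close()
  return results, rowcount, lastrowid, fields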
|
the-stack_0_24238 | import pytest
@pytest.fixture(scope="session")
def report(dials_data, tmpdir_factory):
data_dir = dials_data("pychef")
mtz = data_dir.join("insulin_dials_scaled_unmerged.mtz").strpath
temp_path = tmpdir_factory.mktemp("test_report")
with temp_path.as_cwd():
from xia2.Modules.Analysis import (
phil_scope,
) # import creates /xia2-debug.txt dropping
from xia2.Modules.Report import Report # import creates /DEFAULT/ dropping
params = phil_scope.extract()
params.batch = []
params.dose.batch = []
yield Report.from_unmerged_mtz(mtz, params, report_dir=temp_path.strpath)
def test_multiplicity_plots(report):
multiplicity_plots = report.multiplicity_plots(dest_path=".")
assert set(multiplicity_plots) == {
"multiplicity_h",
"multiplicity_k",
"multiplicity_l",
}
def test_symmetry_table_html(report):
assert (
report.symmetry_table_html()
== """
<p>
<b>Unit cell:</b> I 2 3 (No. 197)
<br>
<b>Space group:</b> (78.1047, 78.1047, 78.1047, 90, 90, 90)
</p>
"""
)
def test_xtriage(report):
xtriage_report = report.xtriage_report()
assert len(xtriage_report) == 3
assert set(xtriage_report[0][0]) == {"text", "summary", "header", "level"}
def test_batch_dependent_plots(report):
plots = report.batch_dependent_plots()
assert set(plots) == {"i_over_sig_i_vs_batch", "scale_rmerge_vs_batch"}
assert plots["scale_rmerge_vs_batch"]["data"][0]["x"] == list(range(1, 46))
assert plots["i_over_sig_i_vs_batch"]["data"][0]["x"] == list(range(1, 46))
def test_resolution_plots_and_stats(report):
(
overall_stats_table,
merging_stats_table,
stats_plots,
) = report.resolution_plots_and_stats()
assert len(overall_stats_table) == 11
assert overall_stats_table[0] == [
"",
"Overall",
"Low resolution",
"High resolution",
]
assert len(merging_stats_table) == 21
assert merging_stats_table[0] == [
"Resolution (\xc5)",
"N(obs)",
"N(unique)",
"Multiplicity",
"Completeness",
"Mean I",
"Mean I/\u03c3(I)",
"R<sub>merge</sub>",
"R<sub>meas</sub>",
"R<sub>pim</sub>",
"R<sub>anom</sub>",
"CC<sub>\xbd</sub>",
"CC<sub>ano</sub>",
]
assert merging_stats_table[1] == [
"24.70 - 4.36",
2765,
543,
"5.09",
"96.11",
"7201.0",
"72.9",
"0.027",
"0.030",
"0.013",
"0.046",
"0.998*",
"0.598*",
]
assert set(stats_plots) == {
"multiplicity_vs_resolution",
"completeness",
"r_pim",
"i_over_sig_i",
"cc_one_half",
}
def test_intensity_stats_plots(report):
plots = report.intensity_stats_plots()
assert set(plots) == {
"wilson_intensity_plot",
"multiplicities",
"second_moments",
"cumulative_intensity_distribution",
"l_test",
}
def test_pychef(report):
plots = report.pychef_plots()
assert set(plots) == {
"rcp_vs_dose",
"scp_vs_dose",
"completeness_vs_dose",
"rd_vs_batch_difference",
}
|
the-stack_0_24239 | from __future__ import print_function
# Copyright (c) 2017, IGLU consortium
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# - Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# - Neither the name of the NECOTIS research group nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import sys
from timeit import default_timer as timer
# Disable v-sync
import os
os.environ['vblank_mode'] = '0'
# Load configuration file with CPU backend (TinyPanda software rendering)
from panda3d.core import loadPrcFile
loadPrcFile(os.path.join(os.path.dirname(os.path.realpath(__file__)), "ConfigCPU.prc"))
from benchmark import BenchmarkEnvironment
def getFpsAll(nbSteps):
# 'acoustics' world temporarily disabled
env = BenchmarkEnvironment(activeEngines=['physics', 'render'])
start = timer()
env.simulate(nbSteps=nbSteps)
end = timer()
elapsed = (end - start)
env.destroy()
fps = nbSteps/elapsed
return fps
def getFpsRenderOnly(nbSteps):
env = BenchmarkEnvironment(activeEngines=['render'])
start = timer()
env.simulate(nbSteps=nbSteps)
end = timer()
elapsed = (end - start)
env.destroy()
fps = nbSteps/elapsed
return fps
def getFpsPhysicsOnly(nbSteps):
env = BenchmarkEnvironment(activeEngines=['physics'])
start = timer()
env.simulate(nbSteps=nbSteps)
end = timer()
elapsed = (end - start)
env.destroy()
fps = nbSteps/elapsed
return fps
def getFpsAcousticsOnly(nbSteps):
env = BenchmarkEnvironment(activeEngines=['acoustics'])
start = timer()
env.simulate(nbSteps=nbSteps)
end = timer()
elapsed = (end - start)
env.destroy()
fps = nbSteps/elapsed
return fps
def main():
nbSteps = 100
print('FPS (all): ', getFpsAll(nbSteps))
print('FPS (render-only): ', getFpsRenderOnly(nbSteps))
print('FPS (physics-only): ', getFpsPhysicsOnly(nbSteps))
# print 'FPS (acoustics-only): ', getFpsAcousticsOnly(nbSteps)
print('FPS (acoustics-only): ', "temporarily disabled")
return 0
if __name__ == "__main__":
sys.exit(main())
|
the-stack_0_24241 | """
Low-level classes for interacting directly with the Strava API webservers.
"""
from __future__ import division, absolute_import, print_function, unicode_literals
import abc
import logging
import functools
import requests
import six
from six.moves.urllib.parse import urlunsplit, urljoin, urlencode
from stravalib import exc
@six.add_metaclass(abc.ABCMeta)
class ApiV3(object):
"""
This class is responsible for performing the HTTP requests, rate limiting, and error handling.
"""
server = 'www.strava.com'
# Note: The hostname for webhook events is different than normal API requests
# (via http://strava.github.io/api/partner/v3/events/)
server_webhook_events = 'api.strava.com'
api_base = '/api/v3'
def __init__(self, access_token=None, requests_session=None, rate_limiter=None):
"""
Initialize this protocol client, optionally providing a (shared) :class:`requests.Session`
object.
:param access_token: The token that provides access to a specific Strava account.
:type access_token: str
:param requests_session: An existing :class:`requests.Session` object to use.
:type requests_session::class:`requests.Session`
"""
self.log = logging.getLogger('{0.__module__}.{0.__name__}'.format(self.__class__))
self.access_token = access_token
if requests_session:
self.rsession = requests_session
else:
self.rsession = requests.Session()
if rate_limiter is None:
# Make it a dummy function, so we don't have to check if it's defined before
# calling it later
rate_limiter = lambda x=None: None
self.rate_limiter = rate_limiter
def authorization_url(self, client_id, redirect_uri, approval_prompt='auto', scope=None, state=None):
"""
Get the URL needed to authorize your application to access a Strava user's information.
See https://developers.strava.com/docs/authentication/
:param client_id: The numeric developer client id.
:type client_id: int
:param redirect_uri: The URL that Strava will redirect to after successful (or failed) authorization.
:type redirect_uri: str
:param approval_prompt: Whether to prompt for approval even if approval already granted to app.
Choices are 'auto' or 'force'. (Default is 'auto')
:type approval_prompt: str
:param scope: The access scope required. Omit to imply "read" and "activity:read"
Valid values are 'read', 'read_all', 'profile:read_all', 'profile:write', 'profile:read_all',
'activity:read_all', 'activity:write'.
:type scope: list[str]
:param state: An arbitrary variable that will be returned to your application in the redirect URI.
:type state: str
:return: The URL to use for authorization link.
:rtype: str
"""
assert approval_prompt in ('auto', 'force')
if scope is None:
scope = ['read', 'activity:read']
elif isinstance(scope, (six.text_type, six.binary_type)):
scope = [scope]
unsupported = set(scope) - {'read', 'read_all',
'profile:read_all', 'profile:write',
'activity:read', 'activity:read_all',
'activity:write'}
assert not unsupported, 'Unsupported scope value(s): {}'.format(unsupported)
if isinstance(scope, (list, tuple)):
scope = ','.join(scope)
params = {'client_id': client_id,
'redirect_uri': redirect_uri,
'approval_prompt': approval_prompt,
'response_type': 'code'}
if scope is not None:
params['scope'] = scope
if state is not None:
params['state'] = state
return urlunsplit(('https', self.server, '/oauth/authorize', urlencode(params), ''))
def exchange_code_for_token(self, client_id, client_secret, code):
"""
Exchange the temporary authorization code (returned with redirect from strava authorization URL)
for a short-lived access token and a refresh token (used to obtain the next access token later on).
:param client_id: The numeric developer client id.
:type client_id: int
:param client_secret: The developer client secret
:type client_secret: str
:param code: The temporary authorization code
:type code: str
:return: Dictionary containing the access_token, refresh_token
and expires_at (number of seconds since Epoch when the provided access token will expire)
:rtype: dict
"""
response = self._request('https://{0}/oauth/token'.format(self.server),
params={'client_id': client_id, 'client_secret': client_secret, 'code': code,
'grant_type': 'authorization_code'},
method='POST')
access_info = dict()
access_info['access_token'] = response['access_token']
access_info['refresh_token'] = response['refresh_token']
access_info['expires_at'] = response['expires_at']
self.access_token = response['access_token']
return access_info
def refresh_access_token(self, client_id, client_secret, refresh_token):
"""
Exchanges the previous refresh token for a short-lived access token and a new
refresh token (used to obtain the next access token later on).
:param client_id: The numeric developer client id.
:type client_id: int
:param client_secret: The developer client secret
:type client_secret: str
:param refresh_token: The refresh token obtain from a previous authorization request
:type refresh_token: str
:return: Dictionary containing the access_token, refresh_token
and expires_at (number of seconds since Epoch when the provided access token will expire)
:rtype: dict
"""
response = self._request('https://{0}/oauth/token'.format(self.server),
params={'client_id': client_id, 'client_secret': client_secret,
'refresh_token': refresh_token, 'grant_type': 'refresh_token'},
method='POST')
access_info = dict()
access_info['access_token'] = response['access_token']
access_info['refresh_token'] = response['refresh_token']
access_info['expires_at'] = response['expires_at']
self.access_token = response['access_token']
return access_info
def _resolve_url(self, url, use_webhook_server):
server = use_webhook_server and self.server_webhook_events or self.server
if not url.startswith('http'):
url = urljoin('https://{0}'.format(server), self.api_base + '/' + url.strip('/'))
return url
def _request(self, url, params=None, files=None, method='GET', check_for_errors=True, use_webhook_server=False):
"""
Perform the underlying request, returning the parsed JSON results.
:param url: The request URL.
:type url: str
:param params: Request parameters
:type params: Dict[str,Any]
:param files: Dictionary of file name to file-like objects.
:type files: Dict[str,file]
:param method: The request method (GET/POST/etc.)
:type method: str
:param check_for_errors: Whether to raise
:type check_for_errors: bool
:param use_webhook_server: Whether to use the webhook server for this request.
:type use_webhook_server: bool
:return: The parsed JSON response.
:rtype: Dict[str,Any]
"""
url = self._resolve_url(url, use_webhook_server)
self.log.info("{method} {url!r} with params {params!r}".format(method=method, url=url, params=params))
if params is None:
params = {}
if self.access_token:
params['access_token'] = self.access_token
methods = {'GET': self.rsession.get,
'POST': functools.partial(self.rsession.post, files=files),
'PUT': self.rsession.put,
'DELETE': self.rsession.delete}
try:
requester = methods[method.upper()]
except KeyError:
raise ValueError("Invalid/unsupported request method specified: {0}".format(method))
raw = requester(url, params=params)
# Rate limits are taken from HTTP response headers
# https://strava.github.io/api/#rate-limiting
self.rate_limiter(raw.headers)
if check_for_errors:
self._handle_protocol_error(raw)
# 204 = No content
if raw.status_code in [204]:
resp = {}
else:
resp = raw.json()
return resp
def _handle_protocol_error(self, response):
"""
Parses the raw response from the server, raising a :class:`stravalib.exc.Fault` if the
server returned an error.
:param response: The response object.
:raises Fault: If the response contains an error.
"""
error_str = None
try:
json_response = response.json()
except ValueError:
pass
else:
if 'message' in json_response or 'errors' in json_response:
error_str = '{0}: {1}'.format(json_response.get('message', 'Undefined error'), json_response.get('errors'))
# Special subclasses for some errors
msg = None
exc_class = None
if response.status_code == 404:
msg = '%s: %s' % (response.reason, error_str)
exc_class = exc.ObjectNotFound
elif response.status_code == 401:
msg = '%s: %s' % (response.reason, error_str)
exc_class = exc.AccessUnauthorized
elif 400 <= response.status_code < 500:
msg = '%s Client Error: %s [%s]' % (response.status_code, response.reason, error_str)
exc_class = exc.Fault
elif 500 <= response.status_code < 600:
msg = '%s Server Error: %s [%s]' % (response.status_code, response.reason, error_str)
exc_class = exc.Fault
elif error_str:
msg = error_str
exc_class = exc.Fault
if exc_class is not None:
raise exc_class(msg, response=response)
return response
def _extract_referenced_vars(self, s):
"""
Utility method to find the referenced format variables in a string.
(Assumes string.format() format vars.)
:param s: The string that contains format variables. (e.g. "{foo}-text")
:return: The list of referenced variable names. (e.g. ['foo'])
:rtype: list
"""
d = {}
while True:
try:
s.format(**d)
except KeyError as exc:
# exc.args[0] contains the name of the key that was not found;
# 0 is used because it appears to work with all types of placeholders.
d[exc.args[0]] = 0
else:
break
return d.keys()
def get(self, url, check_for_errors=True, use_webhook_server=False, **kwargs):
"""
Performs a generic GET request for specified params, returning the response.
"""
referenced = self._extract_referenced_vars(url)
url = url.format(**kwargs)
params = dict([(k, v) for k, v in kwargs.items() if not k in referenced])
return self._request(url, params=params, check_for_errors=check_for_errors, use_webhook_server=use_webhook_server)
def post(self, url, files=None, check_for_errors=True, use_webhook_server=False, **kwargs):
"""
Performs a generic POST request for specified params, returning the response.
"""
referenced = self._extract_referenced_vars(url)
url = url.format(**kwargs)
params = dict([(k, v) for k, v in kwargs.items() if not k in referenced])
return self._request(url, params=params, files=files, method='POST', check_for_errors=check_for_errors, use_webhook_server=use_webhook_server)
def put(self, url, check_for_errors=True, use_webhook_server=False, **kwargs):
"""
Performs a generic PUT request for specified params, returning the response.
"""
referenced = self._extract_referenced_vars(url)
url = url.format(**kwargs)
params = dict([(k, v) for k, v in kwargs.items() if not k in referenced])
return self._request(url, params=params, method='PUT', check_for_errors=check_for_errors, use_webhook_server=use_webhook_server)
def delete(self, url, check_for_errors=True, use_webhook_server=False, **kwargs):
"""
Performs a generic DELETE request for specified params, returning the response.
"""
referenced = self._extract_referenced_vars(url)
url = url.format(**kwargs)
params = dict([(k, v) for k, v in kwargs.items() if not k in referenced])
return self._request(url, params=params, method='DELETE', check_for_errors=check_for_errors, use_webhook_server=use_webhook_server)
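def _example_oauth_flow():
    """Illustrative sketch only: the client id, secret, redirect URI and returned
    code are placeholders, not real credentials."""
    api = ApiV3()
    url = api.authorization_url(client_id=1234,
                                redirect_uri='http://localhost/authorized',
                                scope=['read', 'activity:read'])
    # The user visits `url`; Strava redirects back with a ?code=... parameter.
    token = api.exchange_code_for_token(1234, 'client-secret', 'code-from-redirect')
    # Subsequent calls reuse the stored access token transparently.
    return api.get('/athlete'), token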
|
the-stack_0_24243 | #!/usr/bin/env python
# ----------------------------------------------------------------
# Programmer(s): Daniel R. Reynolds @ SMU
# ----------------------------------------------------------------
# SUNDIALS Copyright Start
# Copyright (c) 2002-2021, Lawrence Livermore National Security
# and Southern Methodist University.
# All rights reserved.
#
# See the top-level LICENSE and NOTICE files for details.
#
# SPDX-License-Identifier: BSD-3-Clause
# SUNDIALS Copyright End
# ----------------------------------------------------------------
# matplotlib-based plotting script for ODE examples
# imports
import sys
import pylab as plt
import numpy as np
# load solution data file
data = np.loadtxt('solution.txt', dtype=np.double)
# determine number of time steps, number of fields
nt,nv = np.shape(data)
# extract time array
times = data[:,0]
# parse comment line to determine solution names
f = open('solution.txt', 'r')
commentline = f.readline()
commentsplit = commentline.split()
names = commentsplit[2:]
# create plot
plt.figure()
# add curves to figure
for i in range(nv-1):
plt.plot(times,data[:,i+1],label=names[i])
plt.xlabel('t')
if (nv > 2):
plt.ylabel('solutions')
else:
plt.ylabel('solution')
plt.legend(loc='upper right', shadow=True)
plt.grid()
plt.savefig('solution.png')
##### end of script #####
|
the-stack_0_24244 | from pythran.tests import TestEnv
from pythran.typing import List, Set, Dict, NDArray
import numpy as np
class TestNoGil(TestEnv):
def test_list_param(self):
code="""
def list_param(l):
return l, sum(i*j for i in l for j in l)
"""
self.run_test(code, list(range(3000)), list_param=[List[int]],
thread_count=4)
def test_set_param(self):
code="""
def set_param(l):
return {sum(l), sum(i*j for i in l for j in l)}, l
"""
self.run_test(code, set(range(3000)), set_param=[Set[int]],
thread_count=4)
def test_dict_param(self):
code="""
def dict_param(l):
return {sum(i*j for i in l.keys() for j in l.values()): l}, l
"""
self.run_test(code, dict(zip(range(3000), range(3000))),
dict_param=[Dict[int, int]],
thread_count=4)
def test_ndarray_param(self):
code="""
import numpy as np
def ndarray_param(l):
return np.array([i*j for i in l for j in l]), l
"""
self.run_test(code, np.ones(100, dtype=int),
ndarray_param=[NDArray[int, :]],
thread_count=4)
|
the-stack_0_24245 | import OSC
import timeit
wall_clock = timeit.default_timer
from ..sphere import sphere
# manage rotation of the simulated sphere
# and simulation of touches on this sphere via mouse presses
class RotationManager:
def __init__(self, auto_spin=False, osc_port=3333):
self.osc_client = OSC.OSCClient()
self.osc_client.connect(('localhost', osc_port))
self.auto_spin = auto_spin
self.rotation = [-90,0]
self.last_rotation = [0,0]
self.rotate_scale = -0.2
self.frame_ctr = 0
self.drag_start = None
self.last_touch = wall_clock()
self.spin = 0
self.touch_id = 0
self.target_spin = 0
self.auto_spin = False
# manage simulated touches on the sphere
self.touch_is_down = False
self.touch_pos = (0,0)
self._sphere_point = None # updated all the time in lon, lat format
self.sphere_point = (-1, -1) # updated only while the (right) mouse is down
self.locked = False
def lock_rotation(self):
self.locked = True
def unlock_rotation(self):
self.locked = False
def send_osc(self, addr, elements):
"""Send a message to the given address
using the open OSC client connection"""
oscmsg = OSC.OSCMessage()
oscmsg.setAddress(addr)
for elt in elements:
oscmsg.append(elt)
self.osc_client.send(oscmsg)
def send_touch(self, polar=None):
"""Send the simulated touches over OSC"""
self.send_osc("/tuio/2Dcur", ['alive'])
if polar:
lon, lat = polar
tuio = sphere.polar_to_tuio(lon, lat)
self.send_osc("/tuio/2Dcur", ['set', self.touch_id, tuio[0], 1.0-tuio[1]])
self.send_osc("/tuio/2Dcur", ['fseq', self.frame_ctr])
self.frame_ctr += 1
# left mouse down
def press(self, x, y):
self.drag_start = (x,y)
self.last_rotation = list(self.rotation)
self.last_touch = wall_clock()
# left mouse drag; spin the sphere
def drag(self, x, y):
if self.drag_start is not None:
new_pos = (x,y)
self.rotation[0] = self.last_rotation[0] + (self.drag_start[0] - new_pos[0]) * self.rotate_scale
self.rotation[1] = self.last_rotation[1] + (self.drag_start[1] - new_pos[1]) * self.rotate_scale * 0.5
self.last_touch = wall_clock()
else:
# we started dragging without a press?!
# just simulate the press
self.press(x, y)
# left mouse up; sphere relaxes
def release(self,x,y):
self.drag_start = None
self.last_touch = wall_clock()
# return active touch point
def get_touch_point(self):
if self.touch_is_down:
return self._sphere_point
else:
return None
# get position of mouse in screen coords
def get_mouse_pos(self):
if self.touch_is_down:
return self.touch_pos
else:
return None
# simulated touch point down
def touch_down(self, x, y):
self.touch_is_down = True
self.touch_pos = (x,y)
# simulated touch point up
def touch_release(self, x, y):
self.touch_is_down = False
self._sphere_point = None
self.touch_id += 1 # make sure we have unique ids for each simulated touch
# simulated touch point moves
def touch_drag(self, x, y):
self.touch_pos = (x,y)
if not self.touch_is_down:
self.touch_down(x,y)
def tick(self):
# autorotate the sphere if needed
if wall_clock()-self.last_touch>3 and self.auto_spin:
self.target_spin = 0.2
else:
self.target_spin = 0.0
# smooth out polar rotation
self.spin = 0.8 *self.spin + 0.2*self.target_spin
self.rotation[0] += self.spin
# relax sphere back towards equatorial alignment with horizontal
if self.drag_start is None and not self.locked:
self.rotation[1] *= 0.95
# send tuio if the touch is down
if self.touch_is_down and self._sphere_point is not None:
self.send_touch(self._sphere_point)
else:
self.send_touch(None)
# return overall rotation of the sphere as lon,lat pair
def get_rotation(self):
return self.rotation
def get_sphere_touch(self):
if self.touch_is_down:
return self._sphere_point
else:
return None
def set_sphere_touch(self, lon, lat):
# note: this should only be read while the mouse button is down
# outside of a mouse down event, this will change as the sphere
# is rotated, which won't be the desired effect!
self._sphere_point = (lon, lat)
|
the-stack_0_24248 | from __future__ import unicode_literals, division, absolute_import
from builtins import * # noqa pylint: disable=unused-import, redefined-builtin
import pytest
from flexget.task import TaskAbort
class TestAbortIfExists(object):
config = """
templates:
global:
disable: [seen]
mock:
- {title: 'test', location: 'mock://some_file.lftp-get-status'}
- {title: 'test2', location: 'mock://some_file.mkv'}
tasks:
test_abort:
abort_if_exists:
regexp: '.*\.lftp-get-status'
field: 'location'
test_not_abort:
abort_if_exists:
regexp: '.*\.lftp-get-status'
field: 'title'
"""
def test_abort(self, execute_task):
with pytest.raises(TaskAbort):
task = execute_task('test_abort')
def test_not_abort(self, execute_task):
task = execute_task('test_not_abort')
        assert not task.aborted, 'Task should not have aborted'
|
the-stack_0_24250 | #!/usr/bin/env python
# coding: utf-8
# In[1]:
import time
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
sns.set()
# In[2]:
mnist = input_data.read_data_sets("", validation_size=0)
# In[3]:
class Model:
def __init__(self):
self.X = tf.placeholder(tf.float32, [None, 784])
self.Y = tf.placeholder(tf.float32, [None, 10])
w1 = tf.Variable(tf.random_normal([784, 256], stddev=np.sqrt(1 / 784)))
b1 = tf.Variable(tf.random_normal([256], stddev=0))
w2 = tf.Variable(tf.random_normal([256, 100], stddev=np.sqrt(1 / 256)))
b2 = tf.Variable(tf.random_normal([100], stddev=0))
w3 = tf.Variable(tf.random_normal([100, 10], stddev=np.sqrt(1 / 100)))
b3 = tf.Variable(tf.random_normal([10], stddev=0))
feedforward = tf.nn.selu(tf.matmul(self.X, w1) + b1)
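        # SELU activations are paired with alpha dropout so that the self-normalising
        # mean/variance of the activations is preserved through the dropped units.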
feeddropout = tf.contrib.nn.alpha_dropout(feedforward, 0.5)
feedforward = tf.nn.selu(tf.matmul(feeddropout, w2) + b2)
feeddropout = tf.contrib.nn.alpha_dropout(feedforward, 0.5)
self.logits = tf.matmul(feeddropout, w3) + b3
self.cost = tf.reduce_mean(
tf.nn.sigmoid_cross_entropy_with_logits(labels=self.Y, logits=self.logits)
)
self.optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.05).minimize(self.cost)
correct_prediction = tf.equal(tf.argmax(self.logits, 1), tf.argmax(self.Y, 1))
self.accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
# In[4]:
batch_size = 128
epoch = 10
train_images = (mnist.train.images - np.mean(mnist.train.images)) / np.std(mnist.train.images)
test_images = (mnist.test.images - np.mean(mnist.test.images)) / np.std(mnist.test.images)
tf.reset_default_graph()
sess = tf.InteractiveSession()
model = Model()
sess.run(tf.global_variables_initializer())
# In[5]:
LOSS, ACC_TRAIN, ACC_TEST = [], [], []
for i in range(epoch):
total_loss, total_acc = 0, 0
for n in range(0, (mnist.train.images.shape[0] // batch_size) * batch_size, batch_size):
batch_x = train_images[n : n + batch_size, :]
batch_y = np.zeros((batch_size, 10))
batch_y[np.arange(batch_size), mnist.train.labels[n : n + batch_size]] = 1.0
cost, _ = sess.run(
[model.cost, model.optimizer], feed_dict={model.X: batch_x, model.Y: batch_y}
)
total_acc += sess.run(model.accuracy, feed_dict={model.X: batch_x, model.Y: batch_y})
total_loss += cost
total_loss /= mnist.train.images.shape[0] // batch_size
total_acc /= mnist.train.images.shape[0] // batch_size
ACC_TRAIN.append(total_acc)
total_acc = 0
for n in range(
0, (mnist.test.images[:1000, :].shape[0] // batch_size) * batch_size, batch_size
):
batch_x = test_images[n : n + batch_size, :]
batch_y = np.zeros((batch_size, 10))
batch_y[np.arange(batch_size), mnist.test.labels[n : n + batch_size]] = 1.0
total_acc += sess.run(model.accuracy, feed_dict={model.X: batch_x, model.Y: batch_y})
total_acc /= mnist.test.images[:1000, :].shape[0] // batch_size
ACC_TEST.append(total_acc)
print(
"epoch: %d, accuracy train: %f, accuracy testing: %f" % (i + 1, ACC_TRAIN[-1], ACC_TEST[-1])
)
# In[ ]:
|
the-stack_0_24251 | import pandas as pd
import numpy as np
import tensorflow_hub as hub
import tensorflow as tf
import joblib
from loader import data_loader
import warnings;
warnings.filterwarnings('ignore')
datapath = '/media/einhard/Seagate Expansion Drive/3380_data/data/'
print('Loading USE...')
embed = hub.load('/media/einhard/Seagate Expansion Drive/3380_data/tensorflow_hub/universal-sentence-encoder_4')
print('Success!')
print('Loading word embeddings')
sentence_array = joblib.load('/media/einhard/Seagate Expansion Drive/3380_data/3380-Book-Recommendations/Models/Summarizer/reviewEmbeddings.pkl')
print('Loading reviews and books')
books, reviews = data_loader(datapath, 'filtered_books.csv', 'filtered_reviews.csv')
def find_reviews(query,reviews=reviews, n_results=10):
# Create vector from query and compare with global embedding
sentence = [query]
sentence_vector = np.array(embed(sentence))
inner_product = np.inner(sentence_vector, sentence_array)[0]
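    # The inner product against the pre-computed review embeddings ranks reviews by
    # semantic similarity (equal to cosine similarity for unit-length vectors).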
# Find sentences with highest inner products
top_n_sentences = pd.Series(inner_product).nlargest(n_results+1)
top_n_indices = top_n_sentences.index.tolist()
top_n_list = list(reviews.review_text.iloc[top_n_indices][1:])
print(f'Input sentence: "{query}"\n')
print(f'{n_results} most semantically similar reviews: \n\n')
print(*top_n_list, sep='\n\n')
return top_n_indices
def find_books(query, reviews=reviews, books=books):
top_n_indices = find_reviews(query)
    return books[books.book_id.isin(reviews.iloc[top_n_indices].book_id.tolist())][['title', 'name', 'description', 'weighted_score']]
find_books("I've been waiting for a hard sci-fi novel for a long time")
|
the-stack_0_24252 | # -*- coding: utf-8 -*-
"""
Created on Tue Jun 12 13:57:55 2018
@author: Denis
"""
from .utils import infos_irisdates
class sji_frame_infos:
def __init__(self, height, large, wavelength, year, month, day, hour, minute, second, millisecond, timestamp, xcenter, ycenter, xfov, yfov, xcenterslit, ycenterslit):
self.height = height
self.large = large
self.wavelength = wavelength
self.year = year
self.month = month
self.day = day
self.hour = hour
self.minute = minute
self.second = second
self.millisecond = millisecond
self.timestamp = timestamp
self.xcenter = xcenter
self.ycenter = ycenter
self.xfov = xfov
self.yfov = yfov
self.xcenterslit = xcenterslit
self.ycenterslit = ycenterslit
def sji_frameinfos_loader(sji_data, i):
(height, large) = sji_data.get_image_step(i).shape
header = sji_data.headers[i]
wavelength = int(header['TWAVE1'])
(year, month, day, hour, minute, second, millisecond) = infos_irisdates(header['DATE_OBS'])
timestamp = sji_data.get_timestamps()[i]
xcenter = float(header['XCEN'])
ycenter = float(header['YCEN'])
xfov = float(header['FOVX'])
yfov = float(header['FOVY'])
xcenterslit = float(header['SLTPX1IX'])
ycenterslit = float(header['SLTPX2IX'])
return sji_frame_infos(height, large, wavelength, year, month, day, hour, minute, second, millisecond, timestamp, xcenter, ycenter, xfov, yfov, xcenterslit, ycenterslit) |
the-stack_0_24253 | from tuneta.tune_ta import TuneTA
import pandas as pd
from pandas_ta import percent_return
from sklearn.model_selection import train_test_split
import yfinance as yf
if __name__ == "__main__":
# Download data set from yahoo, calculate next day return and split into train and test
X = yf.download("SPY", period="10y", interval="1d", auto_adjust=True)
y = percent_return(X.Close, offset=-1)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.3, shuffle=False)
# Initialize with x cores and show trial results
tt = TuneTA(n_jobs=4, verbose=True)
# Optimize indicators
tt.fit(X_train, y_train,
indicators=['tta.RSI'],
ranges=[(2, 30), (31, 180)],
trials=500,
early_stop=100,
)
# Show correlation of indicators
tt.report(target_corr=True, features_corr=True)
# Add indicators to X_train
features = tt.transform(X_train)
X_train = pd.concat([X_train, features], axis=1)
# Add same indicators to X_test
features = tt.transform(X_test)
X_test = pd.concat([X_test, features], axis=1)
|
the-stack_0_24256 | # -*- coding: utf-8 -*-
"""Implements file-like objects for reading and writing from/to S3."""
import io
import contextlib
import functools
import logging
import warnings
import boto3
import botocore.client
import six
import sys
import smart_open.bytebuffer
logger = logging.getLogger(__name__)
# Multiprocessing is unavailable in App Engine (and possibly other sandboxes).
# The only method currently relying on it is iter_bucket, which is instructed
# whether to use it by the MULTIPROCESSING flag.
_MULTIPROCESSING = False
try:
import multiprocessing.pool
_MULTIPROCESSING = True
except ImportError:
warnings.warn("multiprocessing could not be imported and won't be used")
DEFAULT_MIN_PART_SIZE = 50 * 1024**2
"""Default minimum part size for S3 multipart uploads"""
MIN_MIN_PART_SIZE = 5 * 1024 ** 2
"""The absolute minimum permitted by Amazon."""
READ_BINARY = 'rb'
WRITE_BINARY = 'wb'
MODES = (READ_BINARY, WRITE_BINARY)
"""Allowed I/O modes for working with S3."""
_BINARY_TYPES = (six.binary_type, bytearray)
"""Allowed binary buffer types for writing to the underlying S3 stream"""
if sys.version_info >= (2, 7):
_BINARY_TYPES = (six.binary_type, bytearray, memoryview)
BINARY_NEWLINE = b'\n'
SUPPORTED_SCHEMES = ("s3", "s3n", 's3u', "s3a")
DEFAULT_BUFFER_SIZE = 128 * 1024
START = 0
CURRENT = 1
END = 2
WHENCE_CHOICES = [START, CURRENT, END]
def clamp(value, minval, maxval):
return max(min(value, maxval), minval)
def make_range_string(start, stop=None):
#
# https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35
#
if stop is None:
return 'bytes=%d-' % start
return 'bytes=%d-%d' % (start, stop)
def open(
bucket_id,
key_id,
mode,
buffer_size=DEFAULT_BUFFER_SIZE,
min_part_size=DEFAULT_MIN_PART_SIZE,
session=None,
resource_kwargs=None,
multipart_upload_kwargs=None,
):
"""Open an S3 object for reading or writing.
Parameters
----------
bucket_id: str
The name of the bucket this object resides in.
key_id: str
The name of the key within the bucket.
mode: str
The mode for opening the object. Must be either "rb" or "wb".
buffer_size: int, optional
The buffer size to use when performing I/O.
min_part_size: int, optional
The minimum part size for multipart uploads. For writing only.
session: object, optional
The S3 session to use when working with boto3.
resource_kwargs: dict, optional
Keyword arguments to use when accessing the S3 resource for reading or writing.
multipart_upload_kwargs: dict, optional
Additional parameters to pass to boto3's initiate_multipart_upload function.
For writing only.
"""
logger.debug('%r', locals())
if mode not in MODES:
raise NotImplementedError('bad mode: %r expected one of %r' % (mode, MODES))
if resource_kwargs is None:
resource_kwargs = {}
if multipart_upload_kwargs is None:
multipart_upload_kwargs = {}
if mode == READ_BINARY:
fileobj = SeekableBufferedInputBase(
bucket_id,
key_id,
buffer_size=buffer_size,
session=session,
resource_kwargs=resource_kwargs,
)
elif mode == WRITE_BINARY:
fileobj = BufferedOutputBase(
bucket_id,
key_id,
min_part_size=min_part_size,
session=session,
multipart_upload_kwargs=multipart_upload_kwargs,
resource_kwargs=resource_kwargs,
)
else:
assert False, 'unexpected mode: %r' % mode
return fileobj
class RawReader(object):
"""Read an S3 object."""
def __init__(self, s3_object):
self.position = 0
self._object = s3_object
self._body = s3_object.get()['Body']
def read(self, size=-1):
if size == -1:
return self._body.read()
return self._body.read(size)
class SeekableRawReader(object):
"""Read an S3 object."""
def __init__(self, s3_object):
self._object = s3_object
try:
self._content_length = self._object.content_length
except botocore.client.ClientError:
raise ValueError('the s3 key %r does not exist, or is forbidden for access' % s3_object.key)
self.seek(0)
def seek(self, position):
"""Seek to the specified position (byte offset) in the S3 key.
:param int position: The byte offset from the beginning of the key.
"""
self._position = position
range_string = make_range_string(self._position)
logger.debug('content_length: %r range_string: %r', self._content_length, range_string)
#
# Close old body explicitly.
        # On the first seek(), self._body does not exist yet. Catch the exception and do nothing.
#
try:
self._body.close()
except AttributeError:
pass
if position == self._content_length == 0 or position == self._content_length:
#
# When reading, we can't seek to the first byte of an empty file.
# Similarly, we can't seek past the last byte. Do nothing here.
#
self._body = io.BytesIO()
else:
self._body = self._object.get(Range=range_string)['Body']
def read(self, size=-1):
if self._position >= self._content_length:
return b''
if size == -1:
binary = self._body.read()
else:
binary = self._body.read(size)
self._position += len(binary)
return binary
class BufferedInputBase(io.BufferedIOBase):
def __init__(self, bucket, key, buffer_size=DEFAULT_BUFFER_SIZE,
line_terminator=BINARY_NEWLINE, session=None, resource_kwargs=None):
if session is None:
session = boto3.Session()
if resource_kwargs is None:
resource_kwargs = {}
s3 = session.resource('s3', **resource_kwargs)
self._object = s3.Object(bucket, key)
self._raw_reader = RawReader(self._object)
self._content_length = self._object.content_length
self._current_pos = 0
self._buffer = smart_open.bytebuffer.ByteBuffer(buffer_size)
self._eof = False
self._line_terminator = line_terminator
#
# This member is part of the io.BufferedIOBase interface.
#
self.raw = None
#
# Override some methods from io.IOBase.
#
def close(self):
"""Flush and close this stream."""
logger.debug("close: called")
self._object = None
def readable(self):
"""Return True if the stream can be read from."""
return True
def seekable(self):
return False
#
# io.BufferedIOBase methods.
#
def detach(self):
"""Unsupported."""
raise io.UnsupportedOperation
def read(self, size=-1):
"""Read up to size bytes from the object and return them."""
if size == 0:
return b''
elif size < 0:
from_buf = self._read_from_buffer()
self._current_pos = self._content_length
return from_buf + self._raw_reader.read()
#
# Return unused data first
#
if len(self._buffer) >= size:
return self._read_from_buffer(size)
#
# If the stream is finished, return what we have.
#
if self._eof:
return self._read_from_buffer()
#
# Fill our buffer to the required size.
#
# logger.debug('filling %r byte-long buffer up to %r bytes', len(self._buffer), size)
self._fill_buffer(size)
return self._read_from_buffer(size)
def read1(self, size=-1):
"""This is the same as read()."""
return self.read(size=size)
def readinto(self, b):
"""Read up to len(b) bytes into b, and return the number of bytes
read."""
data = self.read(len(b))
if not data:
return 0
b[:len(data)] = data
return len(data)
def readline(self, limit=-1):
"""Read up to and including the next newline. Returns the bytes read."""
if limit != -1:
raise NotImplementedError('limits other than -1 not implemented yet')
the_line = io.BytesIO()
while not (self._eof and len(self._buffer) == 0):
#
# In the worst case, we're reading the unread part of self._buffer
# twice here, once in the if condition and once when calling index.
#
# This is sub-optimal, but better than the alternative: wrapping
# .index in a try..except, because that is slower.
#
remaining_buffer = self._buffer.peek()
if self._line_terminator in remaining_buffer:
next_newline = remaining_buffer.index(self._line_terminator)
the_line.write(self._read_from_buffer(next_newline + 1))
break
else:
the_line.write(self._read_from_buffer())
self._fill_buffer()
return the_line.getvalue()
def terminate(self):
"""Do nothing."""
pass
#
# Internal methods.
#
def _read_from_buffer(self, size=-1):
"""Remove at most size bytes from our buffer and return them."""
# logger.debug('reading %r bytes from %r byte-long buffer', size, len(self._buffer))
size = size if size >= 0 else len(self._buffer)
part = self._buffer.read(size)
self._current_pos += len(part)
# logger.debug('part: %r', part)
return part
def _fill_buffer(self, size=-1):
size = size if size >= 0 else self._buffer._chunk_size
while len(self._buffer) < size and not self._eof:
bytes_read = self._buffer.fill(self._raw_reader)
if bytes_read == 0:
logger.debug('reached EOF while filling buffer')
self._eof = True
class SeekableBufferedInputBase(BufferedInputBase):
"""Reads bytes from S3.
Implements the io.BufferedIOBase interface of the standard library."""
def __init__(self, bucket, key, buffer_size=DEFAULT_BUFFER_SIZE,
line_terminator=BINARY_NEWLINE, session=None, resource_kwargs=None):
if session is None:
session = boto3.Session()
if resource_kwargs is None:
resource_kwargs = {}
s3 = session.resource('s3', **resource_kwargs)
self._object = s3.Object(bucket, key)
self._raw_reader = SeekableRawReader(self._object)
self._content_length = self._object.content_length
self._current_pos = 0
self._buffer = smart_open.bytebuffer.ByteBuffer(buffer_size)
self._eof = False
self._line_terminator = line_terminator
#
# This member is part of the io.BufferedIOBase interface.
#
self.raw = None
def seekable(self):
"""If False, seek(), tell() and truncate() will raise IOError.
We offer only seek support, and no truncate support."""
return True
def seek(self, offset, whence=START):
"""Seek to the specified position.
:param int offset: The offset in bytes.
:param int whence: Where the offset is from.
Returns the position after seeking."""
logger.debug('seeking to offset: %r whence: %r', offset, whence)
if whence not in WHENCE_CHOICES:
raise ValueError('invalid whence, expected one of %r' % WHENCE_CHOICES)
if whence == START:
new_position = offset
elif whence == CURRENT:
new_position = self._current_pos + offset
else:
new_position = self._content_length + offset
new_position = clamp(new_position, 0, self._content_length)
self._current_pos = new_position
self._raw_reader.seek(new_position)
logger.debug('new_position: %r', self._current_pos)
self._buffer.empty()
self._eof = self._current_pos == self._content_length
return self._current_pos
def tell(self):
"""Return the current position within the file."""
return self._current_pos
def truncate(self, size=None):
"""Unsupported."""
raise io.UnsupportedOperation
class BufferedOutputBase(io.BufferedIOBase):
"""Writes bytes to S3.
Implements the io.BufferedIOBase interface of the standard library."""
def __init__(
self,
bucket,
key,
min_part_size=DEFAULT_MIN_PART_SIZE,
session=None,
resource_kwargs=None,
multipart_upload_kwargs=None,
):
if min_part_size < MIN_MIN_PART_SIZE:
logger.warning("S3 requires minimum part size >= 5MB; \
multipart upload may fail")
if session is None:
session = boto3.Session()
if resource_kwargs is None:
resource_kwargs = {}
if multipart_upload_kwargs is None:
multipart_upload_kwargs = {}
s3 = session.resource('s3', **resource_kwargs)
try:
self._object = s3.Object(bucket, key)
self._min_part_size = min_part_size
self._mp = self._object.initiate_multipart_upload(**multipart_upload_kwargs)
except botocore.client.ClientError:
raise ValueError('the bucket %r does not exist, or is forbidden for access' % bucket)
self._buf = io.BytesIO()
self._total_bytes = 0
self._total_parts = 0
self._parts = []
#
# This member is part of the io.BufferedIOBase interface.
#
self.raw = None
def flush(self):
pass
#
# Override some methods from io.IOBase.
#
def close(self):
logger.debug("closing")
if self._buf.tell():
self._upload_next_part()
if self._total_bytes and self._mp:
self._mp.complete(MultipartUpload={'Parts': self._parts})
logger.debug("completed multipart upload")
elif self._mp:
#
# AWS complains with "The XML you provided was not well-formed or
# did not validate against our published schema" when the input is
# completely empty => abort the upload, no file created.
#
# We work around this by creating an empty file explicitly.
#
logger.info("empty input, ignoring multipart upload")
assert self._mp, "no multipart upload in progress"
self._mp.abort()
self._object.put(Body=b'')
self._mp = None
logger.debug("successfully closed")
@property
def closed(self):
return self._mp is None
def writable(self):
"""Return True if the stream supports writing."""
return True
def tell(self):
"""Return the current stream position."""
return self._total_bytes
#
# io.BufferedIOBase methods.
#
def detach(self):
raise io.UnsupportedOperation("detach() not supported")
def write(self, b):
"""Write the given bytes (binary string) to the S3 file.
There's buffering happening under the covers, so this may not actually
do any HTTP transfer right away."""
if not isinstance(b, _BINARY_TYPES):
raise TypeError(
"input must be one of %r, got: %r" % (_BINARY_TYPES, type(b)))
self._buf.write(b)
self._total_bytes += len(b)
if self._buf.tell() >= self._min_part_size:
self._upload_next_part()
return len(b)
def terminate(self):
"""Cancel the underlying multipart upload."""
assert self._mp, "no multipart upload in progress"
self._mp.abort()
self._mp = None
#
# Internal methods.
#
def _upload_next_part(self):
part_num = self._total_parts + 1
logger.info("uploading part #%i, %i bytes (total %.3fGB)",
part_num, self._buf.tell(), self._total_bytes / 1024.0 ** 3)
self._buf.seek(0)
part = self._mp.Part(part_num)
upload = part.upload(Body=self._buf)
self._parts.append({'ETag': upload['ETag'], 'PartNumber': part_num})
logger.debug("upload of part #%i finished" % part_num)
self._total_parts += 1
self._buf = io.BytesIO()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if exc_type is not None:
self.terminate()
else:
self.close()
def iter_bucket(bucket_name, prefix='', accept_key=None,
key_limit=None, workers=16, retries=3):
"""
Iterate and download all S3 objects under `s3://bucket_name/prefix`.
Parameters
----------
bucket_name: str
The name of the bucket.
prefix: str, optional
        Limits the iteration to keys starting with the prefix.
accept_key: callable, optional
This is a function that accepts a key name (unicode string) and
returns True/False, signalling whether the given key should be downloaded.
The default behavior is to accept all keys.
key_limit: int, optional
If specified, the iterator will stop after yielding this many results.
workers: int, optional
The number of subprocesses to use.
retries: int, optional
        The number of times to retry a failed download.
Yields
------
str
The full key name (does not include the bucket name).
bytes
The full contents of the key.
Notes
-----
The keys are processed in parallel, using `workers` processes (default: 16),
    to speed up downloads greatly. If multiprocessing is not available
    (i.e. _MULTIPROCESSING is False), this parameter will be ignored.
Examples
--------
>>> # get all JSON files under "mybucket/foo/"
>>> for key, content in iter_bucket(bucket_name, prefix='foo/', accept_key=lambda key: key.endswith('.json')):
... print key, len(content)
>>> # limit to 10k files, using 32 parallel workers (default is 16)
>>> for key, content in iter_bucket(bucket_name, key_limit=10000, workers=32):
... print key, len(content)
"""
if accept_key is None:
accept_key = lambda key: True
#
# If people insist on giving us bucket instances, silently extract the name
# before moving on. Works for boto3 as well as boto.
#
try:
bucket_name = bucket_name.name
except AttributeError:
pass
total_size, key_no = 0, -1
key_iterator = _list_bucket(bucket_name, prefix=prefix, accept_key=accept_key)
download_key = functools.partial(_download_key, bucket_name=bucket_name, retries=retries)
with _create_process_pool(processes=workers) as pool:
result_iterator = pool.imap_unordered(download_key, key_iterator)
for key_no, (key, content) in enumerate(result_iterator):
if True or key_no % 1000 == 0:
logger.info(
"yielding key #%i: %s, size %i (total %.1fMB)",
key_no, key, len(content), total_size / 1024.0 ** 2
)
yield key, content
total_size += len(content)
if key_limit is not None and key_no + 1 >= key_limit:
# we were asked to output only a limited number of keys => we're done
break
logger.info("processed %i keys, total size %i" % (key_no + 1, total_size))
def _list_bucket(bucket_name, prefix='', accept_key=lambda k: True):
client = boto3.client('s3')
ctoken = None
while True:
# list_objects_v2 doesn't like a None value for ContinuationToken
# so we don't set it if we don't have one.
if ctoken:
response = client.list_objects_v2(Bucket=bucket_name, Prefix=prefix, ContinuationToken=ctoken)
else:
response = client.list_objects_v2(Bucket=bucket_name, Prefix=prefix)
try:
content = response['Contents']
except KeyError:
pass
else:
for c in content:
key = c['Key']
if accept_key(key):
yield key
ctoken = response.get('NextContinuationToken', None)
if not ctoken:
break
def _download_key(key_name, bucket_name=None, retries=3):
if bucket_name is None:
raise ValueError('bucket_name may not be None')
#
# https://geekpete.com/blog/multithreading-boto3/
#
session = boto3.session.Session()
s3 = session.resource('s3')
bucket = s3.Bucket(bucket_name)
# Sometimes, https://github.com/boto/boto/issues/2409 can happen because of network issues on either side.
# Retry up to 3 times to ensure its not a transient issue.
for x in range(retries + 1):
try:
content_bytes = _download_fileobj(bucket, key_name)
except botocore.client.ClientError:
# Actually fail on last pass through the loop
if x == retries:
raise
# Otherwise, try again, as this might be a transient timeout
pass
else:
return key_name, content_bytes
def _download_fileobj(bucket, key_name):
#
# This is a separate function only because it makes it easier to inject
# exceptions during tests.
#
buf = io.BytesIO()
bucket.download_fileobj(key_name, buf)
return buf.getvalue()
class DummyPool(object):
"""A class that mimics multiprocessing.pool.Pool for our purposes."""
def imap_unordered(self, function, items):
return six.moves.map(function, items)
def terminate(self):
pass
@contextlib.contextmanager
def _create_process_pool(processes=1):
if _MULTIPROCESSING and processes:
logger.info("creating pool with %i workers", processes)
pool = multiprocessing.pool.Pool(processes=processes)
else:
logger.info("creating dummy pool")
pool = DummyPool()
yield pool
pool.terminate()
|
the-stack_0_24258 | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Performance stats constants and helpers for libFuzzer."""
import re
from clusterfuzz._internal.bot.fuzzers import dictionary_manager
from clusterfuzz._internal.bot.fuzzers import utils as fuzzer_utils
from clusterfuzz._internal.bot.fuzzers.libFuzzer import constants
from clusterfuzz._internal.fuzzing import strategy
from clusterfuzz._internal.metrics import logs
from clusterfuzz._internal.system import environment
from clusterfuzz.stacktraces import constants as stacktrace_constants
# Regular expressions to detect different types of crashes.
LEAK_TESTCASE_REGEX = re.compile(r'.*ERROR: LeakSanitizer.*')
LIBFUZZER_BAD_INSTRUMENTATION_REGEX = re.compile(
r'.*ERROR:.*Is the code instrumented for coverage.*')
LIBFUZZER_CRASH_TYPE_REGEX = r'.*Test unit written to.*{type}'
LIBFUZZER_CRASH_START_MARKER = r'.*ERROR: (libFuzzer|.*Sanitizer):'
LIBFUZZER_ANY_CRASH_TYPE_REGEX = re.compile(
r'(%s|%s)' % (LIBFUZZER_CRASH_START_MARKER,
LIBFUZZER_CRASH_TYPE_REGEX.format(type='')))
LIBFUZZER_CRASH_TESTCASE_REGEX = re.compile(
LIBFUZZER_CRASH_TYPE_REGEX.format(type='crash'))
LIBFUZZER_OOM_TESTCASE_REGEX = re.compile(
LIBFUZZER_CRASH_TYPE_REGEX.format(type='oom'))
LIBFUZZER_SLOW_UNIT_TESTCASE_REGEX = re.compile(
LIBFUZZER_CRASH_TYPE_REGEX.format(type='slow-unit'))
LIBFUZZER_TIMEOUT_TESTCASE_REGEX = re.compile(
LIBFUZZER_CRASH_TYPE_REGEX.format(type='timeout'))
# Regular expressions to detect different sections of logs.
LIBFUZZER_FUZZING_STRATEGIES = re.compile(r'cf::fuzzing_strategies:\s*(.*)')
LIBFUZZER_LOG_DICTIONARY_REGEX = re.compile(r'Dictionary: \d+ entries')
LIBFUZZER_LOG_END_REGEX = re.compile(r'Done\s+\d+\s+runs.*')
LIBFUZZER_LOG_IGNORE_REGEX = re.compile(r'.*WARNING:.*Sanitizer')
LIBFUZZER_LOG_LINE_REGEX = re.compile(
r'^#\d+[\s]*(READ|INITED|NEW|pulse|REDUCE|RELOAD|DONE|:)\s.*')
LIBFUZZER_LOG_SEED_CORPUS_INFO_REGEX = re.compile(
r'INFO:\s+seed corpus:\s+files:\s+(\d+).*rss:\s+(\d+)Mb.*')
LIBFUZZER_LOG_START_INITED_REGEX = re.compile(
r'(#\d+\s+INITED\s+|INFO:\s+-fork=\d+:\s+fuzzing in separate process).*')
LIBFUZZER_MERGE_LOG_STATS_REGEX = re.compile(
r'MERGE-OUTER:\s+\d+\s+new files with'
r'\s+(\d+)\s+new features added;'
r'\s+(\d+)\s+new coverage edges.*')
LIBFUZZER_MODULES_LOADED_REGEX = re.compile(
r'^INFO:\s+Loaded\s+\d+\s+(modules|PC tables)\s+\((\d+)\s+.*\).*')
# Regular expressions to extract different values from the log.
LIBFUZZER_LOG_MAX_LEN_REGEX = re.compile(
r'.*-max_len is not provided; libFuzzer will not generate inputs larger'
r' than (\d+) bytes.*')
def calculate_log_lines(log_lines):
"""Calculate number of logs lines of different kind in the given log."""
# Counters to be returned.
libfuzzer_lines_count = 0
other_lines_count = 0
ignored_lines_count = 0
lines_after_last_libfuzzer_line_count = 0
libfuzzer_inited = False
found_libfuzzer_crash = False
for line in log_lines:
if not libfuzzer_inited:
# Skip to start of libFuzzer log output.
if LIBFUZZER_LOG_START_INITED_REGEX.match(line):
libfuzzer_inited = True
else:
ignored_lines_count += 1
continue
if LIBFUZZER_LOG_IGNORE_REGEX.match(line):
# We should ignore lines like sanitizer warnings, etc.
ignored_lines_count += 1
continue
if LIBFUZZER_ANY_CRASH_TYPE_REGEX.match(line):
# We should ignore whole block if a libfuzzer crash is found.
# E.g. slow units.
found_libfuzzer_crash = True
elif LIBFUZZER_LOG_LINE_REGEX.match(line):
if found_libfuzzer_crash:
# Ignore previous block.
other_lines_count -= lines_after_last_libfuzzer_line_count
ignored_lines_count += lines_after_last_libfuzzer_line_count
libfuzzer_lines_count += 1
lines_after_last_libfuzzer_line_count = 0
found_libfuzzer_crash = False
elif LIBFUZZER_LOG_END_REGEX.match(line):
libfuzzer_lines_count += 1
break
else:
other_lines_count += 1
lines_after_last_libfuzzer_line_count += 1
# Ignore the lines after the last libfuzzer line.
other_lines_count -= lines_after_last_libfuzzer_line_count
ignored_lines_count += lines_after_last_libfuzzer_line_count
return other_lines_count, libfuzzer_lines_count, ignored_lines_count
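# A minimal illustrative sketch of calculate_log_lines. The log lines below are
# hypothetical, shortened examples rather than output from a real libFuzzer run:
#
#   sample_log = [
#       'INFO: Seed: 1337',               # appears before INITED -> ignored
#       '#1 INITED cov: 5 ft: 7',         # engine line (also marks start)
#       '#8 NEW cov: 6 ft: 9',            # engine line
#       'Done 100 runs in 5 second(s)',   # end marker, counted as engine line
#   ]
#   calculate_log_lines(sample_log)
#   # -> (0, 3, 1): 0 unwanted lines, 3 lines from the engine, 1 ignored line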
def strategy_column_name(strategy_name):
"""Convert the strategy name into stats column name."""
return 'strategy_%s' % strategy_name
def parse_fuzzing_strategies(log_lines, strategies):
"""Extract stats for fuzzing strategies used."""
if not strategies:
# Extract strategies from the log.
for line in log_lines:
match = LIBFUZZER_FUZZING_STRATEGIES.match(line)
if match:
strategies = match.group(1).split(',')
break
return process_strategies(strategies)
def process_strategies(strategies, name_modifier=strategy_column_name):
"""Process strategies, parsing any stored values."""
stats = {}
def parse_line_for_strategy_prefix(line, strategy_name):
"""Parse log line to find the value of a strategy with a prefix."""
strategy_prefix = strategy_name + '_'
if not line.startswith(strategy_prefix):
return
suffix_type = strategy.LIBFUZZER_STRATEGIES_WITH_PREFIX_VALUE_TYPE[
strategy_name]
try:
strategy_value = suffix_type(line[len(strategy_prefix):])
stats[name_modifier(strategy_name)] = strategy_value
except (IndexError, ValueError) as e:
logs.log_error('Failed to parse strategy "%s":\n%s\n' % (line, str(e)))
# These strategies are used with different values specified in the prefix.
for strategy_type in strategy.LIBFUZZER_STRATEGIES_WITH_PREFIX_VALUE:
for line in strategies:
parse_line_for_strategy_prefix(line, strategy_type.name)
# Other strategies are either ON or OFF, without arbitrary values.
for strategy_type in strategy.LIBFUZZER_STRATEGIES_WITH_BOOLEAN_VALUE:
if strategy_type.name in strategies:
stats[name_modifier(strategy_type.name)] = 1
return stats
def parse_performance_features(log_lines, strategies, arguments):
"""Extract stats for performance analysis."""
# TODO(ochang): Remove include_strategies once refactor is complete.
# Initialize stats with default values.
stats = {
'bad_instrumentation': 0,
'corpus_crash_count': 0,
'corpus_size': 0,
'crash_count': 0,
'dict_used': 0,
'edge_coverage': 0,
'edges_total': 0,
'feature_coverage': 0,
'initial_edge_coverage': 0,
'initial_feature_coverage': 0,
'leak_count': 0,
'log_lines_unwanted': 0,
'log_lines_from_engine': 0,
'log_lines_ignored': 0,
'max_len': 0,
'manual_dict_size': 0,
'merge_edge_coverage': 0,
'new_edges': 0,
'new_features': 0,
'oom_count': 0,
'recommended_dict_size': 0,
'slow_unit_count': 0,
'slow_units_count': 0,
'startup_crash_count': 1,
'timeout_count': 0,
}
# Extract strategy selection method.
# TODO(ochang): Move to more general place?
stats['strategy_selection_method'] = environment.get_value(
'STRATEGY_SELECTION_METHOD', default_value='default')
# Initialize all strategy stats as disabled by default.
for strategy_type in strategy.LIBFUZZER_STRATEGY_LIST:
if strategy.LIBFUZZER_STRATEGIES_WITH_PREFIX_VALUE_TYPE.get(
strategy_type.name) == str:
stats[strategy_column_name(strategy_type.name)] = ''
else:
stats[strategy_column_name(strategy_type.name)] = 0
# Process fuzzing strategies used.
stats.update(parse_fuzzing_strategies(log_lines, strategies))
(stats['log_lines_unwanted'], stats['log_lines_from_engine'],
stats['log_lines_ignored']) = calculate_log_lines(log_lines)
if stats['log_lines_from_engine'] > 0:
stats['startup_crash_count'] = 0
# Extract '-max_len' value from arguments, if possible.
stats['max_len'] = int(
fuzzer_utils.extract_argument(
arguments, constants.MAX_LEN_FLAG, remove=False) or stats['max_len'])
# Extract sizes of manual and recommended dictionary used for fuzzing.
dictionary_path = fuzzer_utils.extract_argument(
arguments, constants.DICT_FLAG, remove=False)
stats['manual_dict_size'], stats['recommended_dict_size'] = (
dictionary_manager.get_stats_for_dictionary_file(dictionary_path))
# Different crashes and other flags extracted via regexp match.
has_corpus = False
for line in log_lines:
if LIBFUZZER_BAD_INSTRUMENTATION_REGEX.match(line):
stats['bad_instrumentation'] = 1
continue
if LIBFUZZER_CRASH_TESTCASE_REGEX.match(line):
stats['crash_count'] = 1
continue
if LIBFUZZER_LOG_DICTIONARY_REGEX.match(line):
stats['dict_used'] = 1
continue
if LEAK_TESTCASE_REGEX.match(line):
stats['leak_count'] = 1
continue
if (LIBFUZZER_OOM_TESTCASE_REGEX.match(line) or
stacktrace_constants.OUT_OF_MEMORY_REGEX.match(line)):
stats['oom_count'] = 1
continue
if LIBFUZZER_SLOW_UNIT_TESTCASE_REGEX.match(line):
# Use |slow_unit_count| to track if this run had any slow units at all.
# and use |slow_units_count| to track the actual number of slow units in
# this run (used by performance analyzer).
stats['slow_unit_count'] = 1
stats['slow_units_count'] += 1
continue
match = LIBFUZZER_LOG_SEED_CORPUS_INFO_REGEX.match(line)
if match:
has_corpus = True
match = LIBFUZZER_MODULES_LOADED_REGEX.match(line)
if match:
stats['startup_crash_count'] = 0
stats['edges_total'] = int(match.group(2))
if (LIBFUZZER_TIMEOUT_TESTCASE_REGEX.match(line) or
stacktrace_constants.LIBFUZZER_TIMEOUT_REGEX.match(line)):
stats['timeout_count'] = 1
continue
if not stats['max_len']:
# Get "max_len" value from the log, if it has not been found in arguments.
match = LIBFUZZER_LOG_MAX_LEN_REGEX.match(line)
if match:
stats['max_len'] = int(match.group(1))
continue
if has_corpus and not stats['log_lines_from_engine']:
stats['corpus_crash_count'] = 1
return stats
def parse_stats_from_merge_log(log_lines):
"""Extract stats from a log produced by libFuzzer run with -merge=1."""
stats = {
'edge_coverage': 0,
'feature_coverage': 0,
}
# Reverse the list as an optimization. The line of our interest is the last.
for line in reversed(log_lines):
match = LIBFUZZER_MERGE_LOG_STATS_REGEX.match(line)
if match:
stats['edge_coverage'] = int(match.group(2))
stats['feature_coverage'] = int(match.group(1))
break
return stats
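# A minimal illustrative sketch of parse_stats_from_merge_log. The log line below
# is hypothetical, not taken from a real -merge=1 run:
#
#   merge_log = [
#       'MERGE-OUTER: 3 new files with 10 new features added; 7 new coverage edges',
#   ]
#   parse_stats_from_merge_log(merge_log)
#   # -> {'edge_coverage': 7, 'feature_coverage': 10}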
|
the-stack_0_24260 | # Copyright (c) Facebook, Inc. and its affiliates.
import json
import numpy as np
import os
import random
import re
import torch
import torch.utils.data
from PIL import Image
from torchvision import transforms as transforms_tv
import slowfast.datasets.transform as transform
import slowfast.utils.logging as logging
# import cv2
from slowfast.utils.env import pathmgr
from .build import DATASET_REGISTRY
from .transform import transforms_imagenet_train
logger = logging.get_logger(__name__)
@DATASET_REGISTRY.register()
class Imagenet(torch.utils.data.Dataset):
"""ImageNet dataset."""
def __init__(self, cfg, mode, num_retries=10):
self.num_retries = num_retries
self.cfg = cfg
self.mode = mode
self.data_path = cfg.DATA.PATH_TO_DATA_DIR
assert mode in [
"train",
"val",
"test",
], "Split '{}' not supported for ImageNet".format(mode)
logger.info("Constructing ImageNet {}...".format(mode))
if cfg.DATA.PATH_TO_PRELOAD_IMDB == "":
self._construct_imdb()
else:
self._load_imdb()
def _load_imdb(self):
split_path = os.path.join(
self.cfg.DATA.PATH_TO_PRELOAD_IMDB, f"{self.mode}.json"
)
with pathmgr.open(split_path, "r") as f:
data = f.read()
self._imdb = json.loads(data)
def _construct_imdb(self):
"""Constructs the imdb."""
# Compile the split data path
split_path = os.path.join(self.data_path, self.mode)
logger.info("{} data path: {}".format(self.mode, split_path))
# Images are stored per class in subdirs (format: n<number>)
split_files = pathmgr.ls(split_path)
self._class_ids = sorted(
f for f in split_files if re.match(r"^n[0-9]+$", f)
)
# Map ImageNet class ids to contiguous ids
self._class_id_cont_id = {v: i for i, v in enumerate(self._class_ids)}
# Construct the image db
self._imdb = []
for class_id in self._class_ids:
cont_id = self._class_id_cont_id[class_id]
im_dir = os.path.join(split_path, class_id)
for im_name in pathmgr.ls(im_dir):
im_path = os.path.join(im_dir, im_name)
self._imdb.append({"im_path": im_path, "class": cont_id})
logger.info("Number of images: {}".format(len(self._imdb)))
logger.info("Number of classes: {}".format(len(self._class_ids)))
def load_image(self, im_path):
"""Prepares the image for network input with format of CHW RGB float"""
with pathmgr.open(im_path, "rb") as f:
with Image.open(f) as im:
im = im.convert("RGB")
im = torch.from_numpy(np.array(im).astype(np.float32) / 255.0)
# H W C to C H W
im = im.permute([2, 0, 1])
return im
def _prepare_im_res(self, im_path):
# Prepare resnet style augmentation.
im = self.load_image(im_path)
# Train and test setups differ
train_size, test_size = (
self.cfg.DATA.TRAIN_CROP_SIZE,
self.cfg.DATA.TEST_CROP_SIZE,
)
if self.mode == "train":
# For training use random_sized_crop, horizontal_flip, augment, lighting
im = transform.random_sized_crop_img(
im,
train_size,
jitter_scale=self.cfg.DATA.TRAIN_JITTER_SCALES_RELATIVE,
jitter_aspect=self.cfg.DATA.TRAIN_JITTER_ASPECT_RELATIVE,
)
im, _ = transform.horizontal_flip(prob=0.5, images=im)
# im = transforms.augment(im, cfg.TRAIN.AUGMENT)
im = transform.lighting_jitter(
im,
0.1,
self.cfg.DATA.TRAIN_PCA_EIGVAL,
self.cfg.DATA.TRAIN_PCA_EIGVEC,
)
else:
# For testing use scale and center crop
im, _ = transform.uniform_crop(
im, test_size, spatial_idx=1, scale_size=train_size
)
# For training and testing use color normalization
im = transform.color_normalization(
im, self.cfg.DATA.MEAN, self.cfg.DATA.STD
)
# Convert HWC/RGB/float to CHW/BGR/float format
# im = np.ascontiguousarray(im[:, :, ::-1].transpose([2, 0, 1]))
return im
def _prepare_im_tf(self, im_path):
with pathmgr.open(im_path, "rb") as f:
with Image.open(f) as im:
im = im.convert("RGB")
# Convert HWC/BGR/int to HWC/RGB/float format for applying transforms
train_size, test_size = (
self.cfg.DATA.TRAIN_CROP_SIZE,
self.cfg.DATA.TEST_CROP_SIZE,
)
if self.mode == "train":
aug_transform = transforms_imagenet_train(
img_size=(train_size, train_size),
color_jitter=self.cfg.AUG.COLOR_JITTER,
auto_augment=self.cfg.AUG.AA_TYPE,
interpolation=self.cfg.AUG.INTERPOLATION,
re_prob=self.cfg.AUG.RE_PROB,
re_mode=self.cfg.AUG.RE_MODE,
re_count=self.cfg.AUG.RE_COUNT,
mean=self.cfg.DATA.MEAN,
std=self.cfg.DATA.STD,
)
else:
t = []
size = int((256 / 224) * test_size)
t.append(
transforms_tv.Resize(
size, interpolation=3
), # to maintain same ratio w.r.t. 224 images
)
t.append(transforms_tv.CenterCrop(test_size))
t.append(transforms_tv.ToTensor())
t.append(
transforms_tv.Normalize(self.cfg.DATA.MEAN, self.cfg.DATA.STD)
)
aug_transform = transforms_tv.Compose(t)
im = aug_transform(im)
return im
def __load__(self, index):
try:
# Load the image
im_path = self._imdb[index]["im_path"]
# Prepare the image for training / testing
if self.cfg.AUG.ENABLE:
if self.mode == "train" and self.cfg.AUG.NUM_SAMPLE > 1:
im = []
for _ in range(self.cfg.AUG.NUM_SAMPLE):
crop = self._prepare_im_tf(im_path)
im.append(crop)
return im
else:
im = self._prepare_im_tf(im_path)
return im
else:
im = self._prepare_im_res(im_path)
return im
except Exception:
return None
def __getitem__(self, index):
# if the current image is corrupted, load a different image.
for _ in range(self.num_retries):
im = self.__load__(index)
# Data corrupted, retry with a different image.
if im is None:
index = random.randint(0, len(self._imdb) - 1)
else:
break
# Retrieve the label
label = self._imdb[index]["class"]
if isinstance(im, list):
label = [label for _ in range(len(im))]
dummy = [torch.Tensor() for _ in range(len(im))]
return im, label, dummy, dummy, {}
else:
dummy = torch.Tensor()
return [im], label, dummy, dummy, {}
def __len__(self):
return len(self._imdb)
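# Illustrative usage sketch (cfg is assumed to be a fully populated SlowFast
# config; in practice the dataset is constructed through DATASET_REGISTRY):
#
#   dataset = Imagenet(cfg, mode="train")
#   frames, label, _, _, _ = dataset[0]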
|
the-stack_0_24261 | """
Predict.py
Usage: python predict.py <image>
"""
import sys
from contextlib import contextmanager
import numpy as np
import time
from PIL import Image
from keras.models import model_from_json
from keras.optimizers import SGD
img_rows = 64
img_cols = 64
img_channels = 1
batch_size = 32
nb_epoch = 20
def load_image(filename):
img = Image.open(filename)
img.load()
img = img.resize((128, 128), Image.BICUBIC)
img.thumbnail((img_cols, img_rows))
a4im = Image.new('RGB',
(img_cols, img_rows),
(255, 255, 255))
a4im.paste(img, (0, 0))
return np.array([np.array(a4im.convert("L"))])
if len(sys.argv) < 2:
print(__doc__)
exit(0)
image = np.array([load_image(sys.argv[1])])
nb_classes = 2
X_test = image.astype('float32')
X_test /= 255
sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
model = model_from_json(open('cnn_64_1.json').read())
model.load_weights('cnn_64_1.h5')
model.compile(loss='categorical_crossentropy',
optimizer=sgd,
metrics=['accuracy'])
@contextmanager
def measure_time():
    # time.clock() was removed in Python 3.8; perf_counter() is the modern equivalent
    t1 = time.perf_counter()
    yield
    t2 = time.perf_counter()
    print('%s: %0.2f seconds elapsed' % ("", t2 - t1))
with measure_time():
if model.predict_classes(X_test, verbose=0)[0] == 0:
print("")
print("android")
else:
print("")
print("iphone")
print(model.predict_proba(X_test, verbose=0))
|
the-stack_0_24262 | import numpy as np
from astropy import units as u
from astropy.coordinates import (
HCRS,
ITRS as _ITRS,
BaseRADecFrame,
FunctionTransform,
TimeAttribute,
frame_transform_graph,
)
from astropy.coordinates.builtin_frames.utils import DEFAULT_OBSTIME
from astropy.coordinates.matrix_utilities import rotation_matrix
from poliastro.bodies import (
Jupiter,
Mars,
Mercury,
Moon,
Neptune,
Saturn,
Sun,
Uranus,
Venus,
)
from poliastro.constants import J2000
from .equatorial import (
JupiterICRS,
MarsICRS,
MercuryICRS,
MoonICRS,
NeptuneICRS,
SaturnICRS,
UranusICRS,
VenusICRS,
)
__all__ = [
    "SunFixed",
    "MercuryFixed",
    "VenusFixed",
    "ITRS",
    "MarsFixed",
    "JupiterFixed",
    "SaturnFixed",
    "UranusFixed",
    "NeptuneFixed",
    "MoonFixed",
]
# HACK: sphinx-autoapi variable definition
ITRS = _ITRS
class _PlanetaryFixed(BaseRADecFrame):
obstime = TimeAttribute(default=DEFAULT_OBSTIME)
def __new__(cls, *args, **kwargs):
frame_transform_graph.transform(FunctionTransform, cls, cls.equatorial)(
cls.to_equatorial
)
frame_transform_graph.transform(FunctionTransform, cls.equatorial, cls)(
cls.from_equatorial
)
return super().__new__(cls)
@staticmethod
def to_equatorial(fixed_coo, equatorial_frame):
# TODO replace w/ something smart (Sun/Earth special cased)
if fixed_coo.body == Sun:
assert type(equatorial_frame) == HCRS
else:
assert fixed_coo.body == equatorial_frame.body
r = fixed_coo.cartesian
ra, dec, W = fixed_coo.rot_elements_at_epoch(equatorial_frame.obstime)
r = r.transform(rotation_matrix(-W, "z"))
r_trans1 = r.transform(rotation_matrix(-(90 * u.deg - dec), "x"))
data = r_trans1.transform(rotation_matrix(-(90 * u.deg + ra), "z"))
return equatorial_frame.realize_frame(data)
@staticmethod
def from_equatorial(equatorial_coo, fixed_frame):
# TODO replace w/ something smart (Sun/Earth special cased)
if fixed_frame.body == Sun:
assert type(equatorial_coo) == HCRS
else:
assert equatorial_coo.body == fixed_frame.body
r = equatorial_coo.cartesian
ra, dec, W = fixed_frame.rot_elements_at_epoch(fixed_frame.obstime)
r_trans2 = r.transform(rotation_matrix(90 * u.deg + ra, "z"))
r_f = r_trans2.transform(rotation_matrix(90 * u.deg - dec, "x"))
r_f = r_f.transform(rotation_matrix(W, "z"))
return fixed_frame.realize_frame(r_f)
@classmethod
def rot_elements_at_epoch(cls, epoch):
"""Provides rotational elements at epoch.
Provides north pole of body and angle to prime meridian.
Parameters
----------
epoch : ~astropy.time.Time, optional
Epoch, default to J2000.
Returns
-------
ra, dec, W: tuple (~astropy.units.Quantity)
Right ascension and declination of north pole, and angle of the prime meridian.
"""
T = (epoch.tdb - J2000).to(u.day).value / 36525
d = (epoch.tdb - J2000).to(u.day).value
return cls._rot_elements_at_epoch(T, d)
@staticmethod
def _rot_elements_at_epoch(T, d):
raise NotImplementedError
class SunFixed(_PlanetaryFixed):
body = Sun
equatorial = HCRS
@staticmethod
def _rot_elements_at_epoch(T, d):
ra = 286.13 * u.deg
dec = 63.87 * u.deg
W = (84.176 + 14.1844000 * d) * u.deg
return ra, dec, W
class MercuryFixed(_PlanetaryFixed):
body = Mercury
equatorial = MercuryICRS
@staticmethod
def _rot_elements_at_epoch(T, d):
M1 = (174.7910857 + 4.092335 * d) * u.deg
M2 = (349.5821714 + 8.184670 * d) * u.deg
M3 = (164.3732571 + 12.277005 * d) * u.deg
M4 = (339.1643429 + 16.369340 * d) * u.deg
M5 = (153.9554286 + 20.461675 * d) * u.deg
ra = (281.0103 - 0.0328 * T) * u.deg
dec = (61.45 - 0.005 * T) * u.deg
W = (329.5988 + 6.1385108 * d) * u.deg + (
0.01067257 * np.sin(M1.to("rad").value)
- 0.00112309 * np.sin(M2.to("rad").value)
- 0.00011040 * np.sin(M3.to("rad").value)
- 0.00002539 * np.sin(M4.to("rad").value)
- 0.00000571 * np.sin(M5.to("rad").value)
) * u.deg
return ra, dec, W
class VenusFixed(_PlanetaryFixed):
body = Venus
equatorial = VenusICRS
@staticmethod
def _rot_elements_at_epoch(T, d):
ra = 272.76 * u.deg
dec = 67.16 * u.deg
W = (160.20 - 1.4813688 * d) * u.deg
return ra, dec, W
class MarsFixed(_PlanetaryFixed):
body = Mars
equatorial = MarsICRS
@staticmethod
def _rot_elements_at_epoch(T, d):
M1 = (198.991226 + 19139.4819985 * T) * u.deg
M2 = (226.292679 + 38280.8511281 * T) * u.deg
M3 = (249.663391 + 57420.7251593 * T) * u.deg
M4 = (266.183510 + 76560.6367950 * T) * u.deg
M5 = (79.398797 + 0.5042615 * T) * u.deg
ra = (
317.269202
- 0.10927547 * T
+ 0.000068 * np.sin(M1.to("rad").value)
+ 0.000238 * np.sin(M2.to("rad").value)
+ 0.000052 * np.sin(M3.to("rad").value)
+ 0.000009 * np.sin(M4.to("rad").value)
+ 0.419057 * np.sin(M5.to("rad").value)
) * u.deg
K1 = (122.433576 + 19139.9407476 * T) * u.deg
K2 = (43.058401 + 38280.8753272 * T) * u.deg
K3 = (57.663379 + 57420.7517205 * T) * u.deg
K4 = (79.476401 + 76560.6495004 * T) * u.deg
K5 = (166.325722 + 0.5042615 * T) * u.deg
dec = (
54.432516
- 0.05827105 * T
+ 0.000051 * np.cos(K1.to("rad").value)
+ 0.000141 * np.cos(K2.to("rad").value)
+ 0.000031 * np.cos(K3.to("rad").value)
+ 0.000005 * np.cos(K4.to("rad").value)
+ 1.591274 * np.cos(K5.to("rad").value)
) * u.deg
J1 = (129.071773 + 19140.0328244 * T) * u.deg
J2 = (36.352167 + 38281.0473591 * T) * u.deg
J3 = (56.668646 + 57420.9295360 * T) * u.deg
J4 = (67.364003 + 76560.2552215 * T) * u.deg
J5 = (104.792680 + 95700.4387578 * T) * u.deg
J6 = (95.391654 + 0.5042615 * T) * u.deg
W = (
176.049863
+ 350.891982443297 * d
+ 0.000145 * np.sin(J1.to("rad").value)
+ 0.000157 * np.sin(J2.to("rad").value)
+ 0.000040 * np.sin(J3.to("rad").value)
+ 0.000001 * np.sin(J4.to("rad").value)
+ 0.000001 * np.sin(J5.to("rad").value)
+ 0.584542 * np.sin(J6.to("rad").value)
) * u.deg
return ra, dec, W
class JupiterFixed(_PlanetaryFixed):
body = Jupiter
equatorial = JupiterICRS
@staticmethod
def _rot_elements_at_epoch(T, d):
Ja = (99.360714 + 4850.4046 * T) * u.deg
Jb = (175.895369 + 1191.9605 * T) * u.deg
Jc = (300.323162 + 262.5475 * T) * u.deg
Jd = (114.012305 + 6070.2476 * T) * u.deg
Je = (49.511251 + 64.3000 * T) * u.deg
ra = (
268.056595
- 0.006499 * T
+ 0.000117 * np.sin(Ja.to("rad").value)
+ 0.000938 * np.sin(Jb.to("rad").value)
+ 0.001432 * np.sin(Jc.to("rad").value)
+ 0.000030 * np.sin(Jd.to("rad").value)
+ 0.002150 * np.sin(Je.to("rad").value)
) * u.deg
dec = (
64.495303
+ 0.002413 * T
+ 0.000050 * np.cos(Ja.to("rad").value)
+ 0.000404 * np.cos(Jb.to("rad").value)
+ 0.000617 * np.cos(Jc.to("rad").value)
- 0.000013 * np.cos(Jd.to("rad").value)
+ 0.000926 * np.cos(Je.to("rad").value)
) * u.deg
W = (284.95 + 870.536 * d) * u.deg
return ra, dec, W
class SaturnFixed(_PlanetaryFixed):
body = Saturn
equatorial = SaturnICRS
@staticmethod
def _rot_elements_at_epoch(T, d):
ra = (40.589 - 0.036 * T) * u.deg
dec = (83.537 - 0.004 * T) * u.deg
W = (38.90 + 810.7939024 * d) * u.deg
return ra, dec, W
class UranusFixed(_PlanetaryFixed):
body = Uranus
equatorial = UranusICRS
@staticmethod
def _rot_elements_at_epoch(T, d):
ra = 257.311 * u.deg
dec = -15.175 * u.deg
W = (203.81 - 501.1600928 * d) * u.deg
return ra, dec, W
class NeptuneFixed(_PlanetaryFixed):
body = Neptune
equatorial = NeptuneICRS
@staticmethod
def _rot_elements_at_epoch(T, d):
N = (357.85 + 52.316 * T) * u.deg
ra = (299.36 + 0.70 * np.sin(N.to("rad").value)) * u.deg
dec = (43.46 - 0.51 * np.cos(N.to("rad").value)) * u.deg
W = (249.978 + 541.1397757 * d - 0.48 * np.sin(N.to("rad").value)) * u.deg
return ra, dec, W
class MoonFixed(_PlanetaryFixed):
body = Moon
equatorial = MoonICRS
@staticmethod
def _rot_elements_at_epoch(T, d):
E1 = (125.045 - 0.0529921 * d) * u.deg
E2 = (250.089 - 0.1059842 * d) * u.deg
E3 = (260.008 + 13.0120009 * d) * u.deg
E4 = (176.625 + 13.3407154 * d) * u.deg
E5 = (357.529 + 0.9856003 * d) * u.deg
E6 = (311.589 + 26.4057084 * d) * u.deg
E7 = (134.963 + 13.0649930 * d) * u.deg
E8 = (276.617 + 0.3287146 * d) * u.deg
E9 = (34.226 + 1.7484877 * d) * u.deg
E10 = (15.134 - 0.1589763 * d) * u.deg
E11 = (119.743 + 0.0036096 * d) * u.deg
E12 = (239.961 + 0.1643573 * d) * u.deg
E13 = (25.053 + 12.9590088 * d) * u.deg
ra = (
269.9949
+ 0.0031 * T
- 3.8787 * np.sin(E1.to("rad").value)
- 0.1204 * np.sin(E2.to("rad").value)
+ 0.0700 * np.sin(E3.to("rad").value)
- 0.0172 * np.sin(E4.to("rad").value)
+ 0.0072 * np.sin(E6.to("rad").value)
- 0.0052 * np.sin(E10.to("rad").value)
+ 0.0043 * np.sin(E13.to("rad").value)
) * u.deg
dec = (
66.5392
+ 0.0130 * T
+ 1.5419 * np.cos(E1.to("rad").value)
+ 0.0239 * np.cos(E2.to("rad").value)
- 0.0278 * np.cos(E3.to("rad").value)
+ 0.0068 * np.cos(E4.to("rad").value)
- 0.0029 * np.cos(E6.to("rad").value)
+ 0.0009 * np.cos(E7.to("rad").value)
+ 0.0008 * np.cos(E10.to("rad").value)
- 0.0009 * np.cos(E13.to("rad").value)
) * u.deg
W = (
38.3213
+ 13.17635815 * d
- 1.4e-12 * d ** 2
+ 3.5610 * np.sin(E1.to("rad").value)
+ 0.1208 * np.sin(E2.to("rad").value)
- 0.0642 * np.sin(E3.to("rad").value)
+ 0.0158 * np.sin(E4.to("rad").value)
+ 0.0252 * np.sin(E5.to("rad").value)
- 0.0066 * np.sin(E6.to("rad").value)
- 0.0047 * np.sin(E7.to("rad").value)
- 0.0046 * np.sin(E8.to("rad").value)
+ 0.0028 * np.sin(E9.to("rad").value)
+ 0.0052 * np.sin(E10.to("rad").value)
+ 0.0040 * np.sin(E11.to("rad").value)
+ 0.0019 * np.sin(E12.to("rad").value)
- 0.0044 * np.sin(E13.to("rad").value)
) * u.deg
return ra, dec, W
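# Illustrative sketch (arbitrary epoch; relies only on astropy's Time, which
# these frames already depend on):
#
#   from astropy.time import Time
#   ra, dec, W = SunFixed.rot_elements_at_epoch(Time("2020-01-01", scale="tdb"))
#   # ra/dec give the Sun's north pole orientation, W the prime-meridian angle.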
|
the-stack_0_24263 | import time
class ProgressBar(object):
_empty: str
_fill: str
_left: str
_length: int
    _right: str
_progress: int = 0
def __init__(self, length = 16, left = "[", fill = "#", empty = " ", right = "]"):
self._length = length
self._left = left
self._fill = fill
self._empty = empty
self._right = right
fill_string: str = ""
for i in range(0, length):
fill_string += fill if i < self._progress else empty
print(left + fill_string + right)
def set_progress(self, progress: int):
self._progress = progress
fill_string: str = "\033[F" + self._left
for i in range(0, self._length):
fill_string += self._fill if i < self._progress else self._empty
print(fill_string + self._right)
def __main__():
debug_bar = ProgressBar(64, "<", "=", "-", ">")
for i in range(0, 65):
debug_bar.set_progress(i)
time.sleep(0.1)
print("Done!")
if __name__ == "__main__":
__main__() |
the-stack_0_24264 | import numpy as np
STRUCTURES = 5
# Process next CLAMP sequence data: get sequence and binding affinity
# A CLAMP line looks like: SCORE SEQUQNCE
def process_clamp(clamp_file):
data = clamp_file.readline()
if not data:
return None
line = data.strip().split()
score = float(line[0])
seq = line[1]
return (seq, score)
# Process next RNAcontext sequence data: get sequnce and (STRUCTURES X SEQ_LEN) matrix
def process_rnacontext(rnacontext_file):
data = rnacontext_file.readline()
if not data:
return None
seq_line = data.strip()
assert (seq_line[0] == '>')
seq = seq_line[1:]
matrix = list()
for structure_index in range(STRUCTURES):
structure_line = rnacontext_file.readline().strip()
matrix_line = [float(elem) for elem in structure_line.split()]
matrix.append(matrix_line)
return (seq, matrix)
def read_combined_data(sequences_path, structures_path, max_seq_len):
with open(sequences_path, 'r') as sequences, open(structures_path, 'r') as structures:
data = list()
lengths = list()
labels = list()
counter = 0
while True:
counter += 1
seq_data = process_clamp(sequences)
structure_data = process_rnacontext(structures)
if not seq_data or not structure_data:
return np.array(data), np.array(lengths), np.array(labels), counter-1
#print ("Line", counter)
#print (seq_data)
#print (structure_data)
# Compute a matrix of SEQ_LEN X RNA_ALPHABET for decoding the sequence bases
labels.append(seq_data[1])
seq_matrix = list()
for base in structure_data[0]:
if base == 'A':
base_encoding = [1, 0, 0, 0]
elif base == 'C':
base_encoding = [0, 1, 0, 0]
elif base == 'G':
base_encoding = [0, 0, 1, 0]
elif base == 'U':
base_encoding = [0, 0, 0, 1]
else:
raise ValueError
seq_matrix.append(base_encoding)
seq_matrix = np.array(seq_matrix)
# Compute a matrix of SEQ_LEN X STRUCTURE for decoding the sequence structures
struct_matrix = np.transpose(np.array(structure_data[1]))
base_matrix = np.concatenate((seq_matrix, struct_matrix), axis=1)
#print ("==")
#print (base_matrix.shape)
#Padding
curr_seq_len = base_matrix.shape[0]
lengths.append(curr_seq_len)
padd_len = max_seq_len - curr_seq_len
assert (padd_len >= 0)
if padd_len > 0:
padding_matrix = np.zeros((padd_len, base_matrix.shape[1]))
base_matrix = np.concatenate((base_matrix, padding_matrix), axis=0)
#print (base_matrix.shape)
data.append(base_matrix)
assert(False)
# Testing code
if __name__ == "__main__":
DATA_DIR = "/specific/a/home/cc/students/cs/shiranabadi/motif-binding/"
TRAIN_STRUCTURE_FILE= "RNCMPT00001.txt.annotations_A.RNAcontext"
TEST_STRUCTURE_FILE = "RNCMPT00001.txt.annotations_B.RNAcontext"
TRAIN_SEQUENCE_FILE = "RNCMPT00001.txt.sequences_A.RNAcontext.clamp"
TEST_SEQUENCE_FILE = "RNCMPT00001.txt.sequences_B.RNAcontext.clamp"
MAX_SEQ_LEN = 41
    data, lengths, labels, num_sequences = read_combined_data(DATA_DIR + TRAIN_SEQUENCE_FILE, DATA_DIR + TRAIN_STRUCTURE_FILE, MAX_SEQ_LEN)
print (data.shape)
print (lengths.shape)
print (labels.shape)
print (data[0])
print (lengths[0])
print (labels[0])
|
the-stack_0_24265 | # -*- coding: utf8 -*-
import datetime
import random
import ssl
import socket
from typing import Dict, List, Tuple
import pytz
import requests
from nio import AsyncClient
from core.bot_commands import Command
from core.plugin import Plugin
import logging
logger = logging.getLogger(__name__)
plugin = Plugin(
"federation_status",
"Matrix",
"Keeps track of federation status with all homeservers in the bot's rooms",
)
def setup():
"""
This just moves the initial setup-commands to the top for better readability
:return: -
"""
plugin.add_config("room_list", default_value=None, is_required=False)
plugin.add_config("warn_cert_expiry", default_value=7, is_required=True)
plugin.add_config("server_max_age", default_value=60, is_required=True)
plugin.add_config(
"federation_tester_url",
default_value="https://federationtester.matrix.org",
is_required=True,
)
plugin.add_command(
"federation",
command_federation_status,
"Displays the current status of all federating servers in the room",
room_id=plugin.read_config("room_list"),
)
plugin.add_command("federation_update", update_federation_status, "Update all known server's data")
plugin.add_timer(update_federation_status, frequency=datetime.timedelta(minutes=5))
class Server:
def __init__(self, server_name: str):
self.server_name: str = server_name
self.last_update: datetime.datetime or None = None
self.last_alive: datetime.datetime or None = None
self.cert_expiry: datetime.datetime or None = None
self.last_posted_warning: datetime.datetime = datetime.datetime(1970, 1, 1)
self.software: str or None = None
self.version: str or None = None
self.federation_test()
self.currently_alive: bool = self.is_alive()
def is_alive(self) -> bool:
"""
Checks if the server is currently alive, e.g. if last_alive >= last_update.
:return:
"""
        if self.last_alive and self.last_update:
            return self.last_alive >= self.last_update
        return False
async def time_until_expire(self) -> datetime.timedelta:
"""
Returns the time left until the server's cert expires
:return:
"""
return self.cert_expiry - datetime.datetime.now()
async def is_expired(self) -> bool:
"""
Checks if the server's certificate is currently expired
:return:
"""
        return self.cert_expiry < datetime.datetime.now()
async def last_updated_within(self, timeframe: datetime.timedelta) -> bool:
"""
Checks whether the server has been successfully updated within the given timeframe
:param timeframe:
:return:
"""
return self.last_update > (datetime.datetime.now() - timeframe)
async def needs_update(self) -> bool:
"""
        Checks whether the server's data needs to be updated
:return:
"""
# server is currently offline and hasn't been offline for more than a week
if not self.currently_alive and self.last_alive and self.last_alive > datetime.datetime.now() - datetime.timedelta(days=7):
return True
# certificate will expire in less than 10 minutes
        elif self.cert_expiry and self.cert_expiry < datetime.datetime.now() + datetime.timedelta(minutes=10):
return True
# server hasn't been updated for more than server_max_age (+-5 minutes to distribute updates a little)
elif not await self.last_updated_within(datetime.timedelta(minutes=plugin.read_config("server_max_age") + random.randint(-5, 5))):
return True
else:
return False
async def needs_warning(self) -> bool:
"""
Warn once, if a certificate expiration date is known, is in the future, but less than warn_steps away
:return:
"""
# TODO: only report servers that have been offline for more than 5 minutes, but report expired certs immediately?
warn_steps: List[datetime.timedelta] = [
datetime.timedelta(minutes=10),
datetime.timedelta(days=1),
datetime.timedelta(days=plugin.read_config("warn_cert_expiry")),
]
step: datetime.timedelta
if not self.cert_expiry:
return False
else:
for step in warn_steps:
if datetime.datetime.now() < self.cert_expiry < (datetime.datetime.now() + step) and self.last_posted_warning < (
datetime.datetime.now() - step
):
return True
return False
def federation_test(self):
"""
        Run a federation test for this server and update its state in place:
        last_update (timestamp of the last successful update),
        last_alive (timestamp of the server's last successful check) and
        cert_expiry (timestamp of the server's certificate expiry date).
"""
api_parameters = {"server_name": self.server_name}
logger.debug(f"Updating {self.server_name}")
try:
response: requests.Response = requests.get(
plugin.read_config("federation_tester_url") + "/api/report",
params=api_parameters,
)
except requests.exceptions.ConnectionError as err:
logger.warning(f"Connection to federation-tester failed: {err}")
return
if response.status_code == 200:
federation_data: Dict[str, any] = response.json()
self.last_update = datetime.datetime.now()
self.software = federation_data.get("Version").get("name")
self.version = federation_data.get("Version").get("version")
# check certificates of all hosts and store the smallest expiry date
host: str
port: int
hosts: List[Tuple[str, int]] = []
if federation_data.get("WellKnownResult").get("m.server"):
# read host and port from well-known, strip trailing '.' from host
host = federation_data.get("WellKnownResult").get("m.server").split(":")[0].rstrip(".")
if len(federation_data.get("WellKnownResult").get("m.server").split(":")) > 1:
port = int(federation_data.get("WellKnownResult").get("m.server").split(":")[1])
else:
port = 8448
hosts = [(host, port)]
else:
# read hosts from DNS-Result, strip trailing '.', default to Port 8448 for now
for host in federation_data.get("DNSResult").get("Hosts").keys():
hosts.append((host.rstrip("."), 8448))
min_expire_date: datetime.datetime = datetime.datetime(year=2500, month=1, day=1)
for (host, port) in hosts:
try:
expire_date: datetime.datetime or None = ssl_expiry_datetime(host, port)
except ssl.SSLCertVerificationError:
expire_date = None
if expire_date:
min_expire_date = min(expire_date, min_expire_date)
self.cert_expiry = min_expire_date
if federation_data.get("FederationOK"):
self.last_alive = datetime.datetime.now()
self.currently_alive = True
else:
self.currently_alive = False
def ssl_expiry_datetime(host: str, port=8448) -> datetime.datetime or None:
"""
Connects to a host on the given port and retrieves it's TLS-certificate.
:param host: a hostname or IP-Address
:param port: the port to connect to
:return: datetime.datime of the certificates expiration
"""
ssl_date_fmt: str = r"%b %d %H:%M:%S %Y %Z"
context: ssl.SSLContext = ssl.create_default_context()
conn: ssl.SSLSocket = context.wrap_socket(socket.socket(socket.AF_INET), server_hostname=host)
# 3 second timeout because Lambda has runtime limitations
conn.settimeout(3.0)
try:
conn.connect((host, port))
ssl_info: Dict or Tuple or None = conn.getpeercert()
if ssl_info:
expiry_date: datetime.datetime = datetime.datetime.strptime(ssl_info["notAfter"], ssl_date_fmt)
# add timezone offset
timezone = pytz.timezone("CET")
offset = timezone.utcoffset(expiry_date)
expiry_date = expiry_date + offset
return expiry_date
else:
return None
except socket.timeout:
return None
async def update_federation_status(client_or_command: AsyncClient or Command):
"""
Regularly check last_update timestamps of each server and get an updated status if older than server_max_age
    :param client_or_command: AsyncClient when called by the timer, or the triggering Command (forces an update)
:return:
"""
client: AsyncClient
forced_update: bool = False
if isinstance(client_or_command, AsyncClient):
# we're called by timer
client = client_or_command
else:
# we're called by command, force an update
client = client_or_command.client
forced_update = True
# get a list of shared servers on rooms the plugin is active for
shared_servers: List[str] = await plugin.get_connected_servers(client, plugin.read_config("room_list"))
server_list_saved: Dict[str, Server] or None = await plugin.read_data("server_list")
data_changed: bool = False
if not server_list_saved:
server_list_new: Dict[str, Server] = {}
# get initial server status and save it
for server_name in shared_servers:
server_list_new[server_name] = Server(server_name)
await plugin.store_data("server_list", server_list_new)
else:
server_names_saved: List[str] = list(server_list_saved.keys())
# delete data for servers we're not federating with anymore
for server_name in server_names_saved:
if server_name not in shared_servers:
del server_list_saved[server_name]
data_changed = True
# add new servers
for server_name in shared_servers:
if server_name not in server_names_saved:
server_list_saved[server_name] = Server(server_name)
del server_names_saved
# check for changes
previously_dead_servers: List[str] = []
previously_alive_servers: List[str] = []
new_dead_servers: List[str] = []
new_alive_servers: List[str] = []
need_warning_servers: List[Tuple[str, datetime.datetime]] = []
for server in server_list_saved.values():
if server.currently_alive:
previously_alive_servers.append(server.server_name)
else:
previously_dead_servers.append(server.server_name)
# update servers' status if required
server_list_new: Dict[str, Server] = server_list_saved
for server in server_list_new.values():
if forced_update or await server.needs_update():
server.federation_test()
data_changed = True
if server.currently_alive and server.server_name not in previously_alive_servers:
new_alive_servers.append(server.server_name)
if not server.currently_alive and server.server_name not in previously_dead_servers:
new_dead_servers.append(server.server_name)
if await server.needs_warning():
need_warning_servers.append((server.server_name, server.cert_expiry))
server.last_posted_warning = datetime.datetime.now()
# announce any changes
room_list: List[str] = plugin.read_config("room_list")
if not room_list:
room_list = [x for x in client.rooms]
for room_id in room_list:
for server in new_dead_servers:
try:
user_ids: List[str] = (await plugin.get_users_on_servers(client, [server], [room_id]))[server]
message: str = f"Federation error: {server} offline. \n"
message += f"Isolated users: {', '.join([await plugin.link_user_by_id(client, room_id, user_id) for user_id in user_ids])}."
await plugin.send_notice(client, room_id, message)
except KeyError:
pass
for server in new_alive_servers:
try:
user_ids: List[str] = (await plugin.get_users_on_servers(client, [server], [room_id]))[server]
message: str = f"Federation recovery: {server} back online. \n"
message += f"Welcome back, {', '.join([await plugin.link_user_by_id(client, room_id, user_id) for user_id in user_ids])}."
await plugin.send_notice(client, room_id, message)
except KeyError:
pass
expire_date: datetime.datetime
for server, expire_date in need_warning_servers:
try:
user_ids: List[str] = (await plugin.get_users_on_servers(client, [server], [room_id]))[server]
message: str = f"Federation warning: {server}'s certificate will expire on {expire_date} (in {expire_date - datetime.datetime.now()}) \n"
message += (
f"{', '.join([await plugin.link_user_by_id(client, room_id, user_id) for user_id in user_ids])} will be isolated until "
f"the server's certificate has been renewed."
)
await plugin.send_message(client, room_id, message)
except KeyError:
pass
if data_changed:
await plugin.store_data("server_list", server_list_new)
async def command_federation_status(command: Command):
"""
Displays the current status of all federating servers in the room
:param command:
:return:
"""
message: str = ""
room_list: List[str] = []
if len(command.args) == 1 and command.args[0] == "global":
message += f"Federation status (all known rooms): \n"
elif len(command.args) == 0:
message += f"Federation status (this room only): \n"
room_list = [command.room.room_id]
else:
await plugin.respond_notice(command, "Usage: `federation [global]`")
return
server_list_saved: Dict[str, Server] or None = await plugin.read_data("server_list")
server: Server
server_name: str
for server_name in await plugin.get_connected_servers(command.client, room_list):
server = server_list_saved[server_name]
if not server.currently_alive:
message += f"<font color=red>{server.server_name} offline (last alive: {server.last_alive})</font>. "
elif (datetime.datetime.now() + datetime.timedelta(days=plugin.read_config("warn_cert_expiry"))) > server.cert_expiry:
message += f"<font color=yellow>{server.server_name} warning</font>. "
else:
message += f"<font color=green>{server.server_name} online</font>. "
if server.software:
message += f"**Server**: {server.software} ({server.version}). "
if server.cert_expiry:
message += f"**Cert expiry**: {server.cert_expiry} ({server.cert_expiry - datetime.datetime.now()}). "
num_users: int = len((await plugin.get_users_on_servers(command.client, [server.server_name], room_list))[server_name])
message += f"**Users**: {num_users}. \n"
await plugin.respond_notice(command, message)
setup()
|
the-stack_0_24266 | import zipfile
import os
import shutil
with zipfile.ZipFile('tiny-imagenet-200.zip','r') as my_zip:
my_zip.extractall()
print('------------- INFO : Preparing Train and Test Loader Folder ---------------- ')
os.makedirs('TinyImageNet')
os.makedirs('TinyImageNet/train')
os.makedirs('TinyImageNet/test')
os.makedirs('TinyImageNet/val')
for classes in os.listdir('tiny-imagenet-200/train'):
os.makedirs('TinyImageNet/train/'+classes)
for images in os.listdir('tiny-imagenet-200/train/'+classes+'/images'):
shutil.move('tiny-imagenet-200/train/'+classes+'/images/'+images,'TinyImageNet/train/'+classes)
print('Train data transferred successfully')
val_class = []
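# each line of val_annotations.txt is tab-separated: image filename, class id, then bounding-box coordinates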
for line in open('tiny-imagenet-200/val/val_annotations.txt','r'):
img_name , class_id = line.split('\t')[:2]
val_class.append(class_id)
val_class = list(set(val_class))
for i in val_class:
os.makedirs(f'TinyImageNet/val/{i}')
for line in open('tiny-imagenet-200/val/val_annotations.txt','r'):
img_name , class_id = line.split('\t')[:2]
shutil.move(f'tiny-imagenet-200/val/images/{img_name}',f'TinyImageNet/val/{class_id}')
print('Val data transferred successfully')
print('------------- INFO : Train and Test Loader Folder TinyImageNet Done ---------------- ') |
the-stack_0_24267 | #!/usr/bin/env python
# Copyright 2014-2020 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from pyscf import scf, gto
from pyscf.eph import eph_fd, rhf
import numpy as np
import unittest
mol = gto.M()
mol.atom = [['O', [0.000000000000, -0.000000000775, 0.923671924285]],
['H', [-0.000000000000, -1.432564848017, 2.125164039823]],
['H', [0.000000000000, 1.432564848792, 2.125164035930]]]
mol.unit = 'Bohr'
mol.basis = 'sto3g'
mol.verbose=4
mol.build() # this is a pre-computed relaxed geometry
class KnownValues(unittest.TestCase):
def test_finite_diff_rhf_eph(self):
mf = scf.RHF(mol)
mf.conv_tol = 1e-16
mf.conv_tol_grad = 1e-10
mf.kernel()
grad = mf.nuc_grad_method().kernel()
self.assertTrue(abs(grad).max()<1e-5)
mat, omega = eph_fd.kernel(mf)
matmo, _ = eph_fd.kernel(mf, mo_rep=True)
myeph = rhf.EPH(mf)
eph, _ = myeph.kernel()
ephmo, _ = myeph.kernel(mo_rep=True)
for i in range(len(omega)):
self.assertTrue(min(np.linalg.norm(eph[i]-mat[i]),np.linalg.norm(eph[i]+mat[i]))<1e-5)
self.assertTrue(min(abs(eph[i]-mat[i]).max(), abs(eph[i]+mat[i]).max())<1e-5)
self.assertTrue(min(np.linalg.norm(ephmo[i]-matmo[i]),np.linalg.norm(ephmo[i]+matmo[i]))<1e-5)
self.assertTrue(min(abs(ephmo[i]-matmo[i]).max(), abs(ephmo[i]+matmo[i]).max())<1e-5)
if __name__ == '__main__':
print("Full Tests for RHF")
unittest.main()
|
the-stack_0_24268 | #!/usr/bin/env python3
# Copyright (c) 2014-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the abandontransaction RPC.
The abandontransaction RPC marks a transaction and all its in-wallet
descendants as abandoned which allows their inputs to be respent. It can be
used to replace "stuck" or evicted transactions. It only works on transactions
which are not included in a block and are not currently in the mempool. It has
no effect on transactions which are already conflicted or abandoned.
"""
from test_framework.test_framework import GenesisTestFramework
from test_framework.util import *
class AbandonConflictTest(GenesisTestFramework):
def set_test_params(self):
self.num_nodes = 2
self.extra_args = [["-minrelaytxfee=0.00001"], []]
def run_test(self):
self.nodes[1].generate(100)
sync_blocks(self.nodes)
balance = self.nodes[0].getbalance()
txA = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), Decimal("10"))
txB = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), Decimal("10"))
txC = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), Decimal("10"))
sync_mempools(self.nodes)
self.nodes[1].generate(1)
sync_blocks(self.nodes)
newbalance = self.nodes[0].getbalance()
assert(balance - newbalance < Decimal("0.001")) #no more than fees lost
balance = newbalance
# Disconnect nodes so node0's transactions don't get into node1's mempool
disconnect_nodes(self.nodes[0], 1)
# Identify the 10btc outputs
nA = next(i for i, vout in enumerate(self.nodes[0].getrawtransaction(txA, 1)["vout"]) if vout["value"] == Decimal("10"))
nB = next(i for i, vout in enumerate(self.nodes[0].getrawtransaction(txB, 1)["vout"]) if vout["value"] == Decimal("10"))
nC = next(i for i, vout in enumerate(self.nodes[0].getrawtransaction(txC, 1)["vout"]) if vout["value"] == Decimal("10"))
inputs =[]
# spend 10btc outputs from txA and txB
inputs.append({"txid":txA, "vout":nA})
inputs.append({"txid":txB, "vout":nB})
outputs = {}
outputs[self.nodes[0].getnewaddress()] = Decimal("14.99998")
outputs[self.nodes[1].getnewaddress()] = Decimal("5")
signed = self.nodes[0].signrawtransaction(self.nodes[0].createrawtransaction(inputs, outputs))
txAB1 = self.nodes[0].sendrawtransaction(signed["hex"])
# Identify the 14.99998btc output
nAB = next(i for i, vout in enumerate(self.nodes[0].getrawtransaction(txAB1, 1)["vout"]) if vout["value"] == Decimal("14.99998"))
#Create a child tx spending AB1 and C
inputs = []
inputs.append({"txid":txAB1, "vout":nAB})
inputs.append({"txid":txC, "vout":nC})
outputs = {}
outputs[self.nodes[0].getnewaddress()] = Decimal("24.9996")
signed2 = self.nodes[0].signrawtransaction(self.nodes[0].createrawtransaction(inputs, outputs))
txABC2 = self.nodes[0].sendrawtransaction(signed2["hex"])
# Create a child tx spending ABC2
signed3_change = Decimal("24.999")
inputs = [ {"txid":txABC2, "vout":0} ]
outputs = { self.nodes[0].getnewaddress(): signed3_change }
signed3 = self.nodes[0].signrawtransaction(self.nodes[0].createrawtransaction(inputs, outputs))
# note tx is never directly referenced, only abandoned as a child of the above
self.nodes[0].sendrawtransaction(signed3["hex"])
# In mempool txs from self should increase balance from change
newbalance = self.nodes[0].getbalance()
assert_equal(newbalance, balance - Decimal("30") + signed3_change)
balance = newbalance
# Restart the node with a higher min relay fee so the parent tx is no longer in mempool
# TODO: redo with eviction
self.stop_node(0)
self.start_node(0, extra_args=["-minrelaytxfee=0.0001"])
# Verify txs no longer in either node's mempool
assert_equal(len(self.nodes[0].getrawmempool()), 0)
assert_equal(len(self.nodes[1].getrawmempool()), 0)
# Not in mempool txs from self should only reduce balance
# inputs are still spent, but change not received
newbalance = self.nodes[0].getbalance()
assert_equal(newbalance, balance - signed3_change)
# Unconfirmed received funds that are not in mempool, also shouldn't show
# up in unconfirmed balance
unconfbalance = self.nodes[0].getunconfirmedbalance() + self.nodes[0].getbalance()
assert_equal(unconfbalance, newbalance)
# Also shouldn't show up in listunspent
assert(not txABC2 in [utxo["txid"] for utxo in self.nodes[0].listunspent(0)])
balance = newbalance
# Abandon original transaction and verify inputs are available again
# including that the child tx was also abandoned
self.nodes[0].abandontransaction(txAB1)
newbalance = self.nodes[0].getbalance()
assert_equal(newbalance, balance + Decimal("30"))
balance = newbalance
# Verify that even with a low min relay fee, the tx is not reaccepted from wallet on startup once abandoned
self.stop_node(0)
self.start_node(0, extra_args=["-minrelaytxfee=0.00001"])
assert_equal(len(self.nodes[0].getrawmempool()), 0)
assert_equal(self.nodes[0].getbalance(), balance)
# But if its received again then it is unabandoned
# And since now in mempool, the change is available
# But its child tx remains abandoned
self.nodes[0].sendrawtransaction(signed["hex"])
newbalance = self.nodes[0].getbalance()
assert_equal(newbalance, balance - Decimal("20") + Decimal("14.99998"))
balance = newbalance
# Send child tx again so its unabandoned
self.nodes[0].sendrawtransaction(signed2["hex"])
newbalance = self.nodes[0].getbalance()
assert_equal(newbalance, balance - Decimal("10") - Decimal("14.99998") + Decimal("24.9996"))
balance = newbalance
# Remove using high relay fee again
self.stop_node(0)
self.start_node(0, extra_args=["-minrelaytxfee=0.0001"])
assert_equal(len(self.nodes[0].getrawmempool()), 0)
newbalance = self.nodes[0].getbalance()
assert_equal(newbalance, balance - Decimal("24.9996"))
balance = newbalance
# Create a double spend of AB1 by spending again from only A's 10 output
# Mine double spend from node 1
inputs =[]
inputs.append({"txid":txA, "vout":nA})
outputs = {}
outputs[self.nodes[1].getnewaddress()] = Decimal("9.9999")
tx = self.nodes[0].createrawtransaction(inputs, outputs)
signed = self.nodes[0].signrawtransaction(tx)
self.nodes[1].sendrawtransaction(signed["hex"])
self.nodes[1].generate(1)
connect_nodes(self.nodes[0], 1)
sync_blocks(self.nodes)
# Verify that B and C's 10 GENX outputs are available for spending again because AB1 is now conflicted
newbalance = self.nodes[0].getbalance()
assert_equal(newbalance, balance + Decimal("20"))
balance = newbalance
# There is currently a minor bug around this and so this test doesn't work. See Issue #7315
# Invalidate the block with the double spend and B's 10 GENX output should no longer be available
# Don't think C's should either
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
newbalance = self.nodes[0].getbalance()
#assert_equal(newbalance, balance - Decimal("10"))
self.log.info("If balance has not declined after invalidateblock then out of mempool wallet tx which is no longer")
self.log.info("conflicted has not resumed causing its inputs to be seen as spent. See Issue #7315")
self.log.info(str(balance) + " -> " + str(newbalance) + " ?")
if __name__ == '__main__':
AbandonConflictTest().main()
|
the-stack_0_24272 | '''
Author : Gehrychiang
LastEditTime : 2021-03-05 17:54:13
Website : www.yilantingfeng.site
E-mail : [email protected]
ProbTitle : (remember to fill in the problem title)
'''
# filesubmitter
# TSM1PL6kY8MWeVXwPQzugFJWynVLChuY
# filedownloader
# 4ydSRMSkOUefyBeZenFTRhj6jOXc1Asr
import upyun
import requests
import os
import sys
import json
bucket_name='*'
operator_name='*'
operator_token='*'
up = upyun.UpYun(bucket_name, operator_name, operator_token, timeout=60, endpoint=upyun.ED_AUTO)
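# UpYun client bound to the storage bucket; the get/getlist calls below use paths relative to the bucket root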
headers = { 'py_file_submiter': '180' }
print("Syncing data with server....\n")
r = requests.get('http://api.yilantingfeng.site/', params={'type':'fsub'})
title =r.json()['data']['title']
print('Collecting: ',title,'\n')
print("To continue, please input the authorization code")
psd=input()
q = requests.get('http://api.yilantingfeng.site/', params={'type':'fsub','pwd':psd})
if not(q.json()['msg'] == "pass"):
print("Invalid authorization code!")
os.system('pause')
sys.exit(1)
print("Authorization checked!")
underway=r.json()['data']['underway']
if int(underway) == 1:
print("Collection has not ended!\n")
print("Confirm downloading?\n")
    req = input("input yes to proceed further ")
if not(req == "yes"):
os.system('pause')
sys.exit(1)
path = os.getcwd()+'\\'+title
if not(os.path.exists(path)):
os.mkdir(path)
res = up.getlist('/'+title+'/')
for i in range(len(res)):
print("downloading: "+res[i]['name']+'\n')
if(os.path.exists(path+'\\'+res[i]['name'])):
os.remove(path+'\\'+res[i]['name'])
with open(path+'\\'+res[i]['name'], 'wb') as f:
up.get('/'+title+'/'+res[i]['name'], f)
print("download succeed!")
os.system('pause') |
the-stack_0_24274 | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import init
import functools
from torch.optim import lr_scheduler
import numpy as np
from .stylegan_networks import StyleGAN2Discriminator, StyleGAN2Generator, TileStyleGAN2Discriminator
###############################################################################
# Helper Functions
###############################################################################
def get_filter(filt_size=3):
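    # binomial (Pascal's triangle) coefficients: their outer product gives the normalized 2D blur kernel used for anti-aliased resampling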
if(filt_size == 1):
a = np.array([1., ])
elif(filt_size == 2):
a = np.array([1., 1.])
elif(filt_size == 3):
a = np.array([1., 2., 1.])
elif(filt_size == 4):
a = np.array([1., 3., 3., 1.])
elif(filt_size == 5):
a = np.array([1., 4., 6., 4., 1.])
elif(filt_size == 6):
a = np.array([1., 5., 10., 10., 5., 1.])
elif(filt_size == 7):
a = np.array([1., 6., 15., 20., 15., 6., 1.])
filt = torch.Tensor(a[:, None] * a[None, :])
filt = filt / torch.sum(filt)
return filt
class Downsample(nn.Module):
def __init__(self, channels, pad_type='reflect', filt_size=3, stride=2, pad_off=0):
super(Downsample, self).__init__()
self.filt_size = filt_size
self.pad_off = pad_off
self.pad_sizes = [int(1. * (filt_size - 1) / 2), int(np.ceil(1. * (filt_size - 1) / 2)), int(1. * (filt_size - 1) / 2), int(np.ceil(1. * (filt_size - 1) / 2))]
self.pad_sizes = [pad_size + pad_off for pad_size in self.pad_sizes]
self.stride = stride
self.off = int((self.stride - 1) / 2.)
self.channels = channels
filt = get_filter(filt_size=self.filt_size)
self.register_buffer('filt', filt[None, None, :, :].repeat((self.channels, 1, 1, 1)))
self.pad = get_pad_layer(pad_type)(self.pad_sizes)
def forward(self, inp):
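        # blur with the fixed filter (depthwise conv, groups == channels) before striding; with filt_size == 1 this reduces to plain subsampling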
if(self.filt_size == 1):
if(self.pad_off == 0):
return inp[:, :, ::self.stride, ::self.stride]
else:
return self.pad(inp)[:, :, ::self.stride, ::self.stride]
else:
return F.conv2d(self.pad(inp), self.filt, stride=self.stride, groups=inp.shape[1])
class Upsample2(nn.Module):
def __init__(self, scale_factor, mode='nearest'):
super().__init__()
self.factor = scale_factor
self.mode = mode
def forward(self, x):
return torch.nn.functional.interpolate(x, scale_factor=self.factor, mode=self.mode)
class Upsample(nn.Module):
def __init__(self, channels, pad_type='repl', filt_size=4, stride=2):
super(Upsample, self).__init__()
self.filt_size = filt_size
self.filt_odd = np.mod(filt_size, 2) == 1
self.pad_size = int((filt_size - 1) / 2)
self.stride = stride
self.off = int((self.stride - 1) / 2.)
self.channels = channels
filt = get_filter(filt_size=self.filt_size) * (stride**2)
self.register_buffer('filt', filt[None, None, :, :].repeat((self.channels, 1, 1, 1)))
self.pad = get_pad_layer(pad_type)([1, 1, 1, 1])
def forward(self, inp):
ret_val = F.conv_transpose2d(self.pad(inp), self.filt, stride=self.stride, padding=1 + self.pad_size, groups=inp.shape[1])[:, :, 1:, 1:]
if(self.filt_odd):
return ret_val
else:
return ret_val[:, :, :-1, :-1]
def get_pad_layer(pad_type):
if(pad_type in ['refl', 'reflect']):
PadLayer = nn.ReflectionPad2d
elif(pad_type in ['repl', 'replicate']):
PadLayer = nn.ReplicationPad2d
elif(pad_type == 'zero'):
PadLayer = nn.ZeroPad2d
else:
print('Pad type [%s] not recognized' % pad_type)
return PadLayer
class Identity(nn.Module):
def forward(self, x):
return x
def get_norm_layer(norm_type='instance'):
"""Return a normalization layer
Parameters:
norm_type (str) -- the name of the normalization layer: batch | instance | none
For BatchNorm, we use learnable affine parameters and track running statistics (mean/stddev).
For InstanceNorm, we do not use learnable affine parameters. We do not track running statistics.
"""
if norm_type == 'batch':
norm_layer = functools.partial(nn.BatchNorm2d, affine=True, track_running_stats=True)
elif norm_type == 'instance':
norm_layer = functools.partial(nn.InstanceNorm2d, affine=False, track_running_stats=False)
elif norm_type == 'none':
def norm_layer(x):
return Identity()
else:
raise NotImplementedError('normalization layer [%s] is not found' % norm_type)
return norm_layer
def get_scheduler(optimizer, opt):
"""Return a learning rate scheduler
Parameters:
optimizer -- the optimizer of the network
opt (option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions.
opt.lr_policy is the name of learning rate policy: linear | step | plateau | cosine
For 'linear', we keep the same learning rate for the first <opt.n_epochs> epochs
and linearly decay the rate to zero over the next <opt.n_epochs_decay> epochs.
For other schedulers (step, plateau, and cosine), we use the default PyTorch schedulers.
See https://pytorch.org/docs/stable/optim.html for more details.
"""
if opt.lr_policy == 'linear':
def lambda_rule(epoch):
lr_l = 1.0 - max(0, epoch + opt.epoch_count - opt.n_epochs) / float(opt.n_epochs_decay + 1)
return lr_l
scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule)
elif opt.lr_policy == 'step':
scheduler = lr_scheduler.StepLR(optimizer, step_size=opt.lr_decay_iters, gamma=0.1)
elif opt.lr_policy == 'plateau':
scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.2, threshold=0.01, patience=5)
elif opt.lr_policy == 'cosine':
scheduler = lr_scheduler.CosineAnnealingLR(optimizer, T_max=opt.n_epochs, eta_min=0)
else:
return NotImplementedError('learning rate policy [%s] is not implemented', opt.lr_policy)
return scheduler
def init_weights(net, init_type='normal', init_gain=0.02, debug=False):
"""Initialize network weights.
Parameters:
net (network) -- network to be initialized
init_type (str) -- the name of an initialization method: normal | xavier | kaiming | orthogonal
init_gain (float) -- scaling factor for normal, xavier and orthogonal.
We use 'normal' in the original pix2pix and CycleGAN paper. But xavier and kaiming might
work better for some applications. Feel free to try yourself.
"""
def init_func(m): # define the initialization function
classname = m.__class__.__name__
if hasattr(m, 'weight') and (classname.find('Conv') != -1 or classname.find('Linear') != -1):
if debug:
print(classname)
if init_type == 'normal':
init.normal_(m.weight.data, 0.0, init_gain)
elif init_type == 'xavier':
init.xavier_normal_(m.weight.data, gain=init_gain)
elif init_type == 'kaiming':
init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
elif init_type == 'orthogonal':
init.orthogonal_(m.weight.data, gain=init_gain)
else:
raise NotImplementedError('initialization method [%s] is not implemented' % init_type)
if hasattr(m, 'bias') and m.bias is not None:
init.constant_(m.bias.data, 0.0)
elif classname.find('BatchNorm2d') != -1: # BatchNorm Layer's weight is not a matrix; only normal distribution applies.
init.normal_(m.weight.data, 1.0, init_gain)
init.constant_(m.bias.data, 0.0)
net.apply(init_func) # apply the initialization function <init_func>
def init_net(net, init_type='normal', init_gain=0.02, gpu_ids=[], debug=False, initialize_weights=True):
"""Initialize a network: 1. register CPU/GPU device (with multi-GPU support); 2. initialize the network weights
Parameters:
net (network) -- the network to be initialized
init_type (str) -- the name of an initialization method: normal | xavier | kaiming | orthogonal
gain (float) -- scaling factor for normal, xavier and orthogonal.
gpu_ids (int list) -- which GPUs the network runs on: e.g., 0,1,2
Return an initialized network.
"""
if len(gpu_ids) > 0:
assert(torch.cuda.is_available())
net.to(gpu_ids[0])
# if not amp:
# net = torch.nn.DataParallel(net, gpu_ids) # multi-GPUs for non-AMP training
if initialize_weights:
init_weights(net, init_type, init_gain=init_gain, debug=debug)
return net
def define_G(input_nc, output_nc, ngf, netG, norm='batch', use_dropout=False, init_type='normal',
init_gain=0.02, no_antialias=False, no_antialias_up=False, gpu_ids=[], opt=None):
"""Create a generator
Parameters:
input_nc (int) -- the number of channels in input images
output_nc (int) -- the number of channels in output images
ngf (int) -- the number of filters in the last conv layer
netG (str) -- the architecture's name: resnet_9blocks | resnet_6blocks | unet_256 | unet_128
norm (str) -- the name of normalization layers used in the network: batch | instance | none
use_dropout (bool) -- if use dropout layers.
init_type (str) -- the name of our initialization method.
init_gain (float) -- scaling factor for normal, xavier and orthogonal.
gpu_ids (int list) -- which GPUs the network runs on: e.g., 0,1,2
Returns a generator
Our current implementation provides two types of generators:
U-Net: [unet_128] (for 128x128 input images) and [unet_256] (for 256x256 input images)
The original U-Net paper: https://arxiv.org/abs/1505.04597
Resnet-based generator: [resnet_6blocks] (with 6 Resnet blocks) and [resnet_9blocks] (with 9 Resnet blocks)
Resnet-based generator consists of several Resnet blocks between a few downsampling/upsampling operations.
We adapt Torch code from Justin Johnson's neural style transfer project (https://github.com/jcjohnson/fast-neural-style).
The generator has been initialized by <init_net>. It uses RELU for non-linearity.
"""
net = None
norm_layer = get_norm_layer(norm_type=norm)
if netG == 'resnet_9blocks':
net = ResnetGenerator(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, no_antialias=no_antialias, no_antialias_up=no_antialias_up, n_blocks=9, opt=opt)
elif netG == 'resnet_6blocks':
net = ResnetGenerator(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, no_antialias=no_antialias, no_antialias_up=no_antialias_up, n_blocks=6, opt=opt)
elif netG == 'resnet_4blocks':
net = ResnetGenerator(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, no_antialias=no_antialias, no_antialias_up=no_antialias_up, n_blocks=4, opt=opt)
elif netG == 'unet_128':
net = UnetGenerator(input_nc, output_nc, 7, ngf, norm_layer=norm_layer, use_dropout=use_dropout)
elif netG == 'unet_256':
net = UnetGenerator(input_nc, output_nc, 8, ngf, norm_layer=norm_layer, use_dropout=use_dropout)
elif netG == 'stylegan2':
net = StyleGAN2Generator(input_nc, output_nc, ngf, use_dropout=use_dropout, opt=opt)
elif netG == 'smallstylegan2':
net = StyleGAN2Generator(input_nc, output_nc, ngf, use_dropout=use_dropout, n_blocks=2, opt=opt)
elif netG == 'resnet_cat':
n_blocks = 8
net = G_Resnet(input_nc, output_nc, opt.nz, num_downs=2, n_res=n_blocks - 4, ngf=ngf, norm='inst', nl_layer='relu')
else:
raise NotImplementedError('Generator model name [%s] is not recognized' % netG)
return init_net(net, init_type, init_gain, gpu_ids, initialize_weights=('stylegan2' not in netG))
def define_F(input_nc, netF, norm='batch', use_dropout=False, init_type='normal', init_gain=0.02, no_antialias=False, gpu_ids=[], opt=None):
if netF == 'global_pool':
net = PoolingF()
elif netF == 'reshape':
net = ReshapeF()
elif netF == 'sample':
net = PatchSampleF(use_mlp=False, init_type=init_type, init_gain=init_gain, gpu_ids=gpu_ids, nc=opt.netF_nc)
elif netF == 'mlp_sample':
net = PatchSampleF(use_mlp=True, init_type=init_type, init_gain=init_gain, gpu_ids=gpu_ids, nc=opt.netF_nc)
elif netF == 'strided_conv':
net = StridedConvF(init_type=init_type, init_gain=init_gain, gpu_ids=gpu_ids)
elif netF == 'mlp_samplev2':
net = PatchSampleFv2(use_mlp=True, init_type=init_type, init_gain=init_gain, gpu_ids=gpu_ids, nc=opt.netF_nc)
else:
raise NotImplementedError('projection model name [%s] is not recognized' % netF)
return init_net(net, init_type, init_gain, gpu_ids)
def define_D(input_nc, ndf, netD, n_layers_D=3, norm='batch', init_type='normal', init_gain=0.02, no_antialias=False, gpu_ids=[], opt=None):
"""Create a discriminator
Parameters:
input_nc (int) -- the number of channels in input images
ndf (int) -- the number of filters in the first conv layer
netD (str) -- the architecture's name: basic | n_layers | pixel
n_layers_D (int) -- the number of conv layers in the discriminator; effective when netD=='n_layers'
norm (str) -- the type of normalization layers used in the network.
init_type (str) -- the name of the initialization method.
init_gain (float) -- scaling factor for normal, xavier and orthogonal.
gpu_ids (int list) -- which GPUs the network runs on: e.g., 0,1,2
Returns a discriminator
Our current implementation provides three types of discriminators:
[basic]: 'PatchGAN' classifier described in the original pix2pix paper.
It can classify whether 70×70 overlapping patches are real or fake.
Such a patch-level discriminator architecture has fewer parameters
than a full-image discriminator and can work on arbitrarily-sized images
in a fully convolutional fashion.
        [n_layers]: With this mode, you can specify the number of conv layers in the discriminator
with the parameter <n_layers_D> (default=3 as used in [basic] (PatchGAN).)
[pixel]: 1x1 PixelGAN discriminator can classify whether a pixel is real or not.
It encourages greater color diversity but has no effect on spatial statistics.
The discriminator has been initialized by <init_net>. It uses Leaky RELU for non-linearity.
"""
net = None
norm_layer = get_norm_layer(norm_type=norm)
if netD == 'basic': # default PatchGAN classifier
net = NLayerDiscriminator(input_nc, ndf, n_layers=3, norm_layer=norm_layer, no_antialias=no_antialias,)
elif netD == 'n_layers': # more options
net = NLayerDiscriminator(input_nc, ndf, n_layers_D, norm_layer=norm_layer, no_antialias=no_antialias,)
elif netD == 'pixel': # classify if each pixel is real or fake
net = PixelDiscriminator(input_nc, ndf, norm_layer=norm_layer)
elif 'stylegan2' in netD:
net = StyleGAN2Discriminator(input_nc, ndf, n_layers_D, no_antialias=no_antialias, opt=opt)
else:
raise NotImplementedError('Discriminator model name [%s] is not recognized' % netD)
return init_net(net, init_type, init_gain, gpu_ids,
initialize_weights=('stylegan2' not in netD))
##############################################################################
# Classes
##############################################################################
class GANLoss(nn.Module):
"""Define different GAN objectives.
The GANLoss class abstracts away the need to create the target label tensor
that has the same size as the input.
"""
def __init__(self, gan_mode, target_real_label=1.0, target_fake_label=0.0):
""" Initialize the GANLoss class.
Parameters:
            gan_mode (str) - - the type of GAN objective. It currently supports vanilla, lsgan, wgangp and nonsaturating.
target_real_label (bool) - - label for a real image
target_fake_label (bool) - - label of a fake image
Note: Do not use sigmoid as the last layer of Discriminator.
LSGAN needs no sigmoid. vanilla GANs will handle it with BCEWithLogitsLoss.
"""
super(GANLoss, self).__init__()
self.register_buffer('real_label', torch.tensor(target_real_label))
self.register_buffer('fake_label', torch.tensor(target_fake_label))
self.gan_mode = gan_mode
if gan_mode == 'lsgan':
self.loss = nn.MSELoss()
elif gan_mode == 'vanilla':
self.loss = nn.BCEWithLogitsLoss()
elif gan_mode in ['wgangp', 'nonsaturating']:
self.loss = None
else:
raise NotImplementedError('gan mode %s not implemented' % gan_mode)
def get_target_tensor(self, prediction, target_is_real):
"""Create label tensors with the same size as the input.
Parameters:
            prediction (tensor) - - typically the prediction from a discriminator
target_is_real (bool) - - if the ground truth label is for real images or fake images
Returns:
A label tensor filled with ground truth label, and with the size of the input
"""
if target_is_real:
target_tensor = self.real_label
else:
target_tensor = self.fake_label
return target_tensor.expand_as(prediction)
def __call__(self, prediction, target_is_real):
"""Calculate loss given Discriminator's output and grount truth labels.
Parameters:
            prediction (tensor) - - typically the prediction output from a discriminator
target_is_real (bool) - - if the ground truth label is for real images or fake images
Returns:
the calculated loss.
"""
bs = prediction.size(0)
if self.gan_mode in ['lsgan', 'vanilla']:
target_tensor = self.get_target_tensor(prediction, target_is_real)
loss = self.loss(prediction, target_tensor)
elif self.gan_mode == 'wgangp':
if target_is_real:
loss = -prediction.mean()
else:
loss = prediction.mean()
elif self.gan_mode == 'nonsaturating':
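            # softplus(-x) == -log(sigmoid(x)), so this is the standard non-saturating GAN loss, averaged per sample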
if target_is_real:
loss = F.softplus(-prediction).view(bs, -1).mean(dim=1)
else:
loss = F.softplus(prediction).view(bs, -1).mean(dim=1)
return loss
def cal_gradient_penalty(netD, real_data, fake_data, device, type='mixed', constant=1.0, lambda_gp=10.0):
"""Calculate the gradient penalty loss, used in WGAN-GP paper https://arxiv.org/abs/1704.00028
Arguments:
netD (network) -- discriminator network
real_data (tensor array) -- real images
fake_data (tensor array) -- generated images from the generator
device (str) -- GPU / CPU: from torch.device('cuda:{}'.format(self.gpu_ids[0])) if self.gpu_ids else torch.device('cpu')
type (str) -- if we mix real and fake data or not [real | fake | mixed].
        constant (float) -- the constant used in the formula (||gradient||_2 - constant)^2
lambda_gp (float) -- weight for this loss
Returns the gradient penalty loss
"""
if lambda_gp > 0.0:
if type == 'real': # either use real images, fake images, or a linear interpolation of two.
interpolatesv = real_data
elif type == 'fake':
interpolatesv = fake_data
elif type == 'mixed':
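            # interpolate each real/fake pair at a random point along the line between them, as in the WGAN-GP paper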
alpha = torch.rand(real_data.shape[0], 1, device=device)
alpha = alpha.expand(real_data.shape[0], real_data.nelement() // real_data.shape[0]).contiguous().view(*real_data.shape)
interpolatesv = alpha * real_data + ((1 - alpha) * fake_data)
else:
raise NotImplementedError('{} not implemented'.format(type))
interpolatesv.requires_grad_(True)
disc_interpolates = netD(interpolatesv)
gradients = torch.autograd.grad(outputs=disc_interpolates, inputs=interpolatesv,
grad_outputs=torch.ones(disc_interpolates.size()).to(device),
create_graph=True, retain_graph=True, only_inputs=True)
gradients = gradients[0].view(real_data.size(0), -1) # flat the data
gradient_penalty = (((gradients + 1e-16).norm(2, dim=1) - constant) ** 2).mean() * lambda_gp # added eps
return gradient_penalty, gradients
else:
return 0.0, None
class Normalize(nn.Module):
def __init__(self, power=2):
super(Normalize, self).__init__()
self.power = power
def forward(self, x):
norm = x.pow(self.power).sum(1, keepdim=True).pow(1. / self.power)
out = x.div(norm + 1e-7)
return out
class PoolingF(nn.Module):
def __init__(self):
super(PoolingF, self).__init__()
model = [nn.AdaptiveMaxPool2d(1)]
self.model = nn.Sequential(*model)
self.l2norm = Normalize(2)
def forward(self, x):
return self.l2norm(self.model(x))
class ReshapeF(nn.Module):
def __init__(self):
super(ReshapeF, self).__init__()
model = [nn.AdaptiveAvgPool2d(4)]
self.model = nn.Sequential(*model)
self.l2norm = Normalize(2)
def forward(self, x):
x = self.model(x)
x_reshape = x.permute(0, 2, 3, 1).flatten(0, 2)
return self.l2norm(x_reshape)
class StridedConvF(nn.Module):
def __init__(self, init_type='normal', init_gain=0.02, gpu_ids=[]):
super().__init__()
# self.conv1 = nn.Conv2d(256, 128, 3, stride=2)
# self.conv2 = nn.Conv2d(128, 64, 3, stride=1)
self.l2_norm = Normalize(2)
self.mlps = {}
self.moving_averages = {}
self.init_type = init_type
self.init_gain = init_gain
self.gpu_ids = gpu_ids
def create_mlp(self, x):
C, H = x.shape[1], x.shape[2]
n_down = int(np.rint(np.log2(H / 32)))
mlp = []
for i in range(n_down):
mlp.append(nn.Conv2d(C, max(C // 2, 64), 3, stride=2))
mlp.append(nn.ReLU())
C = max(C // 2, 64)
mlp.append(nn.Conv2d(C, 64, 3))
mlp = nn.Sequential(*mlp)
init_net(mlp, self.init_type, self.init_gain, self.gpu_ids)
return mlp
def update_moving_average(self, key, x):
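        # exponential moving average of the features (decay 0.999); forward() subtracts it to center the output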
if key not in self.moving_averages:
self.moving_averages[key] = x.detach()
self.moving_averages[key] = self.moving_averages[key] * 0.999 + x.detach() * 0.001
def forward(self, x, use_instance_norm=False):
C, H = x.shape[1], x.shape[2]
key = '%d_%d' % (C, H)
if key not in self.mlps:
self.mlps[key] = self.create_mlp(x)
self.add_module("child_%s" % key, self.mlps[key])
mlp = self.mlps[key]
x = mlp(x)
self.update_moving_average(key, x)
x = x - self.moving_averages[key]
if use_instance_norm:
x = F.instance_norm(x)
return self.l2_norm(x)
class PatchSampleF(nn.Module):
def __init__(self, use_mlp=False, init_type='normal', init_gain=0.02, nc=256, gpu_ids=[]):
# potential issues: currently, we use the same patch_ids for multiple images in the batch
super(PatchSampleF, self).__init__()
self.l2norm = Normalize(2)
self.use_mlp = use_mlp
self.nc = nc # hard-coded
self.mlp_init = False
self.init_type = init_type
self.init_gain = init_gain
self.gpu_ids = gpu_ids
def create_mlp(self, feats):
for mlp_id, feat in enumerate(feats):
input_nc = feat.shape[1]
mlp = nn.Sequential(*[nn.Linear(input_nc, self.nc), nn.ReLU(), nn.Linear(self.nc, self.nc)])
if len(self.gpu_ids) > 0:
mlp.cuda()
setattr(self, 'mlp_%d' % mlp_id, mlp)
init_net(self, self.init_type, self.init_gain, self.gpu_ids)
self.mlp_init = True
def forward(self, feats, num_patches=64, patch_ids=None):
return_ids = []
return_feats = []
if self.use_mlp and not self.mlp_init:
self.create_mlp(feats)
for feat_id, feat in enumerate(feats):
B, H, W = feat.shape[0], feat.shape[2], feat.shape[3]
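            # (B, C, H, W) -> (B, H*W, C): flatten the spatial positions so patches can be indexed along dim 1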
feat_reshape = feat.permute(0, 2, 3, 1).flatten(1, 2)
if num_patches > 0:
if patch_ids is not None:
patch_id = patch_ids[feat_id]
else:
patch_id = torch.randperm(feat_reshape.shape[1], device=feats[0].device)
patch_id = patch_id[:int(min(num_patches, patch_id.shape[0]))] # .to(patch_ids.device)
x_sample = feat_reshape[:, patch_id, :].flatten(0, 1) # b,num_patch, c
else:
x_sample = feat_reshape
patch_id = []
if self.use_mlp:
mlp = getattr(self, 'mlp_%d' % feat_id)
x_sample = mlp(x_sample)
return_ids.append(patch_id)
x_sample = self.l2norm(x_sample)
if num_patches == 0:
x_sample = x_sample.permute(0, 2, 1).reshape([B, x_sample.shape[-1], H, W])
return_feats.append(x_sample)
return return_feats, return_ids
class PatchSampleFv2(nn.Module):
def __init__(self, use_mlp=False, init_type='normal', init_gain=0.02, nc=256, gpu_ids=[]):
# potential issues: currently, we use the same patch_ids for multiple images in the batch
super(PatchSampleFv2, self).__init__()
self.l2norm = Normalize(2)
self.use_mlp = use_mlp
self.nc = nc # hard-coded
self.mlp_init = False
self.init_type = init_type
self.init_gain = init_gain
self.gpu_ids = gpu_ids
def create_mlp(self, feats):
for mlp_id, feat in enumerate(feats):
input_nc = feat.shape[1]
mlp = nn.Sequential(*[nn.Linear(input_nc, self.nc), nn.ReLU(), nn.Linear(self.nc, self.nc)])
if len(self.gpu_ids) > 0:
mlp.cuda()
setattr(self, 'mlp_%d' % mlp_id, mlp)
init_net(self, self.init_type, self.init_gain, self.gpu_ids)
self.mlp_init = True
def forward(self, feats, num_patches=64, patch_ids=None):
return_ids = []
return_feats = []
if self.use_mlp and not self.mlp_init:
self.create_mlp(feats)
for feat_id, feat in enumerate(feats):
B, C, H, W = feat.size()
feat_reshape = feat.permute(0, 2, 3, 1).flatten(1, 2)
if num_patches > 0:
if patch_ids is not None:
patch_id = patch_ids[feat_id]
else:
patch_id = torch.randperm(feat_reshape.shape[1], device=feats[0].device)
patch_id = patch_id[:int(min(num_patches, patch_id.shape[0]))] # .to(patch_ids.device)
x_sample = feat_reshape[:, patch_id, :] # b, num_patch, c
else:
x_sample = feat_reshape
patch_id = []
if self.use_mlp:
mlp = getattr(self, 'mlp_%d' % feat_id)
x_sample = mlp(x_sample)
return_ids.append(patch_id)
x_sample = self.l2norm(x_sample)
# if num_patches == 0:
# x_sample = x_sample.permute(0, 2, 1).reshape([B, x_sample.shape[-1], H, W])
return_feats.append(x_sample)
return return_feats, return_ids
class G_Resnet(nn.Module):
def __init__(self, input_nc, output_nc, nz, num_downs, n_res, ngf=64,
norm=None, nl_layer=None):
super(G_Resnet, self).__init__()
n_downsample = num_downs
pad_type = 'reflect'
self.enc_content = ContentEncoder(n_downsample, n_res, input_nc, ngf, norm, nl_layer, pad_type=pad_type)
if nz == 0:
self.dec = Decoder(n_downsample, n_res, self.enc_content.output_dim, output_nc, norm=norm, activ=nl_layer, pad_type=pad_type, nz=nz)
else:
self.dec = Decoder_all(n_downsample, n_res, self.enc_content.output_dim, output_nc, norm=norm, activ=nl_layer, pad_type=pad_type, nz=nz)
def decode(self, content, style=None):
return self.dec(content, style)
def forward(self, image, style=None, nce_layers=[], encode_only=False):
content, feats = self.enc_content(image, nce_layers=nce_layers, encode_only=encode_only)
if encode_only:
return feats
else:
images_recon = self.decode(content, style)
if len(nce_layers) > 0:
return images_recon, feats
else:
return images_recon
##################################################################################
# Encoder and Decoders
##################################################################################
class E_adaIN(nn.Module):
def __init__(self, input_nc, output_nc=1, nef=64, n_layers=4,
norm=None, nl_layer=None, vae=False):
# style encoder
super(E_adaIN, self).__init__()
self.enc_style = StyleEncoder(n_layers, input_nc, nef, output_nc, norm='none', activ='relu', vae=vae)
def forward(self, image):
style = self.enc_style(image)
return style
class StyleEncoder(nn.Module):
def __init__(self, n_downsample, input_dim, dim, style_dim, norm, activ, vae=False):
super(StyleEncoder, self).__init__()
self.vae = vae
self.model = []
self.model += [Conv2dBlock(input_dim, dim, 7, 1, 3, norm=norm, activation=activ, pad_type='reflect')]
for i in range(2):
self.model += [Conv2dBlock(dim, 2 * dim, 4, 2, 1, norm=norm, activation=activ, pad_type='reflect')]
dim *= 2
for i in range(n_downsample - 2):
self.model += [Conv2dBlock(dim, dim, 4, 2, 1, norm=norm, activation=activ, pad_type='reflect')]
self.model += [nn.AdaptiveAvgPool2d(1)] # global average pooling
if self.vae:
self.fc_mean = nn.Linear(dim, style_dim) # , 1, 1, 0)
self.fc_var = nn.Linear(dim, style_dim) # , 1, 1, 0)
else:
self.model += [nn.Conv2d(dim, style_dim, 1, 1, 0)]
self.model = nn.Sequential(*self.model)
self.output_dim = dim
def forward(self, x):
if self.vae:
output = self.model(x)
output = output.view(x.size(0), -1)
output_mean = self.fc_mean(output)
output_var = self.fc_var(output)
return output_mean, output_var
else:
return self.model(x).view(x.size(0), -1)
class ContentEncoder(nn.Module):
def __init__(self, n_downsample, n_res, input_dim, dim, norm, activ, pad_type='zero'):
super(ContentEncoder, self).__init__()
self.model = []
self.model += [Conv2dBlock(input_dim, dim, 7, 1, 3, norm=norm, activation=activ, pad_type='reflect')]
# downsampling blocks
for i in range(n_downsample):
self.model += [Conv2dBlock(dim, 2 * dim, 4, 2, 1, norm=norm, activation=activ, pad_type='reflect')]
dim *= 2
# residual blocks
self.model += [ResBlocks(n_res, dim, norm=norm, activation=activ, pad_type=pad_type)]
self.model = nn.Sequential(*self.model)
self.output_dim = dim
def forward(self, x, nce_layers=[], encode_only=False):
if len(nce_layers) > 0:
feat = x
feats = []
for layer_id, layer in enumerate(self.model):
feat = layer(feat)
if layer_id in nce_layers:
feats.append(feat)
if layer_id == nce_layers[-1] and encode_only:
return None, feats
return feat, feats
else:
return self.model(x), None
class Decoder_all(nn.Module):
def __init__(self, n_upsample, n_res, dim, output_dim, norm='batch', activ='relu', pad_type='zero', nz=0):
super(Decoder_all, self).__init__()
# AdaIN residual blocks
self.resnet_block = ResBlocks(n_res, dim, norm, activ, pad_type=pad_type, nz=nz)
self.n_blocks = 0
# upsampling blocks
for i in range(n_upsample):
block = [Upsample2(scale_factor=2), Conv2dBlock(dim + nz, dim // 2, 5, 1, 2, norm='ln', activation=activ, pad_type='reflect')]
setattr(self, 'block_{:d}'.format(self.n_blocks), nn.Sequential(*block))
self.n_blocks += 1
dim //= 2
# use reflection padding in the last conv layer
setattr(self, 'block_{:d}'.format(self.n_blocks), Conv2dBlock(dim + nz, output_dim, 7, 1, 3, norm='none', activation='tanh', pad_type='reflect'))
self.n_blocks += 1
def forward(self, x, y=None):
if y is not None:
output = self.resnet_block(cat_feature(x, y))
for n in range(self.n_blocks):
block = getattr(self, 'block_{:d}'.format(n))
if n > 0:
output = block(cat_feature(output, y))
else:
output = block(output)
return output
class Decoder(nn.Module):
def __init__(self, n_upsample, n_res, dim, output_dim, norm='batch', activ='relu', pad_type='zero', nz=0):
super(Decoder, self).__init__()
self.model = []
# AdaIN residual blocks
self.model += [ResBlocks(n_res, dim, norm, activ, pad_type=pad_type, nz=nz)]
# upsampling blocks
for i in range(n_upsample):
if i == 0:
input_dim = dim + nz
else:
input_dim = dim
self.model += [Upsample2(scale_factor=2), Conv2dBlock(input_dim, dim // 2, 5, 1, 2, norm='ln', activation=activ, pad_type='reflect')]
dim //= 2
# use reflection padding in the last conv layer
self.model += [Conv2dBlock(dim, output_dim, 7, 1, 3, norm='none', activation='tanh', pad_type='reflect')]
self.model = nn.Sequential(*self.model)
def forward(self, x, y=None):
if y is not None:
return self.model(cat_feature(x, y))
else:
return self.model(x)
##################################################################################
# Sequential Models
##################################################################################
class ResBlocks(nn.Module):
def __init__(self, num_blocks, dim, norm='inst', activation='relu', pad_type='zero', nz=0):
super(ResBlocks, self).__init__()
self.model = []
for i in range(num_blocks):
self.model += [ResBlock(dim, norm=norm, activation=activation, pad_type=pad_type, nz=nz)]
self.model = nn.Sequential(*self.model)
def forward(self, x):
return self.model(x)
##################################################################################
# Basic Blocks
##################################################################################
def cat_feature(x, y):
y_expand = y.view(y.size(0), y.size(1), 1, 1).expand(
y.size(0), y.size(1), x.size(2), x.size(3))
x_cat = torch.cat([x, y_expand], 1)
return x_cat
class ResBlock(nn.Module):
def __init__(self, dim, norm='inst', activation='relu', pad_type='zero', nz=0):
super(ResBlock, self).__init__()
model = []
model += [Conv2dBlock(dim + nz, dim, 3, 1, 1, norm=norm, activation=activation, pad_type=pad_type)]
model += [Conv2dBlock(dim, dim + nz, 3, 1, 1, norm=norm, activation='none', pad_type=pad_type)]
self.model = nn.Sequential(*model)
def forward(self, x):
residual = x
out = self.model(x)
out += residual
return out
class Conv2dBlock(nn.Module):
def __init__(self, input_dim, output_dim, kernel_size, stride,
padding=0, norm='none', activation='relu', pad_type='zero'):
super(Conv2dBlock, self).__init__()
self.use_bias = True
# initialize padding
if pad_type == 'reflect':
self.pad = nn.ReflectionPad2d(padding)
elif pad_type == 'zero':
self.pad = nn.ZeroPad2d(padding)
else:
assert 0, "Unsupported padding type: {}".format(pad_type)
# initialize normalization
norm_dim = output_dim
if norm == 'batch':
self.norm = nn.BatchNorm2d(norm_dim)
elif norm == 'inst':
self.norm = nn.InstanceNorm2d(norm_dim, track_running_stats=False)
elif norm == 'ln':
self.norm = LayerNorm(norm_dim)
elif norm == 'none':
self.norm = None
else:
assert 0, "Unsupported normalization: {}".format(norm)
# initialize activation
if activation == 'relu':
self.activation = nn.ReLU(inplace=True)
elif activation == 'lrelu':
self.activation = nn.LeakyReLU(0.2, inplace=True)
elif activation == 'prelu':
self.activation = nn.PReLU()
elif activation == 'selu':
self.activation = nn.SELU(inplace=True)
elif activation == 'tanh':
self.activation = nn.Tanh()
elif activation == 'none':
self.activation = None
else:
assert 0, "Unsupported activation: {}".format(activation)
# initialize convolution
self.conv = nn.Conv2d(input_dim, output_dim, kernel_size, stride, bias=self.use_bias)
def forward(self, x):
x = self.conv(self.pad(x))
if self.norm:
x = self.norm(x)
if self.activation:
x = self.activation(x)
return x
class LinearBlock(nn.Module):
def __init__(self, input_dim, output_dim, norm='none', activation='relu'):
super(LinearBlock, self).__init__()
use_bias = True
# initialize fully connected layer
self.fc = nn.Linear(input_dim, output_dim, bias=use_bias)
# initialize normalization
norm_dim = output_dim
if norm == 'batch':
self.norm = nn.BatchNorm1d(norm_dim)
elif norm == 'inst':
self.norm = nn.InstanceNorm1d(norm_dim)
elif norm == 'ln':
self.norm = LayerNorm(norm_dim)
elif norm == 'none':
self.norm = None
else:
assert 0, "Unsupported normalization: {}".format(norm)
# initialize activation
if activation == 'relu':
self.activation = nn.ReLU(inplace=True)
elif activation == 'lrelu':
self.activation = nn.LeakyReLU(0.2, inplace=True)
elif activation == 'prelu':
self.activation = nn.PReLU()
elif activation == 'selu':
self.activation = nn.SELU(inplace=True)
elif activation == 'tanh':
self.activation = nn.Tanh()
elif activation == 'none':
self.activation = None
else:
assert 0, "Unsupported activation: {}".format(activation)
def forward(self, x):
out = self.fc(x)
if self.norm:
out = self.norm(out)
if self.activation:
out = self.activation(out)
return out
##################################################################################
# Normalization layers
##################################################################################
class LayerNorm(nn.Module):
def __init__(self, num_features, eps=1e-5, affine=True):
super(LayerNorm, self).__init__()
self.num_features = num_features
self.affine = affine
self.eps = eps
if self.affine:
self.gamma = nn.Parameter(torch.Tensor(num_features).uniform_())
self.beta = nn.Parameter(torch.zeros(num_features))
def forward(self, x):
shape = [-1] + [1] * (x.dim() - 1)
mean = x.view(x.size(0), -1).mean(1).view(*shape)
std = x.view(x.size(0), -1).std(1).view(*shape)
x = (x - mean) / (std + self.eps)
if self.affine:
shape = [1, -1] + [1] * (x.dim() - 2)
x = x * self.gamma.view(*shape) + self.beta.view(*shape)
return x
class ResnetGenerator(nn.Module):
"""Resnet-based generator that consists of Resnet blocks between a few downsampling/upsampling operations.
We adapt Torch code and idea from Justin Johnson's neural style transfer project(https://github.com/jcjohnson/fast-neural-style)
"""
def __init__(self, input_nc, output_nc, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False, n_blocks=6, padding_type='reflect', no_antialias=False, no_antialias_up=False, opt=None):
"""Construct a Resnet-based generator
Parameters:
input_nc (int) -- the number of channels in input images
output_nc (int) -- the number of channels in output images
ngf (int) -- the number of filters in the last conv layer
norm_layer -- normalization layer
use_dropout (bool) -- if use dropout layers
n_blocks (int) -- the number of ResNet blocks
padding_type (str) -- the name of padding layer in conv layers: reflect | replicate | zero
"""
assert(n_blocks >= 0)
super(ResnetGenerator, self).__init__()
self.opt = opt
if type(norm_layer) == functools.partial:
use_bias = norm_layer.func == nn.InstanceNorm2d
else:
use_bias = norm_layer == nn.InstanceNorm2d
model = [nn.ReflectionPad2d(3),
nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0, bias=use_bias),
norm_layer(ngf),
nn.ReLU(True)]
n_downsampling = 2
for i in range(n_downsampling): # add downsampling layers
mult = 2 ** i
if(no_antialias):
model += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3, stride=2, padding=1, bias=use_bias),
norm_layer(ngf * mult * 2),
nn.ReLU(True)]
else:
model += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3, stride=1, padding=1, bias=use_bias),
norm_layer(ngf * mult * 2),
nn.ReLU(True),
Downsample(ngf * mult * 2)]
mult = 2 ** n_downsampling
for i in range(n_blocks): # add ResNet blocks
model += [ResnetBlock(ngf * mult, padding_type=padding_type, norm_layer=norm_layer, use_dropout=use_dropout, use_bias=use_bias)]
for i in range(n_downsampling): # add upsampling layers
mult = 2 ** (n_downsampling - i)
if no_antialias_up:
model += [nn.ConvTranspose2d(ngf * mult, int(ngf * mult / 2),
kernel_size=3, stride=2,
padding=1, output_padding=1,
bias=use_bias),
norm_layer(int(ngf * mult / 2)),
nn.ReLU(True)]
else:
model += [Upsample(ngf * mult),
nn.Conv2d(ngf * mult, int(ngf * mult / 2),
kernel_size=3, stride=1,
padding=1, # output_padding=1,
bias=use_bias),
norm_layer(int(ngf * mult / 2)),
nn.ReLU(True)]
model += [nn.ReflectionPad2d(3)]
model += [nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0)]
model += [nn.Tanh()]
self.model = nn.Sequential(*model)
def forward(self, input, layers=[], encode_only=False):
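        # when `layers` is given, the activations at those indices are collected alongside (or, with encode_only, instead of) the output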
if -1 in layers:
layers.append(len(self.model))
if len(layers) > 0:
feat = input
feats = []
for layer_id, layer in enumerate(self.model):
# print(layer_id, layer)
feat = layer(feat)
if layer_id in layers:
# print("%d: adding the output of %s %d" % (layer_id, layer.__class__.__name__, feat.size(1)))
feats.append(feat)
else:
# print("%d: skipping %s %d" % (layer_id, layer.__class__.__name__, feat.size(1)))
pass
if layer_id == layers[-1] and encode_only:
# print('encoder only return features')
return feats # return intermediate features alone; stop in the last layers
return feat, feats # return both output and intermediate features
else:
"""Standard forward"""
fake = self.model(input)
return fake
class ResnetDecoder(nn.Module):
"""Resnet-based decoder that consists of a few Resnet blocks + a few upsampling operations.
"""
def __init__(self, input_nc, output_nc, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False, n_blocks=6, padding_type='reflect', no_antialias=False):
"""Construct a Resnet-based decoder
Parameters:
input_nc (int) -- the number of channels in input images
output_nc (int) -- the number of channels in output images
ngf (int) -- the number of filters in the last conv layer
norm_layer -- normalization layer
use_dropout (bool) -- if use dropout layers
n_blocks (int) -- the number of ResNet blocks
padding_type (str) -- the name of padding layer in conv layers: reflect | replicate | zero
"""
assert(n_blocks >= 0)
super(ResnetDecoder, self).__init__()
if type(norm_layer) == functools.partial:
use_bias = norm_layer.func == nn.InstanceNorm2d
else:
use_bias = norm_layer == nn.InstanceNorm2d
model = []
n_downsampling = 2
mult = 2 ** n_downsampling
for i in range(n_blocks): # add ResNet blocks
model += [ResnetBlock(ngf * mult, padding_type=padding_type, norm_layer=norm_layer, use_dropout=use_dropout, use_bias=use_bias)]
for i in range(n_downsampling): # add upsampling layers
mult = 2 ** (n_downsampling - i)
if(no_antialias):
model += [nn.ConvTranspose2d(ngf * mult, int(ngf * mult / 2),
kernel_size=3, stride=2,
padding=1, output_padding=1,
bias=use_bias),
norm_layer(int(ngf * mult / 2)),
nn.ReLU(True)]
else:
model += [Upsample(ngf * mult),
nn.Conv2d(ngf * mult, int(ngf * mult / 2),
kernel_size=3, stride=1,
padding=1,
bias=use_bias),
norm_layer(int(ngf * mult / 2)),
nn.ReLU(True)]
model += [nn.ReflectionPad2d(3)]
model += [nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0)]
model += [nn.Tanh()]
self.model = nn.Sequential(*model)
def forward(self, input):
"""Standard forward"""
return self.model(input)
class ResnetEncoder(nn.Module):
"""Resnet-based encoder that consists of a few downsampling + several Resnet blocks
"""
def __init__(self, input_nc, output_nc, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False, n_blocks=6, padding_type='reflect', no_antialias=False):
"""Construct a Resnet-based encoder
Parameters:
input_nc (int) -- the number of channels in input images
output_nc (int) -- the number of channels in output images
ngf (int) -- the number of filters in the last conv layer
norm_layer -- normalization layer
use_dropout (bool) -- if use dropout layers
n_blocks (int) -- the number of ResNet blocks
padding_type (str) -- the name of padding layer in conv layers: reflect | replicate | zero
"""
assert(n_blocks >= 0)
super(ResnetEncoder, self).__init__()
if type(norm_layer) == functools.partial:
use_bias = norm_layer.func == nn.InstanceNorm2d
else:
use_bias = norm_layer == nn.InstanceNorm2d
model = [nn.ReflectionPad2d(3),
nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0, bias=use_bias),
norm_layer(ngf),
nn.ReLU(True)]
n_downsampling = 2
for i in range(n_downsampling): # add downsampling layers
mult = 2 ** i
if(no_antialias):
model += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3, stride=2, padding=1, bias=use_bias),
norm_layer(ngf * mult * 2),
nn.ReLU(True)]
else:
model += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3, stride=1, padding=1, bias=use_bias),
norm_layer(ngf * mult * 2),
nn.ReLU(True),
Downsample(ngf * mult * 2)]
mult = 2 ** n_downsampling
for i in range(n_blocks): # add ResNet blocks
model += [ResnetBlock(ngf * mult, padding_type=padding_type, norm_layer=norm_layer, use_dropout=use_dropout, use_bias=use_bias)]
self.model = nn.Sequential(*model)
def forward(self, input):
"""Standard forward"""
return self.model(input)
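# A small shape demo for the encoder/decoder pair above (not part of the original file; the
# helper name, the 256x256 input size and the shapes quoted in the comments are illustrative,
# and the spatial sizes assume Downsample/Upsample change resolution by a factor of 2, as the
# antialiased sampling layers used elsewhere in this file normally do).
def _resnet_encoder_decoder_shape_demo():
    enc = ResnetEncoder(input_nc=3, output_nc=3, ngf=64, n_blocks=4)
    dec = ResnetDecoder(input_nc=3, output_nc=3, ngf=64, n_blocks=4)
    x = torch.randn(1, 3, 256, 256)
    z = enc(x)   # expected (1, 256, 64, 64): two downsamplings, channels ngf -> ngf * 4
    y = dec(z)   # expected (1, 3, 256, 256): two upsamplings back to the input resolution
    print(z.shape, y.shape)
    return z, y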
class ResnetBlock(nn.Module):
"""Define a Resnet block"""
def __init__(self, dim, padding_type, norm_layer, use_dropout, use_bias):
"""Initialize the Resnet block
A resnet block is a conv block with skip connections
We construct a conv block with build_conv_block function,
and implement skip connections in <forward> function.
Original Resnet paper: https://arxiv.org/pdf/1512.03385.pdf
"""
super(ResnetBlock, self).__init__()
self.conv_block = self.build_conv_block(dim, padding_type, norm_layer, use_dropout, use_bias)
def build_conv_block(self, dim, padding_type, norm_layer, use_dropout, use_bias):
"""Construct a convolutional block.
Parameters:
dim (int) -- the number of channels in the conv layer.
padding_type (str) -- the name of padding layer: reflect | replicate | zero
norm_layer -- normalization layer
use_dropout (bool) -- if use dropout layers.
use_bias (bool) -- if the conv layer uses bias or not
Returns a conv block (with a conv layer, a normalization layer, and a non-linearity layer (ReLU))
"""
conv_block = []
p = 0
if padding_type == 'reflect':
conv_block += [nn.ReflectionPad2d(1)]
elif padding_type == 'replicate':
conv_block += [nn.ReplicationPad2d(1)]
elif padding_type == 'zero':
p = 1
else:
raise NotImplementedError('padding [%s] is not implemented' % padding_type)
conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias), norm_layer(dim), nn.ReLU(True)]
if use_dropout:
conv_block += [nn.Dropout(0.5)]
p = 0
if padding_type == 'reflect':
conv_block += [nn.ReflectionPad2d(1)]
elif padding_type == 'replicate':
conv_block += [nn.ReplicationPad2d(1)]
elif padding_type == 'zero':
p = 1
else:
raise NotImplementedError('padding [%s] is not implemented' % padding_type)
conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias), norm_layer(dim)]
return nn.Sequential(*conv_block)
def forward(self, x):
"""Forward function (with skip connections)"""
out = x + self.conv_block(x) # add skip connections
return out
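# A minimal check of the block above (not part of the original file; the helper name and the
# 64-channel / 8x8 sizes are arbitrary illustrative choices): the conv branch preserves both
# the channel count and the spatial size, which is what makes the residual addition in
# forward() well-defined.
def _resnet_block_shape_demo():
    block = ResnetBlock(64, padding_type='reflect', norm_layer=nn.BatchNorm2d,
                        use_dropout=False, use_bias=False)
    x = torch.randn(2, 64, 8, 8)
    out = block(x)
    assert out.shape == x.shape
    return out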
class UnetGenerator(nn.Module):
"""Create a Unet-based generator"""
def __init__(self, input_nc, output_nc, num_downs, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False):
"""Construct a Unet generator
Parameters:
input_nc (int) -- the number of channels in input images
output_nc (int) -- the number of channels in output images
            num_downs (int) -- the number of downsamplings in UNet. For example, if |num_downs| == 7,
                                an image of size 128x128 will become of size 1x1 at the bottleneck
ngf (int) -- the number of filters in the last conv layer
norm_layer -- normalization layer
We construct the U-Net from the innermost layer to the outermost layer.
It is a recursive process.
"""
super(UnetGenerator, self).__init__()
# construct unet structure
unet_block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, input_nc=None, submodule=None, norm_layer=norm_layer, innermost=True) # add the innermost layer
for i in range(num_downs - 5): # add intermediate layers with ngf * 8 filters
unet_block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, input_nc=None, submodule=unet_block, norm_layer=norm_layer, use_dropout=use_dropout)
# gradually reduce the number of filters from ngf * 8 to ngf
unet_block = UnetSkipConnectionBlock(ngf * 4, ngf * 8, input_nc=None, submodule=unet_block, norm_layer=norm_layer)
unet_block = UnetSkipConnectionBlock(ngf * 2, ngf * 4, input_nc=None, submodule=unet_block, norm_layer=norm_layer)
unet_block = UnetSkipConnectionBlock(ngf, ngf * 2, input_nc=None, submodule=unet_block, norm_layer=norm_layer)
self.model = UnetSkipConnectionBlock(output_nc, ngf, input_nc=input_nc, submodule=unet_block, outermost=True, norm_layer=norm_layer) # add the outermost layer
def forward(self, input):
"""Standard forward"""
return self.model(input)
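# A quick sanity check of the generator above (not part of the original file; the helper name
# and the 3-channel 128x128 input are illustrative). It mirrors the docstring: with
# num_downs == 7 a 128x128 image is halved seven times, reaching 1x1 at the bottleneck, and
# the output comes back at the input resolution.
def _unet_generator_shape_demo():
    net = UnetGenerator(input_nc=3, output_nc=3, num_downs=7, ngf=64)
    x = torch.randn(1, 3, 128, 128)
    out = net(x)
    assert out.shape == (1, 3, 128, 128)
    return out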
class UnetSkipConnectionBlock(nn.Module):
"""Defines the Unet submodule with skip connection.
X -------------------identity----------------------
|-- downsampling -- |submodule| -- upsampling --|
"""
def __init__(self, outer_nc, inner_nc, input_nc=None,
submodule=None, outermost=False, innermost=False, norm_layer=nn.BatchNorm2d, use_dropout=False):
"""Construct a Unet submodule with skip connections.
Parameters:
outer_nc (int) -- the number of filters in the outer conv layer
inner_nc (int) -- the number of filters in the inner conv layer
input_nc (int) -- the number of channels in input images/features
submodule (UnetSkipConnectionBlock) -- previously defined submodules
outermost (bool) -- if this module is the outermost module
innermost (bool) -- if this module is the innermost module
norm_layer -- normalization layer
use_dropout (bool) -- if use dropout layers.
"""
super(UnetSkipConnectionBlock, self).__init__()
self.outermost = outermost
if type(norm_layer) == functools.partial:
use_bias = norm_layer.func == nn.InstanceNorm2d
else:
use_bias = norm_layer == nn.InstanceNorm2d
if input_nc is None:
input_nc = outer_nc
downconv = nn.Conv2d(input_nc, inner_nc, kernel_size=4,
stride=2, padding=1, bias=use_bias)
downrelu = nn.LeakyReLU(0.2, True)
downnorm = norm_layer(inner_nc)
uprelu = nn.ReLU(True)
upnorm = norm_layer(outer_nc)
if outermost:
upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc,
kernel_size=4, stride=2,
padding=1)
down = [downconv]
up = [uprelu, upconv, nn.Tanh()]
model = down + [submodule] + up
elif innermost:
upconv = nn.ConvTranspose2d(inner_nc, outer_nc,
kernel_size=4, stride=2,
padding=1, bias=use_bias)
down = [downrelu, downconv]
up = [uprelu, upconv, upnorm]
model = down + up
else:
upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc,
kernel_size=4, stride=2,
padding=1, bias=use_bias)
down = [downrelu, downconv, downnorm]
up = [uprelu, upconv, upnorm]
if use_dropout:
model = down + [submodule] + up + [nn.Dropout(0.5)]
else:
model = down + [submodule] + up
self.model = nn.Sequential(*model)
def forward(self, x):
if self.outermost:
return self.model(x)
else: # add skip connections
return torch.cat([x, self.model(x)], 1)
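# Shape demo for the skip block above (not part of the original file; the helper name and the
# 64/128-channel, 32x32 sizes are illustrative). Because forward() concatenates the input with
# the submodule output along dim 1, every non-outermost block returns twice outer_nc channels,
# which is why the enclosing blocks build their upconv with inner_nc * 2 input channels.
def _unet_skip_block_shape_demo():
    block = UnetSkipConnectionBlock(outer_nc=64, inner_nc=128, innermost=True)
    x = torch.randn(1, 64, 32, 32)
    out = block(x)
    assert out.shape == (1, 128, 32, 32)  # 64 identity channels + 64 upsampled channels
    return out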
class NLayerDiscriminator(nn.Module):
"""Defines a PatchGAN discriminator"""
def __init__(self, input_nc, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d, no_antialias=False):
"""Construct a PatchGAN discriminator
Parameters:
input_nc (int) -- the number of channels in input images
ndf (int) -- the number of filters in the last conv layer
n_layers (int) -- the number of conv layers in the discriminator
norm_layer -- normalization layer
"""
super(NLayerDiscriminator, self).__init__()
if type(norm_layer) == functools.partial: # no need to use bias as BatchNorm2d has affine parameters
use_bias = norm_layer.func == nn.InstanceNorm2d
else:
use_bias = norm_layer == nn.InstanceNorm2d
kw = 4
padw = 1
if(no_antialias):
sequence = [nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw), nn.LeakyReLU(0.2, True)]
else:
sequence = [nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=1, padding=padw), nn.LeakyReLU(0.2, True), Downsample(ndf)]
nf_mult = 1
nf_mult_prev = 1
for n in range(1, n_layers): # gradually increase the number of filters
nf_mult_prev = nf_mult
nf_mult = min(2 ** n, 8)
if(no_antialias):
sequence += [
nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=2, padding=padw, bias=use_bias),
norm_layer(ndf * nf_mult),
nn.LeakyReLU(0.2, True)
]
else:
sequence += [
nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=1, padding=padw, bias=use_bias),
norm_layer(ndf * nf_mult),
nn.LeakyReLU(0.2, True),
Downsample(ndf * nf_mult)]
nf_mult_prev = nf_mult
nf_mult = min(2 ** n_layers, 8)
sequence += [
nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=1, padding=padw, bias=use_bias),
norm_layer(ndf * nf_mult),
nn.LeakyReLU(0.2, True)
]
sequence += [nn.Conv2d(ndf * nf_mult, 1, kernel_size=kw, stride=1, padding=padw)] # output 1 channel prediction map
self.model = nn.Sequential(*sequence)
def forward(self, input):
"""Standard forward."""
return self.model(input)
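# Illustration of the PatchGAN output (not part of the original file; the helper name, the
# 3x256x256 input and the quoted output size are illustrative, and the exact spatial size of
# the score map depends on the Downsample layers defined elsewhere in this file).
def _nlayer_discriminator_demo():
    netD = NLayerDiscriminator(input_nc=3, ndf=64, n_layers=3)
    scores = netD(torch.randn(1, 3, 256, 256))
    # One score per overlapping patch rather than a single scalar per image; for the classic
    # stride-2 configuration this map is roughly (1, 1, 30, 30).
    print(scores.shape)
    return scores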
class PixelDiscriminator(nn.Module):
"""Defines a 1x1 PatchGAN discriminator (pixelGAN)"""
def __init__(self, input_nc, ndf=64, norm_layer=nn.BatchNorm2d):
"""Construct a 1x1 PatchGAN discriminator
Parameters:
input_nc (int) -- the number of channels in input images
ndf (int) -- the number of filters in the last conv layer
norm_layer -- normalization layer
"""
super(PixelDiscriminator, self).__init__()
if type(norm_layer) == functools.partial: # no need to use bias as BatchNorm2d has affine parameters
use_bias = norm_layer.func == nn.InstanceNorm2d
else:
use_bias = norm_layer == nn.InstanceNorm2d
self.net = [
nn.Conv2d(input_nc, ndf, kernel_size=1, stride=1, padding=0),
nn.LeakyReLU(0.2, True),
nn.Conv2d(ndf, ndf * 2, kernel_size=1, stride=1, padding=0, bias=use_bias),
norm_layer(ndf * 2),
nn.LeakyReLU(0.2, True),
nn.Conv2d(ndf * 2, 1, kernel_size=1, stride=1, padding=0, bias=use_bias)]
self.net = nn.Sequential(*self.net)
def forward(self, input):
"""Standard forward."""
return self.net(input)
class PatchDiscriminator(NLayerDiscriminator):
"""Defines a PatchGAN discriminator"""
def __init__(self, input_nc, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d, no_antialias=False):
super().__init__(input_nc, ndf, 2, norm_layer, no_antialias)
def forward(self, input):
B, C, H, W = input.size(0), input.size(1), input.size(2), input.size(3)
size = 16
Y = H // size
X = W // size
input = input.view(B, C, Y, size, X, size)
input = input.permute(0, 2, 4, 1, 3, 5).contiguous().view(B * Y * X, C, size, size)
return super().forward(input)
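# A standalone illustration of the tiling done in PatchDiscriminator.forward above (not part
# of the original file; the helper name and the 3x64x64 input are illustrative): the image is
# cut into non-overlapping 16x16 tiles and every tile is scored independently by the parent
# NLayerDiscriminator.
def _patch_split_demo():
    x = torch.randn(2, 3, 64, 64)
    B, C, H, W = x.shape
    size = 16
    tiles = x.view(B, C, H // size, size, W // size, size)
    tiles = tiles.permute(0, 2, 4, 1, 3, 5).contiguous().view(-1, C, size, size)
    assert tiles.shape == (2 * 4 * 4, 3, 16, 16)  # 16 tiles per image
    return tiles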
class GroupedChannelNorm(nn.Module):
def __init__(self, num_groups):
super().__init__()
self.num_groups = num_groups
def forward(self, x):
shape = list(x.shape)
new_shape = [shape[0], self.num_groups, shape[1] // self.num_groups] + shape[2:]
x = x.view(*new_shape)
mean = x.mean(dim=2, keepdim=True)
std = x.std(dim=2, keepdim=True)
x_norm = (x - mean) / (std + 1e-7)
        return x_norm.view(*shape)
 |
the-stack_0_24276 | # Copyright 2016 Huawei Technologies India Pvt. Ltd.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from __future__ import print_function
from neutronclient._i18n import _
from neutronclient.common import utils
from neutronclient.common import validators
from neutronclient.neutron import v2_0 as neutronv20
from neutronclient.neutron.v2_0.bgp import peer as bgp_peer
# Allowed BGP Autonomous number range
MIN_AS_NUM = 1
MAX_AS_NUM = 65535
def get_network_id(client, id_or_name):
return neutronv20.find_resourceid_by_name_or_id(client,
'network',
id_or_name)
def get_bgp_speaker_id(client, id_or_name):
return neutronv20.find_resourceid_by_name_or_id(client,
'bgp_speaker',
id_or_name)
def validate_speaker_attributes(parsed_args):
# Validate AS number
validators.validate_int_range(parsed_args, 'local_as',
MIN_AS_NUM, MAX_AS_NUM)
def add_common_arguments(parser):
utils.add_boolean_argument(
parser, '--advertise-floating-ip-host-routes',
help=_('Whether to enable or disable the advertisement '
'of floating-ip host routes by the BGP speaker. '
'By default floating ip host routes will be '
'advertised by the BGP speaker.'))
utils.add_boolean_argument(
parser, '--advertise-tenant-networks',
help=_('Whether to enable or disable the advertisement '
'of tenant network routes by the BGP speaker. '
'By default tenant network routes will be '
'advertised by the BGP speaker.'))
def args2body_common_arguments(body, parsed_args):
neutronv20.update_dict(parsed_args, body,
['name',
'advertise_floating_ip_host_routes',
'advertise_tenant_networks'])
class ListSpeakers(neutronv20.ListCommand):
"""List BGP speakers."""
resource = 'bgp_speaker'
list_columns = ['id', 'name', 'local_as', 'ip_version']
pagination_support = True
sorting_support = True
class ShowSpeaker(neutronv20.ShowCommand):
"""Show information of a given BGP speaker."""
resource = 'bgp_speaker'
class CreateSpeaker(neutronv20.CreateCommand):
"""Create a BGP Speaker."""
resource = 'bgp_speaker'
def add_known_arguments(self, parser):
parser.add_argument(
'name',
metavar='NAME',
help=_('Name of the BGP speaker to create.'))
parser.add_argument(
'--local-as',
metavar='LOCAL_AS',
required=True,
help=_('Local AS number. (Integer in [%(min_val)s, %(max_val)s] '
'is allowed.)') % {'min_val': MIN_AS_NUM,
'max_val': MAX_AS_NUM})
parser.add_argument(
'--ip-version',
type=int, choices=[4, 6],
default=4,
help=_('IP version for the BGP speaker (default is 4).'))
add_common_arguments(parser)
def args2body(self, parsed_args):
body = {}
validate_speaker_attributes(parsed_args)
body['local_as'] = parsed_args.local_as
body['ip_version'] = parsed_args.ip_version
args2body_common_arguments(body, parsed_args)
return {self.resource: body}
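# Illustrative example (not part of the original module; the speaker name and AS number are
# made up): with the arguments defined above, a CLI invocation along the lines of
#     neutron bgp-speaker-create --local-as 200 --ip-version 4 speaker-1
# leads args2body() to build a request body shaped roughly like
#     {'bgp_speaker': {'name': 'speaker-1', 'local_as': '200', 'ip_version': 4}}
# where local_as arrives as a string from argparse and is range-checked against
# MIN_AS_NUM/MAX_AS_NUM by validate_speaker_attributes().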
class UpdateSpeaker(neutronv20.UpdateCommand):
"""Update BGP Speaker's information."""
resource = 'bgp_speaker'
def add_known_arguments(self, parser):
parser.add_argument(
'--name',
help=_('Name of the BGP speaker to update.'))
add_common_arguments(parser)
def args2body(self, parsed_args):
body = {}
args2body_common_arguments(body, parsed_args)
return {self.resource: body}
class DeleteSpeaker(neutronv20.DeleteCommand):
"""Delete a BGP speaker."""
resource = 'bgp_speaker'
class AddPeerToSpeaker(neutronv20.NeutronCommand):
"""Add a peer to the BGP speaker."""
def get_parser(self, prog_name):
parser = super(AddPeerToSpeaker, self).get_parser(prog_name)
parser.add_argument(
'bgp_speaker',
metavar='BGP_SPEAKER',
help=_('ID or name of the BGP speaker.'))
parser.add_argument(
'bgp_peer',
metavar='BGP_PEER',
help=_('ID or name of the BGP peer to add.'))
return parser
def take_action(self, parsed_args):
neutron_client = self.get_client()
_speaker_id = get_bgp_speaker_id(neutron_client,
parsed_args.bgp_speaker)
_peer_id = bgp_peer.get_bgp_peer_id(neutron_client,
parsed_args.bgp_peer)
neutron_client.add_peer_to_bgp_speaker(_speaker_id,
{'bgp_peer_id': _peer_id})
print(_('Added BGP peer %(peer)s to BGP speaker %(speaker)s.') %
{'peer': parsed_args.bgp_peer,
'speaker': parsed_args.bgp_speaker},
file=self.app.stdout)
class RemovePeerFromSpeaker(neutronv20.NeutronCommand):
"""Remove a peer from the BGP speaker."""
def get_parser(self, prog_name):
parser = super(RemovePeerFromSpeaker, self).get_parser(prog_name)
parser.add_argument(
'bgp_speaker',
metavar='BGP_SPEAKER',
help=_('ID or name of the BGP speaker.'))
parser.add_argument(
'bgp_peer',
metavar='BGP_PEER',
help=_('ID or name of the BGP peer to remove.'))
return parser
def take_action(self, parsed_args):
neutron_client = self.get_client()
_speaker_id = get_bgp_speaker_id(neutron_client,
parsed_args.bgp_speaker)
_peer_id = bgp_peer.get_bgp_peer_id(neutron_client,
parsed_args.bgp_peer)
neutron_client.remove_peer_from_bgp_speaker(_speaker_id,
{'bgp_peer_id': _peer_id})
print(_('Removed BGP peer %(peer)s from BGP speaker %(speaker)s.') %
{'peer': parsed_args.bgp_peer,
'speaker': parsed_args.bgp_speaker},
file=self.app.stdout)
class AddNetworkToSpeaker(neutronv20.NeutronCommand):
"""Add a network to the BGP speaker."""
def get_parser(self, prog_name):
parser = super(AddNetworkToSpeaker, self).get_parser(prog_name)
parser.add_argument(
'bgp_speaker',
metavar='BGP_SPEAKER',
help=_('ID or name of the BGP speaker.'))
parser.add_argument(
'network',
metavar='NETWORK',
help=_('ID or name of the network to add.'))
return parser
def take_action(self, parsed_args):
neutron_client = self.get_client()
_speaker_id = get_bgp_speaker_id(neutron_client,
parsed_args.bgp_speaker)
_net_id = get_network_id(neutron_client,
parsed_args.network)
neutron_client.add_network_to_bgp_speaker(_speaker_id,
{'network_id': _net_id})
print(_('Added network %(net)s to BGP speaker %(speaker)s.') %
{'net': parsed_args.network, 'speaker': parsed_args.bgp_speaker},
file=self.app.stdout)
class RemoveNetworkFromSpeaker(neutronv20.NeutronCommand):
"""Remove a network from the BGP speaker."""
def get_parser(self, prog_name):
parser = super(RemoveNetworkFromSpeaker, self).get_parser(prog_name)
parser.add_argument(
'bgp_speaker',
metavar='BGP_SPEAKER',
help=_('ID or name of the BGP speaker.'))
parser.add_argument(
'network',
metavar='NETWORK',
help=_('ID or name of the network to remove.'))
return parser
def take_action(self, parsed_args):
neutron_client = self.get_client()
_speaker_id = get_bgp_speaker_id(neutron_client,
parsed_args.bgp_speaker)
_net_id = get_network_id(neutron_client,
parsed_args.network)
neutron_client.remove_network_from_bgp_speaker(_speaker_id,
{'network_id': _net_id})
print(_('Removed network %(net)s from BGP speaker %(speaker)s.') %
{'net': parsed_args.network, 'speaker': parsed_args.bgp_speaker},
file=self.app.stdout)
class ListRoutesAdvertisedBySpeaker(neutronv20.ListCommand):
"""List routes advertised by a given BGP speaker."""
list_columns = ['id', 'destination', 'next_hop']
resource = 'advertised_route'
pagination_support = True
sorting_support = True
def get_parser(self, prog_name):
parser = super(ListRoutesAdvertisedBySpeaker,
self).get_parser(prog_name)
parser.add_argument(
'bgp_speaker',
metavar='BGP_SPEAKER',
help=_('ID or name of the BGP speaker.'))
return parser
def call_server(self, neutron_client, search_opts, parsed_args):
_speaker_id = get_bgp_speaker_id(neutron_client,
parsed_args.bgp_speaker)
data = neutron_client.list_route_advertised_from_bgp_speaker(
_speaker_id, **search_opts)
return data
|
the-stack_0_24277 | #!/usr/bin/env python3
""" Find the day with the highest average temperature.
Write a program that takes a filename on the command line and processes the
CSV contents. The contents will be a CSV file with a month of weather data,
one day per line.
Determine which day had the highest average temperature where the average
temperature is the average of the day's high and low temperatures. This is
not normally how average temperature is computed, but it will work for our
demonstration.
The first line of the CSV file will be column headers:
Day,MxT,MnT,AvT,AvDP,1HrP TPcn,PDir,AvSp,Dir,MxS,SkyC,MxR,Mn,R AvSLP
The day number, max temperature, and min temperature are the first three
columns.
Write unit tests with Pytest to test your program.
"""
import csv_parser
def get_name_and_avg(day_stats):
day_number = int(day_stats["Day"])
avg = (int(day_stats["MxT"]) + int(day_stats["MnT"])) / 2
return day_number, avg
def get_max_avg(filename):
with open(filename, "r", newline="") as csv_file:
return max(
csv_parser.get_next_result(csv_file, get_name_and_avg),
key=lambda item: item[1],
)
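# The csv_parser module imported above is not shown in this file. A minimal sketch of the
# helper that get_max_avg() relies on could look like the commented code below (an assumption
# about its behaviour: it applies `func` to every csv.DictReader row and yields the results),
# together with a small pytest-style check of get_name_and_avg, as the module docstring asks for.
#
#     import csv
#
#     def get_next_result(csv_file, func):
#         for row in csv.DictReader(csv_file):
#             yield func(row)
#
#     def test_get_name_and_avg():
#         assert get_name_and_avg({"Day": "1", "MxT": "88", "MnT": "59"}) == (1, 73.5)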
|
the-stack_0_24281 | import json
import logging
import sys
if sys.version_info.major == 2:
import urllib2
else:
import urllib.request as urllib2
class Nominatim(object):
"""Class for querying text adress
http://wiki.openstreetmap.org/wiki/Nominatim#Search"""
def __init__(self):
self.url = 'http://nominatim.openstreetmap.org/search?format=json'
self.logger = logging.getLogger(__name__)
def query(self, query, acceptlanguage='', limit=None):
"""Method takes query string, acceptlanguage string (rfc2616 language
code), limit integer (limits number of results)."""
query = query.replace(' ', '+')
url = self.url
url += '&q=' + query
if acceptlanguage:
url += '&accept-language=' + acceptlanguage
if limit:
url += '&limit=' + str(limit)
self.logger.debug('url:\n' + url)
try:
response = urllib2.urlopen(url)
if response.code == 200:
result = json.loads(response.read().decode('utf-8'))
return result
else:
return None
except urllib2.URLError:
self.logger.info('Server connection problem')
return None
class NominatimReverse(object):
"""Class for querying gps coordinates
http://wiki.openstreetmap.org/wiki/Nominatim#Reverse_Geocoding_.2F_Address_lookup"""
def __init__(self):
self.url = 'http://nominatim.openstreetmap.org/reverse?format=json'
self.logger = logging.getLogger(__name__)
def query(self, lat=None, lon=None, acceptlanguage='', zoom=18):
"""Method takes lat and lon for GPS coordinates, acceptlanguage string,
zoom integer (between from 0 to 18). """
url = self.url
if lat and lon:
url += '&lat=' + str(lat) + '&lon=' + str(lon)
if acceptlanguage:
url += '&accept-language=' + acceptlanguage
if zoom < 0 or zoom > 18:
            raise Exception('zoom must be between 0 and 18')
url +='&zoom=' + str(zoom)
self.logger.debug('url:\n' + url)
try:
response = urllib2.urlopen(url)
if response.code == 200:
result = json.loads(response.read().decode('utf-8'))
return result
else:
return None
except urllib2.URLError:
self.logger.info('Server connection problem')
return None
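# Example usage (an illustrative sketch, not part of the original module; the query string,
# coordinates and the JSON fields shown depend on what the live Nominatim service returns):
#
#     nom = Nominatim()
#     results = nom.query('Kaiserslautern, Germany', limit=1)
#     if results:
#         print(results[0]['lat'], results[0]['lon'])
#
#     rev = NominatimReverse()
#     place = rev.query(lat=49.44, lon=7.76, zoom=10)
#     if place:
#         print(place.get('display_name'))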
|
the-stack_0_24283 | #!/usr/bin/env python
import argparse
import logging.config
import os
import time
from collections import defaultdict
from pathlib import Path
from typing import (
TYPE_CHECKING,
Any,
DefaultDict,
Dict,
List,
Literal,
Optional,
Tuple,
Union,
cast,
)
import gevent
from rotkehlchen.accounting.accountant import Accountant
from rotkehlchen.accounting.structures.balance import Balance, BalanceType
from rotkehlchen.api.websockets.notifier import RotkiNotifier
from rotkehlchen.api.websockets.typedefs import WSMessageType
from rotkehlchen.assets.asset import Asset
from rotkehlchen.balances.manual import (
account_for_manually_tracked_asset_balances,
get_manually_tracked_balances,
)
from rotkehlchen.chain.avalanche.manager import AvalancheManager
from rotkehlchen.chain.ethereum.accounting.aggregator import EVMAccountingAggregator
from rotkehlchen.chain.ethereum.decoding import EVMTransactionDecoder
from rotkehlchen.chain.ethereum.manager import (
ETHEREUM_NODES_TO_CONNECT_AT_START,
EthereumManager,
NodeName,
)
from rotkehlchen.chain.ethereum.oracles.saddle import SaddleOracle
from rotkehlchen.chain.ethereum.oracles.uniswap import UniswapV2Oracle, UniswapV3Oracle
from rotkehlchen.chain.ethereum.transactions import EthTransactions
from rotkehlchen.chain.manager import BlockchainBalancesUpdate, ChainManager
from rotkehlchen.chain.substrate.manager import SubstrateManager
from rotkehlchen.chain.substrate.types import SubstrateChain
from rotkehlchen.chain.substrate.utils import (
KUSAMA_NODES_TO_CONNECT_AT_START,
POLKADOT_NODES_TO_CONNECT_AT_START,
)
from rotkehlchen.config import default_data_directory
from rotkehlchen.constants.misc import ZERO
from rotkehlchen.data.importer import DataImporter
from rotkehlchen.data_handler import DataHandler
from rotkehlchen.data_migrations.manager import DataMigrationManager
from rotkehlchen.db.settings import DBSettings, ModifiableDBSettings
from rotkehlchen.errors.api import PremiumAuthenticationError
from rotkehlchen.errors.misc import EthSyncError, InputError, RemoteError, SystemPermissionError
from rotkehlchen.exchanges.manager import ExchangeManager
from rotkehlchen.externalapis.beaconchain import BeaconChain
from rotkehlchen.externalapis.coingecko import Coingecko
from rotkehlchen.externalapis.covalent import Covalent, chains_id
from rotkehlchen.externalapis.cryptocompare import Cryptocompare
from rotkehlchen.externalapis.etherscan import Etherscan
from rotkehlchen.fval import FVal
from rotkehlchen.globaldb import GlobalDBHandler
from rotkehlchen.globaldb.updates import AssetsUpdater
from rotkehlchen.greenlets import GreenletManager
from rotkehlchen.history.events import EventsHistorian
from rotkehlchen.history.price import PriceHistorian
from rotkehlchen.history.types import HistoricalPriceOracle
from rotkehlchen.icons import IconManager
from rotkehlchen.inquirer import Inquirer
from rotkehlchen.logging import RotkehlchenLogsAdapter, configure_logging
from rotkehlchen.premium.premium import Premium, PremiumCredentials, premium_create_and_verify
from rotkehlchen.premium.sync import PremiumSyncManager
from rotkehlchen.tasks.manager import DEFAULT_MAX_TASKS_NUM, TaskManager
from rotkehlchen.types import (
ApiKey,
ApiSecret,
BlockchainAccountData,
ChecksumEthAddress,
ListOfBlockchainAddresses,
Location,
SupportedBlockchain,
Timestamp,
)
from rotkehlchen.usage_analytics import maybe_submit_usage_analytics
from rotkehlchen.user_messages import MessagesAggregator
from rotkehlchen.utils.misc import combine_dicts
if TYPE_CHECKING:
from rotkehlchen.chain.bitcoin.xpub import XpubData
from rotkehlchen.exchanges.kraken import KrakenAccountType
logger = logging.getLogger(__name__)
log = RotkehlchenLogsAdapter(logger)
MAIN_LOOP_SECS_DELAY = 10
ICONS_BATCH_SIZE = 3
ICONS_QUERY_SLEEP = 60
class Rotkehlchen():
def __init__(self, args: argparse.Namespace) -> None:
"""Initialize the Rotkehlchen object
This runs during backend initialization so it should be as light as possible.
May Raise:
- SystemPermissionError if the given data directory's permissions
are not correct.
"""
# Can also be None after unlock if premium credentials did not
# authenticate or premium server temporarily offline
self.premium: Optional[Premium] = None
self.user_is_logged_in: bool = False
configure_logging(args)
self.sleep_secs = args.sleep_secs
if args.data_dir is None:
self.data_dir = default_data_directory()
else:
self.data_dir = Path(args.data_dir)
self.data_dir.mkdir(parents=True, exist_ok=True)
if not os.access(self.data_dir, os.W_OK | os.R_OK):
raise SystemPermissionError(
f'The given data directory {self.data_dir} is not readable or writable',
)
self.main_loop_spawned = False
self.args = args
self.api_task_greenlets: List[gevent.Greenlet] = []
self.msg_aggregator = MessagesAggregator()
self.greenlet_manager = GreenletManager(msg_aggregator=self.msg_aggregator)
self.rotki_notifier = RotkiNotifier(greenlet_manager=self.greenlet_manager)
self.msg_aggregator.rotki_notifier = self.rotki_notifier
self.exchange_manager = ExchangeManager(msg_aggregator=self.msg_aggregator)
# Initialize the GlobalDBHandler singleton. Has to be initialized BEFORE asset resolver
GlobalDBHandler(data_dir=self.data_dir)
self.data = DataHandler(self.data_dir, self.msg_aggregator)
self.cryptocompare = Cryptocompare(data_directory=self.data_dir, database=None)
self.coingecko = Coingecko()
self.icon_manager = IconManager(data_dir=self.data_dir, coingecko=self.coingecko)
self.assets_updater = AssetsUpdater(self.msg_aggregator)
# Initialize the Inquirer singleton
Inquirer(
data_dir=self.data_dir,
cryptocompare=self.cryptocompare,
coingecko=self.coingecko,
)
self.task_manager: Optional[TaskManager] = None
self.shutdown_event = gevent.event.Event()
def reset_after_failed_account_creation_or_login(self) -> None:
"""If the account creation or login failed make sure that the rotki instance is clear
Tricky instances are when after either failed premium credentials or user refusal
to sync premium databases we relogged in
"""
self.cryptocompare.db = None
def unlock_user(
self,
user: str,
password: str,
create_new: bool,
sync_approval: Literal['yes', 'no', 'unknown'],
premium_credentials: Optional[PremiumCredentials],
initial_settings: Optional[ModifiableDBSettings] = None,
sync_database: bool = True,
) -> None:
"""Unlocks an existing user or creates a new one if `create_new` is True
May raise:
- PremiumAuthenticationError if the password can't unlock the database.
- AuthenticationError if premium_credentials are given and are invalid
or can't authenticate with the server
- DBUpgradeError if the rotki DB version is newer than the software or
there is a DB upgrade and there is an error.
- SystemPermissionError if the directory or DB file can not be accessed
"""
log.info(
'Unlocking user',
user=user,
create_new=create_new,
sync_approval=sync_approval,
sync_database=sync_database,
initial_settings=initial_settings,
)
# unlock or create the DB
self.password = password
self.user_directory = self.data.unlock(user, password, create_new, initial_settings)
# Run the DB integrity check due to https://github.com/rotki/rotki/issues/3010
        # TODO: Hopefully once 3010 is handled this can go away
self.greenlet_manager.spawn_and_track(
after_seconds=None,
task_name='user DB data integrity check',
exception_is_error=False,
method=self.data.db.ensure_data_integrity,
)
self.data_importer = DataImporter(db=self.data.db)
self.last_data_upload_ts = self.data.db.get_last_data_upload_ts()
self.premium_sync_manager = PremiumSyncManager(data=self.data, password=password)
# set the DB in the external services instances that need it
self.cryptocompare.set_database(self.data.db)
# Anything that was set above here has to be cleaned in case of failure in the next step
# by reset_after_failed_account_creation_or_login()
try:
self.premium = self.premium_sync_manager.try_premium_at_start(
given_premium_credentials=premium_credentials,
username=user,
create_new=create_new,
sync_approval=sync_approval,
sync_database=sync_database,
)
except PremiumAuthenticationError:
# Reraise it only if this is during the creation of a new account where
# the premium credentials were given by the user
if create_new:
raise
self.msg_aggregator.add_warning(
'Could not authenticate the rotki premium API keys found in the DB.'
' Has your subscription expired?',
)
            # else let's just continue. The user signed in successfully, but they just
            # have unauthenticatable/invalid premium credentials remaining in their DB
settings = self.get_settings()
self.greenlet_manager.spawn_and_track(
after_seconds=None,
task_name='submit_usage_analytics',
exception_is_error=False,
method=maybe_submit_usage_analytics,
data_dir=self.data_dir,
should_submit=settings.submit_usage_analytics,
)
self.etherscan = Etherscan(database=self.data.db, msg_aggregator=self.msg_aggregator)
self.beaconchain = BeaconChain(database=self.data.db, msg_aggregator=self.msg_aggregator)
eth_rpc_endpoint = settings.eth_rpc_endpoint
# Initialize the price historian singleton
PriceHistorian(
data_directory=self.data_dir,
cryptocompare=self.cryptocompare,
coingecko=self.coingecko,
)
PriceHistorian().set_oracles_order(settings.historical_price_oracles)
exchange_credentials = self.data.db.get_exchange_credentials()
self.exchange_manager.initialize_exchanges(
exchange_credentials=exchange_credentials,
database=self.data.db,
)
# Initialize blockchain querying modules
ethereum_manager = EthereumManager(
ethrpc_endpoint=eth_rpc_endpoint,
etherscan=self.etherscan,
msg_aggregator=self.msg_aggregator,
greenlet_manager=self.greenlet_manager,
connect_at_start=ETHEREUM_NODES_TO_CONNECT_AT_START,
)
kusama_manager = SubstrateManager(
chain=SubstrateChain.KUSAMA,
msg_aggregator=self.msg_aggregator,
greenlet_manager=self.greenlet_manager,
connect_at_start=KUSAMA_NODES_TO_CONNECT_AT_START,
connect_on_startup=self._connect_ksm_manager_on_startup(),
own_rpc_endpoint=settings.ksm_rpc_endpoint,
)
polkadot_manager = SubstrateManager(
chain=SubstrateChain.POLKADOT,
msg_aggregator=self.msg_aggregator,
greenlet_manager=self.greenlet_manager,
connect_at_start=POLKADOT_NODES_TO_CONNECT_AT_START,
connect_on_startup=self._connect_dot_manager_on_startup(),
own_rpc_endpoint=settings.dot_rpc_endpoint,
)
self.eth_transactions = EthTransactions(ethereum=ethereum_manager, database=self.data.db)
self.covalent_avalanche = Covalent(
database=self.data.db,
msg_aggregator=self.msg_aggregator,
chain_id=chains_id['avalanche'],
)
avalanche_manager = AvalancheManager(
avaxrpc_endpoint="https://api.avax.network/ext/bc/C/rpc",
covalent=self.covalent_avalanche,
msg_aggregator=self.msg_aggregator,
)
Inquirer().inject_ethereum(ethereum_manager)
uniswap_v2_oracle = UniswapV2Oracle(ethereum_manager)
uniswap_v3_oracle = UniswapV3Oracle(ethereum_manager)
saddle_oracle = SaddleOracle(ethereum_manager)
Inquirer().add_defi_oracles(
uniswap_v2=uniswap_v2_oracle,
uniswap_v3=uniswap_v3_oracle,
saddle=saddle_oracle,
)
Inquirer().set_oracles_order(settings.current_price_oracles)
self.chain_manager = ChainManager(
blockchain_accounts=self.data.db.get_blockchain_accounts(),
ethereum_manager=ethereum_manager,
kusama_manager=kusama_manager,
polkadot_manager=polkadot_manager,
avalanche_manager=avalanche_manager,
msg_aggregator=self.msg_aggregator,
database=self.data.db,
greenlet_manager=self.greenlet_manager,
premium=self.premium,
eth_modules=settings.active_modules,
data_directory=self.data_dir,
beaconchain=self.beaconchain,
btc_derivation_gap_limit=settings.btc_derivation_gap_limit,
)
self.evm_tx_decoder = EVMTransactionDecoder(
database=self.data.db,
ethereum_manager=ethereum_manager,
eth_transactions=self.eth_transactions,
msg_aggregator=self.msg_aggregator,
)
self.evm_accounting_aggregator = EVMAccountingAggregator(
ethereum_manager=ethereum_manager,
msg_aggregator=self.msg_aggregator,
)
self.accountant = Accountant(
db=self.data.db,
msg_aggregator=self.msg_aggregator,
evm_accounting_aggregator=self.evm_accounting_aggregator,
premium=self.premium,
)
self.events_historian = EventsHistorian(
user_directory=self.user_directory,
db=self.data.db,
msg_aggregator=self.msg_aggregator,
exchange_manager=self.exchange_manager,
chain_manager=self.chain_manager,
evm_tx_decoder=self.evm_tx_decoder,
eth_transactions=self.eth_transactions,
)
self.task_manager = TaskManager(
max_tasks_num=DEFAULT_MAX_TASKS_NUM,
greenlet_manager=self.greenlet_manager,
api_task_greenlets=self.api_task_greenlets,
database=self.data.db,
cryptocompare=self.cryptocompare,
premium_sync_manager=self.premium_sync_manager,
chain_manager=self.chain_manager,
exchange_manager=self.exchange_manager,
eth_transactions=self.eth_transactions,
evm_tx_decoder=self.evm_tx_decoder,
deactivate_premium=self.deactivate_premium_status,
query_balances=self.query_balances,
)
DataMigrationManager(self).maybe_migrate_data()
self.greenlet_manager.spawn_and_track(
after_seconds=5,
task_name='periodically_query_icons_until_all_cached',
exception_is_error=False,
method=self.icon_manager.periodically_query_icons_until_all_cached,
batch_size=ICONS_BATCH_SIZE,
sleep_time_secs=ICONS_QUERY_SLEEP,
)
self.user_is_logged_in = True
log.debug('User unlocking complete')
def _logout(self) -> None:
if not self.user_is_logged_in:
return
user = self.data.username
log.info(
'Logging out user',
user=user,
)
self.deactivate_premium_status()
self.greenlet_manager.clear()
del self.chain_manager
self.exchange_manager.delete_all_exchanges()
del self.accountant
del self.events_historian
del self.data_importer
self.data.logout()
self.password = ''
self.cryptocompare.unset_database()
# Make sure no messages leak to other user sessions
self.msg_aggregator.consume_errors()
self.msg_aggregator.consume_warnings()
self.task_manager = None
self.user_is_logged_in = False
log.info(
'User successfully logged out',
user=user,
)
def logout(self) -> None:
if self.task_manager is None: # no user logged in?
return
with self.task_manager.schedule_lock:
self._logout()
def set_premium_credentials(self, credentials: PremiumCredentials) -> None:
"""
Sets the premium credentials for rotki
Raises PremiumAuthenticationError if the given key is rejected by the Rotkehlchen server
"""
log.info('Setting new premium credentials')
if self.premium is not None:
self.premium.set_credentials(credentials)
else:
self.premium = premium_create_and_verify(credentials)
self.premium_sync_manager.premium = self.premium
self.accountant.activate_premium_status(self.premium)
self.chain_manager.activate_premium_status(self.premium)
self.data.db.set_rotkehlchen_premium(credentials)
def deactivate_premium_status(self) -> None:
"""Deactivate premium in the current session"""
self.premium = None
self.premium_sync_manager.premium = None
self.accountant.deactivate_premium_status()
self.chain_manager.deactivate_premium_status()
def delete_premium_credentials(self) -> Tuple[bool, str]:
"""Deletes the premium credentials for rotki"""
msg = ''
success = self.data.db.del_rotkehlchen_premium()
if success is False:
msg = 'The database was unable to delete the Premium keys for the logged-in user'
self.deactivate_premium_status()
return success, msg
def start(self) -> gevent.Greenlet:
assert not self.main_loop_spawned, 'Tried to spawn the main loop twice'
greenlet = gevent.spawn(self.main_loop)
self.main_loop_spawned = True
return greenlet
def main_loop(self) -> None:
"""rotki main loop that fires often and runs the task manager's scheduler"""
while self.shutdown_event.wait(timeout=MAIN_LOOP_SECS_DELAY) is not True:
if self.task_manager is not None:
self.task_manager.schedule()
def get_blockchain_account_data(
self,
blockchain: SupportedBlockchain,
) -> Union[List[BlockchainAccountData], Dict[str, Any]]:
account_data = self.data.db.get_blockchain_account_data(blockchain)
if blockchain != SupportedBlockchain.BITCOIN:
return account_data
xpub_data = self.data.db.get_bitcoin_xpub_data()
addresses_to_account_data = {x.address: x for x in account_data}
address_to_xpub_mappings = self.data.db.get_addresses_to_xpub_mapping(
list(addresses_to_account_data.keys()), # type: ignore
)
xpub_mappings: Dict['XpubData', List[BlockchainAccountData]] = {}
for address, xpub_entry in address_to_xpub_mappings.items():
if xpub_entry not in xpub_mappings:
xpub_mappings[xpub_entry] = []
xpub_mappings[xpub_entry].append(addresses_to_account_data[address])
data: Dict[str, Any] = {'standalone': [], 'xpubs': []}
# Add xpub data
for xpub_entry in xpub_data:
data_entry = xpub_entry.serialize()
addresses = xpub_mappings.get(xpub_entry, None)
data_entry['addresses'] = addresses if addresses and len(addresses) != 0 else None
data['xpubs'].append(data_entry)
# Add standalone addresses
for account in account_data:
if account.address not in address_to_xpub_mappings:
data['standalone'].append(account)
return data
def add_blockchain_accounts(
self,
blockchain: SupportedBlockchain,
account_data: List[BlockchainAccountData],
) -> BlockchainBalancesUpdate:
"""Adds new blockchain accounts
Adds the accounts to the blockchain instance and queries them to get the
updated balances. Also adds them in the DB
May raise:
- EthSyncError from modify_blockchain_account
- InputError if the given accounts list is empty.
- TagConstraintError if any of the given account data contain unknown tags.
- RemoteError if an external service such as Etherscan is queried and
there is a problem with its query.
"""
self.data.db.ensure_tags_exist(
given_data=account_data,
action='adding',
data_type='blockchain accounts',
)
address_type = blockchain.get_address_type()
updated_balances = self.chain_manager.add_blockchain_accounts(
blockchain=blockchain,
accounts=[address_type(entry.address) for entry in account_data],
)
self.data.db.add_blockchain_accounts(
blockchain=blockchain,
account_data=account_data,
)
return updated_balances
def edit_blockchain_accounts(
self,
blockchain: SupportedBlockchain,
account_data: List[BlockchainAccountData],
) -> None:
"""Edits blockchain accounts
Edits blockchain account data for the given accounts
May raise:
- InputError if the given accounts list is empty or if
any of the accounts to edit do not exist.
- TagConstraintError if any of the given account data contain unknown tags.
"""
# First check for validity of account data addresses
if len(account_data) == 0:
raise InputError('Empty list of blockchain account data to edit was given')
accounts = [x.address for x in account_data]
unknown_accounts = set(accounts).difference(self.chain_manager.accounts.get(blockchain))
if len(unknown_accounts) != 0:
raise InputError(
f'Tried to edit unknown {blockchain.value} '
f'accounts {",".join(unknown_accounts)}',
)
self.data.db.ensure_tags_exist(
given_data=account_data,
action='editing',
data_type='blockchain accounts',
)
# Finally edit the accounts
self.data.db.edit_blockchain_accounts(
blockchain=blockchain,
account_data=account_data,
)
def remove_blockchain_accounts(
self,
blockchain: SupportedBlockchain,
accounts: ListOfBlockchainAddresses,
) -> BlockchainBalancesUpdate:
"""Removes blockchain accounts
Removes the accounts from the blockchain instance and queries them to get
the updated balances. Also removes them from the DB
May raise:
- RemoteError if an external service such as Etherscan is queried and
there is a problem with its query.
- InputError if a non-existing account was given to remove
"""
balances_update = self.chain_manager.remove_blockchain_accounts(
blockchain=blockchain,
accounts=accounts,
)
eth_addresses: List[ChecksumEthAddress] = cast(List[ChecksumEthAddress], accounts) if blockchain == SupportedBlockchain.ETHEREUM else [] # noqa: E501
with self.eth_transactions.wait_until_no_query_for(eth_addresses):
self.data.db.remove_blockchain_accounts(blockchain, accounts)
return balances_update
def get_history_query_status(self) -> Dict[str, str]:
if self.events_historian.progress < FVal('100'):
processing_state = self.events_historian.processing_state_name
progress = self.events_historian.progress / 2
elif self.accountant.first_processed_timestamp == -1:
processing_state = 'Processing all retrieved historical events'
progress = FVal(50)
else:
processing_state = 'Processing all retrieved historical events'
# start_ts is min of the query start or the first action timestamp since action
# processing can start well before query start to calculate cost basis
start_ts = min(
self.accountant.query_start_ts,
self.accountant.first_processed_timestamp,
)
diff = self.accountant.query_end_ts - start_ts
progress = 50 + 100 * (
FVal(self.accountant.currently_processing_timestamp - start_ts) /
FVal(diff) / 2)
return {'processing_state': str(processing_state), 'total_progress': str(progress)}
def process_history(
self,
start_ts: Timestamp,
end_ts: Timestamp,
) -> Tuple[int, str]:
error_or_empty, events = self.events_historian.get_history(
start_ts=start_ts,
end_ts=end_ts,
has_premium=self.premium is not None,
)
report_id = self.accountant.process_history(
start_ts=start_ts,
end_ts=end_ts,
events=events,
)
return report_id, error_or_empty
def query_balances(
self,
requested_save_data: bool = False,
save_despite_errors: bool = False,
            timestamp: Optional[Timestamp] = None,
ignore_cache: bool = False,
) -> Dict[str, Any]:
"""Query all balances rotkehlchen can see.
If requested_save_data is True then the data are always saved in the DB,
if it is False then data are saved if self.data.should_save_balances()
is True.
If save_despite_errors is True then even if there is any error the snapshot
will be saved.
If timestamp is None then the current timestamp is used.
If a timestamp is given then that is the time that the balances are going
to be saved in the DB
If ignore_cache is True then all underlying calls that have a cache ignore it
Returns a dictionary with the queried balances.
"""
log.info(
'query_balances called',
requested_save_data=requested_save_data,
save_despite_errors=save_despite_errors,
)
balances: Dict[str, Dict[Asset, Balance]] = {}
problem_free = True
for exchange in self.exchange_manager.iterate_exchanges():
exchange_balances, error_msg = exchange.query_balances(ignore_cache=ignore_cache)
# If we got an error, disregard that exchange but make sure we don't save data
if not isinstance(exchange_balances, dict):
problem_free = False
self.msg_aggregator.add_message(
message_type=WSMessageType.BALANCE_SNAPSHOT_ERROR,
data={'location': exchange.name, 'error': error_msg},
)
else:
location_str = str(exchange.location)
if location_str not in balances:
balances[location_str] = exchange_balances
else: # multiple exchange of same type. Combine balances
balances[location_str] = combine_dicts(
balances[location_str],
exchange_balances,
)
liabilities: Dict[Asset, Balance]
try:
blockchain_result = self.chain_manager.query_balances(
blockchain=None,
force_token_detection=ignore_cache,
ignore_cache=ignore_cache,
)
if len(blockchain_result.totals.assets) != 0:
balances[str(Location.BLOCKCHAIN)] = blockchain_result.totals.assets
liabilities = blockchain_result.totals.liabilities
except (RemoteError, EthSyncError) as e:
problem_free = False
liabilities = {}
log.error(f'Querying blockchain balances failed due to: {str(e)}')
self.msg_aggregator.add_message(
message_type=WSMessageType.BALANCE_SNAPSHOT_ERROR,
data={'location': 'blockchain balances query', 'error': str(e)},
)
manually_tracked_liabilities = get_manually_tracked_balances(
db=self.data.db,
balance_type=BalanceType.LIABILITY,
)
manual_liabilities_as_dict: DefaultDict[Asset, Balance] = defaultdict(Balance)
for manual_liability in manually_tracked_liabilities:
manual_liabilities_as_dict[manual_liability.asset] += manual_liability.value
liabilities = combine_dicts(liabilities, manual_liabilities_as_dict)
# retrieve loopring balances if module is activated
if self.chain_manager.get_module('loopring'):
try:
loopring_balances = self.chain_manager.get_loopring_balances()
except RemoteError as e:
problem_free = False
self.msg_aggregator.add_message(
message_type=WSMessageType.BALANCE_SNAPSHOT_ERROR,
data={'location': 'loopring', 'error': str(e)},
)
else:
if len(loopring_balances) != 0:
balances[str(Location.LOOPRING)] = loopring_balances
# retrieve nft balances if module is activated
nfts = self.chain_manager.get_module('nfts')
if nfts is not None:
try:
nft_mapping = nfts.get_balances(
addresses=self.chain_manager.queried_addresses_for_module('nfts'),
return_zero_values=False,
ignore_cache=False,
)
except RemoteError as e:
log.error(
f'At balance snapshot NFT balances query failed due to {str(e)}. Error '
f'is ignored and balance snapshot will still be saved.',
)
else:
if len(nft_mapping) != 0:
if str(Location.BLOCKCHAIN) not in balances:
balances[str(Location.BLOCKCHAIN)] = {}
for _, nft_balances in nft_mapping.items():
for balance_entry in nft_balances:
balances[str(Location.BLOCKCHAIN)][Asset(
balance_entry['id'])] = Balance(
amount=FVal(1),
usd_value=balance_entry['usd_price'],
)
balances = account_for_manually_tracked_asset_balances(db=self.data.db, balances=balances)
# Calculate usd totals
assets_total_balance: DefaultDict[Asset, Balance] = defaultdict(Balance)
total_usd_per_location: Dict[str, FVal] = {}
for location, asset_balance in balances.items():
total_usd_per_location[location] = ZERO
for asset, balance in asset_balance.items():
assets_total_balance[asset] += balance
total_usd_per_location[location] += balance.usd_value
net_usd = sum((balance.usd_value for _, balance in assets_total_balance.items()), ZERO)
liabilities_total_usd = sum((liability.usd_value for _, liability in liabilities.items()), ZERO) # noqa: E501
net_usd -= liabilities_total_usd
# Calculate location stats
location_stats: Dict[str, Any] = {}
for location, total_usd in total_usd_per_location.items():
if location == str(Location.BLOCKCHAIN):
total_usd -= liabilities_total_usd
percentage = (total_usd / net_usd).to_percentage() if net_usd != ZERO else '0%'
location_stats[location] = {
'usd_value': total_usd,
'percentage_of_net_value': percentage,
}
# Calculate 'percentage_of_net_value' per asset
assets_total_balance_as_dict: Dict[Asset, Dict[str, Any]] = {
asset: balance.to_dict() for asset, balance in assets_total_balance.items()
}
liabilities_as_dict: Dict[Asset, Dict[str, Any]] = {
asset: balance.to_dict() for asset, balance in liabilities.items()
}
for asset, balance_dict in assets_total_balance_as_dict.items():
percentage = (balance_dict['usd_value'] / net_usd).to_percentage() if net_usd != ZERO else '0%' # noqa: E501
assets_total_balance_as_dict[asset]['percentage_of_net_value'] = percentage
for asset, balance_dict in liabilities_as_dict.items():
percentage = (balance_dict['usd_value'] / net_usd).to_percentage() if net_usd != ZERO else '0%' # noqa: E501
liabilities_as_dict[asset]['percentage_of_net_value'] = percentage
# Compose balances response
result_dict = {
'assets': assets_total_balance_as_dict,
'liabilities': liabilities_as_dict,
'location': location_stats,
'net_usd': net_usd,
}
allowed_to_save = requested_save_data or self.data.db.should_save_balances()
if (problem_free or save_despite_errors) and allowed_to_save:
if not timestamp:
timestamp = Timestamp(int(time.time()))
self.data.db.save_balances_data(data=result_dict, timestamp=timestamp)
log.debug('query_balances data saved')
else:
log.debug(
'query_balances data not saved',
allowed_to_save=allowed_to_save,
problem_free=problem_free,
save_despite_errors=save_despite_errors,
)
return result_dict
def set_settings(self, settings: ModifiableDBSettings) -> Tuple[bool, str]:
"""Tries to set new settings. Returns True in success or False with message if error"""
if settings.eth_rpc_endpoint is not None:
result, msg = self.chain_manager.set_eth_rpc_endpoint(settings.eth_rpc_endpoint)
if not result:
return False, msg
if settings.ksm_rpc_endpoint is not None:
result, msg = self.chain_manager.set_ksm_rpc_endpoint(settings.ksm_rpc_endpoint)
if not result:
return False, msg
if settings.dot_rpc_endpoint is not None:
result, msg = self.chain_manager.set_dot_rpc_endpoint(settings.dot_rpc_endpoint)
if not result:
return False, msg
if settings.btc_derivation_gap_limit is not None:
self.chain_manager.btc_derivation_gap_limit = settings.btc_derivation_gap_limit
if settings.current_price_oracles is not None:
Inquirer().set_oracles_order(settings.current_price_oracles)
if settings.historical_price_oracles is not None:
PriceHistorian().set_oracles_order(settings.historical_price_oracles)
if settings.active_modules is not None:
self.chain_manager.process_new_modules_list(settings.active_modules)
self.data.db.set_settings(settings)
return True, ''
def get_settings(self) -> DBSettings:
"""Returns the db settings with a check whether premium is active or not"""
db_settings = self.data.db.get_settings(have_premium=self.premium is not None)
return db_settings
def setup_exchange(
self,
name: str,
location: Location,
api_key: ApiKey,
api_secret: ApiSecret,
passphrase: Optional[str] = None,
kraken_account_type: Optional['KrakenAccountType'] = None,
PAIRS: Optional[List[str]] = None, # noqa: N803
ftx_subaccount: Optional[str] = None,
) -> Tuple[bool, str]:
"""
Setup a new exchange with an api key and an api secret and optionally a passphrase
"""
is_success, msg = self.exchange_manager.setup_exchange(
name=name,
location=location,
api_key=api_key,
api_secret=api_secret,
database=self.data.db,
passphrase=passphrase,
ftx_subaccount=ftx_subaccount,
PAIRS=PAIRS,
)
if is_success:
# Success, save the result in the DB
self.data.db.add_exchange(
name=name,
location=location,
api_key=api_key,
api_secret=api_secret,
passphrase=passphrase,
kraken_account_type=kraken_account_type,
PAIRS=PAIRS,
ftx_subaccount=ftx_subaccount,
)
return is_success, msg
def remove_exchange(self, name: str, location: Location) -> Tuple[bool, str]:
if self.exchange_manager.get_exchange(name=name, location=location) is None:
return False, f'{str(location)} exchange {name} is not registered'
self.exchange_manager.delete_exchange(name=name, location=location)
# Success, remove it also from the DB
self.data.db.remove_exchange(name=name, location=location)
if self.exchange_manager.connected_exchanges.get(location) is None:
# was last exchange of the location type. Delete used query ranges
self.data.db.delete_used_query_range_for_exchange(location)
return True, ''
def query_periodic_data(self) -> Dict[str, Union[bool, Timestamp]]:
"""Query for frequently changing data"""
result: Dict[str, Union[bool, Timestamp]] = {}
if self.user_is_logged_in:
result['last_balance_save'] = self.data.db.get_last_balance_save_time()
result['eth_node_connection'] = self.chain_manager.ethereum.web3_mapping.get(NodeName.OWN, None) is not None # noqa : E501
result['last_data_upload_ts'] = Timestamp(self.premium_sync_manager.last_data_upload_ts) # noqa : E501
return result
def shutdown(self) -> None:
self.logout()
self.shutdown_event.set()
def _connect_ksm_manager_on_startup(self) -> bool:
return bool(self.data.db.get_blockchain_accounts().ksm)
def _connect_dot_manager_on_startup(self) -> bool:
return bool(self.data.db.get_blockchain_accounts().dot)
def create_oracle_cache(
self,
oracle: HistoricalPriceOracle,
from_asset: Asset,
to_asset: Asset,
purge_old: bool,
) -> None:
"""Creates the cache of the given asset pair from the start of time
until now for the given oracle.
if purge_old is true then any old cache in memory and in a file is purged
May raise:
- RemoteError if there is a problem reaching the oracle
- UnsupportedAsset if any of the two assets is not supported by the oracle
"""
if oracle != HistoricalPriceOracle.CRYPTOCOMPARE:
return # only for cryptocompare for now
self.cryptocompare.create_cache(from_asset, to_asset, purge_old)
|
the-stack_0_24284 | from pyravendb.tests.test_base import TestBase
from datetime import datetime, timedelta
from pyravendb.tools.utils import Utils
import unittest
class Time(object):
def __init__(self, td, dt):
self.td = td
self.dt = dt
class Item(object):
def __init__(self, val):
self.val = val
class TestConversion(TestBase):
def setUp(self):
super(TestConversion, self).setUp()
with self.store.open_session() as session:
session.store(Time(Utils.timedelta_to_str(timedelta(days=20, minutes=23, seconds=59, milliseconds=254)),
Utils.datetime_to_string(datetime.now())), "times/3")
session.store(Time(Utils.timedelta_to_str(timedelta(minutes=23, seconds=59, milliseconds=254)),
Utils.datetime_to_string(datetime.now())), "times/4")
session.save_changes()
def tearDown(self):
super(TestConversion, self).tearDown()
self.delete_all_topology_files()
def test_before_store(self):
def update_item(session, doc_id, entity):
entity.val = 2
with self.store.open_session() as session:
session.events.before_store = update_item
session.store(Item(1), "item/1")
session.save_changes()
with self.store.open_session() as session:
time = session.load("item/1", object_type=Item)
self.assertEqual(2, time.val)
def test_load_timedelta_and_datetime(self):
with self.store.open_session() as session:
times = session.load("times/3", object_type=Time, nested_object_types={"td": timedelta, "dt": datetime})
self.assertTrue(isinstance(times.td, timedelta) and isinstance(times.dt, datetime))
def test_store_conversion(self):
with self.store.open_session() as session:
times = Time(timedelta(days=1, hours=1), datetime.now())
session.store(times)
key = session.advanced.get_document_id(times)
session.save_changes()
with self.store.open_session() as session:
times = session.load(key, object_type=Time, nested_object_types={"td": timedelta, "dt": datetime})
self.assertTrue(isinstance(times.td, timedelta) and isinstance(times.dt, datetime))
def test_query_conversion(self):
with self.store.open_session() as session:
query = list(session.query(object_type=Time, nested_object_types={"td": timedelta,
"dt": datetime}).
where_greater_than_or_equal("td", timedelta(days=9)))
            self.assertEqual(len(query), 1)
            for item in query:
                self.assertIsInstance(item.td, timedelta)
if __name__ == "__main__":
unittest.main()
|
the-stack_0_24285 | import numpy as np
import random
import os
from collections import namedtuple
import abc
import torch
from torch import optim
from ..core import SingleTrainer, AbstractTrainer, AbstractAgent
from ..common.pytorch import pytorch_call
from ..common.torchsummary import minimal_summary
from .replay import ReplayBuffer
from .util import qlearning, polyak_update, double_qlearning
class DeepQTrainer(SingleTrainer):
def __init__(self, name, env_kwargs, model_kwargs):
super().__init__(env_kwargs = env_kwargs, model_kwargs = model_kwargs)
self.name = name
self.batch_size = 32
self.gamma = 0.99
self.epsilon_start = 1.0
self.epsilon_end = 0.02
self.update_period = 500 # Double DQN update period
self.annealing_steps = 200000
self.preprocess_steps = 100000
self.replay_size = 50000
self.learning_rate = 0.001
self.model_kwargs = model_kwargs
self.max_episode_steps = None
self.double_dqn = True
self.allow_gpu = True
self._global_t = 0
self._state = None
self._episode_length = 0
self._episode_reward = 0.0
self._local_timestep = 0
self._replay = None
@abc.abstractclassmethod
def create_model(self, **kwargs):
pass
def show_summary(self, model):
batch_shape = (self.batch_size,)
shapes = (batch_shape + self.env.observation_space.shape, batch_shape, batch_shape, batch_shape + self.env.observation_space.shape, batch_shape)
minimal_summary(model, shapes)
def _build_train(self, model, main_device):
optimizer = optim.Adam(model.parameters(), lr = self.learning_rate)
update_params = lambda: None
if self.double_dqn:
target_net = self.create_model(**self._model_kwargs)
target_net = target_net.to(main_device)
update_params = lambda: polyak_update(1.0, target_net, model)
def compute_loss(observations, actions, rewards, next_observations, terminals):
with torch.no_grad():
q_next_online_net = model(next_observations)
if self.double_dqn:
q_select = target_net(next_observations)
q = model(observations)
pcontinues = (1.0 - terminals) * self.gamma
if self.double_dqn:
loss = double_qlearning(q, actions, rewards, pcontinues, q_next_online_net, q_select)
else:
loss = qlearning(q, actions, rewards, pcontinues, q_next_online_net)
return loss
@pytorch_call(main_device)
def train(*args):
loss = compute_loss(*args)
optimizer.zero_grad()
loss.backward()
optimizer.step()
return loss.item()
self._update_parameters = update_params
return train
def _build_graph(self, allow_gpu, **model_kwargs):
model = self.create_model(**model_kwargs)
# Show summary
self.show_summary(model)
cuda_devices = torch.cuda.device_count()
if cuda_devices == 0 or not allow_gpu:
print('Using CPU only')
main_device = torch.device('cpu')
get_state_dict = lambda: model.state_dict()
else:
print('Using single GPU')
main_device = torch.device('cuda')
model = model.to(main_device)
get_state_dict = lambda: model.state_dict()
model.train()
# Build train and act functions
self._train = self._build_train(model, main_device)
@pytorch_call(main_device)
def act(observations):
observations = observations
with torch.no_grad():
q = model(observations)
actions = torch.argmax(q, -1)
return actions.item()
@pytorch_call(main_device)
def q(observations):
observations = observations
with torch.no_grad():
q = model(observations)
return q.detach()
self._act = act
self._q = q
self._save = lambda path: torch.save(get_state_dict(), os.path.join(path, 'weights.pth'))
self.main_device = main_device
return model
def save(self, path):
super().save(path)
self._save(path)
def _initialize(self, **model_kwargs):
self._replay = ReplayBuffer(self.replay_size)
model = self._build_graph(self.allow_gpu, **model_kwargs)
return model
@property
def epsilon(self):
start_eps = self.epsilon_start
end_eps = self.epsilon_end
if self._global_t < self.preprocess_steps:
return 1.0
return max(start_eps - (start_eps - end_eps) * ((self._global_t - self.preprocess_steps) / self.annealing_steps), end_eps)
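    # Worked example with the defaults above (epsilon_start=1.0, epsilon_end=0.02,
    # preprocess_steps=100000, annealing_steps=200000):
    #   _global_t <= 100000  -> epsilon == 1.0 (pure exploration while filling replay)
    #   _global_t == 200000  -> 1.0 - 0.98 * (100000 / 200000) == 0.51
    #   _global_t >= 300000  -> clamped at epsilon_end == 0.02 for the rest of training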
def step(self, state, mode = 'validation'):
if random.random() < self.epsilon:
return random.randrange(self.env.action_space.n)
return self.act(state)
def on_optimization_done(self, stats):
'''
Callback called after the optimization step
'''
pass
def act(self, state):
return self._act(state[None])
def _optimize(self):
state, action, reward, next_state, done = self._replay.sample(self.batch_size)
td_losses = self._train(state, action, reward, next_state, done)
loss = np.mean(np.abs(td_losses))
if self._global_t % self.update_period == 0:
self._update_parameters()
return loss
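    # Note: when double DQN is enabled, the target network is refreshed here via
    # polyak_update(1.0, target_net, model), which with a coefficient of 1.0 is
    # effectively a hard copy of the online weights (assuming the first argument
    # of polyak_update is the blending coefficient), performed once every
    # `update_period` (default 500) optimization steps rather than as a soft blend.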
def process(self, mode = 'train', **kwargs):
episode_end = None
if self._state is None:
self._state = self.env.reset()
old_state = self._state
action = self.step(self._state, mode)
self._state, reward, done, env_props = self.env.step(action)
if mode == 'train':
self._replay.add(old_state, action, reward, self._state, done)
self._episode_length += 1
self._episode_reward += reward
if done or (self.max_episode_steps is not None and self._episode_length >= self.max_episode_steps):
episode_end = (self._episode_length, self._episode_reward)
self._episode_length = 0
self._episode_reward = 0.0
self._state = self.env.reset()
stats = dict()
if self._global_t >= self.preprocess_steps and mode == 'train':
loss = self._optimize()
stats = dict(loss = loss, epsilon = self.epsilon)
self.on_optimization_done(stats)
if 'win' in env_props:
stats['win'] = env_props['win']
if mode == 'train':
self._global_t += 1
return (1, episode_end, stats)
class DeepQAgent(AbstractAgent):
def __init__(self, checkpoint_dir = None, name = 'deepq'):
super().__init__(name)
if checkpoint_dir is None:
from ..configuration import configuration
checkpoint_dir = configuration.get('models_path')
self._load(checkpoint_dir)
@abc.abstractclassmethod
def create_model(self, **model_kwargs):
pass
def _load(self, checkpoint_dir):
path = os.path.join(checkpoint_dir, self.name, 'weights.pth')
self.model = self.create_model()
self.model.load_state_dict(torch.load(path, map_location=torch.device('cpu')))
self.model.eval()
def wrap_env(self, env):
return env
def q(self, state):
with torch.no_grad():
observation = torch.from_numpy(state)
return self.model(observation).numpy()
def act(self, state):
with torch.no_grad():
observation = torch.from_numpy(state)
action = torch.argmax(self.model(observation), dim = -1).squeeze(0)
return action.item() |
the-stack_0_24286 | import frappe
def get_context(context):
print(f"\n\n\n\n{frappe.form_dict}\n\n\n\n")
try:
        docname = frappe.form_dict.docname
        context.property = frappe.get_doc("Property", docname)
context.agent = frappe.get_doc("Agent", context.property.agent)
related_properties = frappe.db.sql(f"""
SELECT creation, name, property_name, status, address, grand_total,
image FROM `tabProperty` WHERE property_type='{context.property.property_type}'
AND name != '{context.property.name}' ORDER BY creation DESC LIMIT 3;
""", as_dict=True)
context.related_properties = related_properties
except Exception as e:
frappe.local.flags.redirect_location = '/404'
raise frappe.Redirect
print(frappe.session)
return context
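# Resulting template context (sketch of what the rendered page can use):
#   context.property            -> the requested Property document
#   context.agent               -> the Agent document linked to that property
#   context.related_properties  -> up to 3 other properties of the same
#                                  property_type, newest first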
|
the-stack_0_24287 | from utils import add_gaussian_noise, symetrize
from bm3d_1st_step import bm3d_1st_step
from bm3d_2nd_step import bm3d_2nd_step
from psnr import compute_psnr
import numpy as np
def run_bm3d(noisy_im, sigma,
n_H, k_H, N_H, p_H, tauMatch_H, useSD_H, tau_2D_H, lambda3D_H,
n_W, k_W, N_W, p_W, tauMatch_W, useSD_W, tau_2D_W):
k_H = 8 if (tau_2D_H == 'BIOR' or sigma < 40.) else 12
k_W = 8 if (tau_2D_W == 'BIOR' or sigma < 40.) else 12
noisy_im_p = symetrize(noisy_im, n_H)
img_basic = bm3d_1st_step(sigma, noisy_im_p, n_H, k_H, N_H, p_H, lambda3D_H, tauMatch_H, useSD_H, tau_2D_H)
img_basic = img_basic[n_H: -n_H, n_H: -n_H]
assert not np.any(np.isnan(img_basic))
img_basic_p = symetrize(img_basic, n_W)
noisy_im_p = symetrize(noisy_im, n_W)
img_denoised = bm3d_2nd_step(sigma, noisy_im_p, img_basic_p, n_W, k_W, N_W, p_W, tauMatch_W, useSD_W, tau_2D_W)
img_denoised = img_denoised[n_W: -n_W, n_W: -n_W]
return img_basic, img_denoised
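# Usage sketch (hypothetical values, mirroring the hyper-parameters used in
# __main__ below): denoise a synthetic sigma=20 image without going through files.
#
#   rng = np.random.RandomState(0)
#   noisy = 128 + 20 * rng.randn(64, 64)
#   basic, final = run_bm3d(noisy, 20,
#                           16, 8, 16, 3, 2500, False, 'BIOR', 2.7,
#                           16, 8, 32, 3, 400, True, 'DCT')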
if __name__ == '__main__':
import os
import cv2
import numpy as np
# <hyper parameter> -------------------------------------------------------------------------------
n_H = 16
k_H = 8
N_H = 16
p_H = 3
lambda3D_H = 2.7 # ! Threshold for Hard Thresholding
useSD_H = False
tau_2D_H = 'BIOR'
n_W = 16
k_W = 8
N_W = 32
p_W = 3
useSD_W = True
tau_2D_W = 'DCT'
# <\ hyper parameter> -----------------------------------------------------------------------------
im_dir = 'test_data/image'
save_dir = 'temp_test_result'
os.makedirs(save_dir, exist_ok=True)
# for im_name in os.listdir(im_dir):
for im_name in ['Cameraman.png',]:
# sigma_list = [2, 5, 10, 20, 30, 40, 60, 80, 100]
sigma_list = [20]
for sigma in sigma_list:
print(im_name, ' ', sigma)
            tauMatch_H = 2500 if sigma < 35 else 5000  # ! threshold determines similarity between patches
            tauMatch_W = 400 if sigma < 35 else 3500  # ! threshold determines similarity between patches
noisy_dir = 'test_data/sigma' + str(sigma)
im_path = os.path.join(im_dir, im_name)
im = cv2.imread(im_path, cv2.IMREAD_GRAYSCALE)
noisy_im_path = os.path.join(noisy_dir, im_name)
noisy_im = cv2.imread(noisy_im_path, cv2.IMREAD_GRAYSCALE)
im1, im2 = run_bm3d(noisy_im, sigma,
n_H, k_H, N_H, p_H, tauMatch_H, useSD_H, tau_2D_H, lambda3D_H,
n_W, k_W, N_W, p_W, tauMatch_W, useSD_W, tau_2D_W)
psnr_1st = compute_psnr(im, im1)
psnr_2nd = compute_psnr(im, im2)
im1 = (np.clip(im1, 0, 255)).astype(np.uint8)
im2 = (np.clip(im2, 0, 255)).astype(np.uint8)
save_name = im_name[:-4] + '_s' + str(sigma) + '_py_1st_P' + '%.4f' % psnr_1st + '.png'
cv2.imwrite(os.path.join(save_dir, save_name), im1)
save_name = im_name[:-4] + '_s' + str(sigma) + '_py_2nd_P' + '%.4f' % psnr_2nd + '.png'
cv2.imwrite(os.path.join(save_dir, save_name), im2)
|
the-stack_0_24288 | # coding: utf-8
import argparse
import tempfile
import json
import numpy as np
import pytest
import torch
from espnet.asr.pytorch_backend.asr_init import load_trained_model
import espnet.lm.pytorch_backend.extlm as extlm_pytorch
from espnet.nets.beam_search_transducer import BeamSearchTransducer
from espnet.nets.pytorch_backend.e2e_asr_transducer import E2E
import espnet.nets.pytorch_backend.lm.default as lm_pytorch
from espnet.nets.pytorch_backend.nets_utils import pad_list
def get_default_train_args(**kwargs):
train_defaults = dict(
etype="vggblstmp",
elayers=1,
subsample="1_2_2_1_1",
eunits=4,
eprojs=4,
dtype="lstm",
dlayers=1,
dunits=4,
dec_embed_dim=4,
dropout_rate=0.0,
dropout_rate_decoder=0.0,
dropout_rate_embed_decoder=0.0,
joint_dim=2,
joint_activation_type="tanh",
transducer_loss_weight=1.0,
use_ctc_loss=False,
ctc_loss_weight=0.0,
ctc_loss_dropout_rate=0.0,
use_lm_loss=False,
lm_loss_weight=0.0,
use_aux_transducer_loss=False,
aux_transducer_loss_weight=0.0,
aux_transducer_loss_enc_output_layers=[],
use_symm_kl_div_loss=False,
symm_kl_div_loss_weight=0.0,
char_list=["a", "b", "c", "d"],
sym_space="<space>",
sym_blank="<blank>",
report_cer=False,
report_wer=False,
verbose=0,
outdir=None,
rnnlm=None,
model_module="espnet.nets.pytorch_backend.e2e_asr_transducer:E2E",
)
train_defaults.update(kwargs)
return argparse.Namespace(**train_defaults)
def get_default_recog_args(**kwargs):
recog_defaults = dict(
batchsize=0,
beam_size=1,
nbest=1,
verbose=0,
search_type="default",
nstep=1,
max_sym_exp=2,
prefix_alpha=2,
u_max=5,
expansion_gamma=2,
expansion_beta=0.2,
score_norm_transducer=True,
rnnlm=None,
lm_weight=0.1,
)
recog_defaults.update(kwargs)
return argparse.Namespace(**recog_defaults)
def get_default_scope_inputs():
idim = 15
odim = 4
ilens = [12, 8]
olens = [8, 4]
return idim, odim, ilens, olens
def get_lm():
n_layers = 1
n_units = 4
char_list = ["<blank>", "<space>", "a", "b", "c", "d", "<eos>"]
rnnlm = lm_pytorch.ClassifierWithState(
lm_pytorch.RNNLM(len(char_list), n_layers, n_units, typ="lstm")
)
return rnnlm
def get_wordlm():
n_layers = 1
n_units = 8
char_list = ["<blank>", "<space>", "a", "b", "c", "d", "<eos>"]
word_list = ["<blank>", "<unk>", "ab", "id", "ac", "bd", "<eos>"]
char_dict = {x: i for i, x in enumerate(char_list)}
word_dict = {x: i for i, x in enumerate(word_list)}
word_rnnlm = lm_pytorch.ClassifierWithState(
lm_pytorch.RNNLM(len(word_list), n_layers, n_units)
)
word_rnnlm = lm_pytorch.ClassifierWithState(
extlm_pytorch.LookAheadWordLM(word_rnnlm.predictor, word_dict, char_dict)
)
return word_rnnlm
def prepare_inputs(idim, odim, ilens, olens, is_cuda=False):
np.random.seed(1)
feats = [np.random.randn(ilen, idim).astype(np.float32) for ilen in ilens]
labels = [np.random.randint(1, odim, olen).astype(np.int32) for olen in olens]
feats_len = np.array([x.shape[0] for x in feats], dtype=np.int32)
feats = pad_list([torch.from_numpy(x).float() for x in feats], 0)
labels = pad_list([torch.from_numpy(y).long() for y in labels], -1)
feats_len = torch.from_numpy(feats_len).long()
if is_cuda:
feats = feats.cuda()
labels = labels.cuda()
feats_len = feats_len.cuda()
return feats, feats_len, labels
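# Shape sketch for the default scope inputs (idim=15, odim=4, ilens=[12, 8],
# olens=[8, 4]):
#
#   feats, feats_len, labels = prepare_inputs(15, 4, [12, 8], [8, 4])
#   # feats.shape  -> torch.Size([2, 12, 15])  (shorter utterance zero-padded)
#   # feats_len    -> tensor([12, 8])
#   # labels.shape -> torch.Size([2, 8])       (padded with the ignore id -1)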
@pytest.mark.parametrize(
"train_dic, recog_dic",
[
({}, {}),
({"eprojs": 4}, {}),
({"dlayers": 2}, {}),
({"etype": "gru"}, {}),
({"etype": "blstm"}, {}),
({"etype": "blstmp", "elayers": 2, "eprojs": 4}, {}),
({"etype": "vgggru"}, {}),
({"etype": "vggbru"}, {}),
({"etype": "vgggrup", "elayers": 2, "eprojs": 4}, {}),
({"dtype": "gru"}, {}),
({"dtype": "bgrup"}, {}),
({"dtype": "gru", "dlayers": 2}, {}),
({"joint-activation-type": "relu"}, {}),
({"joint-activation-type": "swish"}, {}),
({}, {"score_norm_transducer": False}),
({"report_cer": True, "report_wer": True}, {}),
({}, {"nbest": 2}),
({}, {"beam_size": 1}),
({}, {"beam_size": 2}),
({}, {"beam_size": 2, "search_type": "nsc"}),
({}, {"beam_size": 2, "search_type": "nsc", "nstep": 2, "prefix_alpha": 1}),
({}, {"beam_size": 2, "search_type": "tsd"}),
({}, {"beam_size": 2, "search_type": "tsd", "max-sym-exp": 3}),
({}, {"beam_size": 2, "search_type": "alsd"}),
({}, {"beam_size": 2, "search_type": "alsd", "u_max": 10}),
({}, {"beam_size": 2, "search_type": "maes", "nstep": 2}),
(
{},
{
"beam_size": 2,
"search_type": "default",
"rnnlm": get_wordlm(),
"lm_weight": 1.0,
},
),
({}, {"beam_size": 2, "search_type": "nsc", "rnnlm": get_lm()}),
({}, {"beam_size": 2, "search_type": "nsc", "rnnlm": get_wordlm()}),
({}, {"beam_size": 2, "search_type": "nsc", "nstep": 2, "rnnlm": get_lm()}),
({}, {"beam_size": 2, "search_type": "nsc", "nstep": 2, "rnnlm": get_wordlm()}),
(
{},
{
"beam_size": 2,
"search_type": "alsd",
"rnnlm": get_lm(),
"lm_weight": 0.2,
},
),
(
{},
{
"beam_size": 2,
"search_type": "alsd",
"rnnlm": get_wordlm(),
"lm_weight": 0.6,
},
),
({}, {"beam_size": 2, "search_type": "tsd", "rnnlm": get_lm()}),
({}, {"beam_size": 2, "search_type": "tsd", "rnnlm": get_wordlm()}),
(
{},
{"beam_size": 2, "search_type": "maes", "nstep": 2, "rnnlm": get_wordlm()},
),
],
)
def test_pytorch_transducer_trainable_and_decodable(train_dic, recog_dic):
idim, odim, ilens, olens = get_default_scope_inputs()
train_args = get_default_train_args(**train_dic)
recog_args = get_default_recog_args(**recog_dic)
model = E2E(idim, odim, train_args)
batch = prepare_inputs(idim, odim, ilens, olens)
# to avoid huge training time, cer/wer report
# is only enabled at validation steps
if train_args.report_cer or train_args.report_wer:
model.training = False
loss = model(*batch)
loss.backward()
beam_search = BeamSearchTransducer(
decoder=model.dec,
joint_network=model.transducer_tasks.joint_network,
beam_size=recog_args.beam_size,
lm=recog_args.rnnlm,
lm_weight=recog_args.lm_weight,
search_type=recog_args.search_type,
max_sym_exp=recog_args.max_sym_exp,
u_max=recog_args.u_max,
nstep=recog_args.nstep,
prefix_alpha=recog_args.prefix_alpha,
score_norm=recog_args.score_norm_transducer,
)
with torch.no_grad():
in_data = np.random.randn(20, idim)
model.recognize(in_data, beam_search)
@pytest.mark.skipif(torch.cuda.device_count() < 2, reason="multi gpu required")
@pytest.mark.parametrize(
"train_dic",
[
{"report_cer": True, "report_wer": True},
],
)
@pytest.mark.execution_timeout(3.2)
def test_pytorch_multi_gpu_trainable(train_dic):
idim, odim, ilens, olens = get_default_scope_inputs()
train_args = get_default_train_args(**train_dic)
ngpu = 2
device_ids = list(range(ngpu))
model = E2E(idim, odim, train_args)
model = torch.nn.DataParallel(model, device_ids)
model.cuda()
batch = prepare_inputs(idim, odim, ilens, olens, is_cuda=True)
loss = 1.0 / ngpu * model(*batch)
loss.backward(loss.new_ones(ngpu))
def test_calculate_plot_attention():
idim, odim, ilens, olens = get_default_scope_inputs()
train_args = get_default_train_args()
model = E2E(idim, odim, train_args)
batch = prepare_inputs(idim, odim, ilens, olens, is_cuda=False)
assert model.calculate_all_attentions(*batch) == []
@pytest.mark.parametrize(
"train_dic",
[
{
"elayers": 3,
"use_aux_transducer_loss": True,
"aux_transducer_loss_enc_output_layers": [1],
},
{
"elayers": 2,
"use_ctc_loss": True,
"ctc_loss_weight": 0.5,
"ctc_loss_dropout_rate": 0.1,
},
{
"etype": "vggblstm",
"elayers": 3,
"use_aux_transducer_loss": True,
"aux_transducer_loss": True,
"use_symm_kl_div_loss": True,
"symm_kl_div_loss_weight": 0.5,
"aux_transducer_loss_enc_output_layers": [0, 1],
},
{"dlayers": 2, "use_lm_loss": True, "lm_loss_weight": 0.5},
],
)
def test_auxiliary_task(train_dic):
idim, odim, ilens, olens = get_default_scope_inputs()
train_args = get_default_train_args(**train_dic)
recog_args = get_default_recog_args()
model = E2E(idim, odim, train_args)
batch = prepare_inputs(idim, odim, ilens, olens)
loss = model(*batch)
loss.backward()
beam_search = BeamSearchTransducer(
decoder=model.dec,
joint_network=model.transducer_tasks.joint_network,
beam_size=recog_args.beam_size,
lm=recog_args.rnnlm,
lm_weight=recog_args.lm_weight,
search_type=recog_args.search_type,
max_sym_exp=recog_args.max_sym_exp,
u_max=recog_args.u_max,
nstep=recog_args.nstep,
prefix_alpha=recog_args.prefix_alpha,
score_norm=recog_args.score_norm_transducer,
)
tmpdir = tempfile.mkdtemp(prefix="tmp_", dir="/tmp")
torch.save(model.state_dict(), tmpdir + "/model.dummy.best")
with open(tmpdir + "/model.json", "wb") as f:
f.write(
json.dumps(
(idim, odim, vars(train_args)),
indent=4,
ensure_ascii=False,
sort_keys=True,
).encode("utf_8")
)
with torch.no_grad():
in_data = np.random.randn(20, idim)
model, _ = load_trained_model(tmpdir + "/model.dummy.best", training=False)
model.recognize(in_data, beam_search)
def test_invalid_aux_transducer_loss_enc_layers():
idim, odim, ilens, olens = get_default_scope_inputs()
train_args = get_default_train_args(use_aux_transducer_loss=True)
with pytest.raises(ValueError):
E2E(idim, odim, train_args)
train_args = get_default_train_args(
use_aux_transducer_loss=True, aux_transducer_loss_enc_output_layers="foo"
)
with pytest.raises(ValueError):
E2E(idim, odim, train_args)
train_args = get_default_train_args(
use_aux_transducer_loss=True, aux_transducer_loss_enc_output_layers=[0, 4]
)
with pytest.raises(ValueError):
E2E(idim, odim, train_args)
train_args = get_default_train_args(
use_aux_transducer_loss=True,
use_symm_kl_div_loss=True,
aux_transducer_loss_enc_output_layers=[0],
elayers=3,
etype="blstmp",
subsample="1_2_1",
)
with pytest.raises(ValueError):
E2E(idim, odim, train_args)
|
the-stack_0_24289 | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
import torch.nn.functional as F
from ..ops import emulate_int
class IntEmbedding(nn.Module):
"""
Quantized counterpart of the nn.Embedding module that applies QuantNoise during training.
Args:
- num_embeddings: number of tokens
- embedding_dim: embedding dimension
- p: amount of noise to inject (0 = no quantization, 1 = quantize all the weights)
- bits: number of bits
- method: choose among {"tensor", "histogram", "channel"}
- update_step: recompute scale and zero_point every update_steps iterations
Remarks:
- We use the straight-through estimator so that the gradients
back-propagate nicely in the network, this is implemented with
the detach() trick
- Parameters scale and zero_point are recomputed every update_step
forward pass to reduce the overhead
- At test time, the weights are fully quantized
"""
def __init__(
self,
num_embeddings,
embedding_dim,
padding_idx=None,
max_norm=None,
norm_type=2.0,
scale_grad_by_freq=False,
sparse=False,
_weight=None,
p=0,
update_step=1000,
bits=8,
method="histogram",
):
super(IntEmbedding, self).__init__()
self.num_embeddings = num_embeddings
self.embedding_dim = embedding_dim
if padding_idx is not None:
if padding_idx > 0:
assert (
padding_idx < self.num_embeddings
), "Padding_idx must be within num_embeddings"
elif padding_idx < 0:
assert (
padding_idx >= -self.num_embeddings
), "Padding_idx must be within num_embeddings"
padding_idx = self.num_embeddings + padding_idx
self.padding_idx = padding_idx
self.max_norm = max_norm
self.norm_type = norm_type
self.scale_grad_by_freq = scale_grad_by_freq
if _weight is None:
self.weight = nn.Parameter(torch.Tensor(num_embeddings, embedding_dim))
self.reset_parameters()
else:
assert list(_weight.shape) == [
num_embeddings,
embedding_dim,
], "Shape of weight does not match num_embeddings and embedding_dim"
self.weight = nn.Parameter(_weight)
self.sparse = sparse
# quantization parameters
self.p = p
self.bits = bits
self.method = method
self.update_step = update_step
self.counter = 0
def reset_parameters(self):
nn.init.normal_(self.weight)
if self.padding_idx is not None:
with torch.no_grad():
self.weight[self.padding_idx].fill_(0)
def forward(self, input):
# train with QuantNoise and evaluate the fully quantized network
p = self.p if self.training else 1
# update parameters every 1000 iterations
if self.counter % self.update_step == 0:
self.scale = None
self.zero_point = None
self.counter += 1
# quantize weight
weight_quantized, self.scale, self.zero_point = emulate_int(
self.weight.detach(),
bits=self.bits,
method=self.method,
scale=self.scale,
zero_point=self.zero_point,
)
# mask to apply noise
mask = torch.zeros_like(self.weight)
mask.bernoulli_(1 - p)
noise = (weight_quantized - self.weight).masked_fill(mask.bool(), 0)
# using straight-through estimator (STE)
clamp_low = -self.scale * self.zero_point
clamp_high = self.scale * (2 ** self.bits - 1 - self.zero_point)
weight = (
torch.clamp(self.weight, clamp_low.item(), clamp_high.item())
+ noise.detach()
)
# return output
output = F.embedding(
input,
weight,
self.padding_idx,
self.max_norm,
self.norm_type,
self.scale_grad_by_freq,
self.sparse,
)
return output
def extra_repr(self):
s = "{num_embeddings}, {embedding_dim}"
if self.padding_idx is not None:
s += ", padding_idx={padding_idx}"
if self.max_norm is not None:
s += ", max_norm={max_norm}"
if self.norm_type != 2:
s += ", norm_type={norm_type}"
if self.scale_grad_by_freq is not False:
s += ", scale_grad_by_freq={scale_grad_by_freq}"
if self.sparse is not False:
s += ", sparse=True"
        s += ", quant_noise={p}, bits={bits}, method={method}"
return s.format(**self.__dict__)
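    # Usage sketch (shapes are illustrative only): a drop-in replacement for
    # nn.Embedding that applies quantization noise to a random subset of the
    # weights during training and uses fully int-emulated weights in eval mode.
    #
    #   emb = IntEmbedding(1000, 64, p=0.5, bits=8, method="histogram")
    #   tokens = torch.randint(0, 1000, (8, 32))
    #   out = emb(tokens)   # (8, 32, 64), quant noise on roughly half the weights
    #   emb.eval()
    #   out = emb(tokens)   # same shape, weights fully quantized (p -> 1)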
|
the-stack_0_24290 | from functools import partial
from typing import Callable, Optional, List
from mypy import message_registry
from mypy.nodes import Expression, StrExpr, IntExpr, DictExpr, UnaryExpr
from mypy.plugin import (
Plugin, FunctionContext, MethodContext, MethodSigContext, AttributeContext, ClassDefContext,
CheckerPluginInterface,
)
from mypy.plugins.common import try_getting_str_literals
from mypy.types import (
FunctionLike, Type, Instance, AnyType, TypeOfAny, CallableType, NoneType, TypedDictType,
TypeVarType, TPDICT_FB_NAMES, get_proper_type, LiteralType, TupleType
)
from mypy.subtypes import is_subtype
from mypy.typeops import make_simplified_union
from mypy.checkexpr import is_literal_type_like
from mypy.checker import detach_callable
class DefaultPlugin(Plugin):
"""Type checker plugin that is enabled by default."""
def get_function_hook(self, fullname: str
) -> Optional[Callable[[FunctionContext], Type]]:
from mypy.plugins import ctypes, singledispatch
if fullname in ('contextlib.contextmanager', 'contextlib.asynccontextmanager'):
return contextmanager_callback
elif fullname == 'builtins.open' and self.python_version[0] == 3:
return open_callback
elif fullname == 'ctypes.Array':
return ctypes.array_constructor_callback
elif fullname == 'functools.singledispatch':
return singledispatch.create_singledispatch_function_callback
return None
def get_method_signature_hook(self, fullname: str
) -> Optional[Callable[[MethodSigContext], FunctionLike]]:
from mypy.plugins import ctypes, singledispatch
if fullname == 'typing.Mapping.get':
return typed_dict_get_signature_callback
elif fullname in set(n + '.setdefault' for n in TPDICT_FB_NAMES):
return typed_dict_setdefault_signature_callback
elif fullname in set(n + '.pop' for n in TPDICT_FB_NAMES):
return typed_dict_pop_signature_callback
elif fullname in set(n + '.update' for n in TPDICT_FB_NAMES):
return typed_dict_update_signature_callback
elif fullname in set(n + '.__delitem__' for n in TPDICT_FB_NAMES):
return typed_dict_delitem_signature_callback
elif fullname == 'ctypes.Array.__setitem__':
return ctypes.array_setitem_callback
elif fullname == singledispatch.SINGLEDISPATCH_CALLABLE_CALL_METHOD:
return singledispatch.call_singledispatch_function_callback
return None
def get_method_hook(self, fullname: str
) -> Optional[Callable[[MethodContext], Type]]:
from mypy.plugins import ctypes, singledispatch
if fullname == 'typing.Mapping.get':
return typed_dict_get_callback
elif fullname == 'builtins.int.__pow__':
return int_pow_callback
elif fullname == 'builtins.int.__neg__':
return int_neg_callback
elif fullname in ('builtins.tuple.__mul__', 'builtins.tuple.__rmul__'):
return tuple_mul_callback
elif fullname in set(n + '.setdefault' for n in TPDICT_FB_NAMES):
return typed_dict_setdefault_callback
elif fullname in set(n + '.pop' for n in TPDICT_FB_NAMES):
return typed_dict_pop_callback
elif fullname in set(n + '.__delitem__' for n in TPDICT_FB_NAMES):
return typed_dict_delitem_callback
elif fullname == 'ctypes.Array.__getitem__':
return ctypes.array_getitem_callback
elif fullname == 'ctypes.Array.__iter__':
return ctypes.array_iter_callback
elif fullname == 'pathlib.Path.open':
return path_open_callback
elif fullname == singledispatch.SINGLEDISPATCH_REGISTER_METHOD:
return singledispatch.singledispatch_register_callback
elif fullname == singledispatch.REGISTER_CALLABLE_CALL_METHOD:
return singledispatch.call_singledispatch_function_after_register_argument
return None
def get_attribute_hook(self, fullname: str
) -> Optional[Callable[[AttributeContext], Type]]:
from mypy.plugins import ctypes
from mypy.plugins import enums
if fullname == 'ctypes.Array.value':
return ctypes.array_value_callback
elif fullname == 'ctypes.Array.raw':
return ctypes.array_raw_callback
elif fullname in enums.ENUM_NAME_ACCESS:
return enums.enum_name_callback
elif fullname in enums.ENUM_VALUE_ACCESS:
return enums.enum_value_callback
return None
def get_class_decorator_hook(self, fullname: str
) -> Optional[Callable[[ClassDefContext], None]]:
from mypy.plugins import attrs
from mypy.plugins import dataclasses
from mypy.plugins import functools
if fullname in attrs.attr_class_makers:
return attrs.attr_class_maker_callback
elif fullname in attrs.attr_dataclass_makers:
return partial(
attrs.attr_class_maker_callback,
auto_attribs_default=True,
)
elif fullname in attrs.attr_frozen_makers:
return partial(
attrs.attr_class_maker_callback,
auto_attribs_default=None,
frozen_default=True,
)
elif fullname in attrs.attr_define_makers:
return partial(
attrs.attr_class_maker_callback,
auto_attribs_default=None,
)
elif fullname in dataclasses.dataclass_makers:
return dataclasses.dataclass_class_maker_callback
elif fullname in functools.functools_total_ordering_makers:
return functools.functools_total_ordering_maker_callback
return None
def open_callback(ctx: FunctionContext) -> Type:
"""Infer a better return type for 'open'."""
return _analyze_open_signature(
arg_types=ctx.arg_types,
args=ctx.args,
mode_arg_index=1,
default_return_type=ctx.default_return_type,
api=ctx.api,
)
def path_open_callback(ctx: MethodContext) -> Type:
"""Infer a better return type for 'pathlib.Path.open'."""
return _analyze_open_signature(
arg_types=ctx.arg_types,
args=ctx.args,
mode_arg_index=0,
default_return_type=ctx.default_return_type,
api=ctx.api,
)
def _analyze_open_signature(arg_types: List[List[Type]],
args: List[List[Expression]],
mode_arg_index: int,
default_return_type: Type,
api: CheckerPluginInterface,
) -> Type:
"""A helper for analyzing any function that has approximately
the same signature as the builtin 'open(...)' function.
Currently, the only thing the caller can customize is the index
of the 'mode' argument. If the mode argument is omitted or is a
string literal, we refine the return type to either 'TextIO' or
'BinaryIO' as appropriate.
"""
mode = None
if not arg_types or len(arg_types[mode_arg_index]) != 1:
mode = 'r'
else:
mode_expr = args[mode_arg_index][0]
if isinstance(mode_expr, StrExpr):
mode = mode_expr.value
if mode is not None:
assert isinstance(default_return_type, Instance) # type: ignore
if 'b' in mode:
return api.named_generic_type('typing.BinaryIO', [])
else:
return api.named_generic_type('typing.TextIO', [])
return default_return_type
def contextmanager_callback(ctx: FunctionContext) -> Type:
"""Infer a better return type for 'contextlib.contextmanager'."""
# Be defensive, just in case.
if ctx.arg_types and len(ctx.arg_types[0]) == 1:
arg_type = get_proper_type(ctx.arg_types[0][0])
default_return = get_proper_type(ctx.default_return_type)
if (isinstance(arg_type, CallableType)
and isinstance(default_return, CallableType)):
# The stub signature doesn't preserve information about arguments so
# add them back here.
return detach_callable(default_return.copy_modified(
arg_types=arg_type.arg_types,
arg_kinds=arg_type.arg_kinds,
arg_names=arg_type.arg_names,
variables=arg_type.variables,
is_ellipsis_args=arg_type.is_ellipsis_args))
return ctx.default_return_type
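# Illustration (sketch, not part of the plugin API): with the hook above,
#
#   @contextlib.contextmanager
#   def session(name: str) -> Iterator[int]:
#       yield 0
#
# keeps its argument names and kinds, so `session(name="db")` type checks and
# `session(1)` is rejected, instead of the decorated function collapsing to the
# generic Callable[..., ...] signature from the stub.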
def typed_dict_get_signature_callback(ctx: MethodSigContext) -> CallableType:
"""Try to infer a better signature type for TypedDict.get.
This is used to get better type context for the second argument that
depends on a TypedDict value type.
"""
signature = ctx.default_signature
if (isinstance(ctx.type, TypedDictType)
and len(ctx.args) == 2
and len(ctx.args[0]) == 1
and isinstance(ctx.args[0][0], StrExpr)
and len(signature.arg_types) == 2
and len(signature.variables) == 1
and len(ctx.args[1]) == 1):
key = ctx.args[0][0].value
value_type = get_proper_type(ctx.type.items.get(key))
ret_type = signature.ret_type
if value_type:
default_arg = ctx.args[1][0]
if (isinstance(value_type, TypedDictType)
and isinstance(default_arg, DictExpr)
and len(default_arg.items) == 0):
# Caller has empty dict {} as default for typed dict.
value_type = value_type.copy_modified(required_keys=set())
# Tweak the signature to include the value type as context. It's
# only needed for type inference since there's a union with a type
# variable that accepts everything.
tv = signature.variables[0]
assert isinstance(tv, TypeVarType)
return signature.copy_modified(
arg_types=[signature.arg_types[0],
make_simplified_union([value_type, tv])],
ret_type=ret_type)
return signature
def typed_dict_get_callback(ctx: MethodContext) -> Type:
"""Infer a precise return type for TypedDict.get with literal first argument."""
if (isinstance(ctx.type, TypedDictType)
and len(ctx.arg_types) >= 1
and len(ctx.arg_types[0]) == 1):
keys = try_getting_str_literals(ctx.args[0][0], ctx.arg_types[0][0])
if keys is None:
return ctx.default_return_type
output_types: List[Type] = []
for key in keys:
value_type = get_proper_type(ctx.type.items.get(key))
if value_type is None:
return ctx.default_return_type
if len(ctx.arg_types) == 1:
output_types.append(value_type)
elif (len(ctx.arg_types) == 2 and len(ctx.arg_types[1]) == 1
and len(ctx.args[1]) == 1):
default_arg = ctx.args[1][0]
if (isinstance(default_arg, DictExpr) and len(default_arg.items) == 0
and isinstance(value_type, TypedDictType)):
# Special case '{}' as the default for a typed dict type.
output_types.append(value_type.copy_modified(required_keys=set()))
else:
output_types.append(value_type)
output_types.append(ctx.arg_types[1][0])
if len(ctx.arg_types) == 1:
output_types.append(NoneType())
return make_simplified_union(output_types)
return ctx.default_return_type
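# Illustration (sketch): for
#
#   class M(TypedDict):
#       x: int
#
# the callback above infers m.get("x") as "int | None" and m.get("x", 0) as
# "int", while a non-literal key falls back to the default return type.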
def typed_dict_pop_signature_callback(ctx: MethodSigContext) -> CallableType:
"""Try to infer a better signature type for TypedDict.pop.
This is used to get better type context for the second argument that
depends on a TypedDict value type.
"""
signature = ctx.default_signature
str_type = ctx.api.named_generic_type('builtins.str', [])
if (isinstance(ctx.type, TypedDictType)
and len(ctx.args) == 2
and len(ctx.args[0]) == 1
and isinstance(ctx.args[0][0], StrExpr)
and len(signature.arg_types) == 2
and len(signature.variables) == 1
and len(ctx.args[1]) == 1):
key = ctx.args[0][0].value
value_type = ctx.type.items.get(key)
if value_type:
# Tweak the signature to include the value type as context. It's
# only needed for type inference since there's a union with a type
# variable that accepts everything.
tv = signature.variables[0]
assert isinstance(tv, TypeVarType)
typ = make_simplified_union([value_type, tv])
return signature.copy_modified(
arg_types=[str_type, typ],
ret_type=typ)
return signature.copy_modified(arg_types=[str_type, signature.arg_types[1]])
def typed_dict_pop_callback(ctx: MethodContext) -> Type:
"""Type check and infer a precise return type for TypedDict.pop."""
if (isinstance(ctx.type, TypedDictType)
and len(ctx.arg_types) >= 1
and len(ctx.arg_types[0]) == 1):
keys = try_getting_str_literals(ctx.args[0][0], ctx.arg_types[0][0])
if keys is None:
ctx.api.fail(message_registry.TYPEDDICT_KEY_MUST_BE_STRING_LITERAL, ctx.context)
return AnyType(TypeOfAny.from_error)
value_types = []
for key in keys:
if key in ctx.type.required_keys:
ctx.api.msg.typeddict_key_cannot_be_deleted(ctx.type, key, ctx.context)
value_type = ctx.type.items.get(key)
if value_type:
value_types.append(value_type)
else:
ctx.api.msg.typeddict_key_not_found(ctx.type, key, ctx.context)
return AnyType(TypeOfAny.from_error)
if len(ctx.args[1]) == 0:
return make_simplified_union(value_types)
elif (len(ctx.arg_types) == 2 and len(ctx.arg_types[1]) == 1
and len(ctx.args[1]) == 1):
return make_simplified_union([*value_types, ctx.arg_types[1][0]])
return ctx.default_return_type
def typed_dict_setdefault_signature_callback(ctx: MethodSigContext) -> CallableType:
"""Try to infer a better signature type for TypedDict.setdefault.
This is used to get better type context for the second argument that
depends on a TypedDict value type.
"""
signature = ctx.default_signature
str_type = ctx.api.named_generic_type('builtins.str', [])
if (isinstance(ctx.type, TypedDictType)
and len(ctx.args) == 2
and len(ctx.args[0]) == 1
and isinstance(ctx.args[0][0], StrExpr)
and len(signature.arg_types) == 2
and len(ctx.args[1]) == 1):
key = ctx.args[0][0].value
value_type = ctx.type.items.get(key)
if value_type:
return signature.copy_modified(arg_types=[str_type, value_type])
return signature.copy_modified(arg_types=[str_type, signature.arg_types[1]])
def typed_dict_setdefault_callback(ctx: MethodContext) -> Type:
"""Type check TypedDict.setdefault and infer a precise return type."""
if (isinstance(ctx.type, TypedDictType)
and len(ctx.arg_types) == 2
and len(ctx.arg_types[0]) == 1
and len(ctx.arg_types[1]) == 1):
keys = try_getting_str_literals(ctx.args[0][0], ctx.arg_types[0][0])
if keys is None:
ctx.api.fail(message_registry.TYPEDDICT_KEY_MUST_BE_STRING_LITERAL, ctx.context)
return AnyType(TypeOfAny.from_error)
default_type = ctx.arg_types[1][0]
value_types = []
for key in keys:
value_type = ctx.type.items.get(key)
if value_type is None:
ctx.api.msg.typeddict_key_not_found(ctx.type, key, ctx.context)
return AnyType(TypeOfAny.from_error)
# The signature_callback above can't always infer the right signature
# (e.g. when the expression is a variable that happens to be a Literal str)
# so we need to handle the check ourselves here and make sure the provided
# default can be assigned to all key-value pairs we're updating.
if not is_subtype(default_type, value_type):
ctx.api.msg.typeddict_setdefault_arguments_inconsistent(
default_type, value_type, ctx.context)
return AnyType(TypeOfAny.from_error)
value_types.append(value_type)
return make_simplified_union(value_types)
return ctx.default_return_type
def typed_dict_delitem_signature_callback(ctx: MethodSigContext) -> CallableType:
# Replace NoReturn as the argument type.
str_type = ctx.api.named_generic_type('builtins.str', [])
return ctx.default_signature.copy_modified(arg_types=[str_type])
def typed_dict_delitem_callback(ctx: MethodContext) -> Type:
"""Type check TypedDict.__delitem__."""
if (isinstance(ctx.type, TypedDictType)
and len(ctx.arg_types) == 1
and len(ctx.arg_types[0]) == 1):
keys = try_getting_str_literals(ctx.args[0][0], ctx.arg_types[0][0])
if keys is None:
ctx.api.fail(message_registry.TYPEDDICT_KEY_MUST_BE_STRING_LITERAL, ctx.context)
return AnyType(TypeOfAny.from_error)
for key in keys:
if key in ctx.type.required_keys:
ctx.api.msg.typeddict_key_cannot_be_deleted(ctx.type, key, ctx.context)
elif key not in ctx.type.items:
ctx.api.msg.typeddict_key_not_found(ctx.type, key, ctx.context)
return ctx.default_return_type
def typed_dict_update_signature_callback(ctx: MethodSigContext) -> CallableType:
"""Try to infer a better signature type for TypedDict.update."""
signature = ctx.default_signature
if (isinstance(ctx.type, TypedDictType)
and len(signature.arg_types) == 1):
arg_type = get_proper_type(signature.arg_types[0])
assert isinstance(arg_type, TypedDictType)
arg_type = arg_type.as_anonymous()
arg_type = arg_type.copy_modified(required_keys=set())
return signature.copy_modified(arg_types=[arg_type])
return signature
def int_pow_callback(ctx: MethodContext) -> Type:
"""Infer a more precise return type for int.__pow__."""
# int.__pow__ has an optional modulo argument,
# so we expect 2 argument positions
if (len(ctx.arg_types) == 2
and len(ctx.arg_types[0]) == 1 and len(ctx.arg_types[1]) == 0):
arg = ctx.args[0][0]
if isinstance(arg, IntExpr):
exponent = arg.value
elif isinstance(arg, UnaryExpr) and arg.op == '-' and isinstance(arg.expr, IntExpr):
exponent = -arg.expr.value
else:
# Right operand not an int literal or a negated literal -- give up.
return ctx.default_return_type
if exponent >= 0:
return ctx.api.named_generic_type('builtins.int', [])
else:
return ctx.api.named_generic_type('builtins.float', [])
return ctx.default_return_type
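# Illustration (sketch): because only literal exponents are special-cased above,
#
#   reveal_type(2 ** 3)    # builtins.int
#   reveal_type(2 ** -1)   # builtins.float
#   reveal_type(2 ** n)    # default return type for a non-literal int n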
def int_neg_callback(ctx: MethodContext) -> Type:
"""Infer a more precise return type for int.__neg__.
This is mainly used to infer the return type as LiteralType
if the original underlying object is a LiteralType object
"""
if isinstance(ctx.type, Instance) and ctx.type.last_known_value is not None:
value = ctx.type.last_known_value.value
fallback = ctx.type.last_known_value.fallback
if isinstance(value, int):
if is_literal_type_like(ctx.api.type_context[-1]):
return LiteralType(value=-value, fallback=fallback)
else:
return ctx.type.copy_modified(last_known_value=LiteralType(
value=-value,
fallback=ctx.type,
line=ctx.type.line,
column=ctx.type.column,
))
elif isinstance(ctx.type, LiteralType):
value = ctx.type.value
fallback = ctx.type.fallback
if isinstance(value, int):
return LiteralType(value=-value, fallback=fallback)
return ctx.default_return_type
def tuple_mul_callback(ctx: MethodContext) -> Type:
"""Infer a more precise return type for tuple.__mul__ and tuple.__rmul__.
This is used to return a specific sized tuple if multiplied by Literal int
"""
if not isinstance(ctx.type, TupleType):
return ctx.default_return_type
arg_type = get_proper_type(ctx.arg_types[0][0])
if isinstance(arg_type, Instance) and arg_type.last_known_value is not None:
value = arg_type.last_known_value.value
if isinstance(value, int):
return ctx.type.copy_modified(items=ctx.type.items * value)
    elif isinstance(arg_type, LiteralType):
value = arg_type.value
if isinstance(value, int):
return ctx.type.copy_modified(items=ctx.type.items * value)
return ctx.default_return_type
|
the-stack_0_24293 | # Copyright 2017 The Forseti Security Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A Dataset Resource.
See: https://cloud.google.com/bigquery/docs/reference/rest/v2/datasets
"""
import json
from google.cloud.forseti.common.gcp_type import resource
class DatasetLifecycleState(resource.LifecycleState):
"""Represents the dataset's LifecycleState."""
pass
class Dataset(resource.Resource):
"""Dataset resource."""
RESOURCE_NAME_FMT = 'datasets/%s'
def __init__(
self,
dataset_id,
full_name=None,
data=None,
name=None,
display_name=None,
parent=None,
locations=None,
lifecycle_state=DatasetLifecycleState.UNSPECIFIED):
"""Initialize.
Args:
dataset_id (int): The dataset id.
full_name (str): The full resource name and ancestry.
data (str): Resource representation of the dataset.
name (str): The dataset's unique GCP name, with the
format "datasets/{id}".
display_name (str): The dataset's display name.
locations (List[str]): Locations this dataset resides in. If set,
there should be exactly one element in the list.
parent (Resource): The parent Resource.
lifecycle_state (LifecycleState): The lifecycle state of the
dataset.
"""
super(Dataset, self).__init__(
resource_id=dataset_id,
resource_type=resource.ResourceType.DATASET,
name=name,
display_name=display_name,
parent=parent,
locations=locations,
lifecycle_state=lifecycle_state)
self.full_name = full_name
self.data = data
@classmethod
def from_json(cls, parent, json_string):
"""Create a dataset from a JSON string.
Args:
parent (Resource): resource this dataset belongs to.
json_string(str): JSON string of a dataset GCP API response.
Returns:
Dataset: dataset resource.
"""
dataset_dict = json.loads(json_string)
dataset_id = dataset_dict['id']
return cls(
parent=parent,
dataset_id=dataset_id,
full_name='{}dataset/{}/'.format(parent.full_name, dataset_id),
display_name=dataset_id,
# The default location if not specified is US
# https://cloud.google.com/bigquery/docs/reference/rest/v2/datasets
locations=[dataset_dict.get('location', 'US')],
data=json_string,
)
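        # Usage sketch (the `project` parent below is hypothetical):
        #
        #   payload = json.dumps({'id': 'my_dataset', 'location': 'EU'})
        #   dataset = Dataset.from_json(parent=project, json_string=payload)
        #   dataset.locations   # ['EU']
        #   dataset.full_name   # '<parent full_name>dataset/my_dataset/'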
|
the-stack_0_24295 | #!/usr/bin/env python
"""
This script is used to build "official" universal installers on macOS.
NEW for 3.7.0:
- support Intel 64-bit-only () and 32-bit-only installer builds
- use external Tcl/Tk 8.6 for 10.9+ builds
- deprecate use of explicit SDK (--sdk-path=) since all but the oldest
versions of Xcode support implicit setting of an SDK via environment
variables (SDKROOT and friends, see the xcrun man page for more info).
The SDK stuff was primarily needed for building universal installers
for 10.4; so as of 3.7.0, building installers for 10.4 is no longer
supported with build-installer.
- use generic "gcc" as compiler (CC env var) rather than "gcc-4.2"
TODO:
- support SDKROOT and DEVELOPER_DIR xcrun env variables
- test with 10.5 and 10.4 and determine support status
Please ensure that this script keeps working with Python 2.5, to avoid
bootstrap issues (/usr/bin/python is Python 2.5 on OSX 10.5). Doc builds
use current versions of Sphinx and require a reasonably current python3.
Sphinx and dependencies are installed into a venv using the python3's pip
so will fetch them from PyPI if necessary. Since python3 is now used for
Sphinx, build-installer.py should also be converted to use python3!
build-installer currently requires an installed third-party version of
Tcl/Tk 8.4 (for OS X 10.4 and 10.5 deployment targets), Tcl/TK 8.5
(for 10.6 or later), or Tcl/TK 8.6 (for 10.9 or later)
installed in /Library/Frameworks. When installed,
the Python built by this script will attempt to dynamically link first to
Tcl and Tk frameworks in /Library/Frameworks if available otherwise fall
back to the ones in /System/Library/Framework. For the build, we recommend
installing the most recent ActiveTcl 8.6. 8.5, or 8.4 version, depending
on the deployment target. The actual version linked to depends on the
path of /Library/Frameworks/{Tcl,Tk}.framework/Versions/Current.
Usage: see USAGE variable in the script.
"""
import platform, os, sys, getopt, textwrap, shutil, stat, time, pwd, grp
try:
import urllib2 as urllib_request
except ImportError:
import urllib.request as urllib_request
STAT_0o755 = ( stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR
| stat.S_IRGRP | stat.S_IXGRP
| stat.S_IROTH | stat.S_IXOTH )
STAT_0o775 = ( stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR
| stat.S_IRGRP | stat.S_IWGRP | stat.S_IXGRP
| stat.S_IROTH | stat.S_IXOTH )
INCLUDE_TIMESTAMP = 1
VERBOSE = 1
from plistlib import Plist
try:
from plistlib import writePlist
except ImportError:
# We're run using python2.3
def writePlist(plist, path):
plist.write(path)
def shellQuote(value):
"""
Return the string value in a form that can safely be inserted into
a shell command.
"""
return "'%s'"%(value.replace("'", "'\"'\"'"))
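# Worked example: shellQuote("it's") -> 'it'"'"'s'
# (the embedded quote is closed, re-emitted inside double quotes, then the
# single-quoting is reopened, so the shell reads the original it's back).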
def grepValue(fn, variable):
"""
Return the unquoted value of a variable from a file..
QUOTED_VALUE='quotes' -> str('quotes')
UNQUOTED_VALUE=noquotes -> str('noquotes')
"""
variable = variable + '='
for ln in open(fn, 'r'):
if ln.startswith(variable):
value = ln[len(variable):].strip()
return value.strip("\"'")
raise RuntimeError("Cannot find variable %s" % variable[:-1])
_cache_getVersion = None
def getVersion():
global _cache_getVersion
if _cache_getVersion is None:
_cache_getVersion = grepValue(
os.path.join(SRCDIR, 'configure'), 'PACKAGE_VERSION')
return _cache_getVersion
def getVersionMajorMinor():
return tuple([int(n) for n in getVersion().split('.', 2)])
_cache_getFullVersion = None
def getFullVersion():
global _cache_getFullVersion
if _cache_getFullVersion is not None:
return _cache_getFullVersion
fn = os.path.join(SRCDIR, 'Include', 'patchlevel.h')
for ln in open(fn):
if 'PY_VERSION' in ln:
_cache_getFullVersion = ln.split()[-1][1:-1]
return _cache_getFullVersion
raise RuntimeError("Cannot find full version??")
FW_PREFIX = ["Library", "Frameworks", "Python.framework"]
FW_VERSION_PREFIX = "--undefined--" # initialized in parseOptions
FW_SSL_DIRECTORY = "--undefined--" # initialized in parseOptions
# The directory we'll use to create the build (will be erased and recreated)
WORKDIR = "/tmp/_py"
# The directory we'll use to store third-party sources. Set this to something
# else if you don't want to re-fetch required libraries every time.
DEPSRC = os.path.join(WORKDIR, 'third-party')
DEPSRC = os.path.expanduser('~/Universal/other-sources')
universal_opts_map = { '32-bit': ('i386', 'ppc',),
'64-bit': ('x86_64', 'ppc64',),
'intel': ('i386', 'x86_64'),
'intel-32': ('i386',),
'intel-64': ('x86_64',),
'3-way': ('ppc', 'i386', 'x86_64'),
'all': ('i386', 'ppc', 'x86_64', 'ppc64',) }
default_target_map = {
'64-bit': '10.5',
'3-way': '10.5',
'intel': '10.5',
'intel-32': '10.4',
'intel-64': '10.5',
'all': '10.5',
}
UNIVERSALOPTS = tuple(universal_opts_map.keys())
UNIVERSALARCHS = '32-bit'
ARCHLIST = universal_opts_map[UNIVERSALARCHS]
# Source directory (assume we're in Mac/BuildScript)
SRCDIR = os.path.dirname(
os.path.dirname(
os.path.dirname(
os.path.abspath(__file__
))))
# $MACOSX_DEPLOYMENT_TARGET -> minimum OS X level
DEPTARGET = '10.5'
def getDeptargetTuple():
return tuple([int(n) for n in DEPTARGET.split('.')[0:2]])
def getTargetCompilers():
target_cc_map = {
'10.4': ('gcc-4.0', 'g++-4.0'),
'10.5': ('gcc', 'g++'),
'10.6': ('gcc', 'g++'),
}
    return target_cc_map.get(DEPTARGET, ('gcc', 'g++') )
CC, CXX = getTargetCompilers()
PYTHON_3 = getVersionMajorMinor() >= (3, 0)
USAGE = textwrap.dedent("""\
Usage: build_python [options]
Options:
-? or -h: Show this message
-b DIR
--build-dir=DIR: Create build here (default: %(WORKDIR)r)
--third-party=DIR: Store third-party sources here (default: %(DEPSRC)r)
--sdk-path=DIR: Location of the SDK (deprecated, use SDKROOT env variable)
--src-dir=DIR: Location of the Python sources (default: %(SRCDIR)r)
--dep-target=10.n macOS deployment target (default: %(DEPTARGET)r)
--universal-archs=x universal architectures (options: %(UNIVERSALOPTS)r, default: %(UNIVERSALARCHS)r)
""")% globals()
# Dict of object file names with shared library names to check after building.
# This is to ensure that we ended up dynamically linking with the shared
# library paths and versions we expected. For example:
# EXPECTED_SHARED_LIBS['_tkinter.so'] = [
# '/Library/Frameworks/Tcl.framework/Versions/8.5/Tcl',
# '/Library/Frameworks/Tk.framework/Versions/8.5/Tk']
EXPECTED_SHARED_LIBS = {}
# Are we building and linking with our own copy of Tcl/TK?
# For now, do so if deployment target is 10.9+.
def internalTk():
return getDeptargetTuple() >= (10, 9)
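# e.g. DEPTARGET '10.9' gives (10, 9) >= (10, 9), so a private Tcl/Tk 8.6 is
# built into the installer; '10.6' keeps the external /Library/Frameworks
# Tcl/Tk path that checkEnvironment() verifies below.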
# List of names of third party software built with this installer.
# The names will be inserted into the rtf version of the License.
THIRD_PARTY_LIBS = []
# Instructions for building libraries that are necessary for building a
# batteries included python.
# [The recipes are defined here for convenience but instantiated later after
# command line options have been processed.]
def library_recipes():
result = []
LT_10_5 = bool(getDeptargetTuple() < (10, 5))
# Since Apple removed the header files for the deprecated system
# OpenSSL as of the Xcode 7 release (for OS X 10.10+), we do not
# have much choice but to build our own copy here, too.
result.extend([
dict(
name="OpenSSL 1.1.0g",
url="https://www.openssl.org/source/openssl-1.1.0g.tar.gz",
checksum='ba5f1b8b835b88cadbce9b35ed9531a6',
buildrecipe=build_universal_openssl,
configure=None,
install=None,
),
])
if internalTk():
result.extend([
dict(
name="Tcl 8.6.7",
url="ftp://ftp.tcl.tk/pub/tcl//tcl8_6/tcl8.6.7-src.tar.gz",
checksum='5673aaf45b5de5d8dd80bb3daaeb8838',
buildDir="unix",
configure_pre=[
'--enable-shared',
'--enable-threads',
'--libdir=/Library/Frameworks/Python.framework/Versions/%s/lib'%(getVersion(),),
],
useLDFlags=False,
install='make TCL_LIBRARY=%(TCL_LIBRARY)s && make install TCL_LIBRARY=%(TCL_LIBRARY)s DESTDIR=%(DESTDIR)s'%{
"DESTDIR": shellQuote(os.path.join(WORKDIR, 'libraries')),
"TCL_LIBRARY": shellQuote('/Library/Frameworks/Python.framework/Versions/%s/lib/tcl8.6'%(getVersion())),
},
),
dict(
name="Tk 8.6.7",
url="ftp://ftp.tcl.tk/pub/tcl//tcl8_6/tk8.6.7-src.tar.gz",
checksum='46ea9c0165c515d87393700f4891ab6f',
buildDir="unix",
configure_pre=[
'--enable-aqua',
'--enable-shared',
'--enable-threads',
'--libdir=/Library/Frameworks/Python.framework/Versions/%s/lib'%(getVersion(),),
],
useLDFlags=False,
install='make TCL_LIBRARY=%(TCL_LIBRARY)s TK_LIBRARY=%(TK_LIBRARY)s && make install TCL_LIBRARY=%(TCL_LIBRARY)s TK_LIBRARY=%(TK_LIBRARY)s DESTDIR=%(DESTDIR)s'%{
"DESTDIR": shellQuote(os.path.join(WORKDIR, 'libraries')),
"TCL_LIBRARY": shellQuote('/Library/Frameworks/Python.framework/Versions/%s/lib/tcl8.6'%(getVersion())),
"TK_LIBRARY": shellQuote('/Library/Frameworks/Python.framework/Versions/%s/lib/tk8.6'%(getVersion())),
},
),
])
if PYTHON_3:
result.extend([
dict(
name="XZ 5.2.3",
url="http://tukaani.org/xz/xz-5.2.3.tar.gz",
checksum='ef68674fb47a8b8e741b34e429d86e9d',
configure_pre=[
'--disable-dependency-tracking',
]
),
])
result.extend([
dict(
name="NCurses 5.9",
url="http://ftp.gnu.org/pub/gnu/ncurses/ncurses-5.9.tar.gz",
checksum='8cb9c412e5f2d96bc6f459aa8c6282a1',
configure_pre=[
"--enable-widec",
"--without-cxx",
"--without-cxx-binding",
"--without-ada",
"--without-curses-h",
"--enable-shared",
"--with-shared",
"--without-debug",
"--without-normal",
"--without-tests",
"--without-manpages",
"--datadir=/usr/share",
"--sysconfdir=/etc",
"--sharedstatedir=/usr/com",
"--with-terminfo-dirs=/usr/share/terminfo",
"--with-default-terminfo-dir=/usr/share/terminfo",
"--libdir=/Library/Frameworks/Python.framework/Versions/%s/lib"%(getVersion(),),
],
patchscripts=[
("ftp://invisible-island.net/ncurses//5.9/ncurses-5.9-20120616-patch.sh.bz2",
"f54bf02a349f96a7c4f0d00922f3a0d4"),
],
useLDFlags=False,
install='make && make install DESTDIR=%s && cd %s/usr/local/lib && ln -fs ../../../Library/Frameworks/Python.framework/Versions/%s/lib/lib* .'%(
shellQuote(os.path.join(WORKDIR, 'libraries')),
shellQuote(os.path.join(WORKDIR, 'libraries')),
getVersion(),
),
),
dict(
name="SQLite 3.22.0",
url="https://www.sqlite.org/2018/sqlite-autoconf-3220000.tar.gz",
checksum='96b5648d542e8afa6ab7ffb8db8ddc3d',
extra_cflags=('-Os '
'-DSQLITE_ENABLE_FTS5 '
'-DSQLITE_ENABLE_FTS4 '
'-DSQLITE_ENABLE_FTS3_PARENTHESIS '
'-DSQLITE_ENABLE_JSON1 '
'-DSQLITE_ENABLE_RTREE '
'-DSQLITE_TCL=0 '
'%s' % ('','-DSQLITE_WITHOUT_ZONEMALLOC ')[LT_10_5]),
configure_pre=[
'--enable-threadsafe',
'--enable-shared=no',
'--enable-static=yes',
'--disable-readline',
'--disable-dependency-tracking',
]
),
])
if getDeptargetTuple() < (10, 5):
result.extend([
dict(
name="Bzip2 1.0.6",
url="http://bzip.org/1.0.6/bzip2-1.0.6.tar.gz",
checksum='00b516f4704d4a7cb50a1d97e6e8e15b',
configure=None,
install='make install CC=%s CXX=%s, PREFIX=%s/usr/local/ CFLAGS="-arch %s"'%(
CC, CXX,
shellQuote(os.path.join(WORKDIR, 'libraries')),
' -arch '.join(ARCHLIST),
),
),
dict(
name="ZLib 1.2.3",
url="http://www.gzip.org/zlib/zlib-1.2.3.tar.gz",
checksum='debc62758716a169df9f62e6ab2bc634',
configure=None,
install='make install CC=%s CXX=%s, prefix=%s/usr/local/ CFLAGS="-arch %s"'%(
CC, CXX,
shellQuote(os.path.join(WORKDIR, 'libraries')),
' -arch '.join(ARCHLIST),
),
),
dict(
# Note that GNU readline is GPL'd software
name="GNU Readline 6.1.2",
url="http://ftp.gnu.org/pub/gnu/readline/readline-6.1.tar.gz" ,
checksum='fc2f7e714fe792db1ce6ddc4c9fb4ef3',
patchlevel='0',
patches=[
# The readline maintainers don't do actual micro releases, but
# just ship a set of patches.
('http://ftp.gnu.org/pub/gnu/readline/readline-6.1-patches/readline61-001',
'c642f2e84d820884b0bf9fd176bc6c3f'),
('http://ftp.gnu.org/pub/gnu/readline/readline-6.1-patches/readline61-002',
'1a76781a1ea734e831588285db7ec9b1'),
]
),
])
if not PYTHON_3:
result.extend([
dict(
name="Sleepycat DB 4.7.25",
url="http://download.oracle.com/berkeley-db/db-4.7.25.tar.gz",
checksum='ec2b87e833779681a0c3a814aa71359e',
buildDir="build_unix",
configure="../dist/configure",
configure_pre=[
'--includedir=/usr/local/include/db4',
]
),
])
return result
# Instructions for building packages inside the .mpkg.
def pkg_recipes():
unselected_for_python3 = ('selected', 'unselected')[PYTHON_3]
result = [
dict(
name="PythonFramework",
long_name="Python Framework",
source="/Library/Frameworks/Python.framework",
readme="""\
This package installs Python.framework, that is the python
interpreter and the standard library.
""",
postflight="scripts/postflight.framework",
selected='selected',
),
dict(
name="PythonApplications",
long_name="GUI Applications",
source="/Applications/Python %(VER)s",
readme="""\
This package installs IDLE (an interactive Python IDE),
Python Launcher and Build Applet (create application bundles
from python scripts).
It also installs a number of examples and demos.
""",
required=False,
selected='selected',
),
dict(
name="PythonUnixTools",
long_name="UNIX command-line tools",
source="/usr/local/bin",
readme="""\
This package installs the unix tools in /usr/local/bin for
compatibility with older releases of Python. This package
is not necessary to use Python.
""",
required=False,
selected='selected',
),
dict(
name="PythonDocumentation",
long_name="Python Documentation",
topdir="/Library/Frameworks/Python.framework/Versions/%(VER)s/Resources/English.lproj/Documentation",
source="/pydocs",
readme="""\
This package installs the python documentation at a location
that is useable for pydoc and IDLE.
""",
postflight="scripts/postflight.documentation",
required=False,
selected='selected',
),
dict(
name="PythonProfileChanges",
long_name="Shell profile updater",
readme="""\
This packages updates your shell profile to make sure that
the Python tools are found by your shell in preference of
the system provided Python tools.
If you don't install this package you'll have to add
"/Library/Frameworks/Python.framework/Versions/%(VER)s/bin"
to your PATH by hand.
""",
postflight="scripts/postflight.patch-profile",
topdir="/Library/Frameworks/Python.framework",
source="/empty-dir",
required=False,
selected='selected',
),
dict(
name="PythonInstallPip",
long_name="Install or upgrade pip",
readme="""\
This package installs (or upgrades from an earlier version)
pip, a tool for installing and managing Python packages.
""",
postflight="scripts/postflight.ensurepip",
topdir="/Library/Frameworks/Python.framework",
source="/empty-dir",
required=False,
selected='selected',
),
]
return result
def fatal(msg):
"""
A fatal error, bail out.
"""
sys.stderr.write('FATAL: ')
sys.stderr.write(msg)
sys.stderr.write('\n')
sys.exit(1)
def fileContents(fn):
"""
Return the contents of the named file
"""
return open(fn, 'r').read()
def runCommand(commandline):
"""
Run a command and raise RuntimeError if it fails. Output is suppressed
unless the command fails.
"""
fd = os.popen(commandline, 'r')
data = fd.read()
xit = fd.close()
if xit is not None:
sys.stdout.write(data)
raise RuntimeError("command failed: %s"%(commandline,))
if VERBOSE:
sys.stdout.write(data); sys.stdout.flush()
def captureCommand(commandline):
fd = os.popen(commandline, 'r')
data = fd.read()
xit = fd.close()
if xit is not None:
sys.stdout.write(data)
raise RuntimeError("command failed: %s"%(commandline,))
return data
def getTclTkVersion(configfile, versionline):
"""
search Tcl or Tk configuration file for version line
"""
try:
f = open(configfile, "r")
except OSError:
fatal("Framework configuration file not found: %s" % configfile)
for l in f:
if l.startswith(versionline):
f.close()
return l
fatal("Version variable %s not found in framework configuration file: %s"
% (versionline, configfile))
def checkEnvironment():
"""
Check that we're running on a supported system.
"""
if sys.version_info[0:2] < (2, 5):
fatal("This script must be run with Python 2.5 (or later)")
if platform.system() != 'Darwin':
fatal("This script should be run on a macOS 10.5 (or later) system")
if int(platform.release().split('.')[0]) < 8:
fatal("This script should be run on a macOS 10.5 (or later) system")
# Because we only support dynamic load of only one major/minor version of
# Tcl/Tk, if we are not using building and using our own private copy of
# Tcl/Tk, ensure:
# 1. there is a user-installed framework (usually ActiveTcl) in (or linked
# in) SDKROOT/Library/Frameworks. As of Python 3.7.0, we no longer
# enforce that the version of the user-installed framework also
# exists in the system-supplied Tcl/Tk frameworks. Time to support
# Tcl/Tk 8.6 even if Apple does not.
if not internalTk():
frameworks = {}
for framework in ['Tcl', 'Tk']:
fwpth = 'Library/Frameworks/%s.framework/Versions/Current' % framework
libfw = os.path.join('/', fwpth)
usrfw = os.path.join(os.getenv('HOME'), fwpth)
frameworks[framework] = os.readlink(libfw)
if not os.path.exists(libfw):
fatal("Please install a link to a current %s %s as %s so "
"the user can override the system framework."
% (framework, frameworks[framework], libfw))
if os.path.exists(usrfw):
fatal("Please rename %s to avoid possible dynamic load issues."
% usrfw)
if frameworks['Tcl'] != frameworks['Tk']:
fatal("The Tcl and Tk frameworks are not the same version.")
print(" -- Building with external Tcl/Tk %s frameworks"
% frameworks['Tk'])
# add files to check after build
EXPECTED_SHARED_LIBS['_tkinter.so'] = [
"/Library/Frameworks/Tcl.framework/Versions/%s/Tcl"
% frameworks['Tcl'],
"/Library/Frameworks/Tk.framework/Versions/%s/Tk"
% frameworks['Tk'],
]
else:
print(" -- Building private copy of Tcl/Tk")
print("")
# Remove inherited environment variables which might influence build
environ_var_prefixes = ['CPATH', 'C_INCLUDE_', 'DYLD_', 'LANG', 'LC_',
'LD_', 'LIBRARY_', 'PATH', 'PYTHON']
for ev in list(os.environ):
for prefix in environ_var_prefixes:
if ev.startswith(prefix) :
print("INFO: deleting environment variable %s=%s" % (
ev, os.environ[ev]))
del os.environ[ev]
base_path = '/bin:/sbin:/usr/bin:/usr/sbin'
if 'SDK_TOOLS_BIN' in os.environ:
base_path = os.environ['SDK_TOOLS_BIN'] + ':' + base_path
# Xcode 2.5 on OS X 10.4 does not include SetFile in its usr/bin;
# add its fixed location here if it exists
OLD_DEVELOPER_TOOLS = '/Developer/Tools'
if os.path.isdir(OLD_DEVELOPER_TOOLS):
base_path = base_path + ':' + OLD_DEVELOPER_TOOLS
os.environ['PATH'] = base_path
print("Setting default PATH: %s"%(os.environ['PATH']))
# Ensure we have access to sphinx-build.
# You may have to create a link in /usr/bin for it.
runCommand('sphinx-build --version')
def parseOptions(args=None):
"""
Parse arguments and update global settings.
"""
global WORKDIR, DEPSRC, SRCDIR, DEPTARGET
global UNIVERSALOPTS, UNIVERSALARCHS, ARCHLIST, CC, CXX
global FW_VERSION_PREFIX
global FW_SSL_DIRECTORY
if args is None:
args = sys.argv[1:]
try:
options, args = getopt.getopt(args, '?hb',
[ 'build-dir=', 'third-party=', 'sdk-path=' , 'src-dir=',
'dep-target=', 'universal-archs=', 'help' ])
except getopt.GetoptError:
print(sys.exc_info()[1])
sys.exit(1)
if args:
print("Additional arguments")
sys.exit(1)
deptarget = None
for k, v in options:
if k in ('-h', '-?', '--help'):
print(USAGE)
sys.exit(0)
elif k in ('-d', '--build-dir'):
WORKDIR=v
elif k in ('--third-party',):
DEPSRC=v
elif k in ('--sdk-path',):
print(" WARNING: --sdk-path is no longer supported")
elif k in ('--src-dir',):
SRCDIR=v
elif k in ('--dep-target', ):
DEPTARGET=v
deptarget=v
elif k in ('--universal-archs', ):
if v in UNIVERSALOPTS:
UNIVERSALARCHS = v
ARCHLIST = universal_opts_map[UNIVERSALARCHS]
if deptarget is None:
# Select alternate default deployment
# target
DEPTARGET = default_target_map.get(v, '10.5')
else:
raise NotImplementedError(v)
else:
raise NotImplementedError(k)
SRCDIR=os.path.abspath(SRCDIR)
WORKDIR=os.path.abspath(WORKDIR)
DEPSRC=os.path.abspath(DEPSRC)
CC, CXX = getTargetCompilers()
FW_VERSION_PREFIX = FW_PREFIX[:] + ["Versions", getVersion()]
FW_SSL_DIRECTORY = FW_VERSION_PREFIX[:] + ["etc", "openssl"]
print("-- Settings:")
print(" * Source directory: %s" % SRCDIR)
print(" * Build directory: %s" % WORKDIR)
print(" * Third-party source: %s" % DEPSRC)
print(" * Deployment target: %s" % DEPTARGET)
print(" * Universal archs: %s" % str(ARCHLIST))
print(" * C compiler: %s" % CC)
print(" * C++ compiler: %s" % CXX)
print("")
print(" -- Building a Python %s framework at patch level %s"
% (getVersion(), getFullVersion()))
print("")
def extractArchive(builddir, archiveName):
"""
Extract a source archive into 'builddir'. Returns the path of the
extracted archive.
XXX: This function assumes that archives contain a toplevel directory
    that has the same name as the basename of the archive. This is
safe enough for almost anything we use. Unfortunately, it does not
work for current Tcl and Tk source releases where the basename of
the archive ends with "-src" but the uncompressed directory does not.
For now, just special case Tcl and Tk tar.gz downloads.
"""
curdir = os.getcwd()
try:
os.chdir(builddir)
if archiveName.endswith('.tar.gz'):
retval = os.path.basename(archiveName[:-7])
if ((retval.startswith('tcl') or retval.startswith('tk'))
and retval.endswith('-src')):
retval = retval[:-4]
if os.path.exists(retval):
shutil.rmtree(retval)
fp = os.popen("tar zxf %s 2>&1"%(shellQuote(archiveName),), 'r')
elif archiveName.endswith('.tar.bz2'):
retval = os.path.basename(archiveName[:-8])
if os.path.exists(retval):
shutil.rmtree(retval)
fp = os.popen("tar jxf %s 2>&1"%(shellQuote(archiveName),), 'r')
elif archiveName.endswith('.tar'):
retval = os.path.basename(archiveName[:-4])
if os.path.exists(retval):
shutil.rmtree(retval)
fp = os.popen("tar xf %s 2>&1"%(shellQuote(archiveName),), 'r')
elif archiveName.endswith('.zip'):
retval = os.path.basename(archiveName[:-4])
if os.path.exists(retval):
shutil.rmtree(retval)
fp = os.popen("unzip %s 2>&1"%(shellQuote(archiveName),), 'r')
data = fp.read()
xit = fp.close()
if xit is not None:
sys.stdout.write(data)
raise RuntimeError("Cannot extract %s"%(archiveName,))
return os.path.join(builddir, retval)
finally:
os.chdir(curdir)
def downloadURL(url, fname):
"""
Download the contents of the url into the file.
"""
fpIn = urllib_request.urlopen(url)
fpOut = open(fname, 'wb')
block = fpIn.read(10240)
try:
while block:
fpOut.write(block)
block = fpIn.read(10240)
fpIn.close()
fpOut.close()
except:
try:
os.unlink(fname)
except OSError:
pass
def verifyThirdPartyFile(url, checksum, fname):
"""
Download file from url to filename fname if it does not already exist.
    Abort if the file contents do not match the supplied md5 checksum.
"""
name = os.path.basename(fname)
if os.path.exists(fname):
print("Using local copy of %s"%(name,))
else:
print("Did not find local copy of %s"%(name,))
print("Downloading %s"%(name,))
downloadURL(url, fname)
print("Archive for %s stored as %s"%(name, fname))
if os.system(
'MD5=$(openssl md5 %s) ; test "${MD5##*= }" = "%s"'
% (shellQuote(fname), checksum) ):
fatal('MD5 checksum mismatch for file %s' % fname)
def build_universal_openssl(basedir, archList):
"""
Special case build recipe for universal build of openssl.
The upstream OpenSSL build system does not directly support
OS X universal builds. We need to build each architecture
separately then lipo them together into fat libraries.
"""
# OpenSSL fails to build with Xcode 2.5 (on OS X 10.4).
# If we are building on a 10.4.x or earlier system,
# unilaterally disable assembly code building to avoid the problem.
no_asm = int(platform.release().split(".")[0]) < 9
def build_openssl_arch(archbase, arch):
"Build one architecture of openssl"
arch_opts = {
"i386": ["darwin-i386-cc"],
"x86_64": ["darwin64-x86_64-cc", "enable-ec_nistp_64_gcc_128"],
"ppc": ["darwin-ppc-cc"],
"ppc64": ["darwin64-ppc-cc"],
}
configure_opts = [
"no-idea",
"no-mdc2",
"no-rc5",
"no-zlib",
"no-ssl3",
# "enable-unit-test",
"shared",
"--prefix=%s"%os.path.join("/", *FW_VERSION_PREFIX),
"--openssldir=%s"%os.path.join("/", *FW_SSL_DIRECTORY),
]
if no_asm:
configure_opts.append("no-asm")
runCommand(" ".join(["perl", "Configure"]
+ arch_opts[arch] + configure_opts))
runCommand("make depend")
runCommand("make all")
runCommand("make install_sw DESTDIR=%s"%shellQuote(archbase))
# runCommand("make test")
return
srcdir = os.getcwd()
universalbase = os.path.join(srcdir, "..",
os.path.basename(srcdir) + "-universal")
os.mkdir(universalbase)
archbasefws = []
for arch in archList:
# fresh copy of the source tree
archsrc = os.path.join(universalbase, arch, "src")
shutil.copytree(srcdir, archsrc, symlinks=True)
# install base for this arch
archbase = os.path.join(universalbase, arch, "root")
os.mkdir(archbase)
# Python framework base within install_prefix:
# the build will install into this framework..
# This is to ensure that the resulting shared libs have
# the desired real install paths built into them.
archbasefw = os.path.join(archbase, *FW_VERSION_PREFIX)
# build one architecture
os.chdir(archsrc)
build_openssl_arch(archbase, arch)
os.chdir(srcdir)
archbasefws.append(archbasefw)
# copy arch-independent files from last build into the basedir framework
basefw = os.path.join(basedir, *FW_VERSION_PREFIX)
shutil.copytree(
os.path.join(archbasefw, "include", "openssl"),
os.path.join(basefw, "include", "openssl")
)
shlib_version_number = grepValue(os.path.join(archsrc, "Makefile"),
"SHLIB_VERSION_NUMBER")
# e.g. -> "1.0.0"
libcrypto = "libcrypto.dylib"
libcrypto_versioned = libcrypto.replace(".", "."+shlib_version_number+".")
# e.g. -> "libcrypto.1.0.0.dylib"
libssl = "libssl.dylib"
libssl_versioned = libssl.replace(".", "."+shlib_version_number+".")
# e.g. -> "libssl.1.0.0.dylib"
try:
os.mkdir(os.path.join(basefw, "lib"))
except OSError:
pass
# merge the individual arch-dependent shared libs into a fat shared lib
archbasefws.insert(0, basefw)
for (lib_unversioned, lib_versioned) in [
(libcrypto, libcrypto_versioned),
(libssl, libssl_versioned)
]:
runCommand("lipo -create -output " +
" ".join(shellQuote(
os.path.join(fw, "lib", lib_versioned))
for fw in archbasefws))
# and create an unversioned symlink of it
os.symlink(lib_versioned, os.path.join(basefw, "lib", lib_unversioned))
# Create links in the temp include and lib dirs that will be injected
# into the Python build so that setup.py can find them while building
# and the versioned links so that the setup.py post-build import test
# does not fail.
relative_path = os.path.join("..", "..", "..", *FW_VERSION_PREFIX)
for fn in [
["include", "openssl"],
["lib", libcrypto],
["lib", libssl],
["lib", libcrypto_versioned],
["lib", libssl_versioned],
]:
os.symlink(
os.path.join(relative_path, *fn),
os.path.join(basedir, "usr", "local", *fn)
)
return
def buildRecipe(recipe, basedir, archList):
"""
Build software using a recipe. This function does the
'configure;make;make install' dance for C software, with a possibility
    to customize this process, basically a poor-man's DarwinPorts.
"""
curdir = os.getcwd()
name = recipe['name']
THIRD_PARTY_LIBS.append(name)
url = recipe['url']
configure = recipe.get('configure', './configure')
buildrecipe = recipe.get('buildrecipe', None)
install = recipe.get('install', 'make && make install DESTDIR=%s'%(
shellQuote(basedir)))
archiveName = os.path.split(url)[-1]
sourceArchive = os.path.join(DEPSRC, archiveName)
if not os.path.exists(DEPSRC):
os.mkdir(DEPSRC)
verifyThirdPartyFile(url, recipe['checksum'], sourceArchive)
print("Extracting archive for %s"%(name,))
buildDir=os.path.join(WORKDIR, '_bld')
if not os.path.exists(buildDir):
os.mkdir(buildDir)
workDir = extractArchive(buildDir, sourceArchive)
os.chdir(workDir)
for patch in recipe.get('patches', ()):
if isinstance(patch, tuple):
url, checksum = patch
fn = os.path.join(DEPSRC, os.path.basename(url))
verifyThirdPartyFile(url, checksum, fn)
else:
# patch is a file in the source directory
fn = os.path.join(curdir, patch)
runCommand('patch -p%s < %s'%(recipe.get('patchlevel', 1),
shellQuote(fn),))
for patchscript in recipe.get('patchscripts', ()):
if isinstance(patchscript, tuple):
url, checksum = patchscript
fn = os.path.join(DEPSRC, os.path.basename(url))
verifyThirdPartyFile(url, checksum, fn)
else:
# patch is a file in the source directory
fn = os.path.join(curdir, patchscript)
if fn.endswith('.bz2'):
runCommand('bunzip2 -fk %s' % shellQuote(fn))
fn = fn[:-4]
runCommand('sh %s' % shellQuote(fn))
os.unlink(fn)
if 'buildDir' in recipe:
os.chdir(recipe['buildDir'])
if configure is not None:
configure_args = [
"--prefix=/usr/local",
"--enable-static",
"--disable-shared",
#"CPP=gcc -arch %s -E"%(' -arch '.join(archList,),),
]
if 'configure_pre' in recipe:
args = list(recipe['configure_pre'])
if '--disable-static' in args:
configure_args.remove('--enable-static')
if '--enable-shared' in args:
configure_args.remove('--disable-shared')
configure_args.extend(args)
if recipe.get('useLDFlags', 1):
configure_args.extend([
"CFLAGS=%s-mmacosx-version-min=%s -arch %s "
"-I%s/usr/local/include"%(
recipe.get('extra_cflags', ''),
DEPTARGET,
' -arch '.join(archList),
shellQuote(basedir)[1:-1],),
"LDFLAGS=-mmacosx-version-min=%s -L%s/usr/local/lib -arch %s"%(
DEPTARGET,
shellQuote(basedir)[1:-1],
' -arch '.join(archList)),
])
else:
configure_args.extend([
"CFLAGS=%s-mmacosx-version-min=%s -arch %s "
"-I%s/usr/local/include"%(
recipe.get('extra_cflags', ''),
DEPTARGET,
' -arch '.join(archList),
shellQuote(basedir)[1:-1],),
])
if 'configure_post' in recipe:
configure_args = configure_args + list(recipe['configure_post'])
configure_args.insert(0, configure)
configure_args = [ shellQuote(a) for a in configure_args ]
print("Running configure for %s"%(name,))
runCommand(' '.join(configure_args) + ' 2>&1')
if buildrecipe is not None:
# call special-case build recipe, e.g. for openssl
buildrecipe(basedir, archList)
if install is not None:
print("Running install for %s"%(name,))
runCommand('{ ' + install + ' ;} 2>&1')
print("Done %s"%(name,))
print("")
os.chdir(curdir)
def buildLibraries():
"""
Build our dependencies into $WORKDIR/libraries/usr/local
"""
print("")
print("Building required libraries")
print("")
universal = os.path.join(WORKDIR, 'libraries')
os.mkdir(universal)
os.makedirs(os.path.join(universal, 'usr', 'local', 'lib'))
os.makedirs(os.path.join(universal, 'usr', 'local', 'include'))
for recipe in library_recipes():
buildRecipe(recipe, universal, ARCHLIST)
def buildPythonDocs():
# This stores the documentation as Resources/English.lproj/Documentation
# inside the framework. pydoc and IDLE will pick it up there.
print("Install python documentation")
rootDir = os.path.join(WORKDIR, '_root')
buildDir = os.path.join('../../Doc')
docdir = os.path.join(rootDir, 'pydocs')
curDir = os.getcwd()
os.chdir(buildDir)
runCommand('make clean')
# Create virtual environment for docs builds with blurb and sphinx
runCommand('make venv')
runCommand('make html PYTHON=venv/bin/python')
os.chdir(curDir)
if not os.path.exists(docdir):
os.mkdir(docdir)
os.rename(os.path.join(buildDir, 'build', 'html'), docdir)
def buildPython():
print("Building a universal python for %s architectures" % UNIVERSALARCHS)
buildDir = os.path.join(WORKDIR, '_bld', 'python')
rootDir = os.path.join(WORKDIR, '_root')
if os.path.exists(buildDir):
shutil.rmtree(buildDir)
if os.path.exists(rootDir):
shutil.rmtree(rootDir)
os.makedirs(buildDir)
os.makedirs(rootDir)
os.makedirs(os.path.join(rootDir, 'empty-dir'))
curdir = os.getcwd()
os.chdir(buildDir)
# Extract the version from the configure file, needed to calculate
# several paths.
version = getVersion()
# Since the extra libs are not in their installed framework location
# during the build, augment the library path so that the interpreter
# will find them during its extension import sanity checks.
os.environ['DYLD_LIBRARY_PATH'] = os.path.join(WORKDIR,
'libraries', 'usr', 'local', 'lib')
print("Running configure...")
runCommand("%s -C --enable-framework --enable-universalsdk=/ "
"--with-universal-archs=%s "
"%s "
"%s "
"%s "
"%s "
"LDFLAGS='-g -L%s/libraries/usr/local/lib' "
"CFLAGS='-g -I%s/libraries/usr/local/include' 2>&1"%(
shellQuote(os.path.join(SRCDIR, 'configure')),
UNIVERSALARCHS,
(' ', '--with-computed-gotos ')[PYTHON_3],
(' ', '--without-ensurepip ')[PYTHON_3],
(' ', "--with-tcltk-includes='-I%s/libraries/usr/local/include'"%(
shellQuote(WORKDIR)[1:-1],))[internalTk()],
(' ', "--with-tcltk-libs='-L%s/libraries/usr/local/lib -ltcl8.6 -ltk8.6'"%(
shellQuote(WORKDIR)[1:-1],))[internalTk()],
shellQuote(WORKDIR)[1:-1],
shellQuote(WORKDIR)[1:-1]))
# Look for environment value BUILDINSTALLER_BUILDPYTHON_MAKE_EXTRAS
# and, if defined, append its value to the make command. This allows
# us to pass in version control tags, like GITTAG, to a build from a
# tarball rather than from a vcs checkout, thus eliminating the need
# to have a working copy of the vcs program on the build machine.
#
# A typical use might be:
# export BUILDINSTALLER_BUILDPYTHON_MAKE_EXTRAS=" \
# GITVERSION='echo 123456789a' \
# GITTAG='echo v3.6.0' \
# GITBRANCH='echo 3.6'"
make_extras = os.getenv("BUILDINSTALLER_BUILDPYTHON_MAKE_EXTRAS")
if make_extras:
make_cmd = "make " + make_extras
else:
make_cmd = "make"
print("Running " + make_cmd)
runCommand(make_cmd)
print("Running make install")
runCommand("make install DESTDIR=%s"%(
shellQuote(rootDir)))
print("Running make frameworkinstallextras")
runCommand("make frameworkinstallextras DESTDIR=%s"%(
shellQuote(rootDir)))
del os.environ['DYLD_LIBRARY_PATH']
print("Copying required shared libraries")
if os.path.exists(os.path.join(WORKDIR, 'libraries', 'Library')):
build_lib_dir = os.path.join(
WORKDIR, 'libraries', 'Library', 'Frameworks',
'Python.framework', 'Versions', getVersion(), 'lib')
fw_lib_dir = os.path.join(
WORKDIR, '_root', 'Library', 'Frameworks',
'Python.framework', 'Versions', getVersion(), 'lib')
if internalTk():
# move Tcl and Tk pkgconfig files
runCommand("mv %s/pkgconfig/* %s/pkgconfig"%(
shellQuote(build_lib_dir),
shellQuote(fw_lib_dir) ))
runCommand("rm -r %s/pkgconfig"%(
shellQuote(build_lib_dir), ))
runCommand("mv %s/* %s"%(
shellQuote(build_lib_dir),
shellQuote(fw_lib_dir) ))
frmDir = os.path.join(rootDir, 'Library', 'Frameworks', 'Python.framework')
frmDirVersioned = os.path.join(frmDir, 'Versions', version)
path_to_lib = os.path.join(frmDirVersioned, 'lib', 'python%s'%(version,))
# create directory for OpenSSL certificates
sslDir = os.path.join(frmDirVersioned, 'etc', 'openssl')
os.makedirs(sslDir)
print("Fix file modes")
gid = grp.getgrnam('admin').gr_gid
shared_lib_error = False
for dirpath, dirnames, filenames in os.walk(frmDir):
for dn in dirnames:
os.chmod(os.path.join(dirpath, dn), STAT_0o775)
os.chown(os.path.join(dirpath, dn), -1, gid)
for fn in filenames:
if os.path.islink(fn):
continue
# "chmod g+w $fn"
p = os.path.join(dirpath, fn)
st = os.stat(p)
os.chmod(p, stat.S_IMODE(st.st_mode) | stat.S_IWGRP)
os.chown(p, -1, gid)
if fn in EXPECTED_SHARED_LIBS:
# check to see that this file was linked with the
# expected library path and version
data = captureCommand("otool -L %s" % shellQuote(p))
for sl in EXPECTED_SHARED_LIBS[fn]:
if ("\t%s " % sl) not in data:
print("Expected shared lib %s was not linked with %s"
% (sl, p))
shared_lib_error = True
if shared_lib_error:
fatal("Unexpected shared library errors.")
if PYTHON_3:
LDVERSION=None
VERSION=None
ABIFLAGS=None
fp = open(os.path.join(buildDir, 'Makefile'), 'r')
for ln in fp:
if ln.startswith('VERSION='):
VERSION=ln.split()[1]
if ln.startswith('ABIFLAGS='):
ABIFLAGS=ln.split()[1]
if ln.startswith('LDVERSION='):
LDVERSION=ln.split()[1]
fp.close()
LDVERSION = LDVERSION.replace('$(VERSION)', VERSION)
LDVERSION = LDVERSION.replace('$(ABIFLAGS)', ABIFLAGS)
config_suffix = '-' + LDVERSION
if getVersionMajorMinor() >= (3, 6):
config_suffix = config_suffix + '-darwin'
else:
config_suffix = '' # Python 2.x
# We added some directories to the search path during the configure
# phase. Remove those because those directories won't be there on
# the end-users system. Also remove the directories from _sysconfigdata.py
# (added in 3.3) if it exists.
include_path = '-I%s/libraries/usr/local/include' % (WORKDIR,)
lib_path = '-L%s/libraries/usr/local/lib' % (WORKDIR,)
# fix Makefile
path = os.path.join(path_to_lib, 'config' + config_suffix, 'Makefile')
fp = open(path, 'r')
data = fp.read()
fp.close()
for p in (include_path, lib_path):
data = data.replace(" " + p, '')
data = data.replace(p + " ", '')
fp = open(path, 'w')
fp.write(data)
fp.close()
# fix _sysconfigdata
#
# TODO: make this more robust! test_sysconfig_module of
# distutils.tests.test_sysconfig.SysconfigTestCase tests that
# the output from get_config_var in both sysconfig and
# distutils.sysconfig is exactly the same for both CFLAGS and
# LDFLAGS. The fixing up is now complicated by the pretty
# printing in _sysconfigdata.py. Also, we are using the
# pprint from the Python running the installer build which
# may not cosmetically format the same as the pprint in the Python
# being built (and which is used to originally generate
# _sysconfigdata.py).
import pprint
if getVersionMajorMinor() >= (3, 6):
# XXX this is extra-fragile
path = os.path.join(path_to_lib, '_sysconfigdata_m_darwin_darwin.py')
else:
path = os.path.join(path_to_lib, '_sysconfigdata.py')
fp = open(path, 'r')
data = fp.read()
fp.close()
# create build_time_vars dict
exec(data)
vars = {}
for k, v in build_time_vars.items():
if type(v) == type(''):
for p in (include_path, lib_path):
v = v.replace(' ' + p, '')
v = v.replace(p + ' ', '')
vars[k] = v
fp = open(path, 'w')
# duplicated from sysconfig._generate_posix_vars()
fp.write('# system configuration generated and used by'
' the sysconfig module\n')
fp.write('build_time_vars = ')
pprint.pprint(vars, stream=fp)
fp.close()
# Add symlinks in /usr/local/bin, using relative links
usr_local_bin = os.path.join(rootDir, 'usr', 'local', 'bin')
to_framework = os.path.join('..', '..', '..', 'Library', 'Frameworks',
'Python.framework', 'Versions', version, 'bin')
if os.path.exists(usr_local_bin):
shutil.rmtree(usr_local_bin)
os.makedirs(usr_local_bin)
for fn in os.listdir(
os.path.join(frmDir, 'Versions', version, 'bin')):
os.symlink(os.path.join(to_framework, fn),
os.path.join(usr_local_bin, fn))
os.chdir(curdir)
if PYTHON_3:
# Remove the 'Current' link, that way we don't accidentally mess
# with an already installed version of python 2
os.unlink(os.path.join(rootDir, 'Library', 'Frameworks',
'Python.framework', 'Versions', 'Current'))
def patchFile(inPath, outPath):
data = fileContents(inPath)
data = data.replace('$FULL_VERSION', getFullVersion())
data = data.replace('$VERSION', getVersion())
data = data.replace('$MACOSX_DEPLOYMENT_TARGET', ''.join((DEPTARGET, ' or later')))
data = data.replace('$ARCHITECTURES', ", ".join(universal_opts_map[UNIVERSALARCHS]))
data = data.replace('$INSTALL_SIZE', installSize())
data = data.replace('$THIRD_PARTY_LIBS', "\\\n".join(THIRD_PARTY_LIBS))
# This one is not handy as a template variable
data = data.replace('$PYTHONFRAMEWORKINSTALLDIR', '/Library/Frameworks/Python.framework')
fp = open(outPath, 'w')
fp.write(data)
fp.close()
def patchScript(inPath, outPath):
major, minor = getVersionMajorMinor()
data = fileContents(inPath)
data = data.replace('@PYMAJOR@', str(major))
data = data.replace('@PYVER@', getVersion())
fp = open(outPath, 'w')
fp.write(data)
fp.close()
os.chmod(outPath, STAT_0o755)
def packageFromRecipe(targetDir, recipe):
curdir = os.getcwd()
try:
# The major version (such as 2.5) is included in the package name
# because having two version of python installed at the same time is
# common.
pkgname = '%s-%s'%(recipe['name'], getVersion())
srcdir = recipe.get('source')
pkgroot = recipe.get('topdir', srcdir)
postflight = recipe.get('postflight')
readme = textwrap.dedent(recipe['readme'])
isRequired = recipe.get('required', True)
print("- building package %s"%(pkgname,))
# Substitute some variables
textvars = dict(
VER=getVersion(),
FULLVER=getFullVersion(),
)
readme = readme % textvars
if pkgroot is not None:
pkgroot = pkgroot % textvars
else:
pkgroot = '/'
if srcdir is not None:
srcdir = os.path.join(WORKDIR, '_root', srcdir[1:])
srcdir = srcdir % textvars
if postflight is not None:
postflight = os.path.abspath(postflight)
packageContents = os.path.join(targetDir, pkgname + '.pkg', 'Contents')
os.makedirs(packageContents)
if srcdir is not None:
os.chdir(srcdir)
runCommand("pax -wf %s . 2>&1"%(shellQuote(os.path.join(packageContents, 'Archive.pax')),))
runCommand("gzip -9 %s 2>&1"%(shellQuote(os.path.join(packageContents, 'Archive.pax')),))
runCommand("mkbom . %s 2>&1"%(shellQuote(os.path.join(packageContents, 'Archive.bom')),))
fn = os.path.join(packageContents, 'PkgInfo')
fp = open(fn, 'w')
fp.write('pmkrpkg1')
fp.close()
rsrcDir = os.path.join(packageContents, "Resources")
os.mkdir(rsrcDir)
fp = open(os.path.join(rsrcDir, 'ReadMe.txt'), 'w')
fp.write(readme)
fp.close()
if postflight is not None:
patchScript(postflight, os.path.join(rsrcDir, 'postflight'))
vers = getFullVersion()
major, minor = getVersionMajorMinor()
pl = Plist(
CFBundleGetInfoString="Python.%s %s"%(pkgname, vers,),
CFBundleIdentifier='org.python.Python.%s'%(pkgname,),
CFBundleName='Python.%s'%(pkgname,),
CFBundleShortVersionString=vers,
IFMajorVersion=major,
IFMinorVersion=minor,
IFPkgFormatVersion=0.10000000149011612,
IFPkgFlagAllowBackRev=False,
IFPkgFlagAuthorizationAction="RootAuthorization",
IFPkgFlagDefaultLocation=pkgroot,
IFPkgFlagFollowLinks=True,
IFPkgFlagInstallFat=True,
IFPkgFlagIsRequired=isRequired,
IFPkgFlagOverwritePermissions=False,
IFPkgFlagRelocatable=False,
IFPkgFlagRestartAction="NoRestart",
IFPkgFlagRootVolumeOnly=True,
IFPkgFlagUpdateInstalledLangauges=False,
)
writePlist(pl, os.path.join(packageContents, 'Info.plist'))
pl = Plist(
IFPkgDescriptionDescription=readme,
IFPkgDescriptionTitle=recipe.get('long_name', "Python.%s"%(pkgname,)),
IFPkgDescriptionVersion=vers,
)
writePlist(pl, os.path.join(packageContents, 'Resources', 'Description.plist'))
finally:
os.chdir(curdir)
def makeMpkgPlist(path):
vers = getFullVersion()
major, minor = getVersionMajorMinor()
pl = Plist(
CFBundleGetInfoString="Python %s"%(vers,),
CFBundleIdentifier='org.python.Python',
CFBundleName='Python',
CFBundleShortVersionString=vers,
IFMajorVersion=major,
IFMinorVersion=minor,
IFPkgFlagComponentDirectory="Contents/Packages",
IFPkgFlagPackageList=[
dict(
IFPkgFlagPackageLocation='%s-%s.pkg'%(item['name'], getVersion()),
IFPkgFlagPackageSelection=item.get('selected', 'selected'),
)
for item in pkg_recipes()
],
IFPkgFormatVersion=0.10000000149011612,
IFPkgFlagBackgroundScaling="proportional",
IFPkgFlagBackgroundAlignment="left",
IFPkgFlagAuthorizationAction="RootAuthorization",
)
writePlist(pl, path)
def buildInstaller():
# Zap all compiled files
for dirpath, _, filenames in os.walk(os.path.join(WORKDIR, '_root')):
for fn in filenames:
if fn.endswith('.pyc') or fn.endswith('.pyo'):
os.unlink(os.path.join(dirpath, fn))
outdir = os.path.join(WORKDIR, 'installer')
if os.path.exists(outdir):
shutil.rmtree(outdir)
os.mkdir(outdir)
pkgroot = os.path.join(outdir, 'Python.mpkg', 'Contents')
pkgcontents = os.path.join(pkgroot, 'Packages')
os.makedirs(pkgcontents)
for recipe in pkg_recipes():
packageFromRecipe(pkgcontents, recipe)
rsrcDir = os.path.join(pkgroot, 'Resources')
fn = os.path.join(pkgroot, 'PkgInfo')
fp = open(fn, 'w')
fp.write('pmkrpkg1')
fp.close()
os.mkdir(rsrcDir)
makeMpkgPlist(os.path.join(pkgroot, 'Info.plist'))
pl = Plist(
IFPkgDescriptionTitle="Python",
IFPkgDescriptionVersion=getVersion(),
)
writePlist(pl, os.path.join(pkgroot, 'Resources', 'Description.plist'))
for fn in os.listdir('resources'):
if fn == '.svn': continue
if fn.endswith('.jpg'):
shutil.copy(os.path.join('resources', fn), os.path.join(rsrcDir, fn))
else:
patchFile(os.path.join('resources', fn), os.path.join(rsrcDir, fn))
def installSize(clear=False, _saved=[]):
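    # The mutable default argument _saved acts as a per-process cache so the "du"
    # measurement below runs only once; calling installSize(clear=True) resets it.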
if clear:
del _saved[:]
if not _saved:
data = captureCommand("du -ks %s"%(
shellQuote(os.path.join(WORKDIR, '_root'))))
_saved.append("%d"%((0.5 + (int(data.split()[0]) / 1024.0)),))
return _saved[0]
def buildDMG():
"""
Create DMG containing the rootDir.
"""
outdir = os.path.join(WORKDIR, 'diskimage')
if os.path.exists(outdir):
shutil.rmtree(outdir)
imagepath = os.path.join(outdir,
'python-%s-macosx%s'%(getFullVersion(),DEPTARGET))
if INCLUDE_TIMESTAMP:
imagepath = imagepath + '-%04d-%02d-%02d'%(time.localtime()[:3])
imagepath = imagepath + '.dmg'
os.mkdir(outdir)
volname='Python %s'%(getFullVersion())
runCommand("hdiutil create -format UDRW -volname %s -srcfolder %s %s"%(
shellQuote(volname),
shellQuote(os.path.join(WORKDIR, 'installer')),
shellQuote(imagepath + ".tmp.dmg" )))
if not os.path.exists(os.path.join(WORKDIR, "mnt")):
os.mkdir(os.path.join(WORKDIR, "mnt"))
runCommand("hdiutil attach %s -mountroot %s"%(
shellQuote(imagepath + ".tmp.dmg"), shellQuote(os.path.join(WORKDIR, "mnt"))))
# Custom icon for the DMG, shown when the DMG is mounted.
shutil.copy("../Icons/Disk Image.icns",
os.path.join(WORKDIR, "mnt", volname, ".VolumeIcon.icns"))
runCommand("SetFile -a C %s/"%(
shellQuote(os.path.join(WORKDIR, "mnt", volname)),))
runCommand("hdiutil detach %s"%(shellQuote(os.path.join(WORKDIR, "mnt", volname))))
setIcon(imagepath + ".tmp.dmg", "../Icons/Disk Image.icns")
runCommand("hdiutil convert %s -format UDZO -o %s"%(
shellQuote(imagepath + ".tmp.dmg"), shellQuote(imagepath)))
setIcon(imagepath, "../Icons/Disk Image.icns")
os.unlink(imagepath + ".tmp.dmg")
return imagepath
def setIcon(filePath, icnsPath):
"""
Set the custom icon for the specified file or directory.
"""
dirPath = os.path.normpath(os.path.dirname(__file__))
toolPath = os.path.join(dirPath, "seticon.app/Contents/MacOS/seticon")
if not os.path.exists(toolPath) or os.stat(toolPath).st_mtime < os.stat(dirPath + '/seticon.m').st_mtime:
# NOTE: The tool is created inside an .app bundle, otherwise it won't work due
# to connections to the window server.
appPath = os.path.join(dirPath, "seticon.app/Contents/MacOS")
if not os.path.exists(appPath):
os.makedirs(appPath)
runCommand("cc -o %s %s/seticon.m -framework Cocoa"%(
shellQuote(toolPath), shellQuote(dirPath)))
runCommand("%s %s %s"%(shellQuote(os.path.abspath(toolPath)), shellQuote(icnsPath),
shellQuote(filePath)))
def main():
# First parse options and check if we can perform our work
parseOptions()
checkEnvironment()
os.environ['MACOSX_DEPLOYMENT_TARGET'] = DEPTARGET
os.environ['CC'] = CC
os.environ['CXX'] = CXX
if os.path.exists(WORKDIR):
shutil.rmtree(WORKDIR)
os.mkdir(WORKDIR)
os.environ['LC_ALL'] = 'C'
# Then build third-party libraries such as sleepycat DB4.
buildLibraries()
# Now build python itself
buildPython()
# And then build the documentation
# Remove the Deployment Target from the shell
# environment, it's no longer needed and
# an unexpected build target can cause problems
# when Sphinx and its dependencies need to
# be (re-)installed.
del os.environ['MACOSX_DEPLOYMENT_TARGET']
buildPythonDocs()
# Prepare the applications folder
folder = os.path.join(WORKDIR, "_root", "Applications", "Python %s"%(
getVersion(),))
fn = os.path.join(folder, "License.rtf")
patchFile("resources/License.rtf", fn)
fn = os.path.join(folder, "ReadMe.rtf")
patchFile("resources/ReadMe.rtf", fn)
fn = os.path.join(folder, "Update Shell Profile.command")
patchScript("scripts/postflight.patch-profile", fn)
fn = os.path.join(folder, "Install Certificates.command")
patchScript("resources/install_certificates.command", fn)
os.chmod(folder, STAT_0o755)
setIcon(folder, "../Icons/Python Folder.icns")
# Create the installer
buildInstaller()
# And copy the readme into the directory containing the installer
patchFile('resources/ReadMe.rtf',
os.path.join(WORKDIR, 'installer', 'ReadMe.rtf'))
# Ditto for the license file.
patchFile('resources/License.rtf',
os.path.join(WORKDIR, 'installer', 'License.rtf'))
fp = open(os.path.join(WORKDIR, 'installer', 'Build.txt'), 'w')
fp.write("# BUILD INFO\n")
fp.write("# Date: %s\n" % time.ctime())
fp.write("# By: %s\n" % pwd.getpwuid(os.getuid()).pw_gecos)
fp.close()
# And copy it to a DMG
buildDMG()
if __name__ == "__main__":
main()
|
the-stack_0_24298 | import dash
import dash_bootstrap_components as dbc
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
import plotly.express as px
import plotly.graph_objs as go
import requests
import pandas as pd
import numpy as np
import datetime
from app import app
from uszipcode import SearchEngine
from dash_dangerously_set_inner_html import DangerouslySetInnerHTML as rawHtml
from . import BASE_URL
TITLE = html.Div(html.H1('Search Earthquakes'), style={'text-align': 'center',
'margin-top': '40px'})
source_col = dbc.Col([
dcc.Markdown('Choose a source'),
html.Div([
dcc.Dropdown(
id='Searchsource',
options=[
{'label': 'USGS', 'value': 'USGS'},
{'label': 'EMSC', 'value': 'EMSC'},
],
value='USGS'
), # ends dropdown
]) # ends div
]) # ends col1
type_col = dbc.Col([
dcc.Markdown('Search By:'),
html.Div(
dcc.Dropdown(
id='searchtype',
options=[
{'label': 'Zipcode', 'value': 'zip'},
{'label': 'Time', 'value': 'time'}
],
value='zip'
)
)
]) # ends col2
search_fields = [
dbc.Col([dcc.Markdown('Enter Zipcode:'),
dcc.Input(
id='frame',
type='number',
minLength=5,
maxLength=5,
value=10001
)]),
dbc.Col([
dcc.Markdown('Select Distance:'),
dcc.Slider(
id='amount',
min=0,
max=100,
step=10,
value=20
),
html.Div(id='amountCol')]),
]
# sets the two search parameters based on selection
@app.callback(
dash.dependencies.Output('searchArea', 'children'),
[dash.dependencies.Input('searchtype', 'value')])
def search_type(search):
if search == 'time':
search_fields = [
dbc.Col([dcc.Markdown('Select Timeframe:'),
dcc.Dropdown(
id='frame',
options=[
{'label': 'Last Quake', 'value': 'Quake'},
{'label': 'Last Hour', 'value': '/hour'},
{'label': 'Last Day', 'value': '/day'},
{'label': 'Last Week', 'value': '/week'},
{'label': 'Last Month', 'value': '/month'}
],
value='Quake'
)]),
dbc.Col([dcc.Markdown('Minimum Magnitude:'),
dcc.Slider(
id='amount',
min=0,
max=10,
step=.5,
value=5.5
),
html.Div(id='amountCol')]),
]
else:
search_fields = [
dbc.Col([dcc.Markdown('Enter Zipcode:'),
dcc.Input(
id='frame',
type='number',
minLength=5,
maxLength=5,
value=10001
)]),
dbc.Col([
dcc.Markdown('Select Distance:'),
dcc.Slider(
id='amount',
min=0,
max=100,
step=10,
value=20
),
html.Div(id='amountCol')]),
]
return search_fields
@app.callback(
dash.dependencies.Output('amountCol', 'children'),
[dash.dependencies.Input('amount', 'value'),
dash.dependencies.Input('searchtype', 'value')])
def place_amount(amount, search):
    if search == 'time':
        return dcc.Markdown(f'Minimum Magnitude: {amount}')
    else:
        return dcc.Markdown(f'Maximum Distance: {amount}')
@app.callback(
dash.dependencies.Output('searchResults', 'children'),
[dash.dependencies.Input('searchtype', 'value'),
dash.dependencies.Input('frame', 'value'),
dash.dependencies.Input('amount', 'value'),
dash.dependencies.Input('Searchsource', 'value')])
def search_results(search, frame, amount, source):
    '''
    search is the type of search ('zip' or 'time')
    frame is either a zip code or a time frame
    amount is either a maximum distance or a minimum magnitude
    source is USGS or EMSC
    '''
if search == 'time':
try:
float(frame)
except:
return search_time(frame, amount, source)
else:
try:
float(frame)
return search_zip(frame, amount, source)
except:
return None
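# Note: the float() checks in search_results above appear to guard against stale
# input when the user switches search types: a numeric frame (a leftover zip code)
# is ignored by the time search, and a non-numeric frame is ignored by the zip search.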
def search_time(frame, amount, source):
if frame == 'Quake':
api_url = BASE_URL + f'last{frame}/{source}/{float(amount)}'
else:
api_url = BASE_URL + f'last/{source}/{frame}/{float(amount)}'
print(api_url)
data = requests.get(api_url)
if data.json()['num_quakes'] != 0:
df = pd.DataFrame(data.json()['message']) if frame != 'Quake' else \
pd.DataFrame(data.json()['message'], index=[0])
if frame == 'Quake':
title = f'Last Quake over {amount} in {source}'
else:
title = f"Quakes over {amount} in the last {frame.strip('last/')} in {source}"
return display_table(df, title)
else:
if frame == 'Quake':
display_text = f'No Quakes over {amount} to display in {source}'
else:
display_text = f"No Quakes over {amount} in the last {frame.strip('last/')} to display in {source}"
return dcc.Markdown(display_text)
def search_zip(zip, dist, source):
location_search = SearchEngine(simple_zipcode=True)
location = location_search.by_zipcode(str(zip))
lat = location.to_dict()['lat']
lon = location.to_dict()['lng']
if lat == None:
return dcc.Markdown(f'{zip} is not a valid US zip code')
api_url = BASE_URL + f'history/{source}/{lat},{lon},{dist}'
quakes = requests.get(api_url)
if quakes.json()['num_quakes'] != 0:
df = pd.DataFrame(quakes.json()['message'])
title = f'Quakes within {dist} KM of {zip} from {source}'
return display_table(df, title)
else:
return dcc.Markdown(f'No Quakes have occured within {dist} KM of {zip} in {source}')
def display_table(df, title):
html = '<table cellpadding=20><tr>'
df = df[['id', 'time', 'place', 'lat', 'lon', 'mag']]
col_names = df.columns
for col in col_names:
html = html + f'<td>{col.upper()}</td>'
html += '</tr>'
for row in df.iterrows():
print(row[1]['id'])
html += f"<tr class='hoverable'><td>{row[1]['id']}</td><td>{row[1]['time']}</td><td>{row[1]['place']}</td><td>{row[1]['lat']}</td><td>{row[1]['lon']}</td><td>{row[1]['mag']}</td></tr>"
return rawHtml(html)
highlight_on_hover = '''<style>
.hoverable:hover {
background-color: #ffff99;
}
</style>
'''
layout = dbc.Col([
TITLE,
dbc.Row([type_col, source_col]),
dbc.Row(search_fields, id='searchArea', style={'width': '100%'}),
dbc.Row(id='searchResults'),
    rawHtml(highlight_on_hover)
])
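# Assumption: the surrounding multi-page Dash app imports this module and mounts
# `layout` for the search page; the callbacks above register themselves on the
# shared `app` object imported at the top of the file.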
|
the-stack_0_24299 | import time
from django.db import connections
from django.db.utils import OperationalError
from django.core.management.base import BaseCommand
class Command(BaseCommand):
"""Django command too pause execution until database is available"""
    def handle(self, *args, **kwargs):
self.stdout.write('Waiting for database...')
db_conn = None
while not db_conn:
try:
db_conn = connections['default']
except OperationalError:
                self.stdout.write('Database unavailable, waiting 1 second...')
time.sleep(1)
self.stdout.write(self.style.SUCCESS('Database available!'))
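# Typical (assumed) usage is in a container entrypoint or CI script, run before
# migrations so the application does not race the database, e.g.:
#   python manage.py wait_for_db && python manage.py migrate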
|
the-stack_0_24301 | # Copyright 2016 AT&T Corp
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tempest test-case to test fqname ID objects using RBAC roles
"""
from oslo_log import log as logging
from patrole_tempest_plugin import rbac_rule_validation
from tempest import config
from tempest.lib.common.utils import data_utils
from tempest.lib.decorators import idempotent_id
from tungsten_tempest_plugin.tests.api.contrail import rbac_base
CONF = config.CONF
LOG = logging.getLogger(__name__)
class FqnameIdTest(rbac_base.BaseContrailTest):
"""Test class to test Fqname ID objects using RBAC roles"""
@classmethod
def resource_setup(cls):
super(FqnameIdTest, cls).resource_setup()
# Create network to test fqname and uuid conversion
net_name = data_utils.rand_name('rbac-fq-network')
fq_name = ['default-domain', cls.tenant_name, net_name]
post_body = {'parent_type': 'project', 'fq_name': fq_name}
body = cls.vn_client.create_virtual_networks(**post_body)
cls.network = body['virtual-network']
cls.type = 'virtual-network'
@classmethod
def resource_cleanup(cls):
cls._try_delete_resource(cls.vn_client.delete_virtual_network,
cls.network['uuid'])
super(FqnameIdTest, cls).resource_cleanup()
@rbac_rule_validation.action(service="Contrail",
rules=["fqname_to_id"])
@idempotent_id('1fc1350b-3146-49bc-9af5-a61a98b55541')
def test_fqname_to_id(self):
"""test method for fqname to id rules objects"""
with self.rbac_utils.override_role(self):
self.fq_client.fqname_to_id(fq_name=self.network['fq_name'],
type=self.type)
@rbac_rule_validation.action(service="Contrail",
rules=["id_to_fqname"])
@idempotent_id('ecdd77d7-8508-4639-86cd-b97907b363ff')
def test_id_to_fqname(self):
"""test method for id to fqname rules objects"""
with self.rbac_utils.override_role(self):
self.fq_client.id_to_fqname(uuid=self.network['uuid'])
|
the-stack_0_24304 | import os
import argparse
from solver import Solver
def main(config):
# path for models
if not os.path.exists(config.model_save_path):
os.makedirs(config.model_save_path)
# import data loader
if config.dataset == 'mtat':
from data_loader.mtat_loader import get_audio_loader
elif config.dataset == 'msd':
from data_loader.msd_loader import get_audio_loader
elif config.dataset == 'jamendo':
from data_loader.jamendo_loader import get_audio_loader
# audio length
if config.model_type == 'fcn' or config.model_type == 'crnn':
config.input_length = 29 * 16000
elif config.model_type == 'musicnn':
config.input_length = 3 * 16000
elif config.model_type in ['sample', 'se', 'short', 'short_res']:
config.input_length = 59049
elif config.model_type == 'hcnn':
config.input_length = 80000
elif config.model_type == 'attention':
config.input_length = 15 * 16000
# get data loder
train_loader = get_audio_loader(config.data_path,
config.batch_size,
split='TRAIN',
input_length=config.input_length,
num_workers=config.num_workers)
solver = Solver(train_loader, config)
solver.train()
if __name__ == '__main__':
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--num_workers', type=int, default=0)
parser.add_argument('--dataset', type=str, default='mtat', choices=['mtat', 'msd', 'jamendo'])
parser.add_argument('--model_type', type=str, default='fcn',
choices=['fcn', 'musicnn', 'crnn', 'sample', 'se', 'short', 'short_res', 'attention', 'hcnn'])
parser.add_argument('--n_epochs', type=int, default=200)
parser.add_argument('--batch_size', type=int, default=16)
parser.add_argument('--lr', type=float, default=1e-4)
parser.add_argument('--use_tensorboard', type=int, default=1)
parser.add_argument('--model_save_path', type=str, default='./../models')
parser.add_argument('--model_load_path', type=str, default='.')
parser.add_argument('--data_path', type=str, default='./data')
parser.add_argument('--log_step', type=int, default=20)
config = parser.parse_args()
print(config)
main(config)
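# Example invocation (the module filename is assumed; the flags match the argparse
# options defined above):
#   python main.py --dataset mtat --model_type fcn --batch_size 16 --data_path ./data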
|
the-stack_0_24305 | from lib.target_menu.colors import palette
def update(menu, control, cl):
for i in range(len(menu)):
if control["pos1"] == i:
print(f"{cl[2]}{menu[i]} {palette.colors[15][1]}")
else:
print(f"{menu[i]}")
print("\n") |
the-stack_0_24308 | # Copyright 2017-present Open Networking Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from xossynchronizer.steps import ansiblesyncstep
def escape(s):
s = s.replace("\n", r"\n").replace('"', r"\"")
return s
class SyncInstances(ansiblesyncstep.AnsibleSyncStep):
requested_interval = 0
# This observes is intentionally a list of one string, to test steps where observes is a list of strings.
observes = ["Instance"]
playbook = "sync_instances.yaml"
def fetch_pending(self, deletion=False):
objs = super(SyncInstances, self).fetch_pending(deletion)
objs = [x for x in objs if x.isolation == "vm"]
return objs
def map_sync_inputs(self, instance):
inputs = {}
metadata_update = {}
fields = {"name": instance.name, "delete": False}
return fields
def map_sync_outputs(self, instance, res):
instance.save()
def map_delete_inputs(self, instance):
input = {
"endpoint": "endpoint",
"admin_user": "admin_user",
"admin_password": "admin_password",
"project_name": "project_name",
"tenant": "tenant",
"tenant_description": "tenant_description",
"name": instance.name,
"ansible_tag": "ansible_tag",
"delete": True,
}
return input
|
the-stack_0_24310 | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
from PyQt4.QtCore import *
from PyQt4.QtGui import *
class AddLabelDialog(QDialog):
"""A dialog for adding a new label."""
def __init__(self, label_configs, edit_label=None, parent=None):
super(AddLabelDialog, self).__init__(parent)
self._edit_label = edit_label
self._new_label = []
self._label_configs = label_configs
self._is_valid_label = False
self._init_gui()
self._create_actions()
def _init_gui(self):
label_label = QLabel("Label")
self.label_edit = QLineEdit()
index_label = QLabel("Index")
self.index_edit = QLineEdit()
regx = QRegExp("[0-9]+$")
validator = QRegExpValidator(regx, self.index_edit)
self.index_edit.setValidator(validator)
self.index_edit.setEnabled(self._edit_label is None)
color_label = QLabel("Color")
self.color_button = ColorButton()
grid_layout = QGridLayout()
grid_layout.addWidget(label_label, 0, 0)
grid_layout.addWidget(self.label_edit, 0, 1)
grid_layout.addWidget(index_label, 1, 0)
grid_layout.addWidget(self.index_edit, 1, 1)
grid_layout.addWidget(color_label, 2, 0)
grid_layout.addWidget(self.color_button, 2, 1)
self.add_button = QPushButton("Add")
self.cancel_button = QPushButton("Cancel")
hbox_layout = QHBoxLayout()
hbox_layout.addWidget(self.add_button)
hbox_layout.addWidget(self.cancel_button)
vbox_layout = QVBoxLayout()
vbox_layout.addLayout(grid_layout)
vbox_layout.addLayout(hbox_layout)
self.setLayout(vbox_layout)
if self._edit_label:
self.index_edit.setText(self._edit_label[0])
self.label_edit.setText(self._edit_label[1])
self.color_button._set_current_color(self._edit_label[2])
self.add_button.setText("OK")
def _create_actions(self):
self.add_button.clicked.connect(self._add_label)
self.cancel_button.clicked.connect(self.done)
def _add_label(self):
self._new_label = []
label = str(self.label_edit.text())
if not label:
QMessageBox.critical(self, "No label name",
"Please speicify your label name.")
return
index = self.index_edit.text()
if not index:
QMessageBox.critical(self, "No index",
"Please specify a index for your label.")
return
index = int(str(self.index_edit.text()))
color = self.color_button.get_current_color()
if not color.isValid():
QMessageBox.critical(self, "Color invalid",
"Please choose a valid color for your label.")
return
label = str(label)
self._new_label.append(index)
self._new_label.append(str(label.replace(" ", "_")))
self._new_label.append(color)
if self._new_label[1] in self._label_configs.get_label_list():
if self._edit_label is None or self._edit_label[1] != self._new_label[1]:
QMessageBox.warning(self, "Add label",
"The label %s has exsited!" % self._new_label[1],
QMessageBox.Yes)
return
if not self._edit_label:
if self._new_label[0] in self._label_configs.get_index_list():
QMessageBox.warning(self, "Add label",
"The index %s has exsited!" % self._new_label[0],
QMessageBox.Yes)
return
self._is_valid_label = True
self.done(0)
def get_new_label(self):
if self._is_valid_label:
return self._new_label
else:
return None
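# Illustrative usage sketch (not part of the original file):
#     dialog = AddLabelDialog(label_configs)
#     dialog.exec_()
#     new_label = dialog.get_new_label()  # [index, name, QColor], or None if invalid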
class ColorButton(QPushButton):
"""Button to choose color from a color dialog."""
default_color = QColor(255, 0, 0)
icon_size = QSize(32, 32)
color_changed = pyqtSignal(QColor)
def __init__(self, init_color=None, parent=None):
super(ColorButton, self).__init__(parent)
if not init_color:
init_color = self.default_color
self.current_color = init_color
self._update_icon()
self.clicked.connect(self._choose_color)
def _update_icon(self):
if self.current_color:
icon_image = QImage(self.icon_size, QImage.Format_RGB888)
icon_image.fill(self.current_color.rgb())
icon_image = icon_image.rgbSwapped()
icon_pm = QPixmap.fromImage(icon_image)
self.setIcon(QIcon(icon_pm))
def _choose_color(self):
color = QColorDialog.getColor(self.current_color, self)
if color.isValid():
self._set_current_color(color)
def _set_current_color(self, color):
self.set_current_color(color)
self.color_changed.emit(color)
def set_current_color(self, color):
self.current_color = color
self._update_icon()
def get_current_color(self):
return self.current_color |
the-stack_0_24311 | from collections import OrderedDict
import copy
import os
import time
from lxml import etree as ET
import networkx as nx
import numpy as np
import PIL
import trimesh
import six
from .utils import (parse_origin, unparse_origin, get_filename, load_meshes,
configure_origin)
class URDFType(object):
"""Abstract base class for all URDF types.
This has useful class methods for automatic parsing/unparsing
of XML trees.
There are three overridable class variables:
- ``_ATTRIBS`` - This is a dictionary mapping attribute names to a tuple,
``(type, required)`` where ``type`` is the Python type for the
attribute and ``required`` is a boolean stating whether the attribute
is required to be present.
- ``_ELEMENTS`` - This is a dictionary mapping element names to a tuple,
``(type, required, multiple)`` where ``type`` is the Python type for the
element, ``required`` is a boolean stating whether the element
is required to be present, and ``multiple`` is a boolean indicating
whether multiple elements of this type could be present.
Elements are child nodes in the XML tree, and their type must be a
subclass of :class:`.URDFType`.
- ``_TAG`` - This is a string that represents the XML tag for the node
containing this type of object.
"""
_ATTRIBS = {} # Map from attrib name to (type, required)
_ELEMENTS = {} # Map from element name to (type, required, multiple)
_TAG = '' # XML tag for this element
def __init__(self):
pass
@classmethod
def _parse_attrib(cls, val_type, val):
"""Parse an XML attribute into a python value.
Parameters
----------
val_type : :class:`type`
The type of value to create.
val : :class:`object`
The value to parse.
Returns
-------
val : :class:`object`
The parsed attribute.
"""
if val_type == np.ndarray:
val = np.fromstring(val, sep=' ')
else:
val = val_type(val)
return val
@classmethod
def _parse_simple_attribs(cls, node):
"""Parse all attributes in the _ATTRIBS array for this class.
Parameters
----------
node : :class:`lxml.etree.Element`
The node to parse attributes for.
Returns
-------
kwargs : dict
Map from attribute name to value. If the attribute is not
required and is not present, that attribute's name will map to
``None``.
"""
kwargs = {}
for a in cls._ATTRIBS:
t, r = cls._ATTRIBS[a] # t = type, r = required (bool)
if r:
try:
v = cls._parse_attrib(t, node.attrib[a])
except Exception:
raise ValueError(
'Missing required attribute {} when parsing an object '
'of type {}'.format(a, cls.__name__)
)
else:
v = None
if a in node.attrib:
v = cls._parse_attrib(t, node.attrib[a])
kwargs[a] = v
return kwargs
@classmethod
def _parse_simple_elements(cls, node, path):
"""Parse all elements in the _ELEMENTS array from the children of
this node.
Parameters
----------
node : :class:`lxml.etree.Element`
The node to parse children for.
path : str
The string path where the XML file is located (used for resolving
the location of mesh or image files).
Returns
-------
kwargs : dict
Map from element names to the :class:`URDFType` subclass (or list,
if ``multiple`` was set) created for that element.
"""
kwargs = {}
for a in cls._ELEMENTS:
t, r, m = cls._ELEMENTS[a]
if not m:
v = node.find(t._TAG)
if r or v is not None:
v = t._from_xml(v, path)
else:
vs = node.findall(t._TAG)
if len(vs) == 0 and r:
raise ValueError(
'Missing required subelement(s) of type {} when '
'parsing an object of type {}'.format(
t.__name__, cls.__name__
)
)
v = [t._from_xml(n, path) for n in vs]
kwargs[a] = v
return kwargs
@classmethod
def _parse(cls, node, path):
"""Parse all elements and attributes in the _ELEMENTS and _ATTRIBS
arrays for a node.
Parameters
----------
node : :class:`lxml.etree.Element`
The node to parse.
path : str
The string path where the XML file is located (used for resolving
the location of mesh or image files).
Returns
-------
kwargs : dict
Map from names to Python classes created from the attributes
and elements in the class arrays.
"""
kwargs = cls._parse_simple_attribs(node)
kwargs.update(cls._parse_simple_elements(node, path))
return kwargs
@classmethod
def _from_xml(cls, node, path):
"""Create an instance of this class from an XML node.
Parameters
----------
node : :class:`lxml.etree.Element`
The node to parse.
path : str
The string path where the XML file is located (used for resolving
the location of mesh or image files).
Returns
-------
obj : :class:`URDFType`
An instance of this class parsed from the node.
"""
return cls(**cls._parse(node, path))
def _unparse_attrib(self, val_type, val):
"""Convert a Python value into a string for storage in an
XML attribute.
Parameters
----------
val_type : :class:`type`
The type of the Python object.
val : :class:`object`
The actual value.
Returns
-------
s : str
The attribute string.
"""
if val_type == np.ndarray:
val = np.array2string(val)[1:-1]
else:
val = str(val)
return val
def _unparse_simple_attribs(self, node):
"""Convert all Python types from the _ATTRIBS array back into attributes
for an XML node.
Parameters
----------
node : :class:`object`
The XML node to add the attributes to.
"""
for a in self._ATTRIBS:
t, r = self._ATTRIBS[a]
v = getattr(self, a, None)
if r or v is not None:
node.attrib[a] = self._unparse_attrib(t, v)
def _unparse_simple_elements(self, node, path):
"""Unparse all Python types from the _ELEMENTS array back into child
nodes of an XML node.
Parameters
----------
node : :class:`object`
The XML node for this object. Elements will be added as children
of this node.
path : str
The string path where the XML file is being written to (used for
writing out meshes and image files).
"""
for a in self._ELEMENTS:
t, r, m = self._ELEMENTS[a]
v = getattr(self, a, None)
if not m:
if r or v is not None:
node.append(v._to_xml(node, path))
else:
vs = v
for v in vs:
node.append(v._to_xml(node, path))
def _unparse(self, path):
"""Create a node for this object and unparse all elements and
attributes in the class arrays.
Parameters
----------
path : str
The string path where the XML file is being written to (used for
writing out meshes and image files).
Returns
-------
node : :class:`lxml.etree.Element`
The newly-created node.
"""
node = ET.Element(self._TAG)
self._unparse_simple_attribs(node)
self._unparse_simple_elements(node, path)
return node
def _to_xml(self, parent, path):
"""Create and return an XML node for this object.
Parameters
----------
parent : :class:`lxml.etree.Element`
The parent node that this element will eventually be added to.
This base implementation doesn't use this information, but
classes that override this function may use it.
path : str
The string path where the XML file is being written to (used for
writing out meshes and image files).
Returns
-------
node : :class:`lxml.etree.Element`
The newly-created node.
"""
return self._unparse(path)
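# Illustrative sketch (not part of the original module): a hypothetical URDFType
# subclass showing how _ATTRIBS/_ELEMENTS/_TAG drive the generic parsing machinery
# above. The tag and attribute names below are invented for the example.
#
#     class Gain(URDFType):
#         _ATTRIBS = {'value': (float, True)}  # required float XML attribute
#         _ELEMENTS = {}                       # no child elements
#         _TAG = 'gain'                        # parsed from <gain value="..."/>
#
#         def __init__(self, value):
#             self.value = value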
###############################################################################
# Link types
###############################################################################
class Box(URDFType):
"""A rectangular prism whose center is at the local origin.
Parameters
----------
size : (3,) float
The length, width, and height of the box in meters.
"""
_ATTRIBS = {
'size': (np.ndarray, True)
}
_TAG = 'box'
def __init__(self, size):
self.size = size
self._meshes = []
@property
def size(self):
"""(3,) float : The length, width, and height of the box in meters.
"""
return self._size
@size.setter
def size(self, value):
self._size = np.asanyarray(value).astype(np.float64)
self._meshes = []
@property
def meshes(self):
"""list of :class:`~trimesh.base.Trimesh` : The triangular meshes
that represent this object.
"""
if len(self._meshes) == 0:
self._meshes = [trimesh.creation.box(extents=self.size)]
return self._meshes
def copy(self, prefix='', scale=None):
"""Create a deep copy with the prefix applied to all names.
Parameters
----------
prefix : str
A prefix to apply to all names.
Returns
-------
:class:`.Box`
A deep copy.
"""
if scale is None:
scale = 1.0
b = Box(
size=self.size.copy() * scale,
)
return b
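# Illustrative usage sketch (not part of the public API): how a Box is
# typically built and turned into a trimesh. Wrapped in an unexported helper
# so importing the module has no side effects; relies on the module-level
# trimesh/numpy imports used by the classes above.
def _example_box_usage():
    box = Box(size=[0.1, 0.2, 0.3])      # extents in meters
    mesh = box.meshes[0]                 # trimesh.Trimesh built lazily
    doubled = box.copy(scale=2.0)        # uniform rescale of the extents
    return mesh.volume, doubled.size     # (0.006, array([0.2, 0.4, 0.6]))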
class Cylinder(URDFType):
"""A cylinder whose center is at the local origin.
Parameters
----------
radius : float
The radius of the cylinder in meters.
length : float
The length of the cylinder in meters.
"""
_ATTRIBS = {
'radius': (float, True),
'length': (float, True),
}
_TAG = 'cylinder'
def __init__(self, radius, length):
self.radius = radius
self.length = length
self._meshes = []
@property
def radius(self):
"""float : The radius of the cylinder in meters.
"""
return self._radius
@radius.setter
def radius(self, value):
self._radius = float(value)
self._meshes = []
@property
def length(self):
"""float : The length of the cylinder in meters.
"""
return self._length
@length.setter
def length(self, value):
self._length = float(value)
self._meshes = []
@property
def meshes(self):
"""list of :class:`~trimesh.base.Trimesh` : The triangular meshes
that represent this object.
"""
if len(self._meshes) == 0:
self._meshes = [trimesh.creation.cylinder(
radius=self.radius, height=self.length
)]
return self._meshes
def copy(self, prefix='', scale=None):
"""Create a deep copy with the prefix applied to all names.
Parameters
----------
prefix : str
A prefix to apply to all names.
Returns
-------
:class:`.Cylinder`
A deep copy.
"""
if scale is None:
scale = 1.0
if isinstance(scale, (list, np.ndarray)):
if scale[0] != scale[1]:
raise ValueError('Cannot rescale cylinder geometry with asymmetry in x/y')
c = Cylinder(
radius=self.radius * scale[0],
length=self.length * scale[2],
)
else:
c = Cylinder(
radius=self.radius * scale,
length=self.length * scale,
)
return c
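# Illustrative usage sketch (not part of the public API): cylinders only
# accept per-axis rescaling when the x and y factors match, because the
# cross section must stay circular.
def _example_cylinder_scaling():
    cyl = Cylinder(radius=0.05, length=0.4)
    taller = cyl.copy(scale=[1.0, 1.0, 2.0])   # ok: x == y, z stretches the length
    try:
        cyl.copy(scale=[1.0, 2.0, 1.0])        # would squash the cross section
    except ValueError:
        pass                                   # rejected as expected
    return taller.radius, taller.length        # (0.05, 0.8)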
class Sphere(URDFType):
"""A sphere whose center is at the local origin.
Parameters
----------
radius : float
The radius of the sphere in meters.
"""
_ATTRIBS = {
'radius': (float, True),
}
_TAG = 'sphere'
def __init__(self, radius):
self.radius = radius
self._meshes = []
@property
def radius(self):
"""float : The radius of the sphere in meters.
"""
return self._radius
@radius.setter
def radius(self, value):
self._radius = float(value)
self._meshes = []
@property
def meshes(self):
"""list of :class:`~trimesh.base.Trimesh` : The triangular meshes
that represent this object.
"""
if len(self._meshes) == 0:
self._meshes = [trimesh.creation.icosphere(radius=self.radius)]
return self._meshes
def copy(self, prefix='', scale=None):
"""Create a deep copy with the prefix applied to all names.
Parameters
----------
prefix : str
A prefix to apply to all names.
Returns
-------
:class:`.Sphere`
A deep copy.
"""
if scale is None:
scale = 1.0
if isinstance(scale, (list, np.ndarray)):
if scale[0] != scale[1] or scale[0] != scale[2]:
raise ValueError('Spheres do not support non-uniform scaling!')
scale = scale[0]
s = Sphere(
radius=self.radius * scale,
)
return s
class Mesh(URDFType):
"""A triangular mesh object.
Parameters
----------
filename : str
The path to the mesh that contains this object. This can be
relative to the top-level URDF or an absolute path.
scale : (3,) float, optional
The scaling value for the mesh along the XYZ axes.
If ``None``, assumes no scale is applied.
meshes : list of :class:`~trimesh.base.Trimesh`
A list of meshes that compose this mesh.
The list of meshes is useful for visual geometries that
might be composed of separate trimesh objects.
If not specified, the mesh is loaded from the file using trimesh.
use_package : bool, optional
If True, the exported XML node will prefix the filename with ``package://`` for ROS compliance.
"""
_ATTRIBS = {
'filename': (str, True),
'scale': (np.ndarray, False)
}
_TAG = 'mesh'
def __init__(self, filename, scale=None, meshes=None, use_package=False):
if meshes is None:
meshes = load_meshes(filename)
self.filename = filename
self.scale = scale
self.meshes = meshes
self.use_package = use_package
@property
def filename(self):
"""str : The path to the mesh file for this object.
"""
return self._filename
@filename.setter
def filename(self, value):
self._filename = value
@property
def scale(self):
"""(3,) float : A scaling for the mesh along its local XYZ axes.
"""
return self._scale
@scale.setter
def scale(self, value):
if value is not None:
value = np.asanyarray(value).astype(np.float64)
self._scale = value
@property
def meshes(self):
"""list of :class:`~trimesh.base.Trimesh` : The triangular meshes
that represent this object.
"""
return self._meshes
@meshes.setter
def meshes(self, value):
if isinstance(value, six.string_types):
value = load_meshes(value)
elif isinstance(value, (list, tuple, set, np.ndarray)):
value = list(value)
if len(value) == 0:
raise ValueError('Mesh must have at least one trimesh.Trimesh')
for m in value:
if not isinstance(m, trimesh.Trimesh):
raise TypeError('Mesh requires a trimesh.Trimesh or a '
'list of them')
elif isinstance(value, trimesh.Trimesh):
value = [value]
else:
raise TypeError('Mesh requires a trimesh.Trimesh')
self._meshes = value
@classmethod
def _from_xml(cls, node, path):
kwargs = cls._parse(node, path)
# Load the mesh, combining collision geometry meshes but keeping
# visual ones separate to preserve colors and textures
fn = get_filename(path, kwargs['filename'])
combine = node.getparent().getparent().tag == Collision._TAG
meshes = load_meshes(fn)
if combine:
# Delete visuals for simplicity
for m in meshes:
m.visual = trimesh.visual.ColorVisuals(mesh=m)
meshes = [meshes[0] + meshes[1:]]
kwargs['meshes'] = meshes
return Mesh(**kwargs)
def _to_xml(self, parent, path):
# Get the filename
fn = get_filename(path, self.filename, makedirs=True)
# compatible formats for multi-mesh files (i.e. trimesh Scenes)
multimesh_compati_exts = ['.glb', '.obj']
# Export the meshes as a single file
meshes = self.meshes
if len(meshes) == 1:
meshes = meshes[0]
elif os.path.splitext(fn)[1] in multimesh_compati_exts:
meshes = trimesh.scene.Scene(geometry=meshes)
trimesh.exchange.export.export_mesh(meshes, fn)
# Unparse the node
node = self._unparse(path)
if self.use_package:
node.attrib['filename'] = "package://" + self.filename
return node
def copy(self, prefix='', scale=None):
"""Create a deep copy with the prefix applied to all names.
Parameters
----------
prefix : str
A prefix to apply to all names.
Returns
-------
:class:`.Mesh`
A deep copy.
"""
meshes = [m.copy() for m in self.meshes]
if scale is not None:
sm = np.eye(4)
if isinstance(scale, (list, np.ndarray)):
sm[:3,:3] = np.diag(scale)
else:
sm[:3,:3] = np.diag(np.repeat(scale, 3))
for i, m in enumerate(meshes):
meshes[i] = m.apply_transform(sm)
base, fn = os.path.split(self.filename)
fn = '{}{}'.format(prefix, fn)
m = Mesh(
filename=os.path.join(base, fn),
scale=(self.scale.copy() if self.scale is not None else None),
meshes=meshes
)
return m
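# Illustrative usage sketch (not part of the public API): a Mesh can be built
# from an in-memory trimesh, in which case the (hypothetical) filename below
# is only recorded for later export rather than read from disk.
def _example_mesh_usage():
    geom = trimesh.creation.icosphere(radius=0.02)
    ball = Mesh(filename='meshes/ball.obj', meshes=[geom])
    shrunk = ball.copy(prefix='left_', scale=0.5)   # rescales the copied trimesh
    return ball.meshes[0].volume, shrunk.filename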
class Geometry(URDFType):
"""A wrapper for all geometry types.
Only one of the following values can be set, all others should be set
to ``None``.
Parameters
----------
box : :class:`.Box`, optional
Box geometry.
cylinder : :class:`.Cylinder`
Cylindrical geometry.
sphere : :class:`.Sphere`
Spherical geometry.
mesh : :class:`.Mesh`
Mesh geometry.
"""
_ELEMENTS = {
'box': (Box, False, False),
'cylinder': (Cylinder, False, False),
'sphere': (Sphere, False, False),
'mesh': (Mesh, False, False),
}
_TAG = 'geometry'
def __init__(self, box=None, cylinder=None, sphere=None, mesh=None):
if (box is None and cylinder is None and
sphere is None and mesh is None):
raise ValueError('At least one geometry element must be set')
self.box = box
self.cylinder = cylinder
self.sphere = sphere
self.mesh = mesh
@property
def box(self):
""":class:`.Box` : Box geometry.
"""
return self._box
@box.setter
def box(self, value):
if value is not None and not isinstance(value, Box):
raise TypeError('Expected Box type')
self._box = value
@property
def cylinder(self):
""":class:`.Cylinder` : Cylinder geometry.
"""
return self._cylinder
@cylinder.setter
def cylinder(self, value):
if value is not None and not isinstance(value, Cylinder):
raise TypeError('Expected Cylinder type')
self._cylinder = value
@property
def sphere(self):
""":class:`.Sphere` : Spherical geometry.
"""
return self._sphere
@sphere.setter
def sphere(self, value):
if value is not None and not isinstance(value, Sphere):
raise TypeError('Expected Sphere type')
self._sphere = value
@property
def mesh(self):
""":class:`.Mesh` : Mesh geometry.
"""
return self._mesh
@mesh.setter
def mesh(self, value):
if value is not None and not isinstance(value, Mesh):
raise TypeError('Expected Mesh type')
self._mesh = value
@property
def geometry(self):
""":class:`.Box`, :class:`.Cylinder`, :class:`.Sphere`, or
:class:`.Mesh` : The valid geometry element.
"""
if self.box is not None:
return self.box
if self.cylinder is not None:
return self.cylinder
if self.sphere is not None:
return self.sphere
if self.mesh is not None:
return self.mesh
return None
@property
def meshes(self):
"""list of :class:`~trimesh.base.Trimesh` : The geometry's triangular
mesh representation(s).
"""
return self.geometry.meshes
def copy(self, prefix='', scale=None):
"""Create a deep copy with the prefix applied to all names.
Parameters
----------
prefix : str
A prefix to apply to all names.
Returns
-------
:class:`.Geometry`
A deep copy.
"""
v = Geometry(
box=(self.box.copy(prefix=prefix, scale=scale) if self.box else None),
cylinder=(self.cylinder.copy(prefix=prefix, scale=scale) if self.cylinder else None),
sphere=(self.sphere.copy(prefix=prefix, scale=scale) if self.sphere else None),
mesh=(self.mesh.copy(prefix=prefix, scale=scale) if self.mesh else None),
)
return v
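# Illustrative usage sketch (not part of the public API): Geometry behaves as
# a tagged union, so exactly one of box/cylinder/sphere/mesh is set and the
# `geometry` property returns whichever one it is.
def _example_geometry_usage():
    geom = Geometry(cylinder=Cylinder(radius=0.02, length=0.1))
    assert geom.geometry is geom.cylinder
    return geom.meshes[0].bounds     # axis-aligned bounds of the trimesh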
class Texture(URDFType):
"""An image-based texture.
Parameters
----------
filename : str
The path to the image that contains this texture. This can be
relative to the top-level URDF or an absolute path.
image : :class:`PIL.Image.Image`, optional
The image for the texture.
If not specified, it is loaded automatically from the filename.
"""
_ATTRIBS = {
'filename': (str, True)
}
_TAG = 'texture'
def __init__(self, filename, image=None):
if image is None:
image = PIL.Image.open(filename)
self.filename = filename
self.image = image
@property
def filename(self):
"""str : Path to the image for this texture.
"""
return self._filename
@filename.setter
def filename(self, value):
self._filename = str(value)
@property
def image(self):
""":class:`PIL.Image.Image` : The image for this texture.
"""
return self._image
@image.setter
def image(self, value):
if isinstance(value, str):
value = PIL.Image.open(value)
if isinstance(value, np.ndarray):
value = PIL.Image.fromarray(value)
elif not isinstance(value, PIL.Image.Image):
raise ValueError('Texture only supports numpy arrays '
'or PIL images')
self._image = value
@classmethod
def _from_xml(cls, node, path):
kwargs = cls._parse(node, path)
# Load image
fn = get_filename(path, kwargs['filename'])
kwargs['image'] = PIL.Image.open(fn)
return Texture(**kwargs)
def _to_xml(self, parent, path):
# Save the image
filepath = get_filename(path, self.filename, makedirs=True)
self.image.save(filepath)
return self._unparse(path)
def copy(self, prefix='', scale=None):
"""Create a deep copy with the prefix applied to all names.
Parameters
----------
prefix : str
A prefix to apply to all names.
Returns
-------
:class:`.Texture`
A deep copy.
"""
v = Texture(
filename=self.filename,
image=self.image.copy()
)
return v
class Material(URDFType):
"""A material for some geometry.
Parameters
----------
name : str
The name of the material.
color : (4,) float, optional
The RGBA color of the material in the range [0,1].
texture : :class:`.Texture`, optional
A texture for the material.
"""
_ATTRIBS = {
'name': (str, True)
}
_ELEMENTS = {
'texture': (Texture, False, False),
}
_TAG = 'material'
def __init__(self, name, color=None, texture=None):
self.name = name
self.color = color
self.texture = texture
@property
def name(self):
"""str : The name of the material.
"""
return self._name
@name.setter
def name(self, value):
self._name = str(value)
@property
def color(self):
"""(4,) float : The RGBA color of the material, in the range [0,1].
"""
return self._color
@color.setter
def color(self, value):
if value is not None:
value = np.asanyarray(value).astype(np.float64)
value = np.clip(value, 0.0, 1.0)
if value.shape != (4,):
raise ValueError('Color must be a (4,) float')
self._color = value
@property
def texture(self):
""":class:`.Texture` : The texture for the material.
"""
return self._texture
@texture.setter
def texture(self, value):
if value is not None:
if isinstance(value, six.string_types):
image = PIL.Image.open(value)
value = Texture(filename=value, image=image)
elif not isinstance(value, Texture):
raise ValueError('Invalid type for texture -- expect path to '
'image or Texture')
self._texture = value
@classmethod
def _from_xml(cls, node, path):
kwargs = cls._parse(node, path)
# Extract the color -- it's weirdly an attribute of a subelement
color = node.find('color')
if color is not None:
color = np.fromstring(color.attrib['rgba'], sep=' ', dtype=np.float64)
kwargs['color'] = color
return Material(**kwargs)
def _to_xml(self, parent, path):
# Simplify materials by collecting them at the top level.
# For top-level elements, save the full material specification
if parent.tag == 'robot':
node = self._unparse(path)
if self.color is not None:
color = ET.Element('color')
color.attrib['rgba'] = np.array2string(self.color)[1:-1]
node.append(color)
# For non-top-level elements just save the material with a name
else:
node = ET.Element('material')
node.attrib['name'] = self.name
return node
def copy(self, prefix='', scale=None):
"""Create a deep copy of the material with the prefix applied to all names.
Parameters
----------
prefix : str
A prefix to apply to all joint and link names.
Returns
-------
:class:`.Material`
A deep copy of the material.
"""
return Material(
name='{}{}'.format(prefix, self.name),
color=self.color,
texture=self.texture
)
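# Illustrative usage sketch (not part of the public API): a named material
# with an RGBA color and no texture; the setter clips the color into [0, 1].
def _example_material_usage():
    mat = Material(name='steel_grey', color=[0.6, 0.6, 0.65, 1.5])
    return mat.color     # array([0.6, 0.6, 0.65, 1.0]) after clipping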
class Collision(URDFType):
"""Collision properties of a link.
Parameters
----------
geometry : :class:`.Geometry`
The geometry of the element
name : str, optional
The name of the collision geometry.
origin : (4,4) float, optional
The pose of the collision element relative to the link frame.
Defaults to identity.
"""
_ATTRIBS = {
'name': (str, False)
}
_ELEMENTS = {
'geometry': (Geometry, True, False),
}
_TAG = 'collision'
def __init__(self, name, origin, geometry):
self.geometry = geometry
self.name = name
self.origin = origin
@property
def geometry(self):
""":class:`.Geometry` : The geometry of this element.
"""
return self._geometry
@geometry.setter
def geometry(self, value):
if not isinstance(value, Geometry):
raise TypeError('Must set geometry with Geometry object')
self._geometry = value
@property
def name(self):
"""str : The name of this collision element.
"""
return self._name
@name.setter
def name(self, value):
if value is not None:
value = str(value)
self._name = value
@property
def origin(self):
"""(4,4) float : The pose of this element relative to the link frame.
"""
return self._origin
@origin.setter
def origin(self, value):
self._origin = configure_origin(value)
@classmethod
def _from_xml(cls, node, path):
kwargs = cls._parse(node, path)
kwargs['origin'] = parse_origin(node)
return Collision(**kwargs)
def _to_xml(self, parent, path):
node = self._unparse(path)
node.append(unparse_origin(self.origin))
return node
def copy(self, prefix='', scale=None):
"""Create a deep copy of the visual with the prefix applied to all names.
Parameters
----------
prefix : str
A prefix to apply to all joint and link names.
Returns
-------
:class:`.Collision`
A deep copy of the collision element.
"""
origin = self.origin.copy()
if scale is not None:
if not isinstance(scale, (list, np.ndarray)):
scale = np.repeat(scale, 3)
origin[:3,3] *= scale
return Collision(
name='{}{}'.format(prefix, self.name),
origin=origin,
geometry=self.geometry.copy(prefix=prefix, scale=scale),
)
class Visual(URDFType):
"""Visual properties of a link.
Parameters
----------
geometry : :class:`.Geometry`
The geometry of the element
name : str, optional
The name of the visual geometry.
origin : (4,4) float, optional
The pose of the visual element relative to the link frame.
Defaults to identity.
material : :class:`.Material`, optional
The material of the element.
"""
_ATTRIBS = {
'name': (str, False)
}
_ELEMENTS = {
'geometry': (Geometry, True, False),
'material': (Material, False, False),
}
_TAG = 'visual'
def __init__(self, geometry, name=None, origin=None, material=None):
self.geometry = geometry
self.name = name
self.origin = origin
self.material = material
@property
def geometry(self):
""":class:`.Geometry` : The geometry of this element.
"""
return self._geometry
@geometry.setter
def geometry(self, value):
if not isinstance(value, Geometry):
raise TypeError('Must set geometry with Geometry object')
self._geometry = value
@property
def name(self):
"""str : The name of this visual element.
"""
return self._name
@name.setter
def name(self, value):
if value is not None:
value = str(value)
self._name = value
@property
def origin(self):
"""(4,4) float : The pose of this element relative to the link frame.
"""
return self._origin
@origin.setter
def origin(self, value):
self._origin = configure_origin(value)
@property
def material(self):
""":class:`.Material` : The material for this element.
"""
return self._material
@material.setter
def material(self, value):
if value is not None:
if not isinstance(value, Material):
raise TypeError('Must set material with Material object')
self._material = value
@classmethod
def _from_xml(cls, node, path):
kwargs = cls._parse(node, path)
kwargs['origin'] = parse_origin(node)
return Visual(**kwargs)
def _to_xml(self, parent, path):
node = self._unparse(path)
node.append(unparse_origin(self.origin))
return node
def copy(self, prefix='', scale=None):
"""Create a deep copy of the visual with the prefix applied to all names.
Parameters
----------
prefix : str
A prefix to apply to all joint and link names.
Returns
-------
:class:`.Visual`
A deep copy of the visual.
"""
origin = self.origin.copy()
if scale is not None:
if not isinstance(scale, (list, np.ndarray)):
scale = np.repeat(scale, 3)
origin[:3,3] *= scale
return Visual(
geometry=self.geometry.copy(prefix=prefix, scale=scale),
name='{}{}'.format(prefix, self.name),
origin=origin,
material=(self.material.copy(prefix=prefix) if self.material else None),
)
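# Illustrative usage sketch (not part of the public API): a visual element
# pairs a geometry with an optional pose and material; when the origin is
# omitted it defaults to the identity transform.
def _example_visual_usage():
    vis = Visual(
        geometry=Geometry(box=Box(size=[0.1, 0.1, 0.1])),
        name='body_visual',
        material=Material(name='red', color=[1.0, 0.0, 0.0, 1.0]),
    )
    return vis.origin    # (4,4) identity matrix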
class Inertial(URDFType):
"""The inertial properties of a link.
Parameters
----------
mass : float
The mass of the link in kilograms.
inertia : (3,3) float
The 3x3 symmetric rotational inertia matrix.
origin : (4,4) float, optional
The pose of the inertials relative to the link frame.
Defaults to identity if not specified.
"""
_TAG = 'inertial'
def __init__(self, mass, inertia, origin=None):
self.mass = mass
self.inertia = inertia
self.origin = origin
@property
def mass(self):
"""float : The mass of the link in kilograms.
"""
return self._mass
@mass.setter
def mass(self, value):
self._mass = float(value)
@property
def inertia(self):
"""(3,3) float : The 3x3 symmetric rotational inertia matrix.
"""
return self._inertia
@inertia.setter
def inertia(self, value):
value = np.asanyarray(value).astype(np.float64)
if not np.allclose(value, value.T):
raise ValueError('Inertia must be a symmetric matrix')
self._inertia = value
@property
def origin(self):
"""(4,4) float : The pose of the inertials relative to the link frame.
"""
return self._origin
@origin.setter
def origin(self, value):
self._origin = configure_origin(value)
@classmethod
def _from_xml(cls, node, path):
origin = parse_origin(node)
mass = float(node.find('mass').attrib['value'])
n = node.find('inertia')
xx = float(n.attrib['ixx'])
xy = float(n.attrib['ixy'])
xz = float(n.attrib['ixz'])
yy = float(n.attrib['iyy'])
yz = float(n.attrib['iyz'])
zz = float(n.attrib['izz'])
inertia = np.array([
[xx, xy, xz],
[xy, yy, yz],
[xz, yz, zz]
], dtype=np.float64)
return Inertial(mass=mass, inertia=inertia, origin=origin)
def _to_xml(self, parent, path):
node = ET.Element('inertial')
node.append(unparse_origin(self.origin))
mass = ET.Element('mass')
mass.attrib['value'] = str(self.mass)
node.append(mass)
inertia = ET.Element('inertia')
inertia.attrib['ixx'] = str(self.inertia[0,0])
inertia.attrib['ixy'] = str(self.inertia[0,1])
inertia.attrib['ixz'] = str(self.inertia[0,2])
inertia.attrib['iyy'] = str(self.inertia[1,1])
inertia.attrib['iyz'] = str(self.inertia[1,2])
inertia.attrib['izz'] = str(self.inertia[2,2])
node.append(inertia)
return node
def copy(self, prefix='', mass=None, origin=None, inertia=None):
"""Create a deep copy of the visual with the prefix applied to all names.
Parameters
----------
prefix : str
A prefix to apply to all joint and link names.
Returns
-------
:class:`.Inertial`
A deep copy of the visual.
"""
if mass is None:
mass = self.mass
if origin is None:
origin = self.origin.copy()
if inertia is None:
inertia = self.inertia.copy()
return Inertial(
mass=mass,
inertia=inertia,
origin=origin,
)
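# Worked sketch (not part of the public API): the inertial of a 1 kg solid
# cube with side 0.1 m. For a uniform box, I_xx = m * (h**2 + d**2) / 12 and
# similarly for the other diagonal terms, so every diagonal entry here is
# 1.0 * (0.01 + 0.01) / 12.
def _example_inertial_usage():
    m, s = 1.0, 0.1
    i = m * (s ** 2 + s ** 2) / 12.0                 # ~1.667e-3 kg m^2
    inertial = Inertial(mass=m, inertia=np.diag([i, i, i]))
    return inertial.inertia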
###############################################################################
# Joint types
###############################################################################
class JointCalibration(URDFType):
"""The reference positions of the joint.
Parameters
----------
rising : float, optional
When the joint moves in a positive direction, this position will
trigger a rising edge.
falling : float, optional
When the joint moves in a positive direction, this position will
trigger a falling edge.
"""
_ATTRIBS = {
'rising': (float, False),
'falling': (float, False)
}
_TAG = 'calibration'
def __init__(self, rising=None, falling=None):
self.rising = rising
self.falling = falling
@property
def rising(self):
"""float : description.
"""
return self._rising
@rising.setter
def rising(self, value):
if value is not None:
value = float(value)
self._rising = value
@property
def falling(self):
"""float : description.
"""
return self._falling
@falling.setter
def falling(self, value):
if value is not None:
value = float(value)
self._falling = value
def copy(self, prefix='', scale=None):
"""Create a deep copy of the visual with the prefix applied to all names.
Parameters
----------
prefix : str
A prefix to apply to all joint and link names.
Returns
-------
:class:`.JointCalibration`
A deep copy of the joint calibration.
"""
return JointCalibration(
rising=self.rising,
falling=self.falling,
)
class JointDynamics(URDFType):
"""The dynamic properties of the joint.
Parameters
----------
damping : float
The damping value of the joint (Ns/m for prismatic joints,
Nms/rad for revolute).
friction : float
The static friction value of the joint (N for prismatic joints,
Nm for revolute).
"""
_ATTRIBS = {
'damping': (float, False),
'friction': (float, False),
}
_TAG = 'dynamics'
def __init__(self, damping, friction):
self.damping = damping
self.friction = friction
@property
def damping(self):
"""float : The damping value of the joint.
"""
return self._damping
@damping.setter
def damping(self, value):
if value is not None:
value = float(value)
self._damping = value
@property
def friction(self):
"""float : The static friction value of the joint.
"""
return self._friction
@friction.setter
def friction(self, value):
if value is not None:
value = float(value)
self._friction = value
def copy(self, prefix='', scale=None):
"""Create a deep copy of the visual with the prefix applied to all names.
Parameters
----------
prefix : str
A prefix to apply to all joint and link names.
Returns
-------
:class:`.JointDynamics`
A deep copy of the joint dynamics.
"""
return JointDynamics(
damping=self.damping,
friction=self.friction,
)
class JointLimit(URDFType):
"""The limits of the joint.
Parameters
----------
effort : float
The maximum joint effort (N for prismatic joints, Nm for revolute).
velocity : float
The maximum joint velocity (m/s for prismatic joints, rad/s for
revolute).
lower : float, optional
The lower joint limit (m for prismatic joints, rad for revolute).
upper : float, optional
The upper joint limit (m for prismatic joints, rad for revolute).
"""
_ATTRIBS = {
'effort': (float, True),
'velocity': (float, True),
'lower': (float, False),
'upper': (float, False),
}
_TAG = 'limit'
def __init__(self, effort, velocity, lower=None, upper=None):
self.effort = effort
self.velocity = velocity
self.lower = lower
self.upper = upper
@property
def effort(self):
"""float : The maximum joint effort.
"""
return self._effort
@effort.setter
def effort(self, value):
self._effort = float(value)
@property
def velocity(self):
"""float : The maximum joint velocity.
"""
return self._velocity
@velocity.setter
def velocity(self, value):
self._velocity = float(value)
@property
def lower(self):
"""float : The lower joint limit.
"""
return self._lower
@lower.setter
def lower(self, value):
if value is not None:
value = float(value)
self._lower = value
@property
def upper(self):
"""float : The upper joint limit.
"""
return self._upper
@upper.setter
def upper(self, value):
if value is not None:
value = float(value)
self._upper = value
def copy(self, prefix='', scale=None):
"""Create a deep copy of the visual with the prefix applied to all names.
Parameters
----------
prefix : str
A prefix to apply to all joint and link names.
Returns
-------
:class:`.JointLimit`
A deep copy of the joint limit.
"""
return JointLimit(
effort=self.effort,
velocity=self.velocity,
lower=self.lower,
upper=self.upper,
)
class JointMimic(URDFType):
"""A mimicry tag for a joint, which forces its configuration to
mimic another joint's.
This joint's configuration value is set equal to
``multiplier * other_joint_cfg + offset``.
Parameters
----------
joint : str
The name of the joint to mimic.
multiplier : float
The joint configuration multiplier. Defaults to 1.0.
offset : float, optional
The joint configuration offset. Defaults to 0.0.
"""
_ATTRIBS = {
'joint': (str, True),
'multiplier': (float, False),
'offset': (float, False),
}
_TAG = 'mimic'
def __init__(self, joint, multiplier=None, offset=None):
self.joint = joint
self.multiplier = multiplier
self.offset = offset
@property
def joint(self):
"""float : The name of the joint to mimic.
"""
return self._joint
@joint.setter
def joint(self, value):
self._joint = str(value)
@property
def multiplier(self):
"""float : The multiplier for the joint configuration.
"""
return self._multiplier
@multiplier.setter
def multiplier(self, value):
if value is not None:
value = float(value)
else:
value = 1.0
self._multiplier = value
@property
def offset(self):
"""float : The offset for the joint configuration
"""
return self._offset
@offset.setter
def offset(self, value):
if value is not None:
value = float(value)
else:
value = 0.0
self._offset = value
def copy(self, prefix='', scale=None):
"""Create a deep copy of the joint mimic with the prefix applied to all names.
Parameters
----------
prefix : str
A prefix to apply to all joint and link names.
Returns
-------
:class:`.JointMimic`
A deep copy of the joint mimic.
"""
return JointMimic(
joint='{}{}'.format(prefix, self.joint),
multiplier=self.multiplier,
offset=self.offset
)
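# Worked sketch (not part of the public API): a mimicking joint's value is
# multiplier * other_joint_cfg + offset, e.g. a gripper finger that mirrors
# the opposite finger.
def _example_joint_mimic():
    mimic = JointMimic(joint='finger_left_joint', multiplier=-1.0, offset=0.0)
    finger_left_cfg = 0.02
    finger_right_cfg = mimic.multiplier * finger_left_cfg + mimic.offset
    return finger_right_cfg      # -0.02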
class SafetyController(URDFType):
"""A controller for joint movement safety.
Parameters
----------
k_velocity : float
An attribute specifying the relation between the effort and velocity
limits.
k_position : float, optional
An attribute specifying the relation between the position and velocity
limits. Defaults to 0.0.
soft_lower_limit : float, optional
The lower joint boundary where the safety controller kicks in.
Defaults to 0.0.
soft_upper_limit : float, optional
The upper joint boundary where the safety controller kicks in.
Defaults to 0.0.
"""
_ATTRIBS = {
'k_velocity': (float, True),
'k_position': (float, False),
'soft_lower_limit': (float, False),
'soft_upper_limit': (float, False),
}
_TAG = 'safety_controller'
def __init__(self, k_velocity, k_position=None, soft_lower_limit=None,
soft_upper_limit=None):
self.k_velocity = k_velocity
self.k_position = k_position
self.soft_lower_limit = soft_lower_limit
self.soft_upper_limit = soft_upper_limit
@property
def soft_lower_limit(self):
"""float : The soft lower limit where the safety controller kicks in.
"""
return self._soft_lower_limit
@soft_lower_limit.setter
def soft_lower_limit(self, value):
if value is not None:
value = float(value)
else:
value = 0.0
self._soft_lower_limit = value
@property
def soft_upper_limit(self):
"""float : The soft upper limit where the safety controller kicks in.
"""
return self._soft_upper_limit
@soft_upper_limit.setter
def soft_upper_limit(self, value):
if value is not None:
value = float(value)
else:
value = 0.0
self._soft_upper_limit = value
@property
def k_position(self):
"""float : A relation between the position and velocity limits.
"""
return self._k_position
@k_position.setter
def k_position(self, value):
if value is not None:
value = float(value)
else:
value = 0.0
self._k_position = value
@property
def k_velocity(self):
"""float : A relation between the effort and velocity limits.
"""
return self._k_velocity
@k_velocity.setter
def k_velocity(self, value):
self._k_velocity = float(value)
def copy(self, prefix='', scale=None):
"""Create a deep copy of the visual with the prefix applied to all names.
Parameters
----------
prefix : str
A prefix to apply to all joint and link names.
Returns
-------
:class:`.SafetyController`
A deep copy of the safety controller.
"""
return SafetyController(
k_velocity=self.k_velocity,
k_position=self.k_position,
soft_lower_limit=self.soft_lower_limit,
soft_upper_limit=self.soft_upper_limit,
)
###############################################################################
# Transmission types
###############################################################################
class Actuator(URDFType):
"""An actuator.
Parameters
----------
name : str
The name of this actuator.
mechanicalReduction : str, optional
A specifier for the mechanical reduction at the joint/actuator
transmission.
hardwareInterfaces : list of str, optional
The supported hardware interfaces to the actuator.
"""
_ATTRIBS = {
'name': (str, True),
}
_TAG = 'actuator'
def __init__(self, name, mechanicalReduction=None,
hardwareInterfaces=None):
self.name = name
self.mechanicalReduction = mechanicalReduction
self.hardwareInterfaces = hardwareInterfaces
@property
def name(self):
"""str : The name of this actuator.
"""
return self._name
@name.setter
def name(self, value):
self._name = str(value)
@property
def mechanicalReduction(self):
"""str : A specifier for the type of mechanical reduction.
"""
return self._mechanicalReduction
@mechanicalReduction.setter
def mechanicalReduction(self, value):
if value is not None:
value = str(value)
self._mechanicalReduction = value
@property
def hardwareInterfaces(self):
"""list of str : The supported hardware interfaces.
"""
return self._hardwareInterfaces
@hardwareInterfaces.setter
def hardwareInterfaces(self, value):
if value is None:
value = []
else:
value = list(value)
for i, v in enumerate(value):
value[i] = str(v)
self._hardwareInterfaces = value
@classmethod
def _from_xml(cls, node, path):
kwargs = cls._parse(node, path)
mr = node.find('mechanicalReduction')
if mr is not None:
mr = float(mr.text)
kwargs['mechanicalReduction'] = mr
hi = node.findall('hardwareInterface')
if len(hi) > 0:
hi = [h.text for h in hi]
kwargs['hardwareInterfaces'] = hi
return Actuator(**kwargs)
def _to_xml(self, parent, path):
node = self._unparse(path)
if self.mechanicalReduction is not None:
mr = ET.Element('mechanicalReduction')
mr.text = str(self.mechanicalReduction)
node.append(mr)
if len(self.hardwareInterfaces) > 0:
for hi in self.hardwareInterfaces:
h = ET.Element('hardwareInterface')
h.text = hi
node.append(h)
return node
def copy(self, prefix='', scale=None):
"""Create a deep copy of the visual with the prefix applied to all names.
Parameters
----------
prefix : str
A prefix to apply to all joint and link names.
Returns
-------
:class:`.Actuator`
A deep copy of the actuator.
"""
return Actuator(
name='{}{}'.format(prefix, self.name),
mechanicalReduction=self.mechanicalReduction,
hardwareInterfaces=self.hardwareInterfaces.copy(),
)
class TransmissionJoint(URDFType):
"""A transmission joint specification.
Parameters
----------
name : str
The name of this actuator.
hardwareInterfaces : list of str, optional
The supported hardware interfaces to the actuator.
"""
_ATTRIBS = {
'name': (str, True),
}
_TAG = 'joint'
def __init__(self, name, hardwareInterfaces):
self.name = name
self.hardwareInterfaces = hardwareInterfaces
@property
def name(self):
"""str : The name of this transmission joint.
"""
return self._name
@name.setter
def name(self, value):
self._name = str(value)
@property
def hardwareInterfaces(self):
"""list of str : The supported hardware interfaces.
"""
return self._hardwareInterfaces
@hardwareInterfaces.setter
def hardwareInterfaces(self, value):
if value is None:
value = []
else:
value = list(value)
for i, v in enumerate(value):
value[i] = str(v)
self._hardwareInterfaces = value
@classmethod
def _from_xml(cls, node, path):
kwargs = cls._parse(node, path)
hi = node.findall('hardwareInterface')
if len(hi) > 0:
hi = [h.text for h in hi]
kwargs['hardwareInterfaces'] = hi
return TransmissionJoint(**kwargs)
def _to_xml(self, parent, path):
node = self._unparse(path)
if len(self.hardwareInterfaces) > 0:
for hi in self.hardwareInterfaces:
h = ET.Element('hardwareInterface')
h.text = hi
node.append(h)
return node
def copy(self, prefix='', scale=None):
"""Create a deep copy with the prefix applied to all names.
Parameters
----------
prefix : str
A prefix to apply to all names.
Returns
-------
:class:`.TransmissionJoint`
A deep copy.
"""
return TransmissionJoint(
name='{}{}'.format(prefix, self.name),
hardwareInterfaces=self.hardwareInterfaces.copy(),
)
###############################################################################
# Top-level types
###############################################################################
class Transmission(URDFType):
"""An element that describes the relationship between an actuator and a
joint.
Parameters
----------
name : str
The name of this transmission.
trans_type : str
The type of this transmission.
joints : list of :class:`.TransmissionJoint`
The joints connected to this transmission.
actuators : list of :class:`.Actuator`
The actuators connected to this transmission.
"""
_ATTRIBS = {
'name': (str, True),
}
_ELEMENTS = {
'joints': (TransmissionJoint, True, True),
'actuators': (Actuator, True, True),
}
_TAG = 'transmission'
def __init__(self, name, trans_type, joints=None, actuators=None):
self.name = name
self.trans_type = trans_type
self.joints = joints
self.actuators = actuators
@property
def name(self):
"""str : The name of this transmission.
"""
return self._name
@name.setter
def name(self, value):
self._name = str(value)
@property
def trans_type(self):
"""str : The type of this transmission.
"""
return self._trans_type
@trans_type.setter
def trans_type(self, value):
self._trans_type = str(value)
@property
def joints(self):
""":class:`.TransmissionJoint` : The joints the transmission is
connected to.
"""
return self._joints
@joints.setter
def joints(self, value):
if value is None:
value = []
else:
value = list(value)
for v in value:
if not isinstance(v, TransmissionJoint):
raise TypeError(
'Joints expects a list of TransmissionJoint'
)
self._joints = value
@property
def actuators(self):
""":class:`.Actuator` : The actuators the transmission is connected to.
"""
return self._actuators
@actuators.setter
def actuators(self, value):
if value is None:
value = []
else:
value = list(value)
for v in value:
if not isinstance(v, Actuator):
raise TypeError(
'Actuators expects a list of Actuator'
)
self._actuators = value
@classmethod
def _from_xml(cls, node, path):
kwargs = cls._parse(node, path)
kwargs['trans_type'] = node.find('type').text
return Transmission(**kwargs)
def _to_xml(self, parent, path):
node = self._unparse(path)
ttype = ET.Element('type')
ttype.text = self.trans_type
node.append(ttype)
return node
def copy(self, prefix='', scale=None):
"""Create a deep copy with the prefix applied to all names.
Parameters
----------
prefix : str
A prefix to apply to all names.
Returns
-------
:class:`.Transmission`
A deep copy.
"""
return Transmission(
name='{}{}'.format(prefix, self.name),
trans_type=self.trans_type,
joints=[j.copy(prefix) for j in self.joints],
actuators=[a.copy(prefix) for a in self.actuators],
)
class Joint(URDFType):
"""A connection between two links.
There are several types of joints, including:
- ``fixed`` - a joint that cannot move.
- ``prismatic`` - a joint that slides along the joint axis.
- ``revolute`` - a hinge joint that rotates about the axis with a limited
range of motion.
- ``continuous`` - a hinge joint that rotates about the axis with an
unlimited range of motion.
- ``planar`` - a joint that moves in the plane orthogonal to the axis.
- ``floating`` - a joint that can move in 6DoF.
- ``spherical`` - a spherical joint that moves in 3DoF.
Parameters
----------
name : str
The name of this joint.
parent : str
The name of the parent link of this joint.
child : str
The name of the child link of this joint.
joint_type : str
The type of the joint. Must be one of :obj:`.Joint.TYPES`.
axis : (3,) float, optional
The axis of the joint specified in joint frame. Defaults to
``[1,0,0]``.
origin : (4,4) float, optional
The pose of the child link with respect to the parent link's frame.
The joint frame is defined to be coincident with the child link's
frame, so this is also the pose of the joint frame with respect to
the parent link's frame.
limit : :class:`.JointLimit`, optional
Limit for the joint. Only required for revolute and prismatic
joints.
dynamics : :class:`.JointDynamics`, optional
Dynamics for the joint.
safety_controller : :class:`.SafetyController`, optional
The safety controller for this joint.
calibration : :class:`.JointCalibration`, optional
Calibration information for the joint.
mimic : :class:`JointMimic`, optional
Joint mimicry information.
"""
TYPES = ['fixed', 'prismatic', 'revolute',
'continuous', 'spherical', 'floating', 'planar']
_ATTRIBS = {
'name': (str, True),
}
_ELEMENTS = {
'dynamics': (JointDynamics, False, False),
'limit': (JointLimit, False, False),
'mimic': (JointMimic, False, False),
'safety_controller': (SafetyController, False, False),
'calibration': (JointCalibration, False, False),
}
_TAG = 'joint'
def __init__(self, name, joint_type, parent, child, axis=None, origin=None,
limit=None, dynamics=None, safety_controller=None,
calibration=None, mimic=None):
self.name = name
self.parent = parent
self.child = child
self.joint_type = joint_type
self.axis = axis
self.origin = origin
self.limit = limit
self.dynamics = dynamics
self.safety_controller = safety_controller
self.calibration = calibration
self.mimic = mimic
@property
def name(self):
"""str : Name for this joint.
"""
return self._name
@name.setter
def name(self, value):
self._name = str(value)
@property
def joint_type(self):
"""str : The type of this joint.
"""
return self._joint_type
@joint_type.setter
def joint_type(self, value):
value = str(value)
if value not in Joint.TYPES:
raise ValueError('Unsupported joint type {}'.format(value))
self._joint_type = value
@property
def parent(self):
"""str : The name of the parent link.
"""
return self._parent
@parent.setter
def parent(self, value):
self._parent = str(value)
@property
def child(self):
"""str : The name of the child link.
"""
return self._child
@child.setter
def child(self, value):
self._child = str(value)
@property
def axis(self):
"""(3,) float : The joint axis in the joint frame.
"""
return self._axis
@axis.setter
def axis(self, value):
if value is None:
value = np.array([1.0, 0.0, 0.0], dtype=np.float64)
else:
value = np.asanyarray(value, dtype=np.float64)
if value.shape != (3,):
raise ValueError('Invalid shape for axis, should be (3,)')
value = value / np.linalg.norm(value)
self._axis = value
@property
def origin(self):
"""(4,4) float : The pose of child and joint frames relative to the
parent link's frame.
"""
return self._origin
@origin.setter
def origin(self, value):
self._origin = configure_origin(value)
@property
def limit(self):
""":class:`.JointLimit` : The limits for this joint.
"""
return self._limit
@limit.setter
def limit(self, value):
if value is None:
if self.joint_type in ['prismatic', 'revolute']:
raise ValueError('Require joint limit for prismatic and '
'revolute joints')
elif not isinstance(value, JointLimit):
raise TypeError('Expected JointLimit type')
self._limit = value
@property
def dynamics(self):
""":class:`.JointDynamics` : The dynamics for this joint.
"""
return self._dynamics
@dynamics.setter
def dynamics(self, value):
if value is not None:
if not isinstance(value, JointDynamics):
raise TypeError('Expected JointDynamics type')
self._dynamics = value
@property
def safety_controller(self):
""":class:`.SafetyController` : The safety controller for this joint.
"""
return self._safety_controller
@safety_controller.setter
def safety_controller(self, value):
if value is not None:
if not isinstance(value, SafetyController):
raise TypeError('Expected SafetyController type')
self._safety_controller = value
@property
def calibration(self):
""":class:`.JointCalibration` : The calibration for this joint.
"""
return self._calibration
@calibration.setter
def calibration(self, value):
if value is not None:
if not isinstance(value, JointCalibration):
raise TypeError('Expected JointCalibration type')
self._calibration = value
@property
def mimic(self):
""":class:`.JointMimic` : The mimic for this joint.
"""
return self._mimic
@mimic.setter
def mimic(self, value):
if value is not None:
if not isinstance(value, JointMimic):
raise TypeError('Expected JointMimic type')
self._mimic = value
def is_valid(self, cfg):
"""Check if the provided configuration value is valid for this joint.
Parameters
----------
cfg : float, (2,) float, (6,) float, or (4,4) float
The configuration of the joint.
Returns
-------
is_valid : bool
True if the configuration is valid, and False otherwise.
"""
if self.joint_type not in ['fixed', 'revolute']:
return True
if self.limit is None:
return True
cfg = float(cfg)
lower = -np.infty
upper = np.infty
if self.limit.lower is not None:
lower = self.limit.lower
if self.limit.upper is not None:
upper = self.limit.upper
return (cfg >= lower and cfg <= upper)
def get_child_pose(self, cfg=None):
"""Computes the child pose relative to a parent pose for a given
configuration value.
Parameters
----------
cfg : float, (2,) float, (6,) float, or (4,4) float
The configuration values for this joint. They are interpreted
based on the joint type as follows:
- ``fixed`` - not used.
- ``prismatic`` - a translation along the axis in meters.
- ``revolute`` - a rotation about the axis in radians.
- ``continuous`` - a rotation about the axis in radians.
- ``planar`` - the x and y translation values in the plane.
- ``floating`` - the xyz values followed by the rpy values,
or a (4,4) matrix.
- ``spherical`` - same as ``floating``.
If ``cfg`` is ``None``, then this just returns the joint pose.
Returns
-------
pose : (4,4) float
The pose of the child relative to the parent.
"""
if cfg is None:
return self.origin
elif self.joint_type == 'fixed':
return self.origin
elif self.joint_type in ['revolute', 'continuous']:
if cfg is None:
cfg = 0.0
else:
cfg = float(cfg)
R = trimesh.transformations.rotation_matrix(cfg, self.axis)
return self.origin.dot(R)
elif self.joint_type == 'prismatic':
if cfg is None:
cfg = 0.0
else:
cfg = float(cfg)
translation = np.eye(4, dtype=np.float64)
translation[:3,3] = self.axis * cfg
return self.origin.dot(translation)
elif self.joint_type == 'planar':
if cfg is None:
cfg = np.zeros(2, dtype=np.float64)
else:
cfg = np.asanyarray(cfg, dtype=np.float64)
if cfg.shape != (2,):
raise ValueError(
'(2,) float configuration required for planar joints'
)
translation = np.eye(4, dtype=np.float64)
translation[:3,3] = self.origin[:3,:2].dot(cfg)
return self.origin.dot(translation)
elif self.joint_type in ['floating', 'spherical']:
if cfg is None:
cfg = np.zeros(6, dtype=np.float64)
else:
if self.joint_type == 'spherical':
if cfg.shape == (4, 4):
assert np.allclose(cfg[:3, 3], 0), "spherical joint should have no translation component"
cfg = configure_origin(cfg)
if cfg is None:
raise ValueError('Invalid configuration for floating joint')
return self.origin.dot(cfg)
else:
raise ValueError('Invalid configuration')
def get_child_poses(self, cfg, n_cfgs):
"""Computes the child pose relative to a parent pose for a given set of
configuration values.
Parameters
----------
cfg : (n,) float or None
The configuration values for this joint. They are interpreted
based on the joint type as follows:
- ``fixed`` - not used.
- ``prismatic`` - a translation along the axis in meters.
- ``revolute`` - a rotation about the axis in radians.
- ``continuous`` - a rotation about the axis in radians.
- ``planar`` - Not implemented.
- ``spherical`` - Not implemented.
- ``floating`` - Not implemented.
If ``cfg`` is ``None``, then this just returns the joint pose.
Returns
-------
poses : (n,4,4) float
The poses of the child relative to the parent.
"""
if cfg is None:
return np.tile(self.origin, (n_cfgs, 1, 1))
elif self.joint_type == 'fixed':
return np.tile(self.origin, (n_cfgs, 1, 1))
elif self.joint_type in ['revolute', 'continuous']:
if cfg is None:
cfg = np.zeros(n_cfgs)
return np.matmul(self.origin, self._rotation_matrices(cfg, self.axis))
elif self.joint_type == 'prismatic':
if cfg is None:
cfg = np.zeros(n_cfgs)
translation = np.tile(np.eye(4), (n_cfgs, 1, 1))
translation[:,:3,3] = self.axis * cfg[:,np.newaxis]
return np.matmul(self.origin, translation)
elif self.joint_type == 'planar':
raise NotImplementedError()
elif self.joint_type in ['floating', 'spherical']:
raise NotImplementedError()
else:
raise ValueError('Invalid configuration')
@classmethod
def _from_xml(cls, node, path):
kwargs = cls._parse(node, path)
kwargs['joint_type'] = str(node.attrib['type'])
kwargs['parent'] = node.find('parent').attrib['link']
kwargs['child'] = node.find('child').attrib['link']
axis = node.find('axis')
if axis is not None:
axis = np.fromstring(axis.attrib['xyz'], sep=' ')
kwargs['axis'] = axis
kwargs['origin'] = parse_origin(node)
return Joint(**kwargs)
def _to_xml(self, parent, path):
node = self._unparse(path)
parent = ET.Element('parent')
parent.attrib['link'] = self.parent
node.append(parent)
child = ET.Element('child')
child.attrib['link'] = self.child
node.append(child)
if self.axis is not None:
axis = ET.Element('axis')
axis.attrib['xyz'] = np.array2string(self.axis)[1:-1]
node.append(axis)
node.append(unparse_origin(self.origin))
node.attrib['type'] = self.joint_type
return node
def _rotation_matrices(self, angles, axis):
"""Compute rotation matrices from angle/axis representations.
Parameters
----------
angles : (n,) float
The angles.
axis : (3,) float
The axis.
Returns
-------
rots : (n,4,4)
The rotation matrices
"""
axis = axis / np.linalg.norm(axis)
sina = np.sin(angles)
cosa = np.cos(angles)
M = np.tile(np.eye(4), (len(angles), 1, 1))
M[:,0,0] = cosa
M[:,1,1] = cosa
M[:,2,2] = cosa
M[:,:3,:3] += (
np.tile(np.outer(axis, axis), (len(angles), 1, 1)) *
(1.0 - cosa)[:, np.newaxis, np.newaxis]
)
M[:,:3,:3] += np.tile(np.array([
[0.0, -axis[2], axis[1]],
[axis[2], 0.0, -axis[0]],
[-axis[1], axis[0], 0.0]]
), (len(angles), 1, 1)) * sina[:, np.newaxis, np.newaxis]
return M
def copy(self, prefix='', scale=None):
"""Create a deep copy of the joint with the prefix applied to all names.
Parameters
----------
prefix : str
A prefix to apply to all joint and link names.
Returns
-------
:class:`.Joint`
A deep copy of the joint.
"""
origin = self.origin.copy()
if scale is not None:
if not isinstance(scale, (list, np.ndarray)):
scale = np.repeat(scale, 3)
origin[:3,3] *= scale
cpy = Joint(
name='{}{}'.format(prefix, self.name),
joint_type=self.joint_type,
parent='{}{}'.format(prefix, self.parent),
child='{}{}'.format(prefix, self.child),
axis=self.axis.copy(),
origin=origin,
limit=(self.limit.copy(prefix, scale) if self.limit else None),
dynamics=(self.dynamics.copy(prefix,scale) if self.dynamics else None),
safety_controller=(self.safety_controller.copy(prefix, scale) if
self.safety_controller else None),
calibration=(self.calibration.copy(prefix, scale) if self.calibration else None),
mimic=(self.mimic.copy(prefix=prefix, scale=scale) if self.mimic else None)
)
return cpy
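# Illustrative usage sketch (not part of the public API): a revolute joint
# and the child pose it yields for a configuration value. With the default
# identity origin, the result is just the rotation about the joint axis.
def _example_joint_usage():
    joint = Joint(
        name='elbow',
        joint_type='revolute',
        parent='upper_arm',
        child='forearm',
        axis=[0.0, 0.0, 1.0],
        limit=JointLimit(effort=10.0, velocity=1.0, lower=-1.57, upper=1.57),
    )
    pose = joint.get_child_pose(cfg=0.5)     # (4,4) homogeneous transform
    return pose.shape, joint.is_valid(0.5)   # ((4, 4), True)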
class Link(URDFType):
"""A link of a rigid object.
Parameters
----------
name : str
The name of the link.
inertial : :class:`.Inertial`, optional
The inertial properties of the link.
visuals : list of :class:`.Visual`, optional
The visual properties of the link.
collisions : list of :class:`.Collision`, optional
The collision properties of the link.
"""
_ATTRIBS = {
'name': (str, True),
}
_ELEMENTS = {
'inertial': (Inertial, False, False),
'visuals': (Visual, False, True),
'collisions': (Collision, False, True),
}
_TAG = 'link'
def __init__(self, name, inertial, visuals, collisions):
self.name = name
self.inertial = inertial
self.visuals = visuals
self.collisions = collisions
self._collision_mesh = None
@property
def name(self):
"""str : The name of this link.
"""
return self._name
@name.setter
def name(self, value):
self._name = str(value)
@property
def inertial(self):
""":class:`.Inertial` : Inertial properties of the link.
"""
return self._inertial
@inertial.setter
def inertial(self, value):
if value is not None and not isinstance(value, Inertial):
raise TypeError('Expected Inertial object')
# Set default inertial
if value is None:
value = Inertial(mass=1.0, inertia=np.eye(3))
self._inertial = value
@property
def visuals(self):
"""list of :class:`.Visual` : The visual properties of this link.
"""
return self._visuals
@visuals.setter
def visuals(self, value):
if value is None:
value = []
else:
value = list(value)
for v in value:
if not isinstance(v, Visual):
raise ValueError('Expected list of Visual objects')
self._visuals = value
@property
def collisions(self):
"""list of :class:`.Collision` : The collision properties of this link.
"""
return self._collisions
@collisions.setter
def collisions(self, value):
if value is None:
value = []
else:
value = list(value)
for v in value:
if not isinstance(v, Collision):
raise ValueError('Expected list of Collision objects')
self._collisions = value
@property
def collision_mesh(self):
""":class:`~trimesh.base.Trimesh` : A single collision mesh for
the link, specified in the link frame, or None if there isn't one.
"""
if len(self.collisions) == 0:
return None
if self._collision_mesh is None:
meshes = []
for c in self.collisions:
for m in c.geometry.meshes:
m = m.copy()
pose = c.origin
if c.geometry.mesh is not None:
if c.geometry.mesh.scale is not None:
S = np.eye(4)
S[:3,:3] = np.diag(c.geometry.mesh.scale)
pose = pose.dot(S)
m.apply_transform(pose)
meshes.append(m)
if len(meshes) == 0:
return None
self._collision_mesh = (meshes[0] + meshes[1:])
return self._collision_mesh
def copy(self, prefix='', scale=None, collision_only=False):
"""Create a deep copy of the link.
Parameters
----------
prefix : str
A prefix to apply to all joint and link names.
Returns
-------
link : :class:`.Link`
A deep copy of the Link.
"""
inertial = self.inertial.copy() if self.inertial is not None else None
cm = self._collision_mesh
if scale is not None:
if self.collision_mesh is not None and self.inertial is not None:
sm = np.eye(4)
if not isinstance(scale, (list, np.ndarray)):
scale = np.repeat(scale, 3)
sm[:3,:3] = np.diag(scale)
cm = self.collision_mesh.copy()
cm.density = self.inertial.mass / cm.volume
cm.apply_transform(sm)
cmm = np.eye(4)
cmm[:3,3] = cm.center_mass
inertial = Inertial(mass=cm.mass, inertia=cm.moment_inertia,
origin=cmm)
visuals = None
if not collision_only:
visuals = [v.copy(prefix=prefix, scale=scale) for v in self.visuals]
cpy = Link(
name='{}{}'.format(prefix, self.name),
inertial=inertial,
visuals=visuals,
collisions=[v.copy(prefix=prefix, scale=scale) for v in self.collisions],
)
cpy._collision_mesh = cm
return cpy
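# Illustrative usage sketch (not part of the public API): a link that reuses
# one Geometry for both its visual and collision elements; passing
# inertial=None falls back to a unit mass with identity rotational inertia.
def _example_link_usage():
    geom = Geometry(box=Box(size=[0.2, 0.1, 0.05]))
    link = Link(
        name='base_link',
        inertial=None,
        visuals=[Visual(geometry=geom)],
        collisions=[Collision(name=None, origin=None, geometry=geom)],
    )
    return link.collision_mesh.volume    # 0.2 * 0.1 * 0.05 == 0.001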
class URDF(URDFType):
"""The top-level URDF specification.
The URDF encapsulates an articulated object, such as a robot or a gripper.
It is made of links and joints that tie them together and define their
relative motions.
Parameters
----------
name : str
The name of the URDF.
links : list of :class:`.Link`
The links of the URDF.
joints : list of :class:`.Joint`, optional
The joints of the URDF.
transmissions : list of :class:`.Transmission`, optional
The transmissions of the URDF.
materials : list of :class:`.Material`, optional
The materials for the URDF.
other_xml : str, optional
A string containing any extra XML for extensions.
"""
_ATTRIBS = {
'name': (str, True),
}
_ELEMENTS = {
'links': (Link, True, True),
'joints': (Joint, False, True),
'transmissions': (Transmission, False, True),
'materials': (Material, False, True),
}
_TAG = 'robot'
def __init__(self, name, links, joints=None,
transmissions=None, materials=None, other_xml=None):
if joints is None:
joints = []
if transmissions is None:
transmissions = []
if materials is None:
materials = []
self.name = name
self.other_xml = other_xml
# No setters for these
self._links = list(links)
self._joints = list(joints)
self._transmissions = list(transmissions)
self._materials = list(materials)
# Set up private helper maps from name to value
self._link_map = {}
self._joint_map = {}
self._transmission_map = {}
self._material_map = {}
for x in self._links:
if x.name in self._link_map:
raise ValueError('Two links with name {} found'.format(x.name))
self._link_map[x.name] = x
for x in self._joints:
if x.name in self._joint_map:
raise ValueError('Two joints with name {} '
'found'.format(x.name))
self._joint_map[x.name] = x
for x in self._transmissions:
if x.name in self._transmission_map:
raise ValueError('Two transmissions with name {} '
'found'.format(x.name))
self._transmission_map[x.name] = x
for x in self._materials:
if x.name in self._material_map:
raise ValueError('Two materials with name {} '
'found'.format(x.name))
self._material_map[x.name] = x
# Synchronize materials between links and top-level set
self._merge_materials()
# Validate the joints and transmissions
actuated_joints = self._validate_joints()
self._validate_transmissions()
# Create the link graph and base link/end link sets
self._G = nx.DiGraph()
# Add all links
for link in self.links:
self._G.add_node(link)
# Add all edges from CHILDREN TO PARENTS, with joints as their object
for joint in self.joints:
parent = self._link_map[joint.parent]
child = self._link_map[joint.child]
self._G.add_edge(child, parent, joint=joint)
# Validate the graph and get the base and end links
self._base_link, self._end_links = self._validate_graph()
# Cache the paths to the base link
self._paths_to_base = nx.shortest_path(
self._G, target=self._base_link
)
self._actuated_joints = self._sort_joints(actuated_joints)
# Cache the reverse topological order (useful for speeding up FK,
# as we want to start at the base and work outward to cache
# computation).
self._reverse_topo = list(reversed(list(nx.topological_sort(self._G))))
@property
def name(self):
"""str : The name of the URDF.
"""
return self._name
@name.setter
def name(self, value):
self._name = str(value)
@property
def links(self):
"""list of :class:`.Link` : The links of the URDF.
This returns a copy of the links array which cannot be edited
directly. If you want to add or remove links, use
the appropriate functions.
"""
return copy.copy(self._links)
@property
def link_map(self):
"""dict : Map from link names to the links themselves.
This returns a copy of the link map which cannot be edited
directly. If you want to add or remove links, use
the appropriate functions.
"""
return copy.copy(self._link_map)
@property
def joints(self):
"""list of :class:`.Joint` : The links of the URDF.
This returns a copy of the joints array which cannot be edited
directly. If you want to add or remove joints, use
the appropriate functions.
"""
return copy.copy(self._joints)
@property
def joint_map(self):
"""dict : Map from joint names to the joints themselves.
This returns a copy of the joint map which cannot be edited
directly. If you want to add or remove joints, use
the appropriate functions.
"""
return copy.copy(self._joint_map)
@property
def transmissions(self):
"""list of :class:`.Transmission` : The transmissions of the URDF.
This returns a copy of the transmissions array which cannot be edited
directly. If you want to add or remove transmissions, use
the appropriate functions.
"""
return copy.copy(self._transmissions)
@property
def transmission_map(self):
"""dict : Map from transmission names to the transmissions themselves.
This returns a copy of the transmission map which cannot be edited
directly. If you want to add or remove transmissions, use
the appropriate functions.
"""
return copy.copy(self._transmission_map)
@property
def materials(self):
"""list of :class:`.Material` : The materials of the URDF.
This returns a copy of the materials array which cannot be edited
directly. If you want to add or remove materials, use
the appropriate functions.
"""
return copy.copy(self._materials)
@property
def material_map(self):
"""dict : Map from material names to the materials themselves.
This returns a copy of the material map which cannot be edited
directly. If you want to add or remove materials, use
the appropriate functions.
"""
return copy.copy(self._material_map)
@property
def other_xml(self):
"""str : Any extra XML that belongs with the URDF.
"""
return self._other_xml
@other_xml.setter
def other_xml(self, value):
self._other_xml = value
@property
def actuated_joints(self):
"""list of :class:`.Joint` : The joints that are independently
actuated.
This excludes mimic joints and fixed joints. The joints are listed
in topological order, starting from the base-most joint.
"""
return self._actuated_joints
@property
def actuated_joint_names(self):
"""list of :class:`.Joint` : The names of joints that are independently
actuated.
This excludes mimic joints and fixed joints. The joints are listed
in topological order, starting from the base-most joint.
"""
return [j.name for j in self._actuated_joints]
def cfg_to_vector(self, cfg):
"""Convert a configuration dictionary into a configuration vector.
Parameters
----------
cfg : dict or None
The configuration value.
Returns
-------
vec : (n,) float
The configuration vector, or None if no actuated joints present.
"""
if cfg is None:
if len(self.actuated_joints) > 0:
return np.zeros(len(self.actuated_joints))
else:
return None
elif isinstance(cfg, (list, tuple, np.ndarray)):
return np.asanyarray(cfg)
elif isinstance(cfg, dict):
vec = np.zeros(len(self.actuated_joints))
for i, jn in enumerate(self.actuated_joint_names):
if jn in cfg:
vec[i] = cfg[jn]
return vec
else:
raise ValueError('Invalid configuration: {}'.format(cfg))
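    # Example (sketch; the joint names are hypothetical, for a robot with two
    # actuated joints 'joint_a' and 'joint_b'):
    #   robot.cfg_to_vector({'joint_a': 0.5})  # -> array([0.5, 0.0])
    #   robot.cfg_to_vector(None)              # -> zero vector, one entry per actuated joint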
@property
def base_link(self):
""":class:`.Link`: The base link for the URDF.
The base link is the single link that has no parent.
"""
return self._base_link
@property
def end_links(self):
"""list of :class:`.Link`: The end links for the URDF.
The end links are the links that have no children.
"""
return self._end_links
@property
def joint_limit_cfgs(self):
"""tuple of dict : The lower-bound and upper-bound joint configuration
maps.
The first map is the lower-bound map, which maps limited joints to
their lower joint limits.
The second map is the upper-bound map, which maps limited joints to
their upper joint limits.
"""
lb = {}
ub = {}
for joint in self.actuated_joints:
if joint.limit is not None:
if joint.limit.lower is not None:
lb[joint] = joint.limit.lower
if joint.limit.upper is not None:
ub[joint] = joint.limit.upper
return (lb, ub)
@property
def joint_limits(self):
"""(n,2) float : A lower and upper limit for each joint.
"""
limits = []
for joint in self.actuated_joints:
limit = [-np.infty, np.infty]
if joint.limit is not None:
if joint.limit.lower is not None:
limit[0] = joint.limit.lower
if joint.limit.upper is not None:
limit[1] = joint.limit.upper
limits.append(limit)
return np.array(limits)
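    # Example (sketch): clamp a configuration vector to the reported limits.
    #   limits = robot.joint_limits                     # (n, 2) array of [lower, upper]
    #   cfg = np.clip(cfg, limits[:, 0], limits[:, 1])  # elementwise clamp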
def link_fk(self, cfg=None, link=None, links=None, use_names=False):
"""Computes the poses of the URDF's links via forward kinematics.
Parameters
----------
cfg : dict or (n), float
A map from joints or joint names to configuration values for
each joint, or a list containing a value for each actuated joint
in sorted order from the base link.
If not specified, all joints are assumed to be in their default
configurations.
link : str or :class:`.Link`
A single link or link name to return a pose for.
links : list of str or list of :class:`.Link`
The links or names of links to perform forward kinematics on.
Only these links will be in the returned map. If neither
link nor links are specified all links are returned.
use_names : bool
If True, the returned dictionary will have keys that are string
link names rather than the links themselves.
Returns
-------
fk : dict or (4,4) float
A map from links to 4x4 homogenous transform matrices that
position them relative to the base link's frame, or a single
4x4 matrix if ``link`` is specified.
"""
# Process config value
joint_cfg = self._process_cfg(cfg)
# Process link set
link_set = set()
if link is not None:
if isinstance(link, six.string_types):
link_set.add(self._link_map[link])
elif isinstance(link, Link):
link_set.add(link)
elif links is not None:
for lnk in links:
if isinstance(lnk, six.string_types):
link_set.add(self._link_map[lnk])
elif isinstance(lnk, Link):
link_set.add(lnk)
else:
raise TypeError('Got object of type {} in links list'
.format(type(lnk)))
else:
link_set = self.links
# Compute forward kinematics in reverse topological order
fk = OrderedDict()
for lnk in self._reverse_topo:
if lnk not in link_set:
continue
pose = np.eye(4, dtype=np.float64)
path = self._paths_to_base[lnk]
for i in range(len(path) - 1):
child = path[i]
parent = path[i + 1]
joint = self._G.get_edge_data(child, parent)['joint']
cfg = None
if joint.mimic is not None:
mimic_joint = self._joint_map[joint.mimic.joint]
if mimic_joint in joint_cfg:
cfg = joint_cfg[mimic_joint]
cfg = joint.mimic.multiplier * cfg + joint.mimic.offset
elif joint in joint_cfg:
cfg = joint_cfg[joint]
pose = joint.get_child_pose(cfg).dot(pose)
# Check existing FK to see if we can exit early
if parent in fk:
pose = fk[parent].dot(pose)
break
fk[lnk] = pose
if link:
if isinstance(link, six.string_types):
return fk[self._link_map[link]]
else:
return fk[link]
if use_names:
return {ell.name: fk[ell] for ell in fk}
return fk
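    # Example (sketch; the file, joint and link names are assumptions):
    #   robot = URDF.load('ur5.urdf')
    #   fk = robot.link_fk(cfg={'shoulder_pan_joint': np.pi / 2.0})
    #   pose = fk[robot.link_map['wrist_3_link']]  # (4,4) pose in the base link frame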
def link_fk_batch(self, cfgs=None, link=None, links=None, use_names=False):
"""Computes the poses of the URDF's links via forward kinematics in a batch.
Parameters
----------
cfgs : dict, list of dict, or (n,m), float
One of the following: (A) a map from joints or joint names to vectors
of joint configuration values, (B) a list of maps from joints or joint names
to single configuration values, or (C) a list of ``n`` configuration vectors,
each of which has a vector with an entry for each actuated joint.
link : str or :class:`.Link`
A single link or link name to return a pose for.
links : list of str or list of :class:`.Link`
The links or names of links to perform forward kinematics on.
Only these links will be in the returned map. If neither
link nor links are specified all links are returned.
use_names : bool
If True, the returned dictionary will have keys that are string
link names rather than the links themselves.
Returns
-------
fk : dict or (n,4,4) float
A map from links to a (n,4,4) vector of homogenous transform matrices that
position the links relative to the base link's frame, or a single
nx4x4 matrix if ``link`` is specified.
"""
joint_cfgs, n_cfgs = self._process_cfgs(cfgs)
# Process link set
link_set = set()
if link is not None:
if isinstance(link, six.string_types):
link_set.add(self._link_map[link])
elif isinstance(link, Link):
link_set.add(link)
elif links is not None:
for lnk in links:
if isinstance(lnk, six.string_types):
link_set.add(self._link_map[lnk])
elif isinstance(lnk, Link):
link_set.add(lnk)
else:
raise TypeError('Got object of type {} in links list'
.format(type(lnk)))
else:
link_set = self.links
# Compute FK mapping each link to a vector of matrices, one matrix per cfg
fk = OrderedDict()
for lnk in self._reverse_topo:
if lnk not in link_set:
continue
poses = np.tile(np.eye(4, dtype=np.float64), (n_cfgs, 1, 1))
path = self._paths_to_base[lnk]
for i in range(len(path) - 1):
child = path[i]
parent = path[i + 1]
joint = self._G.get_edge_data(child, parent)['joint']
cfg_vals = None
if joint.mimic is not None:
mimic_joint = self._joint_map[joint.mimic.joint]
if mimic_joint in joint_cfgs:
cfg_vals = joint_cfgs[mimic_joint]
cfg_vals = joint.mimic.multiplier * cfg_vals + joint.mimic.offset
elif joint in joint_cfgs:
cfg_vals = joint_cfgs[joint]
poses = np.matmul(joint.get_child_poses(cfg_vals, n_cfgs), poses)
if parent in fk:
poses = np.matmul(fk[parent], poses)
break
fk[lnk] = poses
if link:
if isinstance(link, six.string_types):
return fk[self._link_map[link]]
else:
return fk[link]
if use_names:
return {ell.name: fk[ell] for ell in fk}
return fk
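    # Example (sketch): evaluate many configurations in one call.
    #   cfgs = np.zeros((100, len(robot.actuated_joints)))  # 100 zero configurations
    #   fk = robot.link_fk_batch(cfgs=cfgs)                 # each value is a (100,4,4) array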
def visual_geometry_fk(self, cfg=None, links=None):
"""Computes the poses of the URDF's visual geometries using fk.
Parameters
----------
cfg : dict or (n), float
A map from joints or joint names to configuration values for
each joint, or a list containing a value for each actuated joint
in sorted order from the base link.
If not specified, all joints are assumed to be in their default
configurations.
links : list of str or list of :class:`.Link`
The links or names of links to perform forward kinematics on.
Only geometries from these links will be in the returned map.
If not specified, all links are returned.
Returns
-------
fk : dict
A map from :class:`Geometry` objects that are part of the visual
elements of the specified links to the 4x4 homogenous transform
matrices that position them relative to the base link's frame.
"""
lfk = self.link_fk(cfg=cfg, links=links)
fk = OrderedDict()
for link in lfk:
for visual in link.visuals:
fk[visual.geometry] = lfk[link].dot(visual.origin)
return fk
def visual_geometry_fk_batch(self, cfgs=None, links=None):
"""Computes the poses of the URDF's visual geometries using fk.
Parameters
----------
cfgs : dict, list of dict, or (n,m), float
One of the following: (A) a map from joints or joint names to vectors
of joint configuration values, (B) a list of maps from joints or joint names
to single configuration values, or (C) a list of ``n`` configuration vectors,
each of which has a vector with an entry for each actuated joint.
links : list of str or list of :class:`.Link`
The links or names of links to perform forward kinematics on.
Only geometries from these links will be in the returned map.
If not specified, all links are returned.
Returns
-------
fk : dict
A map from :class:`Geometry` objects that are part of the visual
elements of the specified links to the 4x4 homogenous transform
matrices that position them relative to the base link's frame.
"""
lfk = self.link_fk_batch(cfgs=cfgs, links=links)
fk = OrderedDict()
for link in lfk:
for visual in link.visuals:
fk[visual.geometry] = np.matmul(lfk[link], visual.origin)
return fk
def visual_trimesh_fk(self, cfg=None, links=None):
"""Computes the poses of the URDF's visual trimeshes using fk.
Parameters
----------
cfg : dict or (n), float
A map from joints or joint names to configuration values for
each joint, or a list containing a value for each actuated joint
in sorted order from the base link.
If not specified, all joints are assumed to be in their default
configurations.
links : list of str or list of :class:`.Link`
The links or names of links to perform forward kinematics on.
Only trimeshes from these links will be in the returned map.
If not specified, all links are returned.
Returns
-------
fk : dict
A map from :class:`~trimesh.base.Trimesh` objects that are
part of the visual geometry of the specified links to the
4x4 homogenous transform matrices that position them relative
to the base link's frame.
"""
lfk = self.link_fk(cfg=cfg, links=links)
fk = OrderedDict()
for link in lfk:
for visual in link.visuals:
for mesh in visual.geometry.meshes:
pose = lfk[link].dot(visual.origin)
if visual.geometry.mesh is not None:
if visual.geometry.mesh.scale is not None:
S = np.eye(4, dtype=np.float64)
S[:3,:3] = np.diag(visual.geometry.mesh.scale)
pose = pose.dot(S)
fk[mesh] = pose
return fk
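    # Example (sketch): gather the posed meshes into a trimesh scene.
    #   import trimesh
    #   scene = trimesh.Scene()
    #   for tm, pose in robot.visual_trimesh_fk(cfg=cfg).items():
    #       scene.add_geometry(tm, transform=pose)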
def visual_trimesh_fk_batch(self, cfgs=None, links=None):
"""Computes the poses of the URDF's visual trimeshes using fk.
Parameters
----------
cfgs : dict, list of dict, or (n,m), float
One of the following: (A) a map from joints or joint names to vectors
of joint configuration values, (B) a list of maps from joints or joint names
to single configuration values, or (C) a list of ``n`` configuration vectors,
each of which has a vector with an entry for each actuated joint.
links : list of str or list of :class:`.Link`
The links or names of links to perform forward kinematics on.
Only trimeshes from these links will be in the returned map.
If not specified, all links are returned.
Returns
-------
fk : dict
A map from :class:`~trimesh.base.Trimesh` objects that are
part of the visual geometry of the specified links to the
4x4 homogenous transform matrices that position them relative
to the base link's frame.
"""
lfk = self.link_fk_batch(cfgs=cfgs, links=links)
fk = OrderedDict()
for link in lfk:
for visual in link.visuals:
for mesh in visual.geometry.meshes:
poses = np.matmul(lfk[link], visual.origin)
if visual.geometry.mesh is not None:
if visual.geometry.mesh.scale is not None:
S = np.eye(4, dtype=np.float64)
S[:3,:3] = np.diag(visual.geometry.mesh.scale)
poses = np.matmul(poses, S)
fk[mesh] = poses
return fk
def collision_geometry_fk(self, cfg=None, links=None):
"""Computes the poses of the URDF's collision geometries using fk.
Parameters
----------
cfg : dict or (n), float
A map from joints or joint names to configuration values for
each joint, or a list containing a value for each actuated joint
in sorted order from the base link.
If not specified, all joints are assumed to be in their default
configurations.
links : list of str or list of :class:`.Link`
The links or names of links to perform forward kinematics on.
Only geometries from these links will be in the returned map.
If not specified, all links are returned.
Returns
-------
fk : dict
A map from :class:`Geometry` objects that are part of the collision
elements of the specified links to the 4x4 homogenous transform
matrices that position them relative to the base link's frame.
"""
lfk = self.link_fk(cfg=cfg, links=links)
fk = OrderedDict()
for link in lfk:
for collision in link.collisions:
fk[collision] = lfk[link].dot(collision.origin)
return fk
def collision_geometry_fk_batch(self, cfgs=None, links=None):
"""Computes the poses of the URDF's collision geometries using fk.
Parameters
----------
cfgs : dict, list of dict, or (n,m), float
One of the following: (A) a map from joints or joint names to vectors
of joint configuration values, (B) a list of maps from joints or joint names
to single configuration values, or (C) a list of ``n`` configuration vectors,
each of which has a vector with an entry for each actuated joint.
links : list of str or list of :class:`.Link`
The links or names of links to perform forward kinematics on.
Only geometries from these links will be in the returned map.
If not specified, all links are returned.
Returns
-------
fk : dict
A map from :class:`Geometry` objects that are part of the collision
elements of the specified links to the 4x4 homogenous transform
matrices that position them relative to the base link's frame.
"""
lfk = self.link_fk_batch(cfgs=cfgs, links=links)
fk = OrderedDict()
for link in lfk:
for collision in link.collisions:
fk[collision] = np.matmul(lfk[link], collision.origin)
return fk
def collision_trimesh_fk(self, cfg=None, links=None):
"""Computes the poses of the URDF's collision trimeshes using fk.
Parameters
----------
cfg : dict or (n), float
A map from joints or joint names to configuration values for
each joint, or a list containing a value for each actuated joint
in sorted order from the base link.
If not specified, all joints are assumed to be in their default
configurations.
links : list of str or list of :class:`.Link`
The links or names of links to perform forward kinematics on.
Only trimeshes from these links will be in the returned map.
If not specified, all links are returned.
Returns
-------
fk : dict
A map from :class:`~trimesh.base.Trimesh` objects that are
part of the collision geometry of the specified links to the
4x4 homogenous transform matrices that position them relative
to the base link's frame.
"""
lfk = self.link_fk(cfg=cfg, links=links)
fk = OrderedDict()
for link in lfk:
pose = lfk[link]
cm = link.collision_mesh
if cm is not None:
fk[cm] = pose
return fk
def collision_trimesh_fk_batch(self, cfgs=None, links=None):
"""Computes the poses of the URDF's collision trimeshes using fk.
Parameters
----------
cfgs : dict, list of dict, or (n,m), float
One of the following: (A) a map from joints or joint names to vectors
of joint configuration values, (B) a list of maps from joints or joint names
to single configuration values, or (C) a list of ``n`` configuration vectors,
each of which has a vector with an entry for each actuated joint.
links : list of str or list of :class:`.Link`
The links or names of links to perform forward kinematics on.
Only trimeshes from these links will be in the returned map.
If not specified, all links are returned.
Returns
-------
fk : dict
A map from :class:`~trimesh.base.Trimesh` objects that are
part of the collision geometry of the specified links to the
4x4 homogenous transform matrices that position them relative
to the base link's frame.
"""
lfk = self.link_fk_batch(cfgs=cfgs, links=links)
fk = OrderedDict()
for link in lfk:
poses = lfk[link]
cm = link.collision_mesh
if cm is not None:
fk[cm] = poses
return fk
def animate(self, cfg_trajectory=None, loop_time=3.0, use_collision=False):
"""Animate the URDF through a configuration trajectory.
Parameters
----------
cfg_trajectory : dict or (m,n) float
A map from joints or joint names to lists of configuration values
for each joint along the trajectory, or a vector of
vectors where the second dimension contains a value for each joint.
If not specified, all joints will articulate from limit to limit.
The trajectory steps are assumed to be equally spaced out in time.
loop_time : float
The time to loop the animation for, in seconds. The trajectory
            will play forwards and backwards during this time, ending
            at the initial configuration.
use_collision : bool
If True, the collision geometry is visualized instead of
the visual geometry.
Examples
--------
You can run this without specifying a ``cfg_trajectory`` to view
the full articulation of the URDF
>>> robot = URDF.load('ur5.urdf')
>>> robot.animate()
.. image:: /_static/ur5.gif
>>> ct = {'shoulder_pan_joint': [0.0, 2 * np.pi]}
>>> robot.animate(cfg_trajectory=ct)
.. image:: /_static/ur5_shoulder.gif
>>> ct = {
... 'shoulder_pan_joint' : [-np.pi / 4, np.pi / 4],
... 'shoulder_lift_joint' : [0.0, -np.pi / 2.0],
... 'elbow_joint' : [0.0, np.pi / 2.0]
... }
>>> robot.animate(cfg_trajectory=ct)
.. image:: /_static/ur5_three_joints.gif
"""
import pyrender # Save pyrender import for here for CI
ct = cfg_trajectory
traj_len = None # Length of the trajectory in steps
ct_np = {} # Numpyified trajectory
# If trajectory not specified, articulate between the limits.
if ct is None:
lb, ub = self.joint_limit_cfgs
if len(lb) > 0:
traj_len = 2
ct_np = {k: np.array([lb[k], ub[k]]) for k in lb}
# If it is specified, parse it and extract the trajectory length.
elif isinstance(ct, dict):
if len(ct) > 0:
for k in ct:
val = np.asanyarray(ct[k]).astype(np.float64)
if traj_len is None:
traj_len = len(val)
elif traj_len != len(val):
raise ValueError('Trajectories must be same length')
ct_np[k] = val
        elif isinstance(ct, (list, tuple, np.ndarray)):
            traj_len = len(ct)
            ct = np.asanyarray(ct).astype(np.float64)
if ct.ndim == 1:
ct = ct.reshape(-1, 1)
if ct.ndim != 2 or ct.shape[1] != len(self.actuated_joints):
raise ValueError('Cfg trajectory must have entry for each joint')
ct_np = {j: ct[:,i] for i, j in enumerate(self.actuated_joints)}
else:
raise TypeError('Invalid type for cfg_trajectory: {}'
.format(type(cfg_trajectory)))
# If there isn't a trajectory to render, just show the model and exit
if len(ct_np) == 0 or traj_len < 2:
self.show(use_collision=use_collision)
return
# Create an array of times that loops from 0 to 1 and back to 0
fps = 30.0
n_steps = int(loop_time * fps / 2.0)
times = np.linspace(0.0, 1.0, n_steps)
times = np.hstack((times, np.flip(times)))
# Create bin edges in the range [0, 1] for each trajectory step
bins = np.arange(traj_len) / (float(traj_len) - 1.0)
# Compute alphas for each time
right_inds = np.digitize(times, bins, right=True)
right_inds[right_inds == 0] = 1
alphas = ((bins[right_inds] - times) /
(bins[right_inds] - bins[right_inds - 1]))
# Create the new interpolated trajectory
new_ct = {}
for k in ct_np:
new_ct[k] = (alphas * ct_np[k][right_inds - 1] +
(1.0 - alphas) * ct_np[k][right_inds])
# Create the scene
if use_collision:
fk = self.collision_trimesh_fk()
else:
fk = self.visual_trimesh_fk()
node_map = {}
scene = pyrender.Scene()
for tm in fk:
pose = fk[tm]
mesh = pyrender.Mesh.from_trimesh(tm, smooth=False)
node = scene.add(mesh, pose=pose)
node_map[tm] = node
# Get base pose to focus on
blp = self.link_fk(links=[self.base_link])[self.base_link]
# Pop the visualizer asynchronously
v = pyrender.Viewer(scene, run_in_thread=True,
use_raymond_lighting=True,
view_center=blp[:3,3])
# Now, run our loop
i = 0
while v.is_active:
cfg = {k: new_ct[k][i] for k in new_ct}
i = (i + 1) % len(times)
if use_collision:
fk = self.collision_trimesh_fk(cfg=cfg)
else:
fk = self.visual_trimesh_fk(cfg=cfg)
v.render_lock.acquire()
for mesh in fk:
pose = fk[mesh]
node_map[mesh].matrix = pose
v.render_lock.release()
time.sleep(1.0 / fps)
def show(self, cfg=None, use_collision=False):
"""Visualize the URDF in a given configuration.
Parameters
----------
cfg : dict or (n), float
A map from joints or joint names to configuration values for
each joint, or a list containing a value for each actuated joint
in sorted order from the base link.
If not specified, all joints are assumed to be in their default
configurations.
use_collision : bool
If True, the collision geometry is visualized instead of
the visual geometry.
"""
import pyrender # Save pyrender import for here for CI
if use_collision:
fk = self.collision_trimesh_fk(cfg=cfg)
else:
fk = self.visual_trimesh_fk(cfg=cfg)
scene = pyrender.Scene()
for tm in fk:
pose = fk[tm]
mesh = pyrender.Mesh.from_trimesh(tm, smooth=False)
scene.add(mesh, pose=pose)
pyrender.Viewer(scene, use_raymond_lighting=True)
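    # Example (sketch; the file and joint names are assumptions):
    #   URDF.load('ur5.urdf').show(cfg={'elbow_joint': 1.0})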
def copy(self, name=None, prefix='', scale=None, collision_only=False):
"""Make a deep copy of the URDF.
Parameters
----------
name : str, optional
A name for the new URDF. If not specified, ``self.name`` is used.
prefix : str, optional
A prefix to apply to all names except for the base URDF name.
scale : float or (3,) float, optional
A scale to apply to the URDF.
collision_only : bool, optional
If True, all visual geometry is redirected to the collision geometry.
Returns
-------
copy : :class:`.URDF`
The copied URDF.
"""
return URDF(
name = (name if name else self.name),
links=[v.copy(prefix, scale, collision_only) for v in self.links],
joints=[v.copy(prefix, scale) for v in self.joints],
transmissions=[v.copy(prefix, scale) for v in self.transmissions],
materials=[v.copy(prefix, scale) for v in self.materials],
other_xml=self.other_xml
)
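    # Example (sketch): a prefixed copy, e.g. for a scene that contains the same arm twice.
    #   left_arm = robot.copy(name='left_arm', prefix='left_')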
def save(self, file_obj):
"""Save this URDF to a file.
Parameters
----------
file_obj : str or file-like object
The file to save the URDF to. Should be the path to the
``.urdf`` XML file. Any paths in the URDF should be specified
as relative paths to the ``.urdf`` file instead of as ROS
resources.
"""
if isinstance(file_obj, six.string_types):
path, _ = os.path.split(file_obj)
else:
path, _ = os.path.split(os.path.realpath(file_obj.name))
node = self._to_xml(None, path)
tree = ET.ElementTree(node)
tree.write(file_obj, pretty_print=True,
xml_declaration=True, encoding='utf-8')
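    # Example (sketch; the output path is an assumption):
    #   robot.save('/tmp/robot_out.urdf')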
def join(self, other, link, origin=None, name=None, prefix=''):
"""Join another URDF to this one by rigidly fixturing the two at a link.
Parameters
----------
        other : :class:`.URDF`
            Another URDF to fuse to this one.
link : :class:`.Link` or str
The link of this URDF to attach the other URDF to.
origin : (4,4) float, optional
The location in this URDF's link frame to attach the base link of the other
URDF at.
name : str, optional
A name for the new URDF.
prefix : str, optional
            If specified, all joints and links of the other URDF will be prefixed
            with this value to avoid name clashes.
Returns
-------
:class:`.URDF`
The new URDF.
"""
myself = self.copy()
other = other.copy(prefix=prefix)
# Validate
link_names = set(myself.link_map.keys())
other_link_names = set(other.link_map.keys())
if len(link_names.intersection(other_link_names)) > 0:
raise ValueError('Cannot merge two URDFs with shared link names')
joint_names = set(myself.joint_map.keys())
other_joint_names = set(other.joint_map.keys())
if len(joint_names.intersection(other_joint_names)) > 0:
raise ValueError('Cannot merge two URDFs with shared joint names')
links = myself.links + other.links
joints = myself.joints + other.joints
transmissions = myself.transmissions + other.transmissions
materials = myself.materials + other.materials
if name is None:
name = self.name
# Create joint that links the two rigidly
joints.append(Joint(
name='{}_join_{}{}_joint'.format(self.name, prefix, other.name),
joint_type='fixed',
parent=link if isinstance(link, str) else link.name,
child=other.base_link.name,
origin=origin
))
return URDF(name=name, links=links, joints=joints, transmissions=transmissions,
materials=materials)
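    # Example (sketch; 'gripper.urdf' and the link name are assumptions):
    #   gripper = URDF.load('gripper.urdf')
    #   combined = robot.join(gripper, link='wrist_3_link', prefix='gripper_')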
def _merge_materials(self):
"""Merge the top-level material set with the link materials.
"""
for link in self.links:
for v in link.visuals:
if v.material is None:
continue
if v.material.name in self.material_map:
v.material = self._material_map[v.material.name]
else:
self._materials.append(v.material)
self._material_map[v.material.name] = v.material
@staticmethod
def load(file_obj):
"""Load a URDF from a file.
Parameters
----------
file_obj : str or file-like object
The file to load the URDF from. Should be the path to the
``.urdf`` XML file. Any paths in the URDF should be specified
as relative paths to the ``.urdf`` file instead of as ROS
resources.
Returns
-------
urdf : :class:`.URDF`
The parsed URDF.
"""
if isinstance(file_obj, six.string_types):
if os.path.isfile(file_obj):
parser = ET.XMLParser(remove_comments=True,
remove_blank_text=True)
tree = ET.parse(file_obj, parser=parser)
path, _ = os.path.split(file_obj)
else:
raise ValueError('{} is not a file'.format(file_obj))
else:
parser = ET.XMLParser(remove_comments=True, remove_blank_text=True)
tree = ET.parse(file_obj, parser=parser)
path, _ = os.path.split(file_obj.name)
node = tree.getroot()
return URDF._from_xml(node, path)
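    # Example (sketch; the file name is an assumption):
    #   robot = URDF.load('ur5.urdf')
    #   print(robot.name, len(robot.links), len(robot.actuated_joints))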
    def _validate_joints(self):
        """Raise an exception if any joints are invalidly specified.
Checks for the following:
- Joint parents are valid link names.
- Joint children are valid link names that aren't the same as parent.
- Joint mimics have valid joint names that aren't the same joint.
Returns
-------
actuated_joints : list of :class:`.Joint`
The joints in the model that are independently controllable.
"""
actuated_joints = []
for joint in self.joints:
if joint.parent not in self._link_map:
raise ValueError('Joint {} has invalid parent link name {}'
.format(joint.name, joint.parent))
if joint.child not in self._link_map:
raise ValueError('Joint {} has invalid child link name {}'
.format(joint.name, joint.child))
if joint.child == joint.parent:
raise ValueError('Joint {} has matching parent and child'
.format(joint.name))
if joint.mimic is not None:
if joint.mimic.joint not in self._joint_map:
raise ValueError(
'Joint {} has an invalid mimic joint name {}'
.format(joint.name, joint.mimic.joint)
)
if joint.mimic.joint == joint.name:
raise ValueError(
'Joint {} set up to mimic itself'
.format(joint.mimic.joint)
)
elif joint.joint_type != 'fixed':
actuated_joints.append(joint)
# Do a depth-first search
return actuated_joints
def _sort_joints(self, joints):
"""Sort joints by ascending distance from the base link (topologically).
Parameters
----------
joints : list of :class:`.Joint`
The joints to sort.
Returns
-------
joints : list of :class:`.Joint`
The sorted joints.
"""
lens = []
for joint in joints:
child_link = self._link_map[joint.child]
lens.append(len(self._paths_to_base[child_link]))
order = np.argsort(lens)
return np.array(joints)[order].tolist()
    def _validate_transmissions(self):
        """Raise an exception if any transmissions are invalidly specified.
Checks for the following:
- Transmission joints have valid joint names.
"""
for t in self.transmissions:
for joint in t.joints:
if joint.name not in self._joint_map:
raise ValueError('Transmission {} has invalid joint name '
'{}'.format(t.name, joint.name))
def _validate_graph(self):
"""Raise an exception if the link-joint structure is invalid.
Checks for the following:
- The graph is connected in the undirected sense.
- The graph is acyclic in the directed sense.
- The graph has only one base link.
Returns
-------
base_link : :class:`.Link`
The base link of the URDF.
end_links : list of :class:`.Link`
The end links of the URDF.
"""
# Check that the link graph is weakly connected
if not nx.is_weakly_connected(self._G):
link_clusters = []
for cc in nx.weakly_connected_components(self._G):
cluster = []
for n in cc:
cluster.append(n.name)
link_clusters.append(cluster)
message = ('Links are not all connected. '
'Connected components are:')
for lc in link_clusters:
message += '\n\t'
for n in lc:
message += ' {}'.format(n)
raise ValueError(message)
# Check that link graph is acyclic
if not nx.is_directed_acyclic_graph(self._G):
raise ValueError('There are cycles in the link graph')
# Ensure that there is exactly one base link, which has no parent
base_link = None
end_links = []
for n in self._G:
if len(nx.descendants(self._G, n)) == 0:
if base_link is None:
base_link = n
else:
raise ValueError('Links {} and {} are both base links!'
.format(n.name, base_link.name))
if len(nx.ancestors(self._G, n)) == 0:
end_links.append(n)
return base_link, end_links
def _process_cfg(self, cfg):
"""Process a joint configuration spec into a dictionary mapping
joints to configuration values.
"""
joint_cfg = {}
if cfg is None:
return joint_cfg
if isinstance(cfg, dict):
for joint in cfg:
if isinstance(joint, six.string_types):
joint_cfg[self._joint_map[joint]] = cfg[joint]
elif isinstance(joint, Joint):
joint_cfg[joint] = cfg[joint]
elif isinstance(cfg, (list, tuple, np.ndarray)):
if len(cfg) != len(self.actuated_joints):
raise ValueError('Cfg must have same length as actuated joints '
'if specified as a numerical array')
for joint, value in zip(self.actuated_joints, cfg):
joint_cfg[joint] = value
else:
raise TypeError('Invalid type for config')
return joint_cfg
def _process_cfgs(self, cfgs):
"""Process a list of joint configurations into a dictionary mapping joints to
configuration values.
This should result in a dict mapping each joint to a list of cfg values, one
per joint.
"""
joint_cfg = {j : [] for j in self.actuated_joints}
n_cfgs = None
if isinstance(cfgs, dict):
for joint in cfgs:
if isinstance(joint, six.string_types):
joint_cfg[self._joint_map[joint]] = cfgs[joint]
else:
joint_cfg[joint] = cfgs[joint]
if n_cfgs is None:
n_cfgs = len(cfgs[joint])
elif isinstance(cfgs, (list, tuple, np.ndarray)):
n_cfgs = len(cfgs)
if isinstance(cfgs[0], dict):
for cfg in cfgs:
for joint in cfg:
if isinstance(joint, six.string_types):
joint_cfg[self._joint_map[joint]].append(cfg[joint])
else:
joint_cfg[joint].append(cfg[joint])
elif cfgs[0] is None:
pass
else:
cfgs = np.asanyarray(cfgs, dtype=np.float64)
for i, j in enumerate(self.actuated_joints):
joint_cfg[j] = cfgs[:,i]
else:
raise ValueError('Incorrectly formatted config array')
for j in joint_cfg:
if len(joint_cfg[j]) == 0:
joint_cfg[j] = None
elif len(joint_cfg[j]) != n_cfgs:
raise ValueError('Inconsistent number of configurations for joints')
return joint_cfg, n_cfgs
@classmethod
def _from_xml(cls, node, path):
valid_tags = set(['joint', 'link', 'transmission', 'material'])
kwargs = cls._parse(node, path)
extra_xml_node = ET.Element('extra')
for child in node:
if child.tag not in valid_tags:
extra_xml_node.append(child)
data = ET.tostring(extra_xml_node)
kwargs['other_xml'] = data
return URDF(**kwargs)
def _to_xml(self, parent, path):
node = self._unparse(path)
if self.other_xml:
extra_tree = ET.fromstring(self.other_xml)
for child in extra_tree:
node.append(child)
return node
|
the-stack_0_24313 | #!/usr/bin/env python3
import os
import argparse
import subprocess
from colors import *
from utils import *
CUR_DIR = os.path.abspath(os.path.dirname(__file__))
def vm_boot(vm, freeze, cpu, memory, kernel, dir, port):
cmd_boot = """\
set -e;
sudo qemu-system-x86_64 \
-s \
{freeze} \
-nographic \
-enable-kvm \
-cpu host \
-smp cpus={cpu} \
-m {memory} \
-hda {vm} \
-device e1000,netdev=net0 \
-net nic \
-netdev user,id=net0,hostfwd=tcp::{port}-:22 \
"""
cmd_dir = """\
-kernel {kernel} \
-append "nokaslr root=/dev/sda4 console=ttyS0" \
-fsdev local,security_model=passthrough,id=fsdev0,path={dir} \
-device virtio-9p-pci,id=fs0,fsdev=fsdev0,mount_tag=/dev/host \
"""
cmd = cmd_boot.format(vm=vm, freeze=freeze, cpu=cpu, \
memory=memory, port=port)
if kernel != None and dir != None:
cmd = cmd + " " + cmd_dir.format(kernel=kernel, dir=dir)
print(cmd)
os.system(cmd)
if __name__ == '__main__':
# options
parser = argparse.ArgumentParser(prog='vm-boot.py')
parser.add_argument('-v', '--vm',
help="VM image")
parser.add_argument('-S', '--freeze', action='store_true', default=False,
help="freeze CPU at startup")
parser.add_argument('-c', '--cpu', default="2",
help="number of CPU: default = 2")
parser.add_argument('-m', '--memory', default="4G",
help="memory size: default = 4G")
parser.add_argument('-k', '--kernel',
help="kernel boot image")
parser.add_argument('-d', '--dir', default=os.path.join(CUR_DIR, ".."),
help="shared directory: mount tag = /dev/host, default = ../")
parser.add_argument('-p', '--port', default=port_number(),
help="ssh port number: default is %s" % port_number())
# options parsing
args = parser.parse_args()
if args.vm is None:
parser.print_help()
exit(1)
# boot
vm_boot(args.vm, "-S" if args.freeze else "", args.cpu, args.memory, \
args.kernel, args.dir, args.port)
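# Example invocation (sketch; the image and kernel paths are assumptions):
#   ./vm-boot.py -v ubuntu.img -c 4 -m 8G \
#       -k ../kernel/arch/x86/boot/bzImage -d ../kernel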
|
the-stack_0_24314 | """
Add a custom VERBOSE logging level between DEBUG and INFO.
"""
import logging
VERBOSE = 15
VERBOSE_NAME = "VERBOSE"
def verbose(msg, *args, **kwargs) -> None:
"""
Verbose logging function.
:param msg: The message to log as verbose
:param args: Standard args
:param kwargs: Standard kwargs
:return: No meaningful return
"""
    if logging.getLogger().isEnabledFor(VERBOSE):
        logging.log(VERBOSE, msg, *args, **kwargs)
logging.addLevelName(VERBOSE, VERBOSE_NAME)
logging.verbose = verbose
logging.Logger.verbose = verbose
def configure_logging(loglevel: str) -> None:
"""
Configure basic logging to the console.
:param loglevel: level name from the command line or default
:return: No meaningful return
"""
if logging.getLevelName(loglevel) == "Level {0}".format(loglevel):
raise ValueError('Invalid log level: %s' % loglevel)
logging.basicConfig(level=loglevel, format='%(asctime)s - %(levelname)s - %(message)s')
|
the-stack_0_24315 | def htmlEndTagByStartTag(startTag):
'''
You are implementing your own HTML editor. To make it more comfortable for developers you would like to add an auto-completion feature to it.
Given the starting HTML tag, find the appropriate end tag which your editor should propose.
If you are not familiar with HTML, consult with this note.
Example
For startTag = "<button type='button' disabled>", the output should be
htmlEndTagByStartTag(startTag) = "</button>";
For startTag = "<i>", the output should be
htmlEndTagByStartTag(startTag) = "</i>".
'''
tag = ""
for c in startTag:
if c == " " or c == ">":
return "</"+ tag +">"
elif c != "<":
tag += c
print(htmlEndTagByStartTag("<button type='button' disabled>"))
print(htmlEndTagByStartTag("<i>")) |
the-stack_0_24316 | # -*- coding:utf-8 -*-
# author:keyoung
# email:[email protected]
# date:2019-10-11
def draw_grid(amount_card, read_file, savefile_name):
from readList import read_csv_to_list
import xlsxwriter
    workbook = xlsxwriter.Workbook(savefile_name) # create a new Excel workbook
    worksheet = workbook.add_worksheet('sheet1') # add a sheet named "sheet1"
    # Right-aligned cell format
align_right = workbook.add_format()
align_right.set_align("right")
align_right.set_bottom(1)
align_right.set_bottom_color("black")
    # Cell format 1
border1 = workbook.add_format()
border1.set_bottom(1)
border1.set_bottom_color("black")
    # Cell format 2
border2 = workbook.add_format()
border2.set_left(2)
border2.set_left_color("black")
border2.set_right(2)
border2.set_right_color("black")
border2.set_top(2)
border2.set_top_color("black")
border2.set_bottom(1)
border2.set_bottom_color("black")
border2.set_valign("vcenter")
border2.set_align("center")
    # Cell format 3
border3 = workbook.add_format()
border3.set_left(1)
border3.set_left_color("black")
border3.set_right(1)
border3.set_right_color("black")
border3.set_top(1)
border3.set_top_color("black")
border3.set_bottom(1)
border3.set_bottom_color("black")
    # Variant of format 3 with a 9 pt font
border3_with_smaller_font = workbook.add_format()
border3_with_smaller_font.set_left(1)
border3_with_smaller_font.set_left_color("black")
border3_with_smaller_font.set_right(1)
border3_with_smaller_font.set_right_color("black")
border3_with_smaller_font.set_top(1)
border3_with_smaller_font.set_top_color("black")
border3_with_smaller_font.set_bottom(1)
border3_with_smaller_font.set_bottom_color("black")
border3_with_smaller_font.set_font_size(9)
    # Variant of format 3 with an 8 pt font
border3_with_very_smaller_font = workbook.add_format()
border3_with_very_smaller_font.set_left(1)
border3_with_very_smaller_font.set_left_color("black")
border3_with_very_smaller_font.set_right(1)
border3_with_very_smaller_font.set_right_color("black")
border3_with_very_smaller_font.set_top(1)
border3_with_very_smaller_font.set_top_color("black")
border3_with_very_smaller_font.set_bottom(1)
border3_with_very_smaller_font.set_bottom_color("black")
border3_with_very_smaller_font.set_font_size(8)
    # Variant of format 3 with centered alignment
border3_with_center = workbook.add_format()
border3_with_center.set_left(1)
border3_with_center.set_left_color("black")
border3_with_center.set_right(1)
border3_with_center.set_right_color("black")
border3_with_center.set_top(1)
border3_with_center.set_top_color("black")
border3_with_center.set_bottom(1)
border3_with_center.set_bottom_color("black")
border3_with_center.set_align("center")
# rewrite drawGrid
rownum = 0
colnum = 0
print("绘制卡片中......")
    # Massage amount_card so that an even number of cards is always drawn; this makes print layout easier to control and keeps the data handling consistent
if amount_card % 2 == 0:
draw_card_amount = amount_card
else:
draw_card_amount = amount_card + 1
for page in range(draw_card_amount):
        if rownum >= (amount_card * 18) / 2: # each card takes 18 rows
            # this switches to the second column
colnum = 5
rownum = 0
        # Write the first three label rows
worksheet.write(rownum, colnum, "科 名")
worksheet.write(rownum + 1, colnum, "学 名")
worksheet.write(rownum + 2, colnum, "中 名")
worksheet.write(rownum, colnum + 1, None, border1)
worksheet.write(rownum + 1, colnum + 1, None, border1)
worksheet.write(rownum + 2, colnum + 1, None, border1)
worksheet.write(rownum + 4, colnum, "登记号", border2)
worksheet.write(rownum + 4, colnum + 1, "采集地点", border2)
worksheet.write(rownum + 4, colnum + 2, "采集日期", border2)
worksheet.write(rownum + 4, colnum + 3, "标本概况", border2)
worksheet.write(rownum + 9, colnum, "登记号", border2)
worksheet.write(rownum + 9, colnum + 1, "采集地点", border2)
worksheet.write(rownum + 9, colnum + 2, "采集日期", border2)
worksheet.write(rownum + 9, colnum + 3, "标本概况", border2)
        # Write a card number; comment this out if it is not needed
worksheet.write(rownum, colnum + 3, "第{}张".format(page + 1), align_right)
        # Layout settings
        worksheet.set_column(0, 0, 7.22) # width of column A
        worksheet.set_column(5, 5, 7.22) # width of column F
        worksheet.set_column(2, 2, 11.22) # width of column C
        worksheet.set_column(7, 7, 11.22) # width of column H
        worksheet.set_column(1, 1, 14.22) # width of column B
        worksheet.set_column(6, 6, 14.22) # width of column G
        worksheet.set_column(3, 3, 25.22) # width of column D
        worksheet.set_column(8, 8, 25.22) # width of column I
        worksheet.set_column(4, 4, 5.11) # width of column E, kept narrow so the paper is easier to cut
        # Adjust row heights
worksheet.set_row(rownum, 25.0, None)
worksheet.set_row(rownum + 1, 25.0, None)
worksheet.set_row(rownum + 2, 25.0, None)
worksheet.set_row(rownum + 4, 20.6, None)
worksheet.set_row(rownum + 5, 20.6, None)
worksheet.set_row(rownum + 6, 20.6, None)
worksheet.set_row(rownum + 7, 20.6, None)
worksheet.set_row(rownum + 9, 20.6, None)
worksheet.set_row(rownum + 10, 20.6, None)
worksheet.set_row(rownum + 11, 20.6, None)
worksheet.set_row(rownum + 12, 20.6, None)
worksheet.set_row(rownum + 13, 20.6, None)
worksheet.set_row(rownum + 14, 20.6, None)
worksheet.set_row(rownum + 15, 20.6, None)
worksheet.set_row(rownum + 16, 20.6, None)
worksheet.set_row(rownum + 17, 118.7, None)
worksheet.write_blank(rownum, colnum + 2, "", border1)
worksheet.write_blank(rownum + 1, colnum + 2, "", border1)
worksheet.write_blank(rownum + 1, colnum + 3, "", border1)
worksheet.write_blank(rownum + 2, colnum + 2, "", border1)
worksheet.write_blank(rownum + 2, colnum + 3, "", border1)
for j in range(5, 8):
for q in range(0, 4):
worksheet.write_blank(rownum + j, colnum + q, "", border3)
for j in range(10, 17):
for q in range(0, 4):
worksheet.write_blank(rownum + j, colnum + q, "", border3)
rownum = rownum + 18
    # Inner helper that splits a scientific name into the genus/species part (specific_generic_name) and the author name (author)
def split_scientific_name(scientific_name: str):
import re
global specific_generic_name, author_name
specific_generic_name = re.findall("(^[A-Z].*? .*?) .*", scientific_name)
if specific_generic_name:
specific_generic_name = specific_generic_name[0]
author_name = scientific_name[len(specific_generic_name) + 1:]
if len(author_name) == 0:
author_name = " "
return specific_generic_name, author_name
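    # Example (sketch; the name below is only illustrative):
    #   split_scientific_name("Quercus acutissima Carruth.")
    #   -> ("Quercus acutissima", "Carruth.")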
    # rewrite handleListDate (rewrite of the old handleListDate)
print("处理数据中......")
lst = read_csv_to_list(read_file)
amount_of_lst = len(lst)
    current_row = lst.pop() # pop one data row
italic = workbook.add_format({'italic': True})
    rownum = 0 # row index
    colnum = 0 # column index
    # Either of two fields could be used for comparison; index 7 (the scientific name) feels more rigorous
    check_name = current_row[7]
    # Fill in the first record now; the comparisons start below
generic_name, author = split_scientific_name(current_row[7])
    worksheet.write(rownum, colnum + 1, current_row[2], border1) # family name
    worksheet.write_rich_string(rownum + 1, colnum + 1, italic, f"{generic_name} ", f"{author}", border1) # scientific name
    worksheet.write(rownum + 2, colnum + 1, current_row[1], border1) # Chinese name
    worksheet.write(rownum + 5, colnum, current_row[0], border3) # accession number
if len(current_row[10]) < 8:
        worksheet.write(rownum + 5, colnum + 1, current_row[10], border3) # collection locality
    else:
        worksheet.write(rownum + 5, colnum + 1, current_row[10], border3_with_smaller_font) # collection locality
    worksheet.write(rownum + 5, colnum + 2, current_row[13], border3_with_center) # collection date
    if len(current_row[11] + "采集 " + current_row[12]) < 17:
        worksheet.write(rownum + 5, colnum + 3, current_row[11] + "采集 " + current_row[12], border3) # specimen summary
elif 17 <= len(current_row[11] + "采集 " + current_row[12]) < 24:
        worksheet.write(rownum + 5, colnum + 3, current_row[11] + "采集 " + current_row[12], border3_with_smaller_font) # specimen summary
else:
worksheet.write(rownum + 5, colnum + 3, current_row[11] + "采集 " + current_row[12],
border3_with_very_smaller_font) # 写标本概况
    # After the first record is written, start comparing
    row_counter = 1 # row counter
    while lst: # keep popping rows while lst is not empty
        if rownum > (amount_card * 18) / 2: # this switches to the other column
colnum = 5
rownum = 0
        current_row = lst.pop() # pop the next data row
if current_row[7] == check_name:
if row_counter == 3:
                # a blank row is needed here, hence this extra check
row_counter = 5
if row_counter > 11:
                # past this point, move on to the next card
row_counter = 0
rownum = rownum + 18
                if rownum >= (page * 9) + 1: # this threshold probably deserves a more careful calculation; fingers crossed it holds
rownum = 0
colnum = 5
generic_name, author = split_scientific_name(current_row[7])
            worksheet.write(rownum, colnum + 1, current_row[2], border1) # family name
            worksheet.write_rich_string(rownum + 1, colnum + 1, italic, f"{generic_name} ", f"{author}", border1) # scientific name
            worksheet.write(rownum + 2, colnum + 1, current_row[1], border1) # Chinese name
            # Equal names mean this is the same species of specimen,
            # so the family, scientific and Chinese names would not need rewriting
            worksheet.write(rownum + 5 + row_counter, colnum, current_row[0], border3) # accession number
if len(current_row[10]) < 8:
                worksheet.write(rownum + 5 + row_counter, colnum + 1, current_row[10], border3) # collection locality
            else:
                worksheet.write(rownum + 5 + row_counter, colnum + 1, current_row[10], border3_with_smaller_font) # collection locality
            worksheet.write(rownum + 5 + row_counter, colnum + 2, current_row[13], border3_with_center) # collection date
            if len(current_row[11] + "采集 " + current_row[12]) < 17:
                worksheet.write(rownum + 5 + row_counter, colnum + 3, current_row[11] + "采集 " + current_row[12], border3) # specimen summary
elif 17 <= len(current_row[11] + "采集 " + current_row[12]) < 24:
worksheet.write(rownum + 5 + row_counter, colnum + 3, current_row[11] + "采集 " + current_row[12],
border3_with_smaller_font) # 写标本概况
else:
worksheet.write(rownum + 5 + row_counter, colnum + 3, current_row[11] + "采集 " + current_row[12],
border3_with_very_smaller_font) # 写标本概况
row_counter = row_counter + 1
else:
            # The names differ, so this is a different species of specimen
            # and we jump ahead to the next card
            rownum = rownum + 18
            # The column switch also has to be handled in this branch
if rownum >= (page * 9) + 1:
rownum = 0
colnum = 5
generic_name, author = split_scientific_name(current_row[7])
            worksheet.write(rownum, colnum + 1, current_row[2], border1) # family name
            worksheet.write_rich_string(rownum + 1, colnum + 1, italic, f"{generic_name} ", f"{author}", border1) # scientific name
            worksheet.write(rownum + 2, colnum + 1, current_row[1], border1) # Chinese name
            worksheet.write(rownum + 5, colnum, current_row[0], border3) # accession number
            if len(current_row[10]) < 8: # short locality strings use the normal font, longer ones the smaller font, matching the other branches
                worksheet.write(rownum + 5, colnum + 1, current_row[10], border3) # collection locality
else:
                worksheet.write(rownum + 5, colnum + 1, current_row[10], border3_with_smaller_font) # collection locality
            worksheet.write(rownum + 5, colnum + 2, current_row[13], border3_with_center) # collection date
            if len(current_row[11] + "采集 " + current_row[12]) < 17:
                worksheet.write(rownum + 5, colnum + 3, current_row[11] + "采集 " + current_row[12], border3) # specimen summary
elif 17 <= len(current_row[11] + "采集 " + current_row[12]) < 24:
                worksheet.write(rownum + 5, colnum + 3, current_row[11] + "采集 " + current_row[12], border3_with_smaller_font) # specimen summary
else:
worksheet.write(rownum + 5, colnum + 3, current_row[11] + "采集 " + current_row[12],
border3_with_very_smaller_font)
            # Refresh check_name for the next comparison
check_name = current_row[7]
row_counter = 1
workbook.close()
print(f"数据处理完成,一共处理{amount_of_lst}条数据。")
print(f"保存文件<{savefile_name}>。")
print("-"*46) |
the-stack_0_24317 | import pytest
import os
import numpy as np
from .. import utils
from .. import templates
def test_data_path():
"""
Data path
"""
path = os.path.join(os.path.dirname(__file__), '../data/')
assert(os.path.exists(path))
return path
def test_templates_path():
"""
Does ``templates`` path exist?
"""
path = test_data_path()
assert(os.path.exists(os.path.join(path, 'templates')))
return os.path.join(path, 'templates')
def test_read_template_ascii():
"""
Test interpolation function
"""
path = test_templates_path()
ascii_file = os.path.join(path, 'fsps_full/fsps_QSF_12_v3_001.dat')
templ = templates.Template(file=ascii_file)
assert(templ.name == 'fsps_QSF_12_v3_001.dat')
assert(np.allclose(templ.flux.shape, [1,5994]))
return templ
def test_read_template_fits():
"""
Read template FITS file
"""
path = test_templates_path()
fits_file = os.path.join(path,
'spline_templates_v2/spline_age0.01_av0.0.fits')
templ = templates.Template(file=fits_file)
assert(np.allclose(templ.flux.shape, [templ.NZ, 12603]))
assert(templ.name == 'spline_age0.01_av0.0.fits')
return templ |
the-stack_0_24318 | """ Utility for generating TCL script to launch simulation on Vivado
"""
import argparse
def create_sim(f_out, args):
print("""
launch_simulation
""", file=f_out)
clock_pins = args.clock_pins.split(';')
clock_periods = args.clock_periods.split(';')
assert len(clock_pins) == len(clock_periods)
for clock_pin, clock_period in zip(clock_pins, map(float, clock_periods)):
print(
"""
add_force {{/{top}/{pin}}} -radix hex {{0 0ns}} {{1 {half_period}ns}} -repeat_every {period}ns
""".format(
top=args.top,
pin=clock_pin,
half_period=clock_period / 2.0,
period=clock_period,
),
file=f_out
)
print("""
restart
run 1us
run 1us
""", file=f_out)
def main():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('--top', help="Top-level module name.", required=True)
parser.add_argument(
'--clock_pins',
        help="Semicolon-separated list of clock pins.",
required=True
)
parser.add_argument(
'--clock_periods',
        help="Semicolon-separated list of clock periods (in ns).",
required=True
)
parser.add_argument(
'--output_tcl', help="Filename of output TCL file.", required=True
)
args = parser.parse_args()
with open(args.output_tcl, 'w') as f:
create_sim(f, args)
if __name__ == "__main__":
main()
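# Example invocation (sketch; the script and design names are assumptions):
#   python3 gen_sim_tcl.py --top top_module --clock_pins "clk" \
#       --clock_periods "10" --output_tcl sim.tcl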
|
the-stack_0_24319 | from tkinter import Tk, Label, StringVar, Button, X
from tkinter.filedialog import askopenfilename
import iCalSalad
def main():
file_path = ["", ""]
window = Tk()
window.title("salad.nu -> ical")
window.geometry("300x300")
file_entry = Label(text="csv file", font="helvetica 14",)
file_entry.pack(fill=X)
file_entry_button = Button(
window,
text="Select File to convert",
font="helvetica 14",
width=15,
height=1,
command = lambda: get_file_path(file_entry, file_path)
)
file_entry_button.pack()
status_text = StringVar()
go_button = Button(
window,
text="Convert",
font="helvetica 14",
width=10,
height=1,
command= lambda:convert(status_text, file_path)
)
go_button.pack()
status_section = Label(
textvariable=status_text,
font="helvetica 12",
wraplength=300,
justify="center",
)
status_section.pack()
window.mainloop()
def add_to_status(status_text, message):
t = message + "\n" + status_text.get()
status_text.set(t)
def convert(status_text, file_path):
add_to_status(status_text, "Converting")
iCalSalad.convert(file_path[0], file_path[1])
add_to_status(status_text, f"Converted, saved to {file_path[1]}")
def get_file_path(entry_field, file_path):
path = askopenfilename(title = "Select csv",filetypes = (("csv files","*.csv"),("all files","*.*")))
    if path:  # askopenfilename returns an empty string if the dialog is cancelled
o = path.rsplit("/", 1)
file_path[0] = path
file_path[1] = o[0]+"/nu_calendar.ics"
entry_field.config(text=o[1])
if __name__ == "__main__":
main() |
the-stack_0_24321 | #!/usr/bin/env python
"""Create the main TCC status window
(which is also the main TUI window, since it has the menus)
History:
2003-06-09 ROwen added addWindow and renamed from StatusWdg to StatusWindow.
2003-06-25 ROwen Modified test case to handle message data as a dict
2003-12-17 ROwen Modified to use renamed TUI.MainMenu.
2004-02-04 ROwen Modified _HelpURL to match minor help reorg.
2004-02-17 ROwen Changed buildAutoMenus to buildMenus.
2004-05-18 ROwen Removed unused local variable in addWindow.
2004-08-11 ROwen Modified for updated RO.Wdg.Toplevel.
2006-03-16 ROwen Modified to use TestData module for testing.
2009-04-17 ROwen Added this window to the TUI menu.
2009-09-09 ROwen Moved this window to the TCC menu.
Modified for changes in the TestData module.
Added constant WindowName.
2011-02-16 ROwen Added AxisOffsetWdg and moved MiscWdg above the offsets.
"""
import Tkinter
import AxisStatus
import NetPosWdg
import OffsetWdg
import AxisOffsetWdg
import MiscWdg
import RO.Wdg
import SlewStatus
WindowName = "TCC.Status"
def addWindow(tlSet):
"""Set up the main status window
"""
tlSet.createToplevel(
name = WindowName,
defGeom = "+0+22",
resizable = False,
closeMode = RO.Wdg.tl_CloseDisabled,
wdgFunc = StatusWdg,
)
_HelpPrefix = "Telescope/StatusWin.html#"
class StatusWdg (Tkinter.Frame):
def __init__ (self,
master = None,
**kargs):
"""creates a new telescope status frame
Inputs:
- master master Tk widget -- typically a frame or window
"""
Tkinter.Frame.__init__(self, master=master, **kargs)
row = 1
self.netPosWdg = NetPosWdg.NetPosWdg(
master=self,
borderwidth=1,
)
self.netPosWdg.grid(row=row, column=0, sticky="w")
self.slewStatusWdg = SlewStatus.SlewStatusWdg(
master = self,
)
self.slewStatusWdg.grid(row=row, column=1, sticky="ns")
row += 1
self.miscWdg = MiscWdg.MiscWdg(
master=self,
borderwidth=1,
relief="ridge",
)
self.miscWdg.grid(row=row, column=0, columnspan=2, sticky="ew")
row += 1
self.offsetWdg = OffsetWdg.OffsetWdg(
master=self,
borderwidth=1,
relief="ridge",
)
self.offsetWdg.grid(row=row, column=0, columnspan=2, sticky="ew")
row += 1
self.axisOffsetWdg = AxisOffsetWdg.AxisOffsetWdg(
master=self,
borderwidth=1,
relief="ridge",
)
self.axisOffsetWdg.grid(row=row, column=0, columnspan=2, sticky="ew")
row += 1
self.axisStatusWdg = AxisStatus.AxisStatusWdg(
master=self,
borderwidth=1,
relief="ridge",
)
self.axisStatusWdg.grid(row=row, column=0, columnspan=2, sticky="ew")
row += 1
# set up status bar; this is only for showing help,
# not command status, so we can omit dispatcher and prefs
self.statusBar = RO.Wdg.StatusBar(
master = self,
helpURL = _HelpPrefix + "StatusBar",
)
self.statusBar.grid(row=row, column=0, columnspan=2, sticky="ew")
row += 1
if __name__ == "__main__":
import TestData
root = TestData.tuiModel.tkRoot
testFrame = StatusWdg(TestData.tuiModel.tkRoot)
testFrame.pack()
TestData.init()
TestData.runTest()
root.mainloop()
|
the-stack_0_24322 | import copy
import re
import sys
import tempfile
import unittest
from mock.tests.support import ALWAYS_EQ
from mock.tests.support import is_instance
from mock import (
call, DEFAULT, patch, sentinel,
MagicMock, Mock, NonCallableMock,
NonCallableMagicMock, AsyncMock,
create_autospec, mock
)
from mock.mock import _Call, _CallList
import mock.mock as mock_module
class Iter(object):
def __init__(self):
self.thing = iter(['this', 'is', 'an', 'iter'])
def __iter__(self):
return self
def next(self):
return next(self.thing)
__next__ = next
class Something(object):
def meth(self, a, b, c, d=None): pass
@classmethod
def cmeth(cls, a, b, c, d=None): pass
@staticmethod
def smeth(a, b, c, d=None): pass
def something(a): pass
class MockTest(unittest.TestCase):
def test_all(self):
# if __all__ is badly defined then import * will raise an error
# We have to exec it because you can't import * inside a method
# in Python 3
exec("from mock.mock import *")
def test_constructor(self):
mock = Mock()
self.assertFalse(mock.called, "called not initialised correctly")
self.assertEqual(mock.call_count, 0,
"call_count not initialised correctly")
self.assertTrue(is_instance(mock.return_value, Mock),
"return_value not initialised correctly")
self.assertEqual(mock.call_args, None,
"call_args not initialised correctly")
self.assertEqual(mock.call_args_list, [],
"call_args_list not initialised correctly")
self.assertEqual(mock.method_calls, [],
"method_calls not initialised correctly")
# Can't use hasattr for this test as it always returns True on a mock
self.assertNotIn('_items', mock.__dict__,
"default mock should not have '_items' attribute")
self.assertIsNone(mock._mock_parent,
"parent not initialised correctly")
self.assertIsNone(mock._mock_methods,
"methods not initialised correctly")
        self.assertEqual(mock._mock_children, {},
                         "children not initialised correctly")
def test_return_value_in_constructor(self):
mock = Mock(return_value=None)
self.assertIsNone(mock.return_value,
"return value in constructor not honoured")
def test_change_return_value_via_delegate(self):
def f(): pass
mock = create_autospec(f)
mock.mock.return_value = 1
self.assertEqual(mock(), 1)
def test_change_side_effect_via_delegate(self):
def f(): pass
mock = create_autospec(f)
mock.mock.side_effect = TypeError()
with self.assertRaises(TypeError):
mock()
def test_repr(self):
mock = Mock(name='foo')
self.assertIn('foo', repr(mock))
self.assertIn("'%s'" % id(mock), repr(mock))
mocks = [(Mock(), 'mock'), (Mock(name='bar'), 'bar')]
for mock, name in mocks:
self.assertIn('%s.bar' % name, repr(mock.bar))
self.assertIn('%s.foo()' % name, repr(mock.foo()))
self.assertIn('%s.foo().bing' % name, repr(mock.foo().bing))
self.assertIn('%s()' % name, repr(mock()))
self.assertIn('%s()()' % name, repr(mock()()))
self.assertIn('%s()().foo.bar.baz().bing' % name,
repr(mock()().foo.bar.baz().bing))
def test_repr_with_spec(self):
class X(object):
pass
mock = Mock(spec=X)
self.assertIn(" spec='X' ", repr(mock))
mock = Mock(spec=X())
self.assertIn(" spec='X' ", repr(mock))
mock = Mock(spec_set=X)
self.assertIn(" spec_set='X' ", repr(mock))
mock = Mock(spec_set=X())
self.assertIn(" spec_set='X' ", repr(mock))
mock = Mock(spec=X, name='foo')
self.assertIn(" spec='X' ", repr(mock))
self.assertIn(" name='foo' ", repr(mock))
mock = Mock(name='foo')
self.assertNotIn("spec", repr(mock))
mock = Mock()
self.assertNotIn("spec", repr(mock))
mock = Mock(spec=['foo'])
self.assertNotIn("spec", repr(mock))
def test_side_effect(self):
mock = Mock()
def effect(*args, **kwargs):
raise SystemError('kablooie')
mock.side_effect = effect
self.assertRaises(SystemError, mock, 1, 2, fish=3)
mock.assert_called_with(1, 2, fish=3)
results = [1, 2, 3]
def effect():
return results.pop()
mock.side_effect = effect
self.assertEqual([mock(), mock(), mock()], [3, 2, 1],
"side effect not used correctly")
mock = Mock(side_effect=sentinel.SideEffect)
self.assertEqual(mock.side_effect, sentinel.SideEffect,
"side effect in constructor not used")
def side_effect():
return DEFAULT
mock = Mock(side_effect=side_effect, return_value=sentinel.RETURN)
self.assertEqual(mock(), sentinel.RETURN)
def test_autospec_side_effect(self):
# Test for issue17826
results = [1, 2, 3]
def effect():
return results.pop()
def f(): pass
mock = create_autospec(f)
mock.side_effect = [1, 2, 3]
self.assertEqual([mock(), mock(), mock()], [1, 2, 3],
"side effect not used correctly in create_autospec")
# Test where side effect is a callable
results = [1, 2, 3]
mock = create_autospec(f)
mock.side_effect = effect
self.assertEqual([mock(), mock(), mock()], [3, 2, 1],
"callable side effect not used correctly")
def test_autospec_side_effect_exception(self):
# Test for issue 23661
def f(): pass
mock = create_autospec(f)
mock.side_effect = ValueError('Bazinga!')
self.assertRaisesRegex(ValueError, 'Bazinga!', mock)
def test_reset_mock(self):
parent = Mock()
spec = ["something"]
mock = Mock(name="child", parent=parent, spec=spec)
mock(sentinel.Something, something=sentinel.SomethingElse)
something = mock.something
mock.something()
mock.side_effect = sentinel.SideEffect
return_value = mock.return_value
return_value()
mock.reset_mock()
self.assertEqual(mock._mock_name, "child",
"name incorrectly reset")
self.assertEqual(mock._mock_parent, parent,
"parent incorrectly reset")
self.assertEqual(mock._mock_methods, spec,
"methods incorrectly reset")
self.assertFalse(mock.called, "called not reset")
self.assertEqual(mock.call_count, 0, "call_count not reset")
self.assertEqual(mock.call_args, None, "call_args not reset")
self.assertEqual(mock.call_args_list, [], "call_args_list not reset")
self.assertEqual(mock.method_calls, [],
"method_calls not initialised correctly: %r != %r" %
(mock.method_calls, []))
self.assertEqual(mock.mock_calls, [])
self.assertEqual(mock.side_effect, sentinel.SideEffect,
"side_effect incorrectly reset")
self.assertEqual(mock.return_value, return_value,
"return_value incorrectly reset")
self.assertFalse(return_value.called, "return value mock not reset")
self.assertEqual(mock._mock_children, {'something': something},
"children reset incorrectly")
self.assertEqual(mock.something, something,
"children incorrectly cleared")
self.assertFalse(mock.something.called, "child not reset")
def test_reset_mock_recursion(self):
mock = Mock()
mock.return_value = mock
# used to cause recursion
mock.reset_mock()
def test_reset_mock_on_mock_open_issue_18622(self):
a = mock.mock_open()
a.reset_mock()
def test_call(self):
mock = Mock()
self.assertTrue(is_instance(mock.return_value, Mock),
"Default return_value should be a Mock")
result = mock()
self.assertEqual(mock(), result,
"different result from consecutive calls")
mock.reset_mock()
ret_val = mock(sentinel.Arg)
self.assertTrue(mock.called, "called not set")
self.assertEqual(mock.call_count, 1, "call_count incorrect")
self.assertEqual(mock.call_args, ((sentinel.Arg,), {}),
"call_args not set")
self.assertEqual(mock.call_args.args, (sentinel.Arg,),
"call_args not set")
self.assertEqual(mock.call_args.kwargs, {},
"call_args not set")
self.assertEqual(mock.call_args_list, [((sentinel.Arg,), {})],
"call_args_list not initialised correctly")
mock.return_value = sentinel.ReturnValue
ret_val = mock(sentinel.Arg, key=sentinel.KeyArg)
self.assertEqual(ret_val, sentinel.ReturnValue,
"incorrect return value")
self.assertEqual(mock.call_count, 2, "call_count incorrect")
self.assertEqual(mock.call_args,
((sentinel.Arg,), {'key': sentinel.KeyArg}),
"call_args not set")
self.assertEqual(mock.call_args_list, [
((sentinel.Arg,), {}),
((sentinel.Arg,), {'key': sentinel.KeyArg})
],
"call_args_list not set")
def test_call_args_comparison(self):
mock = Mock()
mock()
mock(sentinel.Arg)
mock(kw=sentinel.Kwarg)
mock(sentinel.Arg, kw=sentinel.Kwarg)
self.assertEqual(mock.call_args_list, [
(),
((sentinel.Arg,),),
({"kw": sentinel.Kwarg},),
((sentinel.Arg,), {"kw": sentinel.Kwarg})
])
self.assertEqual(mock.call_args,
((sentinel.Arg,), {"kw": sentinel.Kwarg}))
self.assertEqual(mock.call_args.args, (sentinel.Arg,))
self.assertEqual(mock.call_args.kwargs, {"kw": sentinel.Kwarg})
# Comparing call_args to a long sequence should not raise
# an exception. See issue 24857.
self.assertFalse(mock.call_args == "a long sequence")
def test_calls_equal_with_any(self):
# Check that equality and non-equality is consistent even when
# comparing with mock.ANY
mm = mock.MagicMock()
self.assertTrue(mm == mm)
self.assertFalse(mm != mm)
self.assertFalse(mm == mock.MagicMock())
self.assertTrue(mm != mock.MagicMock())
self.assertTrue(mm == mock.ANY)
self.assertFalse(mm != mock.ANY)
self.assertTrue(mock.ANY == mm)
self.assertFalse(mock.ANY != mm)
self.assertTrue(mm == ALWAYS_EQ)
self.assertFalse(mm != ALWAYS_EQ)
call1 = mock.call(mock.MagicMock())
call2 = mock.call(mock.ANY)
self.assertTrue(call1 == call2)
self.assertFalse(call1 != call2)
self.assertTrue(call2 == call1)
self.assertFalse(call2 != call1)
self.assertTrue(call1 == ALWAYS_EQ)
self.assertFalse(call1 != ALWAYS_EQ)
self.assertFalse(call1 == 1)
self.assertTrue(call1 != 1)
def test_assert_called_with(self):
mock = Mock()
mock()
# Will raise an exception if it fails
mock.assert_called_with()
self.assertRaises(AssertionError, mock.assert_called_with, 1)
mock.reset_mock()
self.assertRaises(AssertionError, mock.assert_called_with)
mock(1, 2, 3, a='fish', b='nothing')
mock.assert_called_with(1, 2, 3, a='fish', b='nothing')
def test_assert_called_with_any(self):
m = MagicMock()
m(MagicMock())
m.assert_called_with(mock.ANY)
def test_assert_called_with_function_spec(self):
def f(a, b, c, d=None): pass
mock = Mock(spec=f)
mock(1, b=2, c=3)
mock.assert_called_with(1, 2, 3)
mock.assert_called_with(a=1, b=2, c=3)
self.assertRaises(AssertionError, mock.assert_called_with,
1, b=3, c=2)
# Expected call doesn't match the spec's signature
with self.assertRaises(AssertionError) as cm:
mock.assert_called_with(e=8)
self.assertIsInstance(cm.exception.__cause__, TypeError)
def test_assert_called_with_method_spec(self):
def _check(mock):
mock(1, b=2, c=3)
mock.assert_called_with(1, 2, 3)
mock.assert_called_with(a=1, b=2, c=3)
self.assertRaises(AssertionError, mock.assert_called_with,
1, b=3, c=2)
mock = Mock(spec=Something().meth)
_check(mock)
mock = Mock(spec=Something.cmeth)
_check(mock)
mock = Mock(spec=Something().cmeth)
_check(mock)
mock = Mock(spec=Something.smeth)
_check(mock)
mock = Mock(spec=Something().smeth)
_check(mock)
def test_assert_called_exception_message(self):
msg = "Expected '{0}' to have been called"
with self.assertRaisesRegex(AssertionError, msg.format('mock')):
Mock().assert_called()
with self.assertRaisesRegex(AssertionError, msg.format('test_name')):
Mock(name="test_name").assert_called()
def test_assert_called_once_with(self):
mock = Mock()
mock()
# Will raise an exception if it fails
mock.assert_called_once_with()
mock()
self.assertRaises(AssertionError, mock.assert_called_once_with)
mock.reset_mock()
self.assertRaises(AssertionError, mock.assert_called_once_with)
mock('foo', 'bar', baz=2)
mock.assert_called_once_with('foo', 'bar', baz=2)
mock.reset_mock()
mock('foo', 'bar', baz=2)
self.assertRaises(
AssertionError,
lambda: mock.assert_called_once_with('bob', 'bar', baz=2)
)
def test_assert_called_once_with_call_list(self):
m = Mock()
m(1)
m(2)
self.assertRaisesRegex(AssertionError,
re.escape("Calls: [call(1), call(2)]"),
lambda: m.assert_called_once_with(2))
def test_assert_called_once_with_function_spec(self):
def f(a, b, c, d=None): pass
mock = Mock(spec=f)
mock(1, b=2, c=3)
mock.assert_called_once_with(1, 2, 3)
mock.assert_called_once_with(a=1, b=2, c=3)
self.assertRaises(AssertionError, mock.assert_called_once_with,
1, b=3, c=2)
# Expected call doesn't match the spec's signature
with self.assertRaises(AssertionError) as cm:
mock.assert_called_once_with(e=8)
self.assertIsInstance(cm.exception.__cause__, TypeError)
# Mock called more than once => always fails
mock(4, 5, 6)
self.assertRaises(AssertionError, mock.assert_called_once_with,
1, 2, 3)
self.assertRaises(AssertionError, mock.assert_called_once_with,
4, 5, 6)
def test_attribute_access_returns_mocks(self):
mock = Mock()
something = mock.something
self.assertTrue(is_instance(something, Mock), "attribute isn't a mock")
self.assertEqual(mock.something, something,
"different attributes returned for same name")
# Usage example
mock = Mock()
mock.something.return_value = 3
self.assertEqual(mock.something(), 3, "method returned wrong value")
self.assertTrue(mock.something.called,
"method didn't record being called")
def test_attributes_have_name_and_parent_set(self):
mock = Mock()
something = mock.something
self.assertEqual(something._mock_name, "something",
"attribute name not set correctly")
self.assertEqual(something._mock_parent, mock,
"attribute parent not set correctly")
def test_method_calls_recorded(self):
mock = Mock()
mock.something(3, fish=None)
mock.something_else.something(6, cake=sentinel.Cake)
self.assertEqual(mock.something_else.method_calls,
[("something", (6,), {'cake': sentinel.Cake})],
"method calls not recorded correctly")
self.assertEqual(mock.method_calls, [
("something", (3,), {'fish': None}),
("something_else.something", (6,), {'cake': sentinel.Cake})
],
"method calls not recorded correctly")
def test_method_calls_compare_easily(self):
mock = Mock()
mock.something()
self.assertEqual(mock.method_calls, [('something',)])
self.assertEqual(mock.method_calls, [('something', (), {})])
mock = Mock()
mock.something('different')
self.assertEqual(mock.method_calls, [('something', ('different',))])
self.assertEqual(mock.method_calls,
[('something', ('different',), {})])
mock = Mock()
mock.something(x=1)
self.assertEqual(mock.method_calls, [('something', {'x': 1})])
self.assertEqual(mock.method_calls, [('something', (), {'x': 1})])
mock = Mock()
mock.something('different', some='more')
self.assertEqual(mock.method_calls, [
('something', ('different',), {'some': 'more'})
])
def test_only_allowed_methods_exist(self):
for spec in ['something'], ('something',):
for arg in 'spec', 'spec_set':
mock = Mock(**{arg: spec})
# this should be allowed
mock.something
self.assertRaisesRegex(
AttributeError,
"Mock object has no attribute 'something_else'",
getattr, mock, 'something_else'
)
def test_from_spec(self):
class Something(object):
x = 3
__something__ = None
def y(self): pass
def test_attributes(mock):
# should work
mock.x
mock.y
mock.__something__
self.assertRaisesRegex(
AttributeError,
"Mock object has no attribute 'z'",
getattr, mock, 'z'
)
self.assertRaisesRegex(
AttributeError,
"Mock object has no attribute '__foobar__'",
getattr, mock, '__foobar__'
)
test_attributes(Mock(spec=Something))
test_attributes(Mock(spec=Something()))
def test_wraps_calls(self):
real = Mock()
mock = Mock(wraps=real)
self.assertEqual(mock(), real())
real.reset_mock()
mock(1, 2, fish=3)
real.assert_called_with(1, 2, fish=3)
def test_wraps_prevents_automatic_creation_of_mocks(self):
class Real(object):
pass
real = Real()
mock = Mock(wraps=real)
self.assertRaises(AttributeError, lambda: mock.new_attr())
def test_wraps_call_with_nondefault_return_value(self):
real = Mock()
mock = Mock(wraps=real)
mock.return_value = 3
self.assertEqual(mock(), 3)
self.assertFalse(real.called)
def test_wraps_attributes(self):
class Real(object):
attribute = Mock()
real = Real()
mock = Mock(wraps=real)
self.assertEqual(mock.attribute(), real.attribute())
self.assertRaises(AttributeError, lambda: mock.fish)
self.assertNotEqual(mock.attribute, real.attribute)
result = mock.attribute.frog(1, 2, fish=3)
Real.attribute.frog.assert_called_with(1, 2, fish=3)
self.assertEqual(result, Real.attribute.frog())
def test_customize_wrapped_object_with_side_effect_iterable_with_default(self):
class Real(object):
def method(self):
return sentinel.ORIGINAL_VALUE
real = Real()
mock = Mock(wraps=real)
mock.method.side_effect = [sentinel.VALUE1, DEFAULT]
self.assertEqual(mock.method(), sentinel.VALUE1)
self.assertEqual(mock.method(), sentinel.ORIGINAL_VALUE)
self.assertRaises(StopIteration, mock.method)
def test_customize_wrapped_object_with_side_effect_iterable(self):
class Real(object):
def method(self): pass
real = Real()
mock = Mock(wraps=real)
mock.method.side_effect = [sentinel.VALUE1, sentinel.VALUE2]
self.assertEqual(mock.method(), sentinel.VALUE1)
self.assertEqual(mock.method(), sentinel.VALUE2)
self.assertRaises(StopIteration, mock.method)
def test_customize_wrapped_object_with_side_effect_exception(self):
class Real(object):
def method(self): pass
real = Real()
mock = Mock(wraps=real)
mock.method.side_effect = RuntimeError
self.assertRaises(RuntimeError, mock.method)
def test_customize_wrapped_object_with_side_effect_function(self):
class Real(object):
def method(self): pass
def side_effect():
return sentinel.VALUE
real = Real()
mock = Mock(wraps=real)
mock.method.side_effect = side_effect
self.assertEqual(mock.method(), sentinel.VALUE)
def test_customize_wrapped_object_with_return_value(self):
class Real(object):
def method(self): pass
real = Real()
mock = Mock(wraps=real)
mock.method.return_value = sentinel.VALUE
self.assertEqual(mock.method(), sentinel.VALUE)
def test_customize_wrapped_object_with_return_value_and_side_effect(self):
# side_effect should always take precedence over return_value.
class Real(object):
def method(self): pass
real = Real()
mock = Mock(wraps=real)
mock.method.side_effect = [sentinel.VALUE1, sentinel.VALUE2]
mock.method.return_value = sentinel.WRONG_VALUE
self.assertEqual(mock.method(), sentinel.VALUE1)
self.assertEqual(mock.method(), sentinel.VALUE2)
self.assertRaises(StopIteration, mock.method)
def test_customize_wrapped_object_with_return_value_and_side_effect2(self):
# side_effect can return DEFAULT to default to return_value
class Real(object):
def method(self): pass
real = Real()
mock = Mock(wraps=real)
mock.method.side_effect = lambda: DEFAULT
mock.method.return_value = sentinel.VALUE
self.assertEqual(mock.method(), sentinel.VALUE)
def test_customize_wrapped_object_with_return_value_and_side_effect_default(self):
class Real(object):
def method(self): pass
real = Real()
mock = Mock(wraps=real)
mock.method.side_effect = [sentinel.VALUE1, DEFAULT]
mock.method.return_value = sentinel.RETURN
self.assertEqual(mock.method(), sentinel.VALUE1)
self.assertEqual(mock.method(), sentinel.RETURN)
self.assertRaises(StopIteration, mock.method)
def test_magic_method_wraps_dict(self):
# bpo-25597: MagicMock with wrap doesn't call wrapped object's
# method for magic methods with default values.
data = {'foo': 'bar'}
wrapped_dict = MagicMock(wraps=data)
self.assertEqual(wrapped_dict.get('foo'), 'bar')
# Accessing key gives a MagicMock
self.assertIsInstance(wrapped_dict['foo'], MagicMock)
# __contains__ method has a default value of False
self.assertFalse('foo' in wrapped_dict)
# return_value is non-sentinel and takes precedence over wrapped value.
wrapped_dict.get.return_value = 'return_value'
self.assertEqual(wrapped_dict.get('foo'), 'return_value')
# return_value is sentinel and hence wrapped value is returned.
wrapped_dict.get.return_value = sentinel.DEFAULT
self.assertEqual(wrapped_dict.get('foo'), 'bar')
self.assertEqual(wrapped_dict.get('baz'), None)
self.assertIsInstance(wrapped_dict['baz'], MagicMock)
self.assertFalse('bar' in wrapped_dict)
data['baz'] = 'spam'
self.assertEqual(wrapped_dict.get('baz'), 'spam')
self.assertIsInstance(wrapped_dict['baz'], MagicMock)
self.assertFalse('bar' in wrapped_dict)
del data['baz']
self.assertEqual(wrapped_dict.get('baz'), None)
def test_magic_method_wraps_class(self):
class Foo:
def __getitem__(self, index):
return index
def __custom_method__(self):
return "foo"
klass = MagicMock(wraps=Foo)
obj = klass()
self.assertEqual(obj.__getitem__(2), 2)
self.assertEqual(obj[2], 2)
self.assertEqual(obj.__custom_method__(), "foo")
def test_exceptional_side_effect(self):
mock = Mock(side_effect=AttributeError)
self.assertRaises(AttributeError, mock)
mock = Mock(side_effect=AttributeError('foo'))
self.assertRaises(AttributeError, mock)
def test_baseexceptional_side_effect(self):
mock = Mock(side_effect=KeyboardInterrupt)
self.assertRaises(KeyboardInterrupt, mock)
mock = Mock(side_effect=KeyboardInterrupt('foo'))
self.assertRaises(KeyboardInterrupt, mock)
def test_assert_called_with_message(self):
mock = Mock()
self.assertRaisesRegex(AssertionError, 'not called',
mock.assert_called_with)
def test_assert_called_once_with_message(self):
mock = Mock(name='geoffrey')
self.assertRaisesRegex(AssertionError,
r"Expected 'geoffrey' to be called once\.",
mock.assert_called_once_with)
def test__name__(self):
mock = Mock()
self.assertRaises(AttributeError, lambda: mock.__name__)
mock.__name__ = 'foo'
self.assertEqual(mock.__name__, 'foo')
def test_spec_list_subclass(self):
class Sub(list):
pass
mock = Mock(spec=Sub(['foo']))
mock.append(3)
mock.append.assert_called_with(3)
self.assertRaises(AttributeError, getattr, mock, 'foo')
def test_spec_class(self):
class X(object):
pass
mock = Mock(spec=X)
self.assertIsInstance(mock, X)
mock = Mock(spec=X())
self.assertIsInstance(mock, X)
self.assertIs(mock.__class__, X)
self.assertEqual(Mock().__class__.__name__, 'Mock')
mock = Mock(spec_set=X)
self.assertIsInstance(mock, X)
mock = Mock(spec_set=X())
self.assertIsInstance(mock, X)
def test_spec_class_no_object_base(self):
class X:
pass
mock = Mock(spec=X)
self.assertIsInstance(mock, X)
mock = Mock(spec=X())
self.assertIsInstance(mock, X)
self.assertIs(mock.__class__, X)
self.assertEqual(Mock().__class__.__name__, 'Mock')
mock = Mock(spec_set=X)
self.assertIsInstance(mock, X)
mock = Mock(spec_set=X())
self.assertIsInstance(mock, X)
def test_setting_attribute_with_spec_set(self):
class X(object):
y = 3
mock = Mock(spec=X)
mock.x = 'foo'
mock = Mock(spec_set=X)
def set_attr():
mock.x = 'foo'
mock.y = 'foo'
self.assertRaises(AttributeError, set_attr)
def test_copy(self):
current = sys.getrecursionlimit()
self.addCleanup(sys.setrecursionlimit, current)
# can't use sys.maxint as this doesn't exist in Python 3
sys.setrecursionlimit(int(10e8))
# this segfaults without the fix in place
copy.copy(Mock())
def test_subclass_with_properties(self):
class SubClass(Mock):
def _get(self):
return 3
def _set(self, value):
raise NameError('strange error')
some_attribute = property(_get, _set)
s = SubClass(spec_set=SubClass)
self.assertEqual(s.some_attribute, 3)
def test():
s.some_attribute = 3
self.assertRaises(NameError, test)
def test():
s.foo = 'bar'
self.assertRaises(AttributeError, test)
def test_setting_call(self):
mock = Mock()
def __call__(self, a):
self._increment_mock_call(a)
return self._mock_call(a)
type(mock).__call__ = __call__
mock('one')
mock.assert_called_with('one')
self.assertRaises(TypeError, mock, 'one', 'two')
def test_dir(self):
mock = Mock()
attrs = set(dir(mock))
type_attrs = set([m for m in dir(Mock) if not m.startswith('_')])
# all public attributes from the type are included
self.assertEqual(set(), type_attrs - attrs)
# creates these attributes
mock.a, mock.b
self.assertIn('a', dir(mock))
self.assertIn('b', dir(mock))
# instance attributes
mock.c = mock.d = None
self.assertIn('c', dir(mock))
self.assertIn('d', dir(mock))
# magic methods
mock.__iter__ = lambda s: iter([])
self.assertIn('__iter__', dir(mock))
def test_dir_from_spec(self):
mock = Mock(spec=unittest.TestCase)
testcase_attrs = set(dir(unittest.TestCase))
attrs = set(dir(mock))
# all attributes from the spec are included
self.assertEqual(set(), testcase_attrs - attrs)
# shadow a sys attribute
mock.version = 3
self.assertEqual(dir(mock).count('version'), 1)
def test_filter_dir(self):
patcher = patch.object(mock, 'FILTER_DIR', False)
patcher.start()
try:
attrs = set(dir(Mock()))
type_attrs = set(dir(Mock))
# ALL attributes from the type are included
self.assertEqual(set(), type_attrs - attrs)
finally:
patcher.stop()
def test_dir_does_not_include_deleted_attributes(self):
mock = Mock()
mock.child.return_value = 1
self.assertIn('child', dir(mock))
del mock.child
self.assertNotIn('child', dir(mock))
def test_configure_mock(self):
mock = Mock(foo='bar')
self.assertEqual(mock.foo, 'bar')
mock = MagicMock(foo='bar')
self.assertEqual(mock.foo, 'bar')
kwargs = {'side_effect': KeyError, 'foo.bar.return_value': 33,
'foo': MagicMock()}
mock = Mock(**kwargs)
self.assertRaises(KeyError, mock)
self.assertEqual(mock.foo.bar(), 33)
self.assertIsInstance(mock.foo, MagicMock)
mock = Mock()
mock.configure_mock(**kwargs)
self.assertRaises(KeyError, mock)
self.assertEqual(mock.foo.bar(), 33)
self.assertIsInstance(mock.foo, MagicMock)
def assertRaisesWithMsg(self, exception, message, func, *args, **kwargs):
# needed because assertRaisesRegex doesn't work easily with newlines
with self.assertRaises(exception) as context:
func(*args, **kwargs)
msg = str(context.exception)
self.assertEqual(msg, message)
def test_assert_called_with_failure_message(self):
mock = NonCallableMock()
actual = 'not called.'
expected = "mock(1, '2', 3, bar='foo')"
message = 'expected call not found.\nExpected: %s\nActual: %s'
self.assertRaisesWithMsg(
AssertionError, message % (expected, actual),
mock.assert_called_with, 1, '2', 3, bar='foo'
)
mock.foo(1, '2', 3, foo='foo')
asserters = [
mock.foo.assert_called_with, mock.foo.assert_called_once_with
]
for meth in asserters:
actual = "foo(1, '2', 3, foo='foo')"
expected = "foo(1, '2', 3, bar='foo')"
message = 'expected call not found.\nExpected: %s\nActual: %s'
self.assertRaisesWithMsg(
AssertionError, message % (expected, actual),
meth, 1, '2', 3, bar='foo'
)
# just kwargs
for meth in asserters:
actual = "foo(1, '2', 3, foo='foo')"
expected = "foo(bar='foo')"
message = 'expected call not found.\nExpected: %s\nActual: %s'
self.assertRaisesWithMsg(
AssertionError, message % (expected, actual),
meth, bar='foo'
)
# just args
for meth in asserters:
actual = "foo(1, '2', 3, foo='foo')"
expected = "foo(1, 2, 3)"
message = 'expected call not found.\nExpected: %s\nActual: %s'
self.assertRaisesWithMsg(
AssertionError, message % (expected, actual),
meth, 1, 2, 3
)
# empty
for meth in asserters:
actual = "foo(1, '2', 3, foo='foo')"
expected = "foo()"
message = 'expected call not found.\nExpected: %s\nActual: %s'
self.assertRaisesWithMsg(
AssertionError, message % (expected, actual), meth
)
def test_mock_calls(self):
mock = MagicMock()
# need to do this because MagicMock.mock_calls used to just return
# a MagicMock which also returned a MagicMock when __eq__ was called
self.assertIs(mock.mock_calls == [], True)
mock = MagicMock()
mock()
expected = [('', (), {})]
self.assertEqual(mock.mock_calls, expected)
mock.foo()
expected.append(call.foo())
self.assertEqual(mock.mock_calls, expected)
# intermediate mock_calls work too
self.assertEqual(mock.foo.mock_calls, [('', (), {})])
mock = MagicMock()
mock().foo(1, 2, 3, a=4, b=5)
expected = [
('', (), {}), ('().foo', (1, 2, 3), dict(a=4, b=5))
]
self.assertEqual(mock.mock_calls, expected)
self.assertEqual(mock.return_value.foo.mock_calls,
[('', (1, 2, 3), dict(a=4, b=5))])
self.assertEqual(mock.return_value.mock_calls,
[('foo', (1, 2, 3), dict(a=4, b=5))])
mock = MagicMock()
mock().foo.bar().baz()
expected = [
('', (), {}), ('().foo.bar', (), {}),
('().foo.bar().baz', (), {})
]
self.assertEqual(mock.mock_calls, expected)
self.assertEqual(mock().mock_calls,
call.foo.bar().baz().call_list())
for kwargs in dict(), dict(name='bar'):
mock = MagicMock(**kwargs)
int(mock.foo)
expected = [('foo.__int__', (), {})]
self.assertEqual(mock.mock_calls, expected)
mock = MagicMock(**kwargs)
mock.a()()
expected = [('a', (), {}), ('a()', (), {})]
self.assertEqual(mock.mock_calls, expected)
self.assertEqual(mock.a().mock_calls, [call()])
mock = MagicMock(**kwargs)
mock(1)(2)(3)
self.assertEqual(mock.mock_calls, call(1)(2)(3).call_list())
self.assertEqual(mock().mock_calls, call(2)(3).call_list())
self.assertEqual(mock()().mock_calls, call(3).call_list())
mock = MagicMock(**kwargs)
mock(1)(2)(3).a.b.c(4)
self.assertEqual(mock.mock_calls,
call(1)(2)(3).a.b.c(4).call_list())
self.assertEqual(mock().mock_calls,
call(2)(3).a.b.c(4).call_list())
self.assertEqual(mock()().mock_calls,
call(3).a.b.c(4).call_list())
mock = MagicMock(**kwargs)
int(mock().foo.bar().baz())
last_call = ('().foo.bar().baz().__int__', (), {})
self.assertEqual(mock.mock_calls[-1], last_call)
self.assertEqual(mock().mock_calls,
call.foo.bar().baz().__int__().call_list())
self.assertEqual(mock().foo.bar().mock_calls,
call.baz().__int__().call_list())
self.assertEqual(mock().foo.bar().baz.mock_calls,
call().__int__().call_list())
def test_child_mock_call_equal(self):
m = Mock()
result = m()
result.wibble()
# parent looks like this:
self.assertEqual(m.mock_calls, [call(), call().wibble()])
# but child should look like this:
self.assertEqual(result.mock_calls, [call.wibble()])
def test_mock_call_not_equal_leaf(self):
m = Mock()
m.foo().something()
self.assertNotEqual(m.mock_calls[1], call.foo().different())
self.assertEqual(m.mock_calls[0], call.foo())
def test_mock_call_not_equal_non_leaf(self):
m = Mock()
m.foo().bar()
self.assertNotEqual(m.mock_calls[1], call.baz().bar())
self.assertNotEqual(m.mock_calls[0], call.baz())
def test_mock_call_not_equal_non_leaf_params_different(self):
m = Mock()
m.foo(x=1).bar()
# This isn't ideal, but there's no way to fix it without breaking backwards compatibility:
self.assertEqual(m.mock_calls[1], call.foo(x=2).bar())
def test_mock_call_not_equal_non_leaf_attr(self):
m = Mock()
m.foo.bar()
self.assertNotEqual(m.mock_calls[0], call.baz.bar())
def test_mock_call_not_equal_non_leaf_call_versus_attr(self):
m = Mock()
m.foo.bar()
self.assertNotEqual(m.mock_calls[0], call.foo().bar())
def test_mock_call_repr(self):
m = Mock()
m.foo().bar().baz.bob()
self.assertEqual(repr(m.mock_calls[0]), 'call.foo()')
self.assertEqual(repr(m.mock_calls[1]), 'call.foo().bar()')
self.assertEqual(repr(m.mock_calls[2]), 'call.foo().bar().baz.bob()')
def test_mock_call_repr_loop(self):
m = Mock()
m.foo = m
repr(m.foo())
self.assertRegex(repr(m.foo()), r"<Mock name='mock\(\)' id='\d+'>")
def test_mock_calls_contains(self):
m = Mock()
self.assertFalse([call()] in m.mock_calls)
def test_subclassing(self):
class Subclass(Mock):
pass
mock = Subclass()
self.assertIsInstance(mock.foo, Subclass)
self.assertIsInstance(mock(), Subclass)
class Subclass(Mock):
def _get_child_mock(self, **kwargs):
return Mock(**kwargs)
mock = Subclass()
self.assertNotIsInstance(mock.foo, Subclass)
self.assertNotIsInstance(mock(), Subclass)
def test_arg_lists(self):
mocks = [
Mock(),
MagicMock(),
NonCallableMock(),
NonCallableMagicMock()
]
def assert_attrs(mock):
names = 'call_args_list', 'method_calls', 'mock_calls'
for name in names:
attr = getattr(mock, name)
self.assertIsInstance(attr, _CallList)
self.assertIsInstance(attr, list)
self.assertEqual(attr, [])
for mock in mocks:
assert_attrs(mock)
if callable(mock):
mock()
mock(1, 2)
mock(a=3)
mock.reset_mock()
assert_attrs(mock)
mock.foo()
mock.foo.bar(1, a=3)
mock.foo(1).bar().baz(3)
mock.reset_mock()
assert_attrs(mock)
def test_call_args_two_tuple(self):
mock = Mock()
mock(1, a=3)
mock(2, b=4)
self.assertEqual(len(mock.call_args), 2)
self.assertEqual(mock.call_args.args, (2,))
self.assertEqual(mock.call_args.kwargs, dict(b=4))
expected_list = [((1,), dict(a=3)), ((2,), dict(b=4))]
for expected, call_args in zip(expected_list, mock.call_args_list):
self.assertEqual(len(call_args), 2)
self.assertEqual(expected[0], call_args[0])
self.assertEqual(expected[1], call_args[1])
def test_side_effect_iterator(self):
mock = Mock(side_effect=iter([1, 2, 3]))
self.assertEqual([mock(), mock(), mock()], [1, 2, 3])
self.assertRaises(StopIteration, mock)
mock = MagicMock(side_effect=['a', 'b', 'c'])
self.assertEqual([mock(), mock(), mock()], ['a', 'b', 'c'])
self.assertRaises(StopIteration, mock)
mock = Mock(side_effect='ghi')
self.assertEqual([mock(), mock(), mock()], ['g', 'h', 'i'])
self.assertRaises(StopIteration, mock)
class Foo(object):
pass
mock = MagicMock(side_effect=Foo)
self.assertIsInstance(mock(), Foo)
mock = Mock(side_effect=Iter())
self.assertEqual([mock(), mock(), mock(), mock()],
['this', 'is', 'an', 'iter'])
self.assertRaises(StopIteration, mock)
def test_side_effect_iterator_exceptions(self):
for Klass in Mock, MagicMock:
iterable = (ValueError, 3, KeyError, 6)
m = Klass(side_effect=iterable)
self.assertRaises(ValueError, m)
self.assertEqual(m(), 3)
self.assertRaises(KeyError, m)
self.assertEqual(m(), 6)
def test_side_effect_setting_iterator(self):
mock = Mock()
mock.side_effect = iter([1, 2, 3])
self.assertEqual([mock(), mock(), mock()], [1, 2, 3])
self.assertRaises(StopIteration, mock)
side_effect = mock.side_effect
self.assertIsInstance(side_effect, type(iter([])))
mock.side_effect = ['a', 'b', 'c']
self.assertEqual([mock(), mock(), mock()], ['a', 'b', 'c'])
self.assertRaises(StopIteration, mock)
side_effect = mock.side_effect
self.assertIsInstance(side_effect, type(iter([])))
this_iter = Iter()
mock.side_effect = this_iter
self.assertEqual([mock(), mock(), mock(), mock()],
['this', 'is', 'an', 'iter'])
self.assertRaises(StopIteration, mock)
self.assertIs(mock.side_effect, this_iter)
def test_side_effect_iterator_default(self):
mock = Mock(return_value=2)
mock.side_effect = iter([1, DEFAULT])
self.assertEqual([mock(), mock()], [1, 2])
def test_assert_has_calls_any_order(self):
mock = Mock()
mock(1, 2)
mock(a=3)
mock(3, 4)
mock(b=6)
mock(b=6)
kalls = [
call(1, 2), ({'a': 3},),
((3, 4),), ((), {'a': 3}),
('', (1, 2)), ('', {'a': 3}),
('', (1, 2), {}), ('', (), {'a': 3})
]
for kall in kalls:
mock.assert_has_calls([kall], any_order=True)
for kall in call(1, '2'), call(b=3), call(), 3, None, 'foo':
self.assertRaises(
AssertionError, mock.assert_has_calls,
[kall], any_order=True
)
kall_lists = [
[call(1, 2), call(b=6)],
[call(3, 4), call(1, 2)],
[call(b=6), call(b=6)],
]
for kall_list in kall_lists:
mock.assert_has_calls(kall_list, any_order=True)
kall_lists = [
[call(b=6), call(b=6), call(b=6)],
[call(1, 2), call(1, 2)],
[call(3, 4), call(1, 2), call(5, 7)],
[call(b=6), call(3, 4), call(b=6), call(1, 2), call(b=6)],
]
for kall_list in kall_lists:
self.assertRaises(
AssertionError, mock.assert_has_calls,
kall_list, any_order=True
)
def test_assert_has_calls(self):
kalls1 = [
call(1, 2), ({'a': 3},),
((3, 4),), call(b=6),
('', (1,), {'b': 6}),
]
kalls2 = [call.foo(), call.bar(1)]
kalls2.extend(call.spam().baz(a=3).call_list())
kalls2.extend(call.bam(set(), foo={}).fish([1]).call_list())
mocks = []
for mock in Mock(), MagicMock():
mock(1, 2)
mock(a=3)
mock(3, 4)
mock(b=6)
mock(1, b=6)
mocks.append((mock, kalls1))
mock = Mock()
mock.foo()
mock.bar(1)
mock.spam().baz(a=3)
mock.bam(set(), foo={}).fish([1])
mocks.append((mock, kalls2))
for mock, kalls in mocks:
for i in range(len(kalls)):
for step in 1, 2, 3:
these = kalls[i:i+step]
mock.assert_has_calls(these)
if len(these) > 1:
self.assertRaises(
AssertionError,
mock.assert_has_calls,
list(reversed(these))
)
def test_assert_has_calls_nested_spec(self):
class Something:
def __init__(self): pass
def meth(self, a, b, c, d=None): pass
class Foo:
def __init__(self, a): pass
def meth1(self, a, b): pass
mock_class = create_autospec(Something)
for m in [mock_class, mock_class()]:
m.meth(1, 2, 3, d=1)
m.assert_has_calls([call.meth(1, 2, 3, d=1)])
m.assert_has_calls([call.meth(1, 2, 3, 1)])
mock_class.reset_mock()
for m in [mock_class, mock_class()]:
self.assertRaises(AssertionError, m.assert_has_calls, [call.Foo()])
m.Foo(1).meth1(1, 2)
m.assert_has_calls([call.Foo(1), call.Foo(1).meth1(1, 2)])
m.Foo.assert_has_calls([call(1), call().meth1(1, 2)])
mock_class.reset_mock()
invalid_calls = [call.meth(1),
call.non_existent(1),
call.Foo().non_existent(1),
call.Foo().meth(1, 2, 3, 4)]
for kall in invalid_calls:
self.assertRaises(AssertionError,
mock_class.assert_has_calls,
[kall]
)
def test_assert_has_calls_nested_without_spec(self):
m = MagicMock()
m().foo().bar().baz()
m.one().two().three()
calls = call.one().two().three().call_list()
m.assert_has_calls(calls)
def test_assert_has_calls_with_function_spec(self):
def f(a, b, c, d=None): pass
mock = Mock(spec=f)
mock(1, b=2, c=3)
mock(4, 5, c=6, d=7)
mock(10, 11, c=12)
calls = [
('', (1, 2, 3), {}),
('', (4, 5, 6), {'d': 7}),
((10, 11, 12), {}),
]
mock.assert_has_calls(calls)
mock.assert_has_calls(calls, any_order=True)
mock.assert_has_calls(calls[1:])
mock.assert_has_calls(calls[1:], any_order=True)
mock.assert_has_calls(calls[:-1])
mock.assert_has_calls(calls[:-1], any_order=True)
# Reversed order
calls = list(reversed(calls))
with self.assertRaises(AssertionError):
mock.assert_has_calls(calls)
mock.assert_has_calls(calls, any_order=True)
with self.assertRaises(AssertionError):
mock.assert_has_calls(calls[1:])
mock.assert_has_calls(calls[1:], any_order=True)
with self.assertRaises(AssertionError):
mock.assert_has_calls(calls[:-1])
mock.assert_has_calls(calls[:-1], any_order=True)
def test_assert_has_calls_not_matching_spec_error(self):
def f(x=None): pass
mock = Mock(spec=f)
mock(1)
with self.assertRaisesRegex(
AssertionError,
'^{}$'.format(
re.escape('Calls not found.\n'
'Expected: [call()]\n'
'Actual: [call(1)]'))) as cm:
mock.assert_has_calls([call()])
self.assertIsNone(cm.exception.__cause__)
with self.assertRaisesRegex(
AssertionError,
'^{}$'.format(
re.escape(
'Error processing expected calls.\n'
"Errors: [None, TypeError('too many positional arguments')]\n"
"Expected: [call(), call(1, 2)]\n"
'Actual: [call(1)]').replace(
"arguments\\'", "arguments\\',?"
))) as cm:
mock.assert_has_calls([call(), call(1, 2)])
self.assertIsInstance(cm.exception.__cause__, TypeError)
def test_assert_any_call(self):
mock = Mock()
mock(1, 2)
mock(a=3)
mock(1, b=6)
mock.assert_any_call(1, 2)
mock.assert_any_call(a=3)
mock.assert_any_call(1, b=6)
self.assertRaises(
AssertionError,
mock.assert_any_call
)
self.assertRaises(
AssertionError,
mock.assert_any_call,
1, 3
)
self.assertRaises(
AssertionError,
mock.assert_any_call,
a=4
)
def test_assert_any_call_with_function_spec(self):
def f(a, b, c, d=None): pass
mock = Mock(spec=f)
mock(1, b=2, c=3)
mock(4, 5, c=6, d=7)
mock.assert_any_call(1, 2, 3)
mock.assert_any_call(a=1, b=2, c=3)
mock.assert_any_call(4, 5, 6, 7)
mock.assert_any_call(a=4, b=5, c=6, d=7)
self.assertRaises(AssertionError, mock.assert_any_call,
1, b=3, c=2)
# Expected call doesn't match the spec's signature
with self.assertRaises(AssertionError) as cm:
mock.assert_any_call(e=8)
self.assertIsInstance(cm.exception.__cause__, TypeError)
def test_mock_calls_create_autospec(self):
def f(a, b): pass
obj = Iter()
obj.f = f
funcs = [
create_autospec(f),
create_autospec(obj).f
]
for func in funcs:
func(1, 2)
func(3, 4)
self.assertEqual(
func.mock_calls, [call(1, 2), call(3, 4)]
)
#Issue21222
def test_create_autospec_with_name(self):
m = mock.create_autospec(object(), name='sweet_func')
self.assertIn('sweet_func', repr(m))
#Issue23078
def test_create_autospec_classmethod_and_staticmethod(self):
class TestClass:
@classmethod
def class_method(cls): pass
@staticmethod
def static_method(): pass
for method in ('class_method', 'static_method'):
with self.subTest(method=method):
mock_method = mock.create_autospec(getattr(TestClass, method))
mock_method()
mock_method.assert_called_once_with()
self.assertRaises(TypeError, mock_method, 'extra_arg')
#Issue21238
def test_mock_unsafe(self):
m = Mock()
msg = "Attributes cannot start with 'assert' or its misspellings"
with self.assertRaisesRegex(AttributeError, msg):
m.assert_foo_call()
with self.assertRaisesRegex(AttributeError, msg):
m.assret_foo_call()
with self.assertRaisesRegex(AttributeError, msg):
m.asert_foo_call()
with self.assertRaisesRegex(AttributeError, msg):
m.aseert_foo_call()
with self.assertRaisesRegex(AttributeError, msg):
m.assrt_foo_call()
m = Mock(unsafe=True)
m.assert_foo_call()
m.assret_foo_call()
m.asert_foo_call()
m.aseert_foo_call()
m.assrt_foo_call()
#Issue21262
def test_assert_not_called(self):
m = Mock()
m.hello.assert_not_called()
m.hello()
with self.assertRaises(AssertionError):
m.hello.assert_not_called()
def test_assert_not_called_message(self):
m = Mock()
m(1, 2)
self.assertRaisesRegex(AssertionError,
re.escape("Calls: [call(1, 2)]"),
m.assert_not_called)
def test_assert_called(self):
m = Mock()
with self.assertRaises(AssertionError):
m.hello.assert_called()
m.hello()
m.hello.assert_called()
m.hello()
m.hello.assert_called()
def test_assert_called_once(self):
m = Mock()
with self.assertRaises(AssertionError):
m.hello.assert_called_once()
m.hello()
m.hello.assert_called_once()
m.hello()
with self.assertRaises(AssertionError):
m.hello.assert_called_once()
def test_assert_called_once_message(self):
m = Mock()
m(1, 2)
m(3)
self.assertRaisesRegex(AssertionError,
re.escape("Calls: [call(1, 2), call(3)]"),
m.assert_called_once)
def test_assert_called_once_message_not_called(self):
m = Mock()
with self.assertRaises(AssertionError) as e:
m.assert_called_once()
self.assertNotIn("Calls:", str(e.exception))
#Issue37212 printout of keyword args now preserves the original order
def test_ordered_call_signature(self):
m = Mock()
m.hello(name='hello', daddy='hero')
text = "call(name='hello', daddy='hero')"
self.assertEqual(repr(m.hello.call_args), text)
#Issue21270 overrides tuple methods for mock.call objects
def test_override_tuple_methods(self):
c = call.count()
i = call.index(132,'hello')
m = Mock()
m.count()
m.index(132,"hello")
self.assertEqual(m.method_calls[0], c)
self.assertEqual(m.method_calls[1], i)
def test_reset_return_sideeffect(self):
m = Mock(return_value=10, side_effect=[2,3])
m.reset_mock(return_value=True, side_effect=True)
self.assertIsInstance(m.return_value, Mock)
self.assertEqual(m.side_effect, None)
def test_reset_return(self):
m = Mock(return_value=10, side_effect=[2,3])
m.reset_mock(return_value=True)
self.assertIsInstance(m.return_value, Mock)
self.assertNotEqual(m.side_effect, None)
def test_reset_sideeffect(self):
m = Mock(return_value=10, side_effect=[2, 3])
m.reset_mock(side_effect=True)
self.assertEqual(m.return_value, 10)
self.assertEqual(m.side_effect, None)
def test_reset_return_with_children(self):
m = MagicMock(f=MagicMock(return_value=1))
self.assertEqual(m.f(), 1)
m.reset_mock(return_value=True)
self.assertNotEqual(m.f(), 1)
def test_reset_return_with_children_side_effect(self):
m = MagicMock(f=MagicMock(side_effect=[2, 3]))
self.assertNotEqual(m.f.side_effect, None)
m.reset_mock(side_effect=True)
self.assertEqual(m.f.side_effect, None)
def test_mock_add_spec(self):
class _One(object):
one = 1
class _Two(object):
two = 2
class Anything(object):
one = two = three = 'four'
klasses = [
Mock, MagicMock, NonCallableMock, NonCallableMagicMock
]
for Klass in list(klasses):
klasses.append(lambda K=Klass: K(spec=Anything))
klasses.append(lambda K=Klass: K(spec_set=Anything))
for Klass in klasses:
for kwargs in dict(), dict(spec_set=True):
mock = Klass()
#no error
mock.one, mock.two, mock.three
for One, Two in [(_One, _Two), (['one'], ['two'])]:
for kwargs in dict(), dict(spec_set=True):
mock.mock_add_spec(One, **kwargs)
mock.one
self.assertRaises(
AttributeError, getattr, mock, 'two'
)
self.assertRaises(
AttributeError, getattr, mock, 'three'
)
if 'spec_set' in kwargs:
self.assertRaises(
AttributeError, setattr, mock, 'three', None
)
mock.mock_add_spec(Two, **kwargs)
self.assertRaises(
AttributeError, getattr, mock, 'one'
)
mock.two
self.assertRaises(
AttributeError, getattr, mock, 'three'
)
if 'spec_set' in kwargs:
self.assertRaises(
AttributeError, setattr, mock, 'three', None
)
# note that creating a mock, setting an instance attribute, and
# *then* setting a spec doesn't work. Not the intended use case
def test_mock_add_spec_magic_methods(self):
for Klass in MagicMock, NonCallableMagicMock:
mock = Klass()
int(mock)
mock.mock_add_spec(object)
self.assertRaises(TypeError, int, mock)
mock = Klass()
mock['foo']
            mock.__int__.return_value = 4
mock.mock_add_spec(int)
self.assertEqual(int(mock), 4)
self.assertRaises(TypeError, lambda: mock['foo'])
def test_adding_child_mock(self):
for Klass in (NonCallableMock, Mock, MagicMock, NonCallableMagicMock,
AsyncMock):
mock = Klass()
mock.foo = Mock()
mock.foo()
self.assertEqual(mock.method_calls, [call.foo()])
self.assertEqual(mock.mock_calls, [call.foo()])
mock = Klass()
mock.bar = Mock(name='name')
mock.bar()
self.assertEqual(mock.method_calls, [])
self.assertEqual(mock.mock_calls, [])
# mock with an existing _new_parent but no name
mock = Klass()
mock.baz = MagicMock()()
mock.baz()
self.assertEqual(mock.method_calls, [])
self.assertEqual(mock.mock_calls, [])
def test_adding_return_value_mock(self):
for Klass in Mock, MagicMock:
mock = Klass()
mock.return_value = MagicMock()
mock()()
self.assertEqual(mock.mock_calls, [call(), call()()])
def test_manager_mock(self):
class Foo(object):
one = 'one'
two = 'two'
manager = Mock()
p1 = patch.object(Foo, 'one')
p2 = patch.object(Foo, 'two')
mock_one = p1.start()
self.addCleanup(p1.stop)
mock_two = p2.start()
self.addCleanup(p2.stop)
manager.attach_mock(mock_one, 'one')
manager.attach_mock(mock_two, 'two')
Foo.two()
Foo.one()
self.assertEqual(manager.mock_calls, [call.two(), call.one()])
def test_magic_methods_mock_calls(self):
for Klass in Mock, MagicMock:
m = Klass()
m.__int__ = Mock(return_value=3)
m.__float__ = MagicMock(return_value=3.0)
int(m)
float(m)
self.assertEqual(m.mock_calls, [call.__int__(), call.__float__()])
self.assertEqual(m.method_calls, [])
def test_mock_open_reuse_issue_21750(self):
mocked_open = mock.mock_open(read_data='data')
f1 = mocked_open('a-name')
f1_data = f1.read()
f2 = mocked_open('another-name')
f2_data = f2.read()
self.assertEqual(f1_data, f2_data)
def test_mock_open_dunder_iter_issue(self):
# Test dunder_iter method generates the expected result and
# consumes the iterator.
mocked_open = mock.mock_open(read_data='Remarkable\nNorwegian Blue')
f1 = mocked_open('a-name')
lines = [line for line in f1]
self.assertEqual(lines[0], 'Remarkable\n')
self.assertEqual(lines[1], 'Norwegian Blue')
self.assertEqual(list(f1), [])
def test_mock_open_using_next(self):
mocked_open = mock.mock_open(read_data='1st line\n2nd line\n3rd line')
f1 = mocked_open('a-name')
line1 = next(f1)
line2 = f1.__next__()
lines = [line for line in f1]
self.assertEqual(line1, '1st line\n')
self.assertEqual(line2, '2nd line\n')
self.assertEqual(lines[0], '3rd line')
self.assertEqual(list(f1), [])
with self.assertRaises(StopIteration):
next(f1)
def test_mock_open_next_with_readline_with_return_value(self):
mopen = mock.mock_open(read_data='foo\nbarn')
mopen.return_value.readline.return_value = 'abc'
self.assertEqual('abc', next(mopen()))
def test_mock_open_write(self):
# Test exception in file writing write()
mock_namedtemp = mock.mock_open(mock.MagicMock(name='JLV'))
with mock.patch('tempfile.NamedTemporaryFile', mock_namedtemp):
mock_filehandle = mock_namedtemp.return_value
mock_write = mock_filehandle.write
mock_write.side_effect = OSError('Test 2 Error')
def attempt():
tempfile.NamedTemporaryFile().write('asd')
self.assertRaises(OSError, attempt)
def test_mock_open_alter_readline(self):
mopen = mock.mock_open(read_data='foo\nbarn')
mopen.return_value.readline.side_effect = lambda *args:'abc'
first = mopen().readline()
second = mopen().readline()
self.assertEqual('abc', first)
self.assertEqual('abc', second)
def test_mock_open_after_eof(self):
# read, readline and readlines should work after end of file.
_open = mock.mock_open(read_data='foo')
h = _open('bar')
h.read()
self.assertEqual('', h.read())
self.assertEqual('', h.read())
self.assertEqual('', h.readline())
self.assertEqual('', h.readline())
self.assertEqual([], h.readlines())
self.assertEqual([], h.readlines())
def test_mock_parents(self):
for Klass in Mock, MagicMock:
m = Klass()
original_repr = repr(m)
m.return_value = m
self.assertIs(m(), m)
self.assertEqual(repr(m), original_repr)
m.reset_mock()
self.assertIs(m(), m)
self.assertEqual(repr(m), original_repr)
m = Klass()
m.b = m.a
self.assertIn("name='mock.a'", repr(m.b))
self.assertIn("name='mock.a'", repr(m.a))
m.reset_mock()
self.assertIn("name='mock.a'", repr(m.b))
self.assertIn("name='mock.a'", repr(m.a))
m = Klass()
original_repr = repr(m)
m.a = m()
m.a.return_value = m
self.assertEqual(repr(m), original_repr)
self.assertEqual(repr(m.a()), original_repr)
def test_attach_mock(self):
classes = Mock, MagicMock, NonCallableMagicMock, NonCallableMock
for Klass in classes:
for Klass2 in classes:
m = Klass()
m2 = Klass2(name='foo')
m.attach_mock(m2, 'bar')
self.assertIs(m.bar, m2)
self.assertIn("name='mock.bar'", repr(m2))
m.bar.baz(1)
self.assertEqual(m.mock_calls, [call.bar.baz(1)])
self.assertEqual(m.method_calls, [call.bar.baz(1)])
def test_attach_mock_return_value(self):
classes = Mock, MagicMock, NonCallableMagicMock, NonCallableMock
for Klass in Mock, MagicMock:
for Klass2 in classes:
m = Klass()
m2 = Klass2(name='foo')
m.attach_mock(m2, 'return_value')
self.assertIs(m(), m2)
self.assertIn("name='mock()'", repr(m2))
m2.foo()
self.assertEqual(m.mock_calls, call().foo().call_list())
def test_attach_mock_patch_autospec(self):
parent = Mock()
with mock.patch(f'{__name__}.something', autospec=True) as mock_func:
self.assertEqual(mock_func.mock._extract_mock_name(), 'something')
parent.attach_mock(mock_func, 'child')
parent.child(1)
something(2)
mock_func(3)
parent_calls = [call.child(1), call.child(2), call.child(3)]
child_calls = [call(1), call(2), call(3)]
self.assertEqual(parent.mock_calls, parent_calls)
self.assertEqual(parent.child.mock_calls, child_calls)
self.assertEqual(something.mock_calls, child_calls)
self.assertEqual(mock_func.mock_calls, child_calls)
self.assertIn('mock.child', repr(parent.child.mock))
self.assertEqual(mock_func.mock._extract_mock_name(), 'mock.child')
def test_attach_mock_patch_autospec_signature(self):
with mock.patch(f'{__name__}.Something.meth', autospec=True) as mocked:
manager = Mock()
manager.attach_mock(mocked, 'attach_meth')
obj = Something()
obj.meth(1, 2, 3, d=4)
manager.assert_has_calls([call.attach_meth(mock.ANY, 1, 2, 3, d=4)])
obj.meth.assert_has_calls([call(mock.ANY, 1, 2, 3, d=4)])
mocked.assert_has_calls([call(mock.ANY, 1, 2, 3, d=4)])
with mock.patch(f'{__name__}.something', autospec=True) as mocked:
manager = Mock()
manager.attach_mock(mocked, 'attach_func')
something(1)
manager.assert_has_calls([call.attach_func(1)])
something.assert_has_calls([call(1)])
mocked.assert_has_calls([call(1)])
with mock.patch(f'{__name__}.Something', autospec=True) as mocked:
manager = Mock()
manager.attach_mock(mocked, 'attach_obj')
obj = Something()
obj.meth(1, 2, 3, d=4)
manager.assert_has_calls([call.attach_obj(),
call.attach_obj().meth(1, 2, 3, d=4)])
obj.meth.assert_has_calls([call(1, 2, 3, d=4)])
mocked.assert_has_calls([call(), call().meth(1, 2, 3, d=4)])
def test_attribute_deletion(self):
for mock in (Mock(), MagicMock(), NonCallableMagicMock(),
NonCallableMock()):
self.assertTrue(hasattr(mock, 'm'))
del mock.m
self.assertFalse(hasattr(mock, 'm'))
del mock.f
self.assertFalse(hasattr(mock, 'f'))
self.assertRaises(AttributeError, getattr, mock, 'f')
def test_mock_does_not_raise_on_repeated_attribute_deletion(self):
        # bpo-20239: Assigning and then deleting an attribute twice used to raise.
for mock in (Mock(), MagicMock(), NonCallableMagicMock(),
NonCallableMock()):
mock.foo = 3
self.assertTrue(hasattr(mock, 'foo'))
self.assertEqual(mock.foo, 3)
del mock.foo
self.assertFalse(hasattr(mock, 'foo'))
mock.foo = 4
self.assertTrue(hasattr(mock, 'foo'))
self.assertEqual(mock.foo, 4)
del mock.foo
self.assertFalse(hasattr(mock, 'foo'))
def test_mock_raises_when_deleting_nonexistent_attribute(self):
for mock in (Mock(), MagicMock(), NonCallableMagicMock(),
NonCallableMock()):
del mock.foo
with self.assertRaises(AttributeError):
del mock.foo
def test_reset_mock_does_not_raise_on_attr_deletion(self):
# bpo-31177: reset_mock should not raise AttributeError when attributes
# were deleted in a mock instance
mock = Mock()
mock.child = True
del mock.child
mock.reset_mock()
self.assertFalse(hasattr(mock, 'child'))
def test_class_assignable(self):
for mock in Mock(), MagicMock():
self.assertNotIsInstance(mock, int)
mock.__class__ = int
self.assertIsInstance(mock, int)
mock.foo
def test_name_attribute_of_call(self):
# bpo-35357: _Call should not disclose any attributes whose names
# may clash with popular ones (such as ".name")
self.assertIsNotNone(call.name)
self.assertEqual(type(call.name), _Call)
self.assertEqual(type(call.name().name), _Call)
def test_parent_attribute_of_call(self):
# bpo-35357: _Call should not disclose any attributes whose names
# may clash with popular ones (such as ".parent")
self.assertIsNotNone(call.parent)
self.assertEqual(type(call.parent), _Call)
self.assertEqual(type(call.parent().parent), _Call)
def test_parent_propagation_with_create_autospec(self):
def foo(a, b): pass
mock = Mock()
mock.child = create_autospec(foo)
mock.child(1, 2)
self.assertRaises(TypeError, mock.child, 1)
self.assertEqual(mock.mock_calls, [call.child(1, 2)])
self.assertIn('mock.child', repr(mock.child.mock))
def test_parent_propagation_with_autospec_attach_mock(self):
def foo(a, b): pass
parent = Mock()
parent.attach_mock(create_autospec(foo, name='bar'), 'child')
parent.child(1, 2)
self.assertRaises(TypeError, parent.child, 1)
self.assertEqual(parent.child.mock_calls, [call.child(1, 2)])
self.assertIn('mock.child', repr(parent.child.mock))
def test_isinstance_under_settrace(self):
# bpo-36593 : __class__ is not set for a class that has __class__
# property defined when it's used with sys.settrace(trace) set.
# Delete the module to force reimport with tracing function set
# restore the old reference later since there are other tests that are
# dependent on unittest.mock.patch. In testpatch.PatchTest
# test_patch_dict_test_prefix and test_patch_test_prefix not restoring
# causes the objects patched to go out of sync
old_patch = mock_module.patch
# Directly using __setattr__ on unittest.mock causes current imported
# reference to be updated. Use a lambda so that during cleanup the
# re-imported new reference is updated.
self.addCleanup(lambda patch: setattr(mock_module, 'patch', patch),
old_patch)
with patch.dict('sys.modules'):
del sys.modules['mock']
# This trace will stop coverage being measured ;-)
def trace(frame, event, arg): # pragma: no cover
return trace
self.addCleanup(sys.settrace, sys.gettrace())
sys.settrace(trace)
from mock.mock import (
Mock, MagicMock, NonCallableMock, NonCallableMagicMock
)
mocks = [
Mock, MagicMock, NonCallableMock, NonCallableMagicMock, AsyncMock
]
for mock in mocks:
obj = mock(spec=Something)
self.assertIsInstance(obj, Something)
def test_bool_not_called_when_passing_spec_arg(self):
class Something:
def __init__(self):
self.obj_with_bool_func = mock_module.MagicMock()
obj = Something()
with mock_module.patch.object(obj, 'obj_with_bool_func', autospec=True): pass
self.assertEqual(obj.obj_with_bool_func.__bool__.call_count, 0)
if __name__ == '__main__':
unittest.main()
|
the-stack_0_24326 | #!/usr/bin/env python
"""Pancake turning."""
def remove_end_plus(s):
"""Remove plusses at the end."""
r = s[::-1]
new_s = ""
seen_minus = False
for el in r:
if not seen_minus:
if el == "-":
seen_minus = True
new_s = el
else:
new_s += el
return new_s[::-1]
def solve(pancakes):
"""
    Get the minimal number of flips (switches) needed to make every pancake show '+'.
    Parameters
    ----------
    pancakes : string
        Each character is '+' or '-', describing the current orientation of one pancake.
    Returns
    -------
    int
        Minimal number of flips needed to turn the whole string into '+' characters only.
Examples
--------
>>> solve("-")
1
>>> solve("-+")
1
>>> solve("+-")
2
>>> solve("+++")
0
"""
if "-" not in pancakes:
return 0
else:
pancakes = remove_end_plus(pancakes)
last = pancakes[0]
switches = 1
for el in pancakes[1:]:
if el != last:
switches += 1
last = el
return switches
if __name__ == "__main__":
    testcases = int(input())
    for caseNr in range(1, testcases + 1):
        cipher = input()
        print("Case #%i: %s" % (caseNr, solve(cipher)))
|
the-stack_0_24327 | import copy
import logging
import time
from functools import partial
from pathlib import Path
import numpy as np
import pandas as pd
from autogluon.core.constants import REGRESSION, BINARY, MULTICLASS
from autogluon.core.utils import try_import_fastai_v1
from autogluon.core.utils.files import make_temp_directory
from autogluon.core.utils.loaders import load_pkl
from autogluon.core.utils.multiprocessing_utils import is_fork_enabled
from autogluon.core.utils.savers import save_pkl
from autogluon.core.features.types import R_OBJECT, R_INT, R_FLOAT, R_DATETIME, R_CATEGORY, R_BOOL
from .hyperparameters.parameters import get_param_baseline
from .hyperparameters.searchspaces import get_default_searchspace
from autogluon.core.models import AbstractModel
from autogluon.core.models.abstract.model_trial import skip_hpo
# FIXME: Has a leak somewhere, training additional models in a single python script will slow down training for each additional model. Gets very slow after 20+ models (10x+ slowdown)
# Slowdown does not appear to impact Mac OS
# Reproduced with raw torch: https://github.com/pytorch/pytorch/issues/31867
# https://forums.fast.ai/t/runtimeerror-received-0-items-of-ancdata/48935
# https://github.com/pytorch/pytorch/issues/973
# https://pytorch.org/docs/master/multiprocessing.html#file-system-file-system
# Slowdown bug not experienced on Linux if 'torch.multiprocessing.set_sharing_strategy('file_system')' commented out
# NOTE: If below line is commented out, Torch uses many file descriptors. If issues arise, increase ulimit through 'ulimit -n 2048' or larger. Default on Linux is 1024.
# torch.multiprocessing.set_sharing_strategy('file_system')
# MacOS issue: torchvision==0.7.0 + torch==1.6.0 can cause segfaults; use torch==1.2.0 torchvision==0.4.0
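# Hedged sketch (not part of the original module): given the notes above, one possible
# compromise is to opt into the 'file_system' sharing strategy only where the Linux
# slowdown was not reported (Mac OS), and otherwise rely on a raised file-descriptor
# limit. The platform check below is an illustrative assumption, not the library's behaviour.
#
#   import platform
#   import torch.multiprocessing
#   if platform.system() == 'Darwin':
#       torch.multiprocessing.set_sharing_strategy('file_system')
#   # elsewhere: ensure the descriptor limit is raised, e.g. 'ulimit -n 2048'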
LABEL = '__label__'
MISSING = '__!#ag_internal_missing#!__'
logger = logging.getLogger(__name__)
# TODO: Takes an extremely long time prior to training start if there are many (10000) continuous features from ngrams; debug - explore a TruncatedSVD option to reduce input dimensionality
# TODO: currently fastai automatically detect and use CUDA if available - add code to honor autogluon settings
class NNFastAiTabularModel(AbstractModel):
""" Class for fastai v1 neural network models that operate on tabular data.
Hyperparameters:
        y_scaler: on regression problems, the model can give unreasonable predictions on unseen data.
This attribute allows to pass a scaler for y values to address this problem. Please note that intermediate
iteration metrics will be affected by this transform and as a result intermediate iteration scores will be
different from the final ones (these will be correct).
https://scikit-learn.org/stable/modules/classes.html#module-sklearn.preprocessing
'layers': list of hidden layers sizes; None - use model's heuristics; default is None
        'emb_drop': embedding layers dropout; default is 0.1
'ps': linear layers dropout - list of values applied to every layer in `layers`; default is [0.1]
'bs': batch size; default is 256
'lr': maximum learning rate for one cycle policy; default is 1e-2;
see also https://fastai1.fast.ai/train.html#fit_one_cycle, One-cycle policy paper: https://arxiv.org/abs/1803.09820
'epochs': number of epochs; default is 30
# Early stopping settings. See more details here: https://fastai1.fast.ai/callbacks.tracker.html#EarlyStoppingCallback
'early.stopping.min_delta': 0.0001,
'early.stopping.patience': 10,
'smoothing': If > 0, then use LabelSmoothingCrossEntropy loss function for binary/multi-class classification;
otherwise use default loss function for this type of problem; default is 0.0.
See: https://docs.fast.ai/layers.html#LabelSmoothingCrossEntropy
"""
model_internals_file_name = 'model-internals.pkl'
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.cat_columns = None
self.cont_columns = None
self.columns_fills = None
self.procs = None
self.y_scaler = None
self._inner_features = None
def _preprocess_train(self, X_train, y_train, X_val, y_val, num_workers):
from fastai.data_block import FloatList
from fastai.tabular import TabularList
X_train = self.preprocess(X_train, fit=True)
if X_val is not None:
X_val = self.preprocess(X_val)
from fastai.tabular import FillMissing, Categorify, Normalize
self.procs = [FillMissing, Categorify, Normalize]
if self.problem_type == REGRESSION and self.y_scaler is not None:
y_train_norm = pd.Series(self.y_scaler.fit_transform(y_train.values.reshape(-1, 1)).reshape(-1))
y_val_norm = pd.Series(self.y_scaler.transform(y_val.values.reshape(-1, 1)).reshape(-1)) if y_val is not None else None
logger.log(0, f'Training with scaled targets: {self.y_scaler} - !!! NN training metric will be different from the final results !!!')
else:
y_train_norm = y_train
y_val_norm = y_val
logger.log(15, f'Using {len(self.cont_columns)} cont features')
df_train, train_idx, val_idx = self._generate_datasets(X_train, y_train_norm, X_val, y_val_norm)
label_class = FloatList if self.problem_type == REGRESSION else None
# Copy cat_columns and cont_columns because TabularList is mutating the list
data = (TabularList.from_df(df_train, path=self.path,
cat_names=self.cat_columns.copy(), cont_names=self.cont_columns.copy(), procs=self.procs)
.split_by_idxs(train_idx, val_idx)
.label_from_df(cols=LABEL, label_cls=label_class)
.databunch(bs=self.params['bs'] if len(X_train) > self.params['bs'] else 32, num_workers=num_workers))
return data
def _preprocess(self, X: pd.DataFrame, fit=False, **kwargs):
X = super()._preprocess(X=X, **kwargs)
if fit:
self.cat_columns = self.feature_metadata.get_features(valid_raw_types=[R_OBJECT, R_CATEGORY, R_BOOL])
self.cont_columns = self.feature_metadata.get_features(valid_raw_types=[R_INT, R_FLOAT, R_DATETIME])
try:
X_train_stats = X.describe(include='all').T.reset_index()
cat_cols_to_drop = X_train_stats[(X_train_stats['unique'] > self.params.get('max_unique_categorical_values', 10000)) | (X_train_stats['unique'].isna())]['index'].values
            except Exception:
cat_cols_to_drop = []
cat_cols_to_keep = [col for col in X.columns.values if (col not in cat_cols_to_drop)]
cat_cols_to_use = [col for col in self.cat_columns if col in cat_cols_to_keep]
logger.log(15, f'Using {len(cat_cols_to_use)}/{len(self.cat_columns)} categorical features')
self.cat_columns = cat_cols_to_use
self.cat_columns = [feature for feature in self.cat_columns if feature in list(X.columns)]
self.cont_columns = [feature for feature in self.cont_columns if feature in list(X.columns)]
self.columns_fills = {}
for c in self.cat_columns:
self.columns_fills[c] = MISSING
for c in self.cont_columns:
self.columns_fills[c] = X[c].mean()
self._inner_features = self.cat_columns + self.cont_columns
return self._fill_missing(X)
def _fill_missing(self, df: pd.DataFrame) -> pd.DataFrame:
df = df[self._inner_features].copy()
for c in self.cat_columns:
df[c] = df[c].cat.add_categories(MISSING)
df[c] = df[c].fillna(self.columns_fills[c])
for c in self.cont_columns:
df[c] = df[c].fillna(self.columns_fills[c])
return df
def _fit(self, X_train, y_train, X_val=None, y_val=None, time_limit=None, num_cpus=None, num_gpus=0, **kwargs):
try_import_fastai_v1()
import torch
from fastai.layers import LabelSmoothingCrossEntropy
from fastai.tabular import tabular_learner
from fastai.utils.mod_display import progress_disabled_ctx
from fastai.core import defaults
from .callbacks import EarlyStoppingCallbackWithTimeLimit, SaveModelCallback
start_time = time.time()
params = self.params.copy()
self.y_scaler = params.get('y_scaler', None)
if self.y_scaler is not None:
self.y_scaler = copy.deepcopy(self.y_scaler)
if num_cpus is None:
num_cpus = defaults.cpus
# additional workers are helping only when fork is enabled; in other mp modes, communication overhead reduces performance
num_workers = int(num_cpus / 2)
if not is_fork_enabled():
num_workers = 0
if num_gpus is not None:
if num_gpus == 0:
# TODO: Does not obviously impact inference speed
defaults.device = torch.device('cpu')
else:
defaults.device = torch.device('cuda')
logger.log(15, f'Fitting Neural Network with parameters {params}...')
data = self._preprocess_train(X_train, y_train, X_val, y_val, num_workers=num_workers)
nn_metric, objective_func_name = self.__get_objective_func_name()
objective_func_name_to_monitor = self.__get_objective_func_to_monitor(objective_func_name)
objective_optim_mode = 'min' if objective_func_name in [
'root_mean_squared_error', 'mean_squared_error', 'mean_absolute_error', 'r2' # Regression objectives
] else 'auto'
# TODO: calculate max emb concat layer size and use 1st layer as that value and 2nd in between number of classes and the value
if params.get('layers', None) is not None:
layers = params['layers']
elif self.problem_type in [REGRESSION, BINARY]:
layers = [200, 100]
else:
base_size = max(len(data.classes) * 2, 100)
layers = [base_size * 2, base_size]
loss_func = None
if self.problem_type in [BINARY, MULTICLASS] and params.get('smoothing', 0.0) > 0.0:
loss_func = LabelSmoothingCrossEntropy(params['smoothing'])
ps = params['ps']
        if not isinstance(ps, list):
ps = [ps]
if time_limit:
time_elapsed = time.time() - start_time
time_left = time_limit - time_elapsed
else:
time_left = None
best_epoch_stop = params.get("best_epoch", None) # Use best epoch for refit_full.
early_stopping_fn = partial(EarlyStoppingCallbackWithTimeLimit, monitor=objective_func_name_to_monitor, mode=objective_optim_mode,
min_delta=params['early.stopping.min_delta'], patience=params['early.stopping.patience'],
time_limit=time_left, best_epoch_stop=best_epoch_stop)
self.model = tabular_learner(
data, layers=layers, ps=ps, emb_drop=params['emb_drop'], metrics=nn_metric,
loss_func=loss_func, callback_fns=[early_stopping_fn]
)
logger.log(15, self.model.model)
with make_temp_directory() as temp_dir:
save_callback = SaveModelCallback(self.model, monitor=objective_func_name_to_monitor, mode=objective_optim_mode, name=self.name,
best_epoch_stop=best_epoch_stop)
with progress_disabled_ctx(self.model) as model:
original_path = model.path
model.path = Path(temp_dir)
model.fit_one_cycle(params['epochs'], params['lr'], callbacks=save_callback)
# Load the best one and export it
model.load(self.name)
if objective_func_name == 'log_loss':
eval_result = model.validate()[0]
else:
eval_result = model.validate()[1].numpy().reshape(-1)[0]
logger.log(15, f'Model validation metrics: {eval_result}')
model.path = original_path
self.params_trained['best_epoch'] = save_callback.best_epoch
def _generate_datasets(self, X_train, y_train, X_val, y_val):
df_train = pd.concat([X_train, X_val], ignore_index=True)
df_train[LABEL] = pd.concat([y_train, y_val], ignore_index=True)
train_idx = np.arange(len(X_train))
if X_val is None:
val_idx = train_idx # use validation set for refit_full case - it's not going to be used for early stopping
else:
val_idx = np.arange(len(X_val)) + len(X_train)
return df_train, train_idx, val_idx
def __get_objective_func_name(self):
from fastai.metrics import root_mean_squared_error, mean_squared_error, mean_absolute_error, accuracy, FBeta, AUROC, Precision, Recall, r2_score
metrics_map = {
# Regression
'root_mean_squared_error': root_mean_squared_error,
'mean_squared_error': mean_squared_error,
'mean_absolute_error': mean_absolute_error,
'r2': r2_score,
# Not supported: median_absolute_error
# Classification
'accuracy': accuracy,
'f1': FBeta(beta=1),
'f1_macro': FBeta(beta=1, average='macro'),
'f1_micro': FBeta(beta=1, average='micro'),
'f1_weighted': FBeta(beta=1, average='weighted'), # this one has some issues
'roc_auc': AUROC(),
'precision': Precision(),
'precision_macro': Precision(average='macro'),
'precision_micro': Precision(average='micro'),
'precision_weighted': Precision(average='weighted'),
'recall': Recall(),
'recall_macro': Recall(average='macro'),
'recall_micro': Recall(average='micro'),
'recall_weighted': Recall(average='weighted'),
'log_loss': None,
# Not supported: pac_score
}
# Unsupported metrics will be replaced by defaults for a given problem type
objective_func_name = self.stopping_metric.name
if objective_func_name not in metrics_map.keys():
if self.problem_type == REGRESSION:
objective_func_name = 'mean_squared_error'
else:
objective_func_name = 'log_loss'
logger.warning(f'Metric {self.stopping_metric.name} is not supported by this model - using {objective_func_name} instead')
if objective_func_name in metrics_map.keys():
nn_metric = metrics_map[objective_func_name]
else:
nn_metric = None
return nn_metric, objective_func_name
def __get_objective_func_to_monitor(self, objective_func_name):
monitor_obj_func = {
'roc_auc': 'auroc',
'f1': 'f_beta',
'f1_macro': 'f_beta',
'f1_micro': 'f_beta',
'f1_weighted': 'f_beta',
'precision_macro': 'precision',
'precision_micro': 'precision',
'precision_weighted': 'precision',
'recall_macro': 'recall',
'recall_micro': 'recall',
'recall_weighted': 'recall',
'log_loss': 'valid_loss',
}
objective_func_name_to_monitor = objective_func_name
if objective_func_name in monitor_obj_func:
objective_func_name_to_monitor = monitor_obj_func[objective_func_name]
return objective_func_name_to_monitor
def _predict_proba(self, X, **kwargs):
from fastai.basic_data import DatasetType
from fastai.tabular import TabularList
from fastai.utils.mod_display import progress_disabled_ctx
X = self.preprocess(X, **kwargs)
single_row = len(X) == 1
# fastai has issues predicting on a single row, duplicating the row as a workaround
if single_row:
X = pd.concat([X, X]).reset_index(drop=True)
# Copy cat_columns and cont_columns because TabularList is mutating the list
self.model.data.add_test(TabularList.from_df(
X, cat_names=self.cat_columns.copy(), cont_names=self.cont_columns.copy(), procs=self.procs))
with progress_disabled_ctx(self.model) as model:
preds, _ = model.get_preds(ds_type=DatasetType.Test)
if single_row:
preds = preds[:1, :]
if self.problem_type == REGRESSION:
if self.y_scaler is not None:
return self.y_scaler.inverse_transform(preds.numpy()).reshape(-1)
else:
return preds.numpy().reshape(-1)
if self.problem_type == BINARY:
return preds[:, 1].numpy()
else:
return preds.numpy()
def save(self, path: str = None, verbose=True) -> str:
__model = self.model
self.model = None
path_final = super().save(path=path, verbose=verbose)
self.model = __model
# Export model
save_pkl.save_with_fn(f'{path_final}{self.model_internals_file_name}', self.model, lambda m, buffer: m.export(buffer), verbose=verbose)
return path_final
@classmethod
def load(cls, path: str, reset_paths=True, verbose=True):
from fastai.basic_train import load_learner
model = super().load(path, reset_paths=reset_paths, verbose=verbose)
model.model = load_pkl.load_with_fn(f'{model.path}{model.model_internals_file_name}', lambda p: load_learner(model.path, p), verbose=verbose)
return model
def _set_default_params(self):
""" Specifies hyperparameter values to use by default """
default_params = get_param_baseline(self.problem_type)
for param, val in default_params.items():
self._set_default_param_value(param, val)
def _get_default_searchspace(self):
return get_default_searchspace(self.problem_type, num_classes=None)
# TODO: add warning regarding dataloader leak: https://github.com/pytorch/pytorch/issues/31867
# TODO: Add HPO
def _hyperparameter_tune(self, **kwargs):
return skip_hpo(self, **kwargs)
def _get_default_auxiliary_params(self) -> dict:
default_auxiliary_params = super()._get_default_auxiliary_params()
extra_auxiliary_params = dict(
ignored_type_group_raw=[R_OBJECT],
)
default_auxiliary_params.update(extra_auxiliary_params)
return default_auxiliary_params
|
the-stack_0_24328 | """
Link extraction for auto scraping
"""
import re
import os
import posixpath
from six.moves.urllib.parse import urlparse
from scrapy.linkextractors import IGNORED_EXTENSIONS
_ONCLICK_LINK_RE = re.compile("(?P<sep>('|\"))(?P<url>.+?)(?P=sep)")
_ignored_exts = frozenset(['.' + e for e in IGNORED_EXTENSIONS])
# allowed protocols
ALLOWED_SCHEMES = frozenset(['http', 'https', None, ''])
class BaseLinkExtractor(object):
def __init__(self, max_url_len=2083, ignore_extensions=_ignored_exts,
allowed_schemes=ALLOWED_SCHEMES):
"""Creates a new LinkExtractor
The defaults are a good guess for the first time crawl. After that, we
expect that they can be learned.
"""
self.max_url_len = max_url_len
self.ignore_extensions = ignore_extensions
self.allowed_schemes = allowed_schemes
def _extract_links(self, source):
raise NotImplementedError
def links_to_follow(self, source):
"""Returns normalized extracted links"""
for link in self._extract_links(source):
link = self.normalize_link(link)
if link is not None:
yield link
def normalize_link(self, link):
"""Normalize a link
>>> from scrapy.link import Link
>>> le = BaseLinkExtractor()
>>> l = Link('http://scrapinghub.com/some/path/../dir')
>>> le.normalize_link(l).url
'http://scrapinghub.com/some/dir'
>>> l = Link('http://scrapinghub.com/some//./path/')
>>> le.normalize_link(l).url
'http://scrapinghub.com/some/path/'
        Files with disallowed extensions or protocols are not returned
>>> le.normalize_link(Link('myimage.jpg')) is None
True
>>> le.normalize_link(Link('file:///tmp/mydoc.htm')) is None
True
>>> le.normalize_link(Link('http://scrapinghub.com')).url
'http://scrapinghub.com/'
Fragments are removed
>>> le.normalize_link(Link('http://example.com/#something')).url
'http://example.com/'
>>> le.normalize_link(Link('http://example.com/#something')).fragment
'something'
>>> le.normalize_link(Link('http://scrapinghub.com#some fragment')).url
'http://scrapinghub.com/'
Ajax crawling
>>> le.normalize_link(Link('http://example.com/#!something')).url
'http://example.com/?_escaped_fragment_=something'
>>> le.normalize_link(Link('http://example.com/page.html?arg=1#!something')).url
'http://example.com/page.html?arg=1&_escaped_fragment_=something'
"""
if len(link.url) > self.max_url_len:
return
parsed = urlparse(link.url)
        extension = os.path.splitext(parsed.path)[1].lower()
        if parsed.scheme not in self.allowed_schemes or \
                extension in self.ignore_extensions:
return
# path normalization
path = parsed.path or '/'
path = path if path[0] != '.' else '/' + path
path = posixpath.normpath(path)
if parsed.path.endswith('/') and not path.endswith('/'):
path += '/'
if parsed.fragment.startswith('!'):
query = '_escaped_fragment_=%s' % parsed.fragment[1:]
query = parsed.query + '&' + query if parsed.query else query
parsed = parsed._replace(query=query)
link.fragment = parsed.fragment
if path != parsed.path or parsed.fragment:
link.url = parsed._replace(path=path, fragment='').geturl()
return link
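# Illustrative sketch (added, not part of the original module): a minimal concrete
# subclass that feeds links_to_follow() from a precomputed list. The class name and
# the sample URLs are hypothetical.
#
#   from scrapy.link import Link
#
#   class ListLinkExtractor(BaseLinkExtractor):
#       def __init__(self, urls, **kwargs):
#           super(ListLinkExtractor, self).__init__(**kwargs)
#           self.urls = urls
#
#       def _extract_links(self, source):
#           # `source` is ignored; links come from the precomputed list
#           return [Link(url) for url in self.urls]
#
#   le = ListLinkExtractor(['http://example.com/a/../b', 'file:///tmp/doc.pdf'])
#   [l.url for l in le.links_to_follow(None)]  # -> ['http://example.com/b']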
|
the-stack_0_24329 | from django.shortcuts import render, redirect
from django.http import HttpResponse
from django.contrib.auth.models import User
from django.contrib.auth.decorators import login_required
from .forms import PostComment,PostImageForm,PostProfile
from .models import Images, Profiles, Comments
# Create your views here.
@login_required(login_url='/accounts/register/')
def index(request):
images = Images.get_all_images()
return render(request,'index.html',{'images':images})
@login_required(login_url='/accounts/login/')
def profile(request,username):
user = User.objects.get(username=username)
profile = Profiles.filter_profile_by_id(user.id)
title = f'{user.username}\'s Profile '
images = Images.get_profile_images(user.id)
return render(request, 'profile/profile.html',{'title':title,'users':user,'profile':profile,'images':images})
@login_required(login_url='/accounts/login/')
def post_image(request):
current_user = request.user
if request.method == 'POST':
form = PostImageForm(request.POST,request.FILES)
if form.is_valid():
image = form.save(commit=False)
image.profile = current_user
image.save()
return redirect('profile',username=request.user)
else:
form = PostImageForm()
return render(request,'profile/post_image.html',{'form':form})
@login_required(login_url='/accounts/login/')
def edit_profile(request):
current_user = request.user
if request.method == 'POST':
form = PostProfile(request.POST, request.FILES)
if form.is_valid():
profile = form.save(commit=False)
profile.user = current_user
profile.save()
return redirect('editProfile')
else:
form = PostProfile()
return render(request,'profile/edit_profile.html',{'form':form})
@login_required(login_url='/accounts/login/')
def view_single_image(request,image_id):
image = Images.get_image_by_id(image_id)
comments = Comments.get_comment_by_image(image_id)
current_user = request.user
if request.method == 'POST':
form = PostComment(request.POST)
if form.is_valid():
comment = form.save(commit=False)
comment.image = image
comment.user = request.user
comment.save()
return redirect('singleImage',image_id=image_id)
else:
form = PostComment()
return render(request, 'image.html',{'image':image,'form':form,'comments':comments})
def search(request):
if 'search' in request.GET and request.GET['search']:
search_term = request.GET.get('search')
profiles = Profiles.get_profile_by_name(search_term)
message = f'{search_term}'
return render(request,'search.html',{'message':message,'profiles':profiles})
else:
message = 'Search Username'
return render(request,'search.html',{'message':message})
|
the-stack_0_24330 | from __future__ import print_function
import datetime
import io
import os
import sys
import time
import config
try:
input = raw_input
except NameError:
pass
import azure.storage.blob as azureblob
import azure.batch.batch_service_client as batch
import azure.batch.batch_auth as batchauth
import azure.batch.models as batchmodels
sys.path.append('.')
sys.path.append('..')
# Update the Batch and Storage account credential strings in config.py with values
# unique to your accounts. These are used when constructing connection strings
# for the Batch and Storage client objects.
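# Illustrative sketch (added, not part of the original sample): the names this script
# expects to find in config.py; every value below is a placeholder/example.
#
#   _BATCH_ACCOUNT_NAME = '<your-batch-account-name>'
#   _BATCH_ACCOUNT_KEY = '<your-batch-account-key>'
#   _BATCH_ACCOUNT_URL = 'https://<your-batch-account>.<region>.batch.azure.com'
#   _STORAGE_ACCOUNT_NAME = '<your-storage-account-name>'
#   _STORAGE_ACCOUNT_KEY = '<your-storage-account-key>'
#   _POOL_ID = 'LinuxFfmpegPool'
#   _POOL_VM_SIZE = 'STANDARD_A1_v2'
#   _DEDICATED_POOL_NODE_COUNT = 0
#   _LOW_PRIORITY_POOL_NODE_COUNT = 5
#   _JOB_ID = 'LinuxFfmpegJob'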
def query_yes_no(question, default="yes"):
"""
Prompts the user for yes/no input, displaying the specified question text.
:param str question: The text of the prompt for input.
:param str default: The default if the user hits <ENTER>. Acceptable values
are 'yes', 'no', and None.
:rtype: str
:return: 'yes' or 'no'
"""
valid = {'y': 'yes', 'n': 'no'}
if default is None:
prompt = ' [y/n] '
elif default == 'yes':
prompt = ' [Y/n] '
elif default == 'no':
prompt = ' [y/N] '
else:
raise ValueError("Invalid default answer: '{}'".format(default))
    while True:
choice = input(question + prompt).lower()
if default and not choice:
return default
try:
return valid[choice[0]]
except (KeyError, IndexError):
print("Please respond with 'yes' or 'no' (or 'y' or 'n').\n")
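# Added usage note: with the default of 'yes', an empty response (just <ENTER>) returns
# 'yes'; e.g. query_yes_no('Delete job?') == 'yes' when the user simply presses ENTER.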
def print_batch_exception(batch_exception):
"""
Prints the contents of the specified Batch exception.
:param batch_exception:
"""
print('-------------------------------------------')
print('Exception encountered:')
if batch_exception.error and \
batch_exception.error.message and \
batch_exception.error.message.value:
print(batch_exception.error.message.value)
if batch_exception.error.values:
print()
for mesg in batch_exception.error.values:
print('{}:\t{}'.format(mesg.key, mesg.value))
print('-------------------------------------------')
def upload_file_to_container(block_blob_client, container_name, file_path):
"""
Uploads a local file to an Azure Blob storage container.
:param block_blob_client: A blob service client.
:type block_blob_client: `azure.storage.blob.BlockBlobService`
:param str container_name: The name of the Azure Blob storage container.
:param str file_path: The local path to the file.
:rtype: `azure.batch.models.ResourceFile`
:return: A ResourceFile initialized with a SAS URL appropriate for Batch
tasks.
"""
blob_name = os.path.basename(file_path)
print('Uploading file {} to container [{}]...'.format(file_path,
container_name))
block_blob_client.create_blob_from_path(container_name,
blob_name,
file_path)
# Obtain the SAS token for the container.
sas_token = get_container_sas_token(block_blob_client,
container_name, azureblob.BlobPermissions.READ)
sas_url = block_blob_client.make_blob_url(container_name,
blob_name,
sas_token=sas_token)
return batchmodels.ResourceFile(file_path=blob_name,
http_url=sas_url)
def get_container_sas_token(block_blob_client,
container_name, blob_permissions):
"""
Obtains a shared access signature granting the specified permissions to the
container.
:param block_blob_client: A blob service client.
:type block_blob_client: `azure.storage.blob.BlockBlobService`
:param str container_name: The name of the Azure Blob storage container.
:param BlobPermissions blob_permissions:
:rtype: str
:return: A SAS token granting the specified permissions to the container.
"""
# Obtain the SAS token for the container, setting the expiry time and
# permissions. In this case, no start time is specified, so the shared
# access signature becomes valid immediately. Expiration is in 2 hours.
container_sas_token = \
block_blob_client.generate_container_shared_access_signature(
container_name,
permission=blob_permissions,
expiry=datetime.datetime.utcnow() + datetime.timedelta(hours=2))
return container_sas_token
def get_container_sas_url(block_blob_client,
container_name, blob_permissions):
"""
Obtains a shared access signature URL that provides write access to the
    output container to which the tasks will upload their output.
:param block_blob_client: A blob service client.
:type block_blob_client: `azure.storage.blob.BlockBlobService`
:param str container_name: The name of the Azure Blob storage container.
:param BlobPermissions blob_permissions:
:rtype: str
:return: A SAS URL granting the specified permissions to the container.
"""
# Obtain the SAS token for the container.
sas_token = get_container_sas_token(block_blob_client,
container_name, azureblob.BlobPermissions.WRITE)
# Construct SAS URL for the container
container_sas_url = "https://{}.blob.core.windows.net/{}?{}".format(config._STORAGE_ACCOUNT_NAME, container_name, sas_token)
return container_sas_url
def create_pool(batch_service_client, pool_id):
"""
Creates a pool of compute nodes with the specified OS settings.
:param batch_service_client: A Batch service client.
:type batch_service_client: `azure.batch.BatchServiceClient`
:param str pool_id: An ID for the new pool.
:param str publisher: Marketplace image publisher
:param str offer: Marketplace image offer
    :param str sku: Marketplace image SKU
"""
print('Creating pool [{}]...'.format(pool_id))
# Create a new pool of Linux compute nodes using an Azure Virtual Machines
# Marketplace image. For more information about creating pools of Linux
# nodes, see:
# https://azure.microsoft.com/documentation/articles/batch-linux-nodes/
# The start task installs ffmpeg on each node from an available repository, using
# an administrator user identity.
new_pool = batch.models.PoolAddParameter(
id=pool_id,
virtual_machine_configuration=batchmodels.VirtualMachineConfiguration(
image_reference=batchmodels.ImageReference(
publisher="Canonical",
offer="UbuntuServer",
sku="18.04-LTS",
version="latest"
),
node_agent_sku_id="batch.node.ubuntu 18.04"),
vm_size=config._POOL_VM_SIZE,
target_dedicated_nodes=config._DEDICATED_POOL_NODE_COUNT,
target_low_priority_nodes=config._LOW_PRIORITY_POOL_NODE_COUNT,
start_task=batchmodels.StartTask(
command_line="/bin/bash -c \"apt-get update && apt-get install -y ffmpeg\"",
wait_for_success=True,
user_identity=batchmodels.UserIdentity(
auto_user=batchmodels.AutoUserSpecification(
scope=batchmodels.AutoUserScope.pool,
elevation_level=batchmodels.ElevationLevel.admin)),
)
)
batch_service_client.pool.add(new_pool)
def create_job(batch_service_client, job_id, pool_id):
"""
Creates a job with the specified ID, associated with the specified pool.
:param batch_service_client: A Batch service client.
:type batch_service_client: `azure.batch.BatchServiceClient`
:param str job_id: The ID for the job.
:param str pool_id: The ID for the pool.
"""
print('Creating job [{}]...'.format(job_id))
job = batch.models.JobAddParameter(
id=job_id,
pool_info=batch.models.PoolInformation(pool_id=pool_id))
batch_service_client.job.add(job)
def add_tasks(batch_service_client, job_id, input_files, output_container_sas_url):
"""
Adds a task for each input file in the collection to the specified job.
:param batch_service_client: A Batch service client.
:type batch_service_client: `azure.batch.BatchServiceClient`
:param str job_id: The ID of the job to which to add the tasks.
:param list input_files: A collection of input files. One task will be
created for each input file.
    :param output_container_sas_url: A SAS URL granting write access to
the specified Azure Blob storage container.
"""
print('Adding {} tasks to job [{}]...'.format(len(input_files), job_id))
tasks = list()
for idx, input_file in enumerate(input_files):
input_file_path=input_file.file_path
output_file_path="".join((input_file_path).split('.')[:-1]) + '.mp3'
command = "/bin/bash -c \"ffmpeg -i {} {} \"".format(input_file_path, output_file_path)
tasks.append(batch.models.TaskAddParameter(
id='Task{}'.format(idx),
command_line=command,
resource_files=[input_file],
output_files=[batchmodels.OutputFile(
file_pattern=output_file_path,
destination=batchmodels.OutputFileDestination(
container=batchmodels.OutputFileBlobContainerDestination(
container_url=output_container_sas_url)),
upload_options=batchmodels.OutputFileUploadOptions(
upload_condition=batchmodels.OutputFileUploadCondition.task_success))]
)
)
batch_service_client.task.add_collection(job_id, tasks)
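# Added note: for a (hypothetical) input resource file named 'video1.mp4', the task
# command constructed above is
#   /bin/bash -c "ffmpeg -i video1.mp4 video1.mp3 "
# and the resulting .mp3 is uploaded to the output container on task success.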
def wait_for_tasks_to_complete(batch_service_client, job_id, timeout):
"""
Returns when all tasks in the specified job reach the Completed state.
:param batch_service_client: A Batch service client.
:type batch_service_client: `azure.batch.BatchServiceClient`
:param str job_id: The id of the job whose tasks should be monitored.
:param timedelta timeout: The duration to wait for task completion. If all
tasks in the specified job do not reach Completed state within this time
period, an exception will be raised.
"""
timeout_expiration = datetime.datetime.now() + timeout
print("Monitoring all tasks for 'Completed' state, timeout in {}..."
.format(timeout), end='')
while datetime.datetime.now() < timeout_expiration:
print('.', end='')
sys.stdout.flush()
tasks = batch_service_client.task.list(job_id)
incomplete_tasks = [task for task in tasks if
task.state != batchmodels.TaskState.completed]
if not incomplete_tasks:
print()
return True
else:
time.sleep(1)
print()
raise RuntimeError("ERROR: Tasks did not reach 'Completed' state within "
"timeout period of " + str(timeout))
if __name__ == '__main__':
start_time = datetime.datetime.now().replace(microsecond=0)
print('Sample start: {}'.format(start_time))
print()
# Create the blob client, for use in obtaining references to
# blob storage containers and uploading files to containers.
blob_client = azureblob.BlockBlobService(
account_name=config._STORAGE_ACCOUNT_NAME,
account_key=config._STORAGE_ACCOUNT_KEY)
# Use the blob client to create the containers in Azure Storage if they
# don't yet exist.
input_container_name = 'input'
output_container_name = 'output'
blob_client.create_container(input_container_name, fail_on_exist=False)
blob_client.create_container(output_container_name, fail_on_exist=False)
print('Container [{}] created.'.format(input_container_name))
print('Container [{}] created.'.format(output_container_name))
# Create a list of all MP4 files in the InputFiles directory.
input_file_paths = []
for folder, subs, files in os.walk(os.path.join(sys.path[0],'InputFiles')):
for filename in files:
if filename.endswith(".mp4"):
input_file_paths.append(os.path.abspath(os.path.join(folder, filename)))
# Upload the input files. This is the collection of files that are to be processed by the tasks.
input_files = [
upload_file_to_container(blob_client, input_container_name, file_path)
for file_path in input_file_paths]
# Obtain a shared access signature URL that provides write access to the output
# container to which the tasks will upload their output.
output_container_sas_url = get_container_sas_url(
blob_client,
output_container_name,
azureblob.BlobPermissions.WRITE)
# Create a Batch service client. We'll now be interacting with the Batch
# service in addition to Storage
credentials = batchauth.SharedKeyCredentials(config._BATCH_ACCOUNT_NAME,
config._BATCH_ACCOUNT_KEY)
batch_client = batch.BatchServiceClient(
credentials,
batch_url=config._BATCH_ACCOUNT_URL)
try:
# Create the pool that will contain the compute nodes that will execute the
# tasks.
create_pool(batch_client, config._POOL_ID)
# Create the job that will run the tasks.
create_job(batch_client, config._JOB_ID, config._POOL_ID)
# Add the tasks to the job. Pass the input files and a SAS URL
# to the storage container for output files.
add_tasks(batch_client, config._JOB_ID, input_files, output_container_sas_url)
# Pause execution until tasks reach Completed state.
wait_for_tasks_to_complete(batch_client,
config._JOB_ID,
datetime.timedelta(minutes=30))
print(" Success! All tasks reached the 'Completed' state within the "
"specified timeout period.")
except batchmodels.BatchErrorException as err:
print_batch_exception(err)
raise
# Delete input container in storage
print('Deleting container [{}]...'.format(input_container_name))
blob_client.delete_container(input_container_name)
# Print out some timing info
end_time = datetime.datetime.now().replace(microsecond=0)
print()
print('Sample end: {}'.format(end_time))
print('Elapsed time: {}'.format(end_time - start_time))
print()
# Clean up Batch resources (if the user so chooses).
if query_yes_no('Delete job?') == 'yes':
batch_client.job.delete(config._JOB_ID)
if query_yes_no('Delete pool?') == 'yes':
batch_client.pool.delete(config._POOL_ID)
print()
input('Press ENTER to exit...')
|
the-stack_0_24331 | import os
def buildTests():
os.chdir("tests")
with open("README.md", 'w', encoding="utf-8") as readme:
print("<h1 align=\"center\">Tests</h1>\n", file=readme)
test_folders = [foldername for foldername in os.listdir() if os.path.isdir(foldername) and foldername != "small"]
headers = ["|", "BLAST", "My plot", "History GIF", "History text output"]
table = []
for test_folder in test_folders:
# os.chdir(test_folder)
blast_link = "https://raw.githubusercontent.com/npanuhin/BIOCAD_BWA/master/tests/{}/BLAST.png".format(test_folder)
plot_link = "https://raw.githubusercontent.com/npanuhin/BIOCAD_BWA/master/tests/{}/sam_analyze.png".format(test_folder)
histor_gif_link = "https://raw.githubusercontent.com/npanuhin/BIOCAD_BWA/master/tests/{}/history.gif".format(test_folder)
history_text_link = "https://github.com/npanuhin/BIOCAD_BWA/blob/master/tests/{}/history.txt".format(test_folder)
table.append([
test_folder,
"[BLAST]({} \"View image\")".format(blast_link.replace(' ', "%20")),
"[My plot]({} \"View image\")".format(plot_link.replace(' ', "%20")),
"[History GIF]({} \"View GIF\")".format(histor_gif_link.replace(' ', "%20")),
"[History text output]({} \"View JSON file\")".format(history_text_link.replace(' ', "%20"))
])
# os.chdir("../")
print("|".join(headers), file=readme)
print("|".join([":-:"] * len(headers)), file=readme)
for line in table:
print("|".join(line), file=readme)
os.chdir("../")
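# Added note: for a (hypothetical) test folder named 'test1', one emitted table row
# looks like
#   test1|[BLAST](https://raw.githubusercontent.com/npanuhin/BIOCAD_BWA/master/tests/test1/BLAST.png "View image")|...
# with the remaining cells linking to the plot, the history GIF and the history text output.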
def main(root_path):
start_path = os.getcwd()
os.chdir(root_path)
buildTests()
os.chdir(start_path)
if __name__ == "__main__":
main("../")
|
the-stack_0_24333 | # One thousand thanks to Dave Humphrey's documentation
# http://en.uesp.net/morrow/tech/mw_esm.txt
import sys
import os
import traceback
import argparse
from parse_config import parse_config
from normalize_path import normalize_path
from record_classes import ESData
from load_es_file import read_elder_scrolls_file
from file_load_progress import update_load_progress
from explorer_commands import *
from command_help_text import *
# https://docs.python.org/2/library/readline.html
# https://stackoverflow.com/a/15416166/4099022
try:
import readline
except ImportError:
pass
config = {}
arg_parser = argparse.ArgumentParser()
arg_parser.add_argument("--no-colors", action="store_true")
arg_parser.add_argument("--script-colors", action="store_true")
arg_parser.add_argument("--substring-colors", action="store_true")
arg_parser.add_argument("--function-colors", action="store_true")
arg_parser.add_argument("--npcat-colors", action="store_true")
arg_parser.add_argument("--load-paths", type=str, nargs="+")
arg_parser.add_argument("--config-path", type=str)
args = arg_parser.parse_args()
if args.script_colors:
config["script_syntax_highlighting"] = True
if args.substring_colors:
config["substring_highlighting"] = True
if args.function_colors:
config["info_function_highlighting"] = True
if args.npcat_colors:
    config["npcat_list_highlighting"] = True
if args.no_colors:
config["substring_highlighting"] = False
config["script_syntax_highlighting"] = False
config["info_function_highlighting"] = False
config["npcat_list_highlighting"] = False
config_path = "config.ini"
if args.config_path:
config_path = args.config_path # os.path.join(os.getcwd(), args.config_path)
if os.path.exists(config_path):
parsed_config_object = None
try:
parsed_config_object = parse_config(config_path)
except Exception as e:
print("Failed to parse config file: " + str(e))
if parsed_config_object:
for key in parsed_config_object:
if key not in config:
config[key] = parsed_config_object[key]
elif args.config_path:
print("Couldn't find config file at \"%s\"." % config_path)
es = ESData([])
load_path_list = []
if isinstance(config.get("load_paths"), list):
load_path_list.extend(config.get("load_paths"))
if args.load_paths:
load_path_list.extend(args.load_paths)
if len(load_path_list):
loaded_count = 0
    for load_path in load_path_list:
path = os.path.normpath(normalize_path(load_path))
if not os.path.exists(path):
print("File \"%s\" not found." % path)
continue
try:
with open(path, "rb") as binary_file:
es_file = read_elder_scrolls_file(path, binary_file,
after_read_record=update_load_progress(path)
)
es.add_file(es_file)
loaded_count += 1
print("")
except:
print("")
print("FILE LOAD ERROR. Please report bugs online at")
print(" https://github.com/pineapplemachine/mwde/issues")
print(traceback.format_exc())
    if len(load_path_list):
print("Finished loading %d data files." % loaded_count)
# "E:/SteamLibrary/steamapps/common/Morrowind/Data Files/Morrowind.esm",
# "E:/SteamLibrary/steamapps/common/Morrowind/Data Files/Tribunal.esm",
# "E:/SteamLibrary/steamapps/common/Morrowind/Data Files/Bloodmoon.esm",
# "E:/SteamLibrary/steamapps/common/Morrowind/Data Files/MoreQuests_WolverineMages.ESP",
def parse_command(input_text):
command = ""
options = {}
text = ""
first_space = False
last_was_space = False
in_text = False
in_option = False
for char in input_text.strip():
if not in_text and (char == " " or char == "\t"):
first_space = True
last_was_space = True
elif last_was_space and not in_text and char == "-":
in_option = True
last_was_space = False
elif in_option:
options[char] = True
in_option = False
elif not first_space:
command += char
else:
text += char
in_text = True
return (command, options, text)
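# Added example (the flag letter is arbitrary, purely for illustration):
#   parse_command('sub -c blight')  ->  ('sub', {'c': True}, 'blight')
# The first word is the command, single-letter "-x" switches become flag entries,
# and the remaining text after the first space is passed through as the query.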
try:
get_input = raw_input
except NameError:
get_input = input
if config.get("substring_highlighting") or config.get("script_syntax_highlighting"):
sys.stdout.write("\033[0m")
print("\nMorrowind Dialog Explorer v1.2 is ready.")
while True:
try:
command, flags, text = parse_command(get_input("query > "))
if command == "help":
print("\n" + {
"sub": sub_help_text,
"re": re_help_text,
"npc": npc_help_text,
"race": race_help_text,
"faction": faction_help_text,
"cell": cell_help_text,
"topic": topic_help_text,
"journal": journal_help_text,
"quest": quest_help_text,
"npcat": npcat_help_text,
"load": load_help_text,
"reload": reload_help_text,
"unload": unload_help_text,
"about": about_text,
"quit": quit_help_text,
"exit": quit_help_text,
}.get(text, help_text) + "\n")
elif command == "about":
print("\n" + about_text + "\n")
elif command == "quit" or command == "exit":
print("\nExiting Morrowind Dialog Explorer.\n")
break
elif command == "sub":
do_sub(es, config, text, flags)
elif command == "re":
do_re(es, config, text, flags)
elif command == "npc":
do_npc(es, config, text, flags)
elif command == "race":
do_race(es, config, text, flags)
elif command == "faction":
do_faction(es, config, text, flags)
elif command == "cell":
do_cell(es, config, text, flags)
elif command == "topic":
do_topic(es, config, text, flags)
elif command == "journal":
do_journal(es, config, text, flags)
elif command == "quest":
do_quest(es, config, text, flags)
elif command == "npcat":
do_npcat(es, config, text, flags)
elif command == "load":
do_load(es, config, text, flags)
elif command == "reload":
do_reload(es, config, text, flags)
elif command == "unload":
do_unload(es, config, text, flags)
elif len(command):
print("Unknown command. Type \"help\" for help.")
except EOFError:
print("") # Happens sometimes with funny control keys
except SystemError:
pass # Happens sometimes with funny control keys
except KeyboardInterrupt:
print("\n\nExiting Morrowind Dialog Explorer.\n")
break
except Exception as e:
print("\nCOMMAND ERROR. Please report bugs online at")
print(" https://github.com/pineapplemachine/mwde/issues")
print(traceback.format_exc())
|