ext | sha | content
---|---|---|
py | 1a33654f19204ceeb8cfb59ff9ad6cdec7391d7d | #!/usr/bin/env python3
import sys
# Read input and output files
infile = sys.argv[1]
outfile = sys.argv[2]
# Create list to store features in
features = []
# Iterate over lines in input Genbank file
for line in open(infile).readlines():
# Store current sequence name
if line.startswith("LOCUS"):
sequence = line.split()[1]
# Store current feature properties
if not line.startswith(" "*6) and line.startswith(" "*5):
# Remove leading and trailing whitespace and split
line = line.strip().split()
# Determine feature type and initialize feature in features list
features.append({"sequence":sequence, "feature":line[0]})
# Determine feature strand
if "complement" in line[1]:
features[-1]["strand"] = "-"
else:
features[-1]["strand"] = "+"
# Remove junk from range
line[1] = line[1].replace("join", "").replace("complement", "")
line[1] = line[1].replace("(", "").replace(")", "")
# Determine feature range
range_values = line[1].replace(",", "..").split("..")
from_to = [range_values[0], range_values[-1]]
# Fix for "join" ranges
if len(range_values) == 4:
if range_values[0] < range_values[3]:
from_to = [range_values[2], range_values[1]]
# Store initial feature attributes
features[-1].update({
"start":from_to[0].replace("<","").replace(">",""),
"end":from_to[1].replace("<","").replace(">",""),
"pseudo":False, "product":""
})
# Skip features with "order"
order = "order" in line[1]
# Determine attributes of interest
elif line.startswith(" "*21):
# Skip features with "order"
if order:
continue
# Remove leading and trailing whitespace
line = line.strip()
# Check for current attribute
if line.startswith("/"):
line = line.lstrip("/").split("=", maxsplit=1)
attribute = line[0]
# Store attribute value
if len(line) > 1:
features[-1][attribute] = line[1].strip('"')
else:
features[-1][attribute] = True
else:
# Continue adding to value from following rows
if not attribute == "translation":
features[-1][attribute] += " "
features[-1][attribute] += line.split('=', maxsplit=1)[0].strip('"')
# Count all old_locus_tag, locus_tag, and gene to find non-unique tags
tag_counts = {"old_locus_tag":{}, "locus_tag":{}, "gene":{}}
for i in range(len(features)):
# Only consider coding sequences
if not features[i]['feature'] == "CDS":
continue
# Count old_locus_tag
try:
tag_counts['old_locus_tag'][features[i]['old_locus_tag']] += 1
except KeyError:
try:
tag_counts['old_locus_tag'][features[i]['old_locus_tag']] = 1
except KeyError:
pass
# Count locus_tag
try:
tag_counts['locus_tag'][features[i]['locus_tag']] += 1
except KeyError:
try:
tag_counts['locus_tag'][features[i]['locus_tag']] = 1
except KeyError:
pass
# Count gene
try:
tag_counts['gene'][features[i]['gene']] += 1
except KeyError:
try:
tag_counts['gene'][features[i]['gene']] = 1
except KeyError:
pass
# Identify all non-unique old_locus_tag, locus_tag, and gene tags
non_uniq = {
'old_locus_tag':set(filter(
lambda x: tag_counts['old_locus_tag'][x] > 1,
tag_counts['old_locus_tag']
)),
'locus_tag':set(filter(
lambda x: tag_counts['locus_tag'][x] > 1,
tag_counts['locus_tag']
)),
'gene':set(filter(
lambda x: tag_counts['gene'][x] > 1,
tag_counts['gene']
))
}
# Rename all features that are non-unique
def rename_feature(feature, feature_type):
try:
if feature[feature_type] in non_uniq[feature_type]:
feature[feature_type] = "_".join([
feature[feature_type], feature['sequence'],
feature['start'], feature['end'], feature['strand']
])
except KeyError:
pass
return feature
# Write features to GFF
outfile = open(outfile, 'w')
for i in range(len(features)):
# Select feature
feature = features[i]
# Only consider coding sequences
if not feature['feature'] == "CDS":
continue
# Rename non-unique tags
for feature_type in ['old_locus_tag', 'locus_tag', 'gene']:
feature = rename_feature(feature, feature_type)
# Write column 1: Sequence
output = feature['sequence'] + "\t"
# Write column 2: Source
output += 'Custom' + "\t"
# Write column 3: Type
output += 'gene' + "\t"
# Write column 4: Start
output += feature['start'] + "\t"
# Write column 5: End
output += feature['end'] + "\t"
# Write column 6: Score
output += '.' + "\t"
# Write column 7: Strand
output += feature['strand'] + "\t"
# Write column 8: Frame
output += '0' + "\t"
# Write column 9: Attributes
try:
locus_tag = feature['old_locus_tag']
except KeyError:
try:
locus_tag = feature['locus_tag']
except KeyError:
locus_tag = feature['gene']
try:
ID = feature['locus_tag']
except KeyError:
ID = feature['gene']
locus_tag = "locus_tag=" + locus_tag
ID = "ID=" + ID
product = "product=" + feature['product'].replace(";", "_")
output += ";".join([product, locus_tag, ID]) + "\n"
junk = outfile.write(output)
outfile.close()
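# --- Usage sketch (not part of the original script; file names are hypothetical) ---
# The converter is driven purely by positional arguments, e.g.:
#   python genbank_to_gff.py annotations.gbk annotations.gff
# and writes one tab-separated GFF line per CDS, along the lines of:
#   contig_1  Custom  gene  1200  2150  .  +  0  product=hypothetical protein;locus_tag=ABC_0001;ID=ABC_0001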
|
py | 1a336565748028650959b90f21822cd47f4b92b8 | import math
import torch
from flambe.metric import MultiLabelCrossEntropy, MultiLabelNLLLoss
def test_cross_entropy_one_hot():
"""Test cross entropy loss when one hot"""
y_pred = torch.tensor([[0.2, 0.8], [0.9, 0.1]])
y_true = torch.tensor([[1, 0], [1, 0]])
loss = MultiLabelCrossEntropy()
assert abs(loss(y_pred, y_true).item() - 0.70429) < 1e-2
def test_nllloss_one_hot():
"""Test negative log likelihood loss when one hot"""
y_pred = torch.tensor([[0.2, 0.8], [0.9, 0.1]])
y_true = torch.tensor([[1, 0], [1, 0]])
loss = MultiLabelNLLLoss()
assert abs(loss(y_pred, y_true).item() + 0.55) < 1e-2
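# A worked check of the expected values above (assuming MultiLabelCrossEntropy applies
# log-softmax before the one-hot NLL, while MultiLabelNLLLoss treats y_pred as log-probs):
# softmax([0.2, 0.8]) ~ [0.354, 0.646] and softmax([0.9, 0.1]) ~ [0.690, 0.310], so the
# cross entropy is (-log(0.354) - log(0.690)) / 2 ~ (1.037 + 0.371) / 2 ~ 0.704.
# The NLL variant simply averages -0.2 and -0.9, giving -0.55, hence the "+ 0.55" in that test.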
|
py | 1a3366aab3e466142f51971f1e43fb9488ac12b9 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'robot.ui'
#
# Created: Sat Feb 21 20:25:38 2015
# by: PyQt4 UI code generator 4.10.4
#
# WARNING! All changes made in this file will be lost!
import rospy
import actionlib
from move_base_msgs.msg import *
import time
from PyQt4 import QtCore, QtGui
table_position = dict()
table_position[0] = (-0.465, 0.37, 0.010, 0, 0, 0.998, 0.069)
table_position[1] = (0.599, 1.03, 0.010, 0, 0, 1.00, -0.020)
table_position[2] = (4.415, 0.645, 0.010, 0, 0, -0.034, 0.999)
table_position[3] = (7.409, 0.812, 0.010, 0, 0, -0.119, 0.993)
table_position[4] = (1.757, 4.377, 0.010, 0, 0, -0.040, 0.999)
table_position[5] = (1.757, 4.377, 0.010, 0, 0, -0.040, 0.999)
table_position[6] = (1.757, 4.377, 0.010, 0, 0, -0.040, 0.999)
table_position[7] = (1.757, 4.377, 0.010, 0, 0, -0.040, 0.999)
table_position[8] = (1.757, 4.377, 0.010, 0, 0, -0.040, 0.999)
table_position[9] = (1.757, 4.377, 0.010, 0, 0, -0.040, 0.999)
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_Form(object):
def setupUi(self, Form):
Form.setObjectName(_fromUtf8("Form"))
Form.resize(376, 338)
self.spinBox = QtGui.QSpinBox(Form)
self.spinBox.setGeometry(QtCore.QRect(20, 160, 161, 121))
font = QtGui.QFont()
font.setPointSize(35)
font.setBold(True)
font.setWeight(75)
self.spinBox.setFont(font)
self.spinBox.setMaximum(9)
self.spinBox.setObjectName(_fromUtf8("spinBox"))
self.label = QtGui.QLabel(Form)
self.label.setGeometry(QtCore.QRect(20, 120, 111, 21))
font = QtGui.QFont()
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.label.setFont(font)
self.label.setObjectName(_fromUtf8("label"))
self.pushButton = QtGui.QPushButton(Form)
self.pushButton.setGeometry(QtCore.QRect(220, 190, 131, 41))
self.pushButton.setObjectName(_fromUtf8("pushButton"))
self.pushButton_2 = QtGui.QPushButton(Form)
self.pushButton_2.setGeometry(QtCore.QRect(220, 240, 131, 41))
self.pushButton_2.setObjectName(_fromUtf8("pushButton_2"))
self.pushButton_3 = QtGui.QPushButton(Form)
self.pushButton_3.setGeometry(QtCore.QRect(220, 140, 131, 41))
self.pushButton_3.setObjectName(_fromUtf8("pushButton_3"))
self.progressBar = QtGui.QProgressBar(Form)
self.progressBar.setGeometry(QtCore.QRect(20, 60, 118, 23))
self.progressBar.setProperty("value", 0)
self.progressBar.setObjectName(_fromUtf8("progressBar"))
self.label_2 = QtGui.QLabel(Form)
self.label_2.setGeometry(QtCore.QRect(20, 20, 111, 21))
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.label_2.setFont(font)
self.label_2.setObjectName(_fromUtf8("label_2"))
self.label_3 = QtGui.QLabel(Form)
self.label_3.setGeometry(QtCore.QRect(200, 20, 111, 21))
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.label_3.setFont(font)
self.label_3.setObjectName(_fromUtf8("label_3"))
self.label_4 = QtGui.QLabel(Form)
self.label_4.setGeometry(QtCore.QRect(190, 60, 131, 31))
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.label_4.setFont(font)
self.label_4.setText(_fromUtf8(""))
self.label_4.setObjectName(_fromUtf8("label_4"))
self.table_no = 0
self.current_table_position = 0
self.client = actionlib.SimpleActionClient('move_base',MoveBaseAction)
self.goal = MoveBaseGoal()
self.update_values()
self.retranslateUi(Form)
QtCore.QObject.connect(self.spinBox, QtCore.SIGNAL(_fromUtf8("valueChanged(int)")), self.set_table_number)
QtCore.QObject.connect(self.pushButton_3, QtCore.SIGNAL(_fromUtf8("clicked()")), self.Home)
QtCore.QObject.connect(self.pushButton, QtCore.SIGNAL(_fromUtf8("clicked()")), self.Go)
QtCore.QObject.connect(self.pushButton_2, QtCore.SIGNAL(_fromUtf8("clicked()")), self.Cancel)
QtCore.QMetaObject.connectSlotsByName(Form)
def set_table_number(self):
self.table_no = self.spinBox.value()
self.current_table_position = table_position[self.table_no]
print self.current_table_position
def Go(self):
print "Go"
print "Waiting for server"
# self.client.wait_for_server()
self.goal.target_pose.pose.position.x=float(self.current_table_position[0])
self.goal.target_pose.pose.position.y=float(self.current_table_position[1])
self.goal.target_pose.pose.position.z=float(self.current_table_position[2])
self.goal.target_pose.pose.orientation.x = float(self.current_table_position[3])
self.goal.target_pose.pose.orientation.y= float(self.current_table_position[4])
self.goal.target_pose.pose.orientation.z= float(self.current_table_position[5])
self.goal.target_pose.header.frame_id= 'map'
self.goal.target_pose.header.stamp = rospy.Time.now()
# print temp_table_pose[0]
# print temp_table_pose[1]
print "Go"
self.client.send_goal(self.goal)
# self.client.wait_for_result()
# rospy.loginfo(self.client.get_result())
def Cancel(self):
print "Cancel"
self.client.cancel_all_goals()
def Home(self):
print "Home"
self.current_table_position = table_position[0]
self.Go()
def add(self,text):
battery_value = rospy.get_param("battery_value")
robot_status = rospy.get_param("robot_status")
self.progressBar.setProperty("value", battery_value)
self.label_4.setText(_fromUtf8(robot_status))
def update_values(self):
self.thread = WorkThread()
QtCore.QObject.connect( self.thread, QtCore.SIGNAL("update(QString)"), self.add )
self.thread.start()
def retranslateUi(self, Form):
Form.setWindowTitle(_translate("Form", "Robot", None))
self.label.setText(_translate("Form", "Table No(1-9)", None))
self.pushButton.setText(_translate("Form", "Go", None))
self.pushButton_2.setText(_translate("Form", "Cancel", None))
self.pushButton_3.setText(_translate("Form", "Home", None))
self.label_2.setText(_translate("Form", "Battery Level", None))
self.label_3.setText(_translate("Form", "Robot Status", None))
class WorkThread(QtCore.QThread):
def __init__(self):
QtCore.QThread.__init__(self)
def __del__(self):
self.wait()
def run(self):
while True:
time.sleep(0.3) # artificial time delay
self.emit( QtCore.SIGNAL('update(QString)'), " " )
# print "Hello"
return
if __name__ == "__main__":
import sys
rospy.init_node('robot_gui')
rospy.set_param('battery_value',0)
rospy.set_param('robot_status'," ")
app = QtGui.QApplication(sys.argv)
Form = QtGui.QWidget()
ui = Ui_Form()
ui.setupUi(Form)
Form.show()
sys.exit(app.exec_())
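# --- Hedged note (not part of the generated UI file): the GUI only *reads* the
# battery_value and robot_status parameters; another node (or a shell) is expected
# to update them while this window is running, e.g.:
#   rosparam set battery_value 75
#   rosparam set robot_status "Moving to table 3"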
|
py | 1a336842e55b6c1e36990b3ee28f9c8a6905b6e0 | from django.conf import settings
from django.urls import include, path
from django.conf.urls.static import static
from django.contrib import admin
from django.views.generic import TemplateView
from django.views import defaults as default_views
urlpatterns = [
path("", TemplateView.as_view(template_name="pages/home.html"), name="home"),
path(
"about/", TemplateView.as_view(template_name="pages/about.html"), name="about"
),
# Django Admin, use {% url 'admin:index' %}
path(settings.ADMIN_URL, admin.site.urls),
# User management
path("users/", include("project.users.urls", namespace="users")),
path("accounts/", include("allauth.urls")),
# Your stuff: custom urls includes go here
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
if settings.DEBUG:
# This allows the error pages to be debugged during development, just visit
# these url in browser to see how these error pages look like.
urlpatterns += [
path(
"400/",
default_views.bad_request,
kwargs={"exception": Exception("Bad Request!")},
),
path(
"403/",
default_views.permission_denied,
kwargs={"exception": Exception("Permission Denied")},
),
path(
"404/",
default_views.page_not_found,
kwargs={"exception": Exception("Page not Found")},
),
path("500/", default_views.server_error),
]
if "debug_toolbar" in settings.INSTALLED_APPS:
import debug_toolbar
urlpatterns = [path("__debug__/", include(debug_toolbar.urls))] + urlpatterns
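# --- Hedged sketch (assumptions, not taken from this project): the settings values this
# urlconf relies on, written in the cookiecutter-django style the file suggests.
# ADMIN_URL = "admin/"
# MEDIA_URL = "/media/"
# MEDIA_ROOT = str(BASE_DIR / "media")
# INSTALLED_APPS += ["debug_toolbar"]  # enables the __debug__/ routes when DEBUG is True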
|
py | 1a3369603759ed3cf10f156fec751d48d441064d | ###############################################################################
# #
# This file is part of IfcOpenShell. #
# #
# IfcOpenShell is free software: you can redistribute it and/or modify #
# it under the terms of the Lesser GNU General Public License as published by #
# the Free Software Foundation, either version 3.0 of the License, or #
# (at your option) any later version. #
# #
# IfcOpenShell is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# Lesser GNU General Public License for more details. #
# #
# You should have received a copy of the Lesser GNU General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
# #
###############################################################################
from . import occ_utils as utils
from .main import *
|
py | 1a3369674bdc61d3c216b2c88d9ef9505783b278 | #!/home/lichess4545/web/www.lichess4545.com/env/bin/python
"""
WSGI config for heltour project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/howto/deployment/wsgi/
"""
import os
local_dir = os.path.join(os.path.dirname(__file__))
activate_this = '/home/lichess4545/web/www.lichess4545.com/env/bin/activate_this.py'
if os.path.exists(activate_this):
exec(compile(open(activate_this).read(), activate_this, 'exec'), dict(__file__=activate_this))
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("HELTOUR_ENV", "LIVE")
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "heltour.settings")
application = get_wsgi_application()
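# --- Hedged usage sketch (not part of the original file): any WSGI server can serve the
# `application` object defined above; assuming this module lives at heltour/wsgi.py, e.g.:
#   gunicorn heltour.wsgi:application --bind 127.0.0.1:8000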
|
py | 1a336a4abcafdb18e6e62596821e34b4329fef1c | import copy
import ntpath
import pathlib
import posixpath
import sys
import unittest
from test.support import verbose
try:
# If we are in a source tree, use the original source file for tests
SOURCE = (pathlib.Path(__file__).absolute().parent.parent.parent / "Modules/getpath.py").read_bytes()
except FileNotFoundError:
# Try from _testcapimodule instead
from _testinternalcapi import get_getpath_codeobject
SOURCE = get_getpath_codeobject()
class MockGetPathTests(unittest.TestCase):
def __init__(self, *a, **kw):
super().__init__(*a, **kw)
self.maxDiff = None
def test_normal_win32(self):
"Test a 'standard' install layout on Windows."
ns = MockNTNamespace(
argv0=r"C:\Python\python.exe",
real_executable=r"C:\Python\python.exe",
)
ns.add_known_xfile(r"C:\Python\python.exe")
ns.add_known_file(r"C:\Python\Lib\os.py")
ns.add_known_dir(r"C:\Python\DLLs")
expected = dict(
executable=r"C:\Python\python.exe",
base_executable=r"C:\Python\python.exe",
prefix=r"C:\Python",
exec_prefix=r"C:\Python",
module_search_paths_set=1,
module_search_paths=[
r"C:\Python\python98.zip",
r"C:\Python\Lib",
r"C:\Python\DLLs",
],
)
actual = getpath(ns, expected)
self.assertEqual(expected, actual)
def test_buildtree_win32(self):
"Test an in-build-tree layout on Windows."
ns = MockNTNamespace(
argv0=r"C:\CPython\PCbuild\amd64\python.exe",
real_executable=r"C:\CPython\PCbuild\amd64\python.exe",
)
ns.add_known_xfile(r"C:\CPython\PCbuild\amd64\python.exe")
ns.add_known_file(r"C:\CPython\Lib\os.py")
ns.add_known_file(r"C:\CPython\PCbuild\amd64\pybuilddir.txt", [""])
expected = dict(
executable=r"C:\CPython\PCbuild\amd64\python.exe",
base_executable=r"C:\CPython\PCbuild\amd64\python.exe",
prefix=r"C:\CPython",
exec_prefix=r"C:\CPython",
build_prefix=r"C:\CPython",
_is_python_build=1,
module_search_paths_set=1,
module_search_paths=[
r"C:\CPython\PCbuild\amd64\python98.zip",
r"C:\CPython\Lib",
r"C:\CPython\PCbuild\amd64",
],
)
actual = getpath(ns, expected)
self.assertEqual(expected, actual)
def test_venv_win32(self):
"""Test a venv layout on Windows.
This layout is discovered by the presence of %__PYVENV_LAUNCHER__%,
specifying the original launcher executable. site.py is responsible
for updating prefix and exec_prefix.
"""
ns = MockNTNamespace(
argv0=r"C:\Python\python.exe",
ENV___PYVENV_LAUNCHER__=r"C:\venv\Scripts\python.exe",
real_executable=r"C:\Python\python.exe",
)
ns.add_known_xfile(r"C:\Python\python.exe")
ns.add_known_xfile(r"C:\venv\Scripts\python.exe")
ns.add_known_file(r"C:\Python\Lib\os.py")
ns.add_known_dir(r"C:\Python\DLLs")
ns.add_known_file(r"C:\venv\pyvenv.cfg", [
r"home = C:\Python"
])
expected = dict(
executable=r"C:\venv\Scripts\python.exe",
prefix=r"C:\Python",
exec_prefix=r"C:\Python",
base_executable=r"C:\Python\python.exe",
base_prefix=r"C:\Python",
base_exec_prefix=r"C:\Python",
module_search_paths_set=1,
module_search_paths=[
r"C:\Python\python98.zip",
r"C:\Python\DLLs",
r"C:\Python\Lib",
r"C:\Python",
],
)
actual = getpath(ns, expected)
self.assertEqual(expected, actual)
def test_registry_win32(self):
"""Test registry lookup on Windows.
On Windows there are registry entries that are intended for other
applications to register search paths.
"""
hkey = rf"HKLM\Software\Python\PythonCore\9.8-XY\PythonPath"
winreg = MockWinreg({
hkey: None,
f"{hkey}\\Path1": "path1-dir",
f"{hkey}\\Path1\\Subdir": "not-subdirs",
})
ns = MockNTNamespace(
argv0=r"C:\Python\python.exe",
real_executable=r"C:\Python\python.exe",
winreg=winreg,
)
ns.add_known_xfile(r"C:\Python\python.exe")
ns.add_known_file(r"C:\Python\Lib\os.py")
ns.add_known_dir(r"C:\Python\DLLs")
expected = dict(
module_search_paths_set=1,
module_search_paths=[
r"C:\Python\python98.zip",
"path1-dir",
# should not contain not-subdirs
r"C:\Python\Lib",
r"C:\Python\DLLs",
],
)
actual = getpath(ns, expected)
self.assertEqual(expected, actual)
ns["config"]["use_environment"] = 0
ns["config"]["module_search_paths_set"] = 0
ns["config"]["module_search_paths"] = None
expected = dict(
module_search_paths_set=1,
module_search_paths=[
r"C:\Python\python98.zip",
r"C:\Python\Lib",
r"C:\Python\DLLs",
],
)
actual = getpath(ns, expected)
self.assertEqual(expected, actual)
def test_symlink_normal_win32(self):
"Test a 'standard' install layout via symlink on Windows."
ns = MockNTNamespace(
argv0=r"C:\LinkedFrom\python.exe",
real_executable=r"C:\Python\python.exe",
)
ns.add_known_xfile(r"C:\LinkedFrom\python.exe")
ns.add_known_xfile(r"C:\Python\python.exe")
ns.add_known_link(r"C:\LinkedFrom\python.exe", r"C:\Python\python.exe")
ns.add_known_file(r"C:\Python\Lib\os.py")
ns.add_known_dir(r"C:\Python\DLLs")
expected = dict(
executable=r"C:\LinkedFrom\python.exe",
base_executable=r"C:\LinkedFrom\python.exe",
prefix=r"C:\Python",
exec_prefix=r"C:\Python",
module_search_paths_set=1,
module_search_paths=[
r"C:\Python\python98.zip",
r"C:\Python\Lib",
r"C:\Python\DLLs",
],
)
actual = getpath(ns, expected)
self.assertEqual(expected, actual)
def test_symlink_buildtree_win32(self):
"Test an in-build-tree layout via symlink on Windows."
ns = MockNTNamespace(
argv0=r"C:\LinkedFrom\python.exe",
real_executable=r"C:\CPython\PCbuild\amd64\python.exe",
)
ns.add_known_xfile(r"C:\LinkedFrom\python.exe")
ns.add_known_xfile(r"C:\CPython\PCbuild\amd64\python.exe")
ns.add_known_link(r"C:\LinkedFrom\python.exe", r"C:\CPython\PCbuild\amd64\python.exe")
ns.add_known_file(r"C:\CPython\Lib\os.py")
ns.add_known_file(r"C:\CPython\PCbuild\amd64\pybuilddir.txt", [""])
expected = dict(
executable=r"C:\LinkedFrom\python.exe",
base_executable=r"C:\LinkedFrom\python.exe",
prefix=r"C:\CPython",
exec_prefix=r"C:\CPython",
build_prefix=r"C:\CPython",
_is_python_build=1,
module_search_paths_set=1,
module_search_paths=[
r"C:\CPython\PCbuild\amd64\python98.zip",
r"C:\CPython\Lib",
r"C:\CPython\PCbuild\amd64",
],
)
actual = getpath(ns, expected)
self.assertEqual(expected, actual)
def test_buildtree_pythonhome_win32(self):
"Test an out-of-build-tree layout on Windows with PYTHONHOME override."
ns = MockNTNamespace(
argv0=r"C:\Out\python.exe",
real_executable=r"C:\Out\python.exe",
ENV_PYTHONHOME=r"C:\CPython",
)
ns.add_known_xfile(r"C:\Out\python.exe")
ns.add_known_file(r"C:\CPython\Lib\os.py")
ns.add_known_file(r"C:\Out\pybuilddir.txt", [""])
expected = dict(
executable=r"C:\Out\python.exe",
base_executable=r"C:\Out\python.exe",
prefix=r"C:\CPython",
exec_prefix=r"C:\CPython",
# This build_prefix is a miscalculation, because we have
# moved the output direction out of the prefix.
# Specify PYTHONHOME to get the correct prefix/exec_prefix
build_prefix="C:\\",
_is_python_build=1,
module_search_paths_set=1,
module_search_paths=[
r"C:\Out\python98.zip",
r"C:\CPython\Lib",
r"C:\Out",
],
)
actual = getpath(ns, expected)
self.assertEqual(expected, actual)
def test_normal_posix(self):
"Test a 'standard' install layout on *nix"
ns = MockPosixNamespace(
PREFIX="/usr",
argv0="python",
ENV_PATH="/usr/bin",
)
ns.add_known_xfile("/usr/bin/python")
ns.add_known_file("/usr/lib/python9.8/os.py")
ns.add_known_dir("/usr/lib/python9.8/lib-dynload")
expected = dict(
executable="/usr/bin/python",
base_executable="/usr/bin/python",
prefix="/usr",
exec_prefix="/usr",
module_search_paths_set=1,
module_search_paths=[
"/usr/lib/python98.zip",
"/usr/lib/python9.8",
"/usr/lib/python9.8/lib-dynload",
],
)
actual = getpath(ns, expected)
self.assertEqual(expected, actual)
def test_buildpath_posix(self):
"""Test an in-build-tree layout on POSIX.
This layout is discovered from the presence of pybuilddir.txt, which
contains the relative path from the executable's directory to the
platstdlib path.
"""
ns = MockPosixNamespace(
argv0=r"/home/cpython/python",
PREFIX="/usr/local",
)
ns.add_known_xfile("/home/cpython/python")
ns.add_known_xfile("/usr/local/bin/python")
ns.add_known_file("/home/cpython/pybuilddir.txt", ["build/lib.linux-x86_64-9.8"])
ns.add_known_file("/home/cpython/Lib/os.py")
ns.add_known_dir("/home/cpython/lib-dynload")
expected = dict(
executable="/home/cpython/python",
prefix="/usr/local",
exec_prefix="/usr/local",
base_executable="/home/cpython/python",
build_prefix="/home/cpython",
_is_python_build=1,
module_search_paths_set=1,
module_search_paths=[
"/usr/local/lib/python98.zip",
"/home/cpython/Lib",
"/home/cpython/build/lib.linux-x86_64-9.8",
],
)
actual = getpath(ns, expected)
self.assertEqual(expected, actual)
def test_venv_posix(self):
"Test a venv layout on *nix."
ns = MockPosixNamespace(
argv0="python",
PREFIX="/usr",
ENV_PATH="/venv/bin:/usr/bin",
)
ns.add_known_xfile("/usr/bin/python")
ns.add_known_xfile("/venv/bin/python")
ns.add_known_file("/usr/lib/python9.8/os.py")
ns.add_known_dir("/usr/lib/python9.8/lib-dynload")
ns.add_known_file("/venv/pyvenv.cfg", [
r"home = /usr/bin"
])
expected = dict(
executable="/venv/bin/python",
prefix="/usr",
exec_prefix="/usr",
base_executable="/usr/bin/python",
base_prefix="/usr",
base_exec_prefix="/usr",
module_search_paths_set=1,
module_search_paths=[
"/usr/lib/python98.zip",
"/usr/lib/python9.8",
"/usr/lib/python9.8/lib-dynload",
],
)
actual = getpath(ns, expected)
self.assertEqual(expected, actual)
def test_symlink_normal_posix(self):
"Test a 'standard' install layout via symlink on *nix"
ns = MockPosixNamespace(
PREFIX="/usr",
argv0="/linkfrom/python",
)
ns.add_known_xfile("/linkfrom/python")
ns.add_known_xfile("/usr/bin/python")
ns.add_known_link("/linkfrom/python", "/usr/bin/python")
ns.add_known_file("/usr/lib/python9.8/os.py")
ns.add_known_dir("/usr/lib/python9.8/lib-dynload")
expected = dict(
executable="/linkfrom/python",
base_executable="/linkfrom/python",
prefix="/usr",
exec_prefix="/usr",
module_search_paths_set=1,
module_search_paths=[
"/usr/lib/python98.zip",
"/usr/lib/python9.8",
"/usr/lib/python9.8/lib-dynload",
],
)
actual = getpath(ns, expected)
self.assertEqual(expected, actual)
def test_symlink_buildpath_posix(self):
"""Test an in-build-tree layout on POSIX.
This layout is discovered from the presence of pybuilddir.txt, which
contains the relative path from the executable's directory to the
platstdlib path.
"""
ns = MockPosixNamespace(
argv0=r"/linkfrom/python",
PREFIX="/usr/local",
)
ns.add_known_xfile("/linkfrom/python")
ns.add_known_xfile("/home/cpython/python")
ns.add_known_link("/linkfrom/python", "/home/cpython/python")
ns.add_known_xfile("/usr/local/bin/python")
ns.add_known_file("/home/cpython/pybuilddir.txt", ["build/lib.linux-x86_64-9.8"])
ns.add_known_file("/home/cpython/Lib/os.py")
ns.add_known_dir("/home/cpython/lib-dynload")
expected = dict(
executable="/linkfrom/python",
prefix="/usr/local",
exec_prefix="/usr/local",
base_executable="/linkfrom/python",
build_prefix="/home/cpython",
_is_python_build=1,
module_search_paths_set=1,
module_search_paths=[
"/usr/local/lib/python98.zip",
"/home/cpython/Lib",
"/home/cpython/build/lib.linux-x86_64-9.8",
],
)
actual = getpath(ns, expected)
self.assertEqual(expected, actual)
def test_custom_platlibdir_posix(self):
"Test an install with custom platlibdir on *nix"
ns = MockPosixNamespace(
PREFIX="/usr",
argv0="/linkfrom/python",
PLATLIBDIR="lib64",
)
ns.add_known_xfile("/usr/bin/python")
ns.add_known_file("/usr/lib64/python9.8/os.py")
ns.add_known_dir("/usr/lib64/python9.8/lib-dynload")
expected = dict(
executable="/linkfrom/python",
base_executable="/linkfrom/python",
prefix="/usr",
exec_prefix="/usr",
module_search_paths_set=1,
module_search_paths=[
"/usr/lib64/python98.zip",
"/usr/lib64/python9.8",
"/usr/lib64/python9.8/lib-dynload",
],
)
actual = getpath(ns, expected)
self.assertEqual(expected, actual)
def test_venv_macos(self):
"""Test a venv layout on macOS.
This layout is discovered when 'executable' and 'real_executable' match,
but $__PYVENV_LAUNCHER__ has been set to the original process.
"""
ns = MockPosixNamespace(
os_name="darwin",
argv0="/usr/bin/python",
PREFIX="/usr",
ENV___PYVENV_LAUNCHER__="/framework/Python9.8/python",
real_executable="/usr/bin/python",
)
ns.add_known_xfile("/usr/bin/python")
ns.add_known_xfile("/framework/Python9.8/python")
ns.add_known_file("/usr/lib/python9.8/os.py")
ns.add_known_dir("/usr/lib/python9.8/lib-dynload")
ns.add_known_file("/framework/Python9.8/pyvenv.cfg", [
"home = /usr/bin"
])
expected = dict(
executable="/framework/Python9.8/python",
prefix="/usr",
exec_prefix="/usr",
base_executable="/usr/bin/python",
base_prefix="/usr",
base_exec_prefix="/usr",
module_search_paths_set=1,
module_search_paths=[
"/usr/lib/python98.zip",
"/usr/lib/python9.8",
"/usr/lib/python9.8/lib-dynload",
],
)
actual = getpath(ns, expected)
self.assertEqual(expected, actual)
def test_symlink_normal_macos(self):
"Test a 'standard' install layout via symlink on macOS"
ns = MockPosixNamespace(
os_name="darwin",
PREFIX="/usr",
argv0="python",
ENV_PATH="/linkfrom:/usr/bin",
# real_executable on macOS matches the invocation path
real_executable="/linkfrom/python",
)
ns.add_known_xfile("/linkfrom/python")
ns.add_known_xfile("/usr/bin/python")
ns.add_known_link("/linkfrom/python", "/usr/bin/python")
ns.add_known_file("/usr/lib/python9.8/os.py")
ns.add_known_dir("/usr/lib/python9.8/lib-dynload")
expected = dict(
executable="/linkfrom/python",
base_executable="/linkfrom/python",
prefix="/usr",
exec_prefix="/usr",
module_search_paths_set=1,
module_search_paths=[
"/usr/lib/python98.zip",
"/usr/lib/python9.8",
"/usr/lib/python9.8/lib-dynload",
],
)
actual = getpath(ns, expected)
self.assertEqual(expected, actual)
def test_symlink_buildpath_macos(self):
"""Test an in-build-tree layout via symlink on macOS.
This layout is discovered from the presence of pybuilddir.txt, which
contains the relative path from the executable's directory to the
platstdlib path.
"""
ns = MockPosixNamespace(
os_name="darwin",
argv0=r"python",
ENV_PATH="/linkfrom:/usr/bin",
PREFIX="/usr/local",
# real_executable on macOS matches the invocation path
real_executable="/linkfrom/python",
)
ns.add_known_xfile("/linkfrom/python")
ns.add_known_xfile("/home/cpython/python")
ns.add_known_link("/linkfrom/python", "/home/cpython/python")
ns.add_known_xfile("/usr/local/bin/python")
ns.add_known_file("/home/cpython/pybuilddir.txt", ["build/lib.macos-9.8"])
ns.add_known_file("/home/cpython/Lib/os.py")
ns.add_known_dir("/home/cpython/lib-dynload")
expected = dict(
executable="/linkfrom/python",
prefix="/usr/local",
exec_prefix="/usr/local",
base_executable="/linkfrom/python",
build_prefix="/home/cpython",
_is_python_build=1,
module_search_paths_set=1,
module_search_paths=[
"/usr/local/lib/python98.zip",
"/home/cpython/Lib",
"/home/cpython/build/lib.macos-9.8",
],
)
actual = getpath(ns, expected)
self.assertEqual(expected, actual)
# ******************************************************************************
DEFAULT_NAMESPACE = dict(
PREFIX="",
EXEC_PREFIX="",
PYTHONPATH="",
VPATH="",
PLATLIBDIR="",
PYDEBUGEXT="",
VERSION_MAJOR=9, # fixed version number for ease
VERSION_MINOR=8, # of testing
PYWINVER=None,
EXE_SUFFIX=None,
ENV_PATH="",
ENV_PYTHONHOME="",
ENV_PYTHONEXECUTABLE="",
ENV___PYVENV_LAUNCHER__="",
argv0="",
py_setpath="",
real_executable="",
executable_dir="",
library="",
winreg=None,
build_prefix=None,
venv_prefix=None,
)
DEFAULT_CONFIG = dict(
home=None,
platlibdir=None,
pythonpath=None,
program_name=None,
prefix=None,
exec_prefix=None,
base_prefix=None,
base_exec_prefix=None,
executable=None,
base_executable="",
stdlib_dir=None,
platstdlib_dir=None,
module_search_paths=None,
module_search_paths_set=0,
pythonpath_env=None,
argv=None,
orig_argv=None,
isolated=0,
use_environment=1,
use_site=1,
)
class MockNTNamespace(dict):
def __init__(self, *a, argv0=None, config=None, **kw):
self.update(DEFAULT_NAMESPACE)
self["config"] = DEFAULT_CONFIG.copy()
self["os_name"] = "nt"
self["PLATLIBDIR"] = "DLLs"
self["PYWINVER"] = "9.8-XY"
self["VPATH"] = r"..\.."
super().__init__(*a, **kw)
if argv0:
self["config"]["orig_argv"] = [argv0]
if config:
self["config"].update(config)
self._files = {}
self._links = {}
self._dirs = set()
self._warnings = []
def add_known_file(self, path, lines=None):
self._files[path.casefold()] = list(lines or ())
self.add_known_dir(path.rpartition("\\")[0])
def add_known_xfile(self, path):
self.add_known_file(path)
def add_known_link(self, path, target):
self._links[path.casefold()] = target
def add_known_dir(self, path):
p = path.rstrip("\\").casefold()
while p:
self._dirs.add(p)
p = p.rpartition("\\")[0]
def __missing__(self, key):
try:
return getattr(self, key)
except AttributeError:
raise KeyError(key) from None
def abspath(self, path):
if self.isabs(path):
return path
return self.joinpath("C:\\Absolute", path)
def basename(self, path):
return path.rpartition("\\")[2]
def dirname(self, path):
name = path.rstrip("\\").rpartition("\\")[0]
if name[1:] == ":":
return name + "\\"
return name
def hassuffix(self, path, suffix):
return path.casefold().endswith(suffix.casefold())
def isabs(self, path):
return path[1:3] == ":\\"
def isdir(self, path):
if verbose:
print("Check if", path, "is a dir")
return path.casefold() in self._dirs
def isfile(self, path):
if verbose:
print("Check if", path, "is a file")
return path.casefold() in self._files
def ismodule(self, path):
if verbose:
print("Check if", path, "is a module")
path = path.casefold()
return path in self._files and path.rpartition(".")[2] == "py".casefold()
def isxfile(self, path):
if verbose:
print("Check if", path, "is a executable")
path = path.casefold()
return path in self._files and path.rpartition(".")[2] == "exe".casefold()
def joinpath(self, *path):
return ntpath.normpath(ntpath.join(*path))
def readlines(self, path):
try:
return self._files[path.casefold()]
except KeyError:
raise FileNotFoundError(path) from None
def realpath(self, path, _trail=None):
if verbose:
print("Read link from", path)
try:
link = self._links[path.casefold()]
except KeyError:
return path
if _trail is None:
_trail = set()
elif link.casefold() in _trail:
raise OSError("circular link")
_trail.add(link.casefold())
return self.realpath(link, _trail)
def warn(self, message):
self._warnings.append(message)
if verbose:
print(message)
class MockWinreg:
HKEY_LOCAL_MACHINE = "HKLM"
HKEY_CURRENT_USER = "HKCU"
def __init__(self, keys):
self.keys = {k.casefold(): v for k, v in keys.items()}
self.open = {}
def __repr__(self):
return "<MockWinreg>"
def __eq__(self, other):
return isinstance(other, type(self))
def open_keys(self):
return list(self.open)
def OpenKeyEx(self, hkey, subkey):
if verbose:
print(f"OpenKeyEx({hkey}, {subkey})")
key = f"{hkey}\\{subkey}".casefold()
if key in self.keys:
self.open[key] = self.open.get(key, 0) + 1
return key
raise FileNotFoundError()
def CloseKey(self, hkey):
if verbose:
print(f"CloseKey({hkey})")
hkey = hkey.casefold()
if hkey not in self.open:
raise RuntimeError("key is not open")
self.open[hkey] -= 1
if not self.open[hkey]:
del self.open[hkey]
def EnumKey(self, hkey, i):
if verbose:
print(f"EnumKey({hkey}, {i})")
hkey = hkey.casefold()
if hkey not in self.open:
raise RuntimeError("key is not open")
prefix = f'{hkey}\\'
subkeys = [k[len(prefix):] for k in sorted(self.keys) if k.startswith(prefix)]
subkeys[:] = [k for k in subkeys if '\\' not in k]
for j, n in enumerate(subkeys):
if j == i:
return n.removeprefix(prefix)
raise OSError("end of enumeration")
def QueryValue(self, hkey, subkey):
if verbose:
print(f"QueryValue({hkey}, {subkey})")
hkey = hkey.casefold()
if hkey not in self.open:
raise RuntimeError("key is not open")
if subkey:
subkey = subkey.casefold()
hkey = f'{hkey}\\{subkey}'
try:
return self.keys[hkey]
except KeyError:
raise OSError()
class MockPosixNamespace(dict):
def __init__(self, *a, argv0=None, config=None, **kw):
self.update(DEFAULT_NAMESPACE)
self["config"] = DEFAULT_CONFIG.copy()
self["os_name"] = "posix"
self["PLATLIBDIR"] = "lib"
super().__init__(*a, **kw)
if argv0:
self["config"]["orig_argv"] = [argv0]
if config:
self["config"].update(config)
self._files = {}
self._xfiles = set()
self._links = {}
self._dirs = set()
self._warnings = []
def add_known_file(self, path, lines=None):
self._files[path] = list(lines or ())
self.add_known_dir(path.rpartition("/")[0])
def add_known_xfile(self, path):
self.add_known_file(path)
self._xfiles.add(path)
def add_known_link(self, path, target):
self._links[path] = target
def add_known_dir(self, path):
p = path.rstrip("/")
while p:
self._dirs.add(p)
p = p.rpartition("/")[0]
def __missing__(self, key):
try:
return getattr(self, key)
except AttributeError:
raise KeyError(key) from None
def abspath(self, path):
if self.isabs(path):
return path
return self.joinpath("/Absolute", path)
def basename(self, path):
return path.rpartition("/")[2]
def dirname(self, path):
return path.rstrip("/").rpartition("/")[0]
def hassuffix(self, path, suffix):
return path.endswith(suffix)
def isabs(self, path):
return path[0:1] == "/"
def isdir(self, path):
if verbose:
print("Check if", path, "is a dir")
return path in self._dirs
def isfile(self, path):
if verbose:
print("Check if", path, "is a file")
return path in self._files
def ismodule(self, path):
if verbose:
print("Check if", path, "is a module")
return path in self._files and path.rpartition(".")[2] == "py"
def isxfile(self, path):
if verbose:
print("Check if", path, "is an xfile")
return path in self._xfiles
def joinpath(self, *path):
return posixpath.normpath(posixpath.join(*path))
def readlines(self, path):
try:
return self._files[path]
except KeyError:
raise FileNotFoundError(path) from None
def realpath(self, path, _trail=None):
if verbose:
print("Read link from", path)
try:
link = self._links[path]
except KeyError:
return path
if _trail is None:
_trail = set()
elif link in _trail:
raise OSError("circular link")
_trail.add(link)
return self.realpath(link, _trail)
def warn(self, message):
self._warnings.append(message)
if verbose:
print(message)
def diff_dict(before, after, prefix="global"):
diff = []
for k in sorted(before):
if k[:2] == "__":
continue
if k == "config":
diff_dict(before[k], after[k], prefix="config")
continue
if k in after and after[k] != before[k]:
diff.append((k, before[k], after[k]))
if not diff:
return
max_k = max(len(k) for k, _, _ in diff)
indent = " " * (len(prefix) + 1 + max_k)
if verbose:
for k, b, a in diff:
if b:
print("{}.{} -{!r}\n{} +{!r}".format(prefix, k.ljust(max_k), b, indent, a))
else:
print("{}.{} +{!r}".format(prefix, k.ljust(max_k), a))
def dump_dict(before, after, prefix="global"):
if not verbose or not after:
return
max_k = max(len(k) for k in after)
for k, v in sorted(after.items(), key=lambda i: i[0]):
if k[:2] == "__":
continue
if k == "config":
dump_dict(before[k], after[k], prefix="config")
continue
try:
if v != before[k]:
print("{}.{} {!r} (was {!r})".format(prefix, k.ljust(max_k), v, before[k]))
continue
except KeyError:
pass
print("{}.{} {!r}".format(prefix, k.ljust(max_k), v))
def getpath(ns, keys):
before = copy.deepcopy(ns)
failed = True
try:
exec(SOURCE, ns)
failed = False
finally:
if failed:
dump_dict(before, ns)
else:
diff_dict(before, ns)
return {
k: ns['config'].get(k, ns.get(k, ...))
for k in keys
}
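# --- Hedged note (not part of the file): in a CPython checkout this suite is normally run
# through the regrtest driver, e.g. `./python -m test test_getpath -v`; the module name is
# an assumption based on the `test.support` import above.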
|
py | 1a336a599cb427d3653ec6216b371f30146c7025 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import boto3
import sure # noqa
from moto import mock_emr
@mock_emr
def test_run_job_flow():
client = boto3.client('emr', region_name='us-east-1')
cluster_id = client.run_job_flow(
Name='cluster',
Instances={
'MasterInstanceType': 'c3.xlarge',
'SlaveInstanceType': 'c3.xlarge',
'InstanceCount': 3,
'Placement': {'AvailabilityZone': 'us-east-1a'},
'KeepJobFlowAliveWhenNoSteps': True,
},
VisibleToAllUsers=True,
)
cluster_id.should.have.key('JobFlowId')
@mock_emr
def test_list_clusters():
client = boto3.client('emr', region_name='us-east-1')
client.run_job_flow(
Name='cluster',
Instances={
'MasterInstanceType': 'c3.xlarge',
'SlaveInstanceType': 'c3.xlarge',
'InstanceCount': 3,
'Placement': {'AvailabilityZone': 'us-east-1a'},
'KeepJobFlowAliveWhenNoSteps': True,
},
VisibleToAllUsers=True,
)
summary = client.list_clusters()
clusters = summary['Clusters']
clusters.should.have.length_of(1)
cluster = clusters[0]
cluster['NormalizedInstanceHours'].should.equal(0)
cluster['Status']['State'].should.equal("RUNNING")
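# A hedged extra check (not in the original suite): DescribeCluster should echo the name
# passed to run_job_flow, and the returned JobFlowId doubles as the ClusterId.
@mock_emr
def test_describe_cluster():
    client = boto3.client('emr', region_name='us-east-1')
    resp = client.run_job_flow(
        Name='cluster',
        Instances={
            'MasterInstanceType': 'c3.xlarge',
            'SlaveInstanceType': 'c3.xlarge',
            'InstanceCount': 3,
            'Placement': {'AvailabilityZone': 'us-east-1a'},
            'KeepJobFlowAliveWhenNoSteps': True,
        },
        VisibleToAllUsers=True,
    )
    cluster = client.describe_cluster(ClusterId=resp['JobFlowId'])
    cluster['Cluster']['Name'].should.equal('cluster')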
|
py | 1a336a61f00d9481b75e8cefd4c726cf81e86987 | from kivymd.app import MDApp
from kivymd.uix.button import MDFloatingActionButton, MDRectangleFlatButton,MDFlatButton
from kivymd.uix.screen import Screen
from tkinter.filedialog import askdirectory
from tkinter import Tk
from kivymd.uix.dialog import MDDialog
import time
import os
import shutil
from pydrive.auth import GoogleAuth#this is to import google auth
from pydrive.drive import GoogleDrive#this will import the google drive module
class MainApp(MDApp):
def build(self):
screen = Screen()
btn1 = MDRectangleFlatButton(text='Select Client No', pos_hint={'center_x': 0.5, 'center_y': 0.5},on_release=self.select_client_no)
btn3 = MDRectangleFlatButton(text='Import', pos_hint={'center_x': 0.2, 'center_y': 0.5},on_release=self.func_imp)
btn2 = MDRectangleFlatButton(text='Start', pos_hint={'center_x': 0.8, 'center_y': 0.5},on_release=self.run_prog)
screen.add_widget(btn3)
screen.add_widget(btn2)
screen.add_widget(btn1)
return screen
def func_imp(self,obj):
global path
root=Tk()
path=askdirectory(title="Please select a directory to import")
root.update()
root.destroy()
def select_client_no(self,obj):
self.dialog = MDDialog(title='Select a client no',
size_hint=(0.8, 1),
buttons=[MDRectangleFlatButton(text='2', on_release=self.press_2),
MDRectangleFlatButton(text='1',on_release=self.press_1)])
self.dialog.open()
def press_1(self,obj):
global clientno
clientno="1"
self.dialog.dismiss()
def press_2(self,obj):
global clientno
clientno='2'
self.dialog.dismiss()
def run_prog(self,obj):
#clientno = the actual client no
#opp_clientno = the opposite client no
def first_login():#this function will be used when a user needs to login for the first time.
global drive
gauth = GoogleAuth()
gauth.LocalWebserverAuth()
gauth.SaveCredentialsFile("mycreds.txt")
drive = GoogleDrive(gauth)
def not_first_login():#this function will be used when a user had already logged in before.
global drive
gauth = GoogleAuth()
gauth.LoadCredentialsFile("mycreds.txt")
drive=GoogleDrive(gauth)
def exist_notexist():#returns first or not first
try:
with open('mycreds.txt') as reader:
confirmsize=reader.read()
if len(confirmsize)>2:
return 'not_first'
else:
return 'first'
except:
return 'first'
#this will upload the files
def file_upload(item_file):
upload_file = drive.CreateFile()
upload_file.SetContentFile(item_file) #load local file data into the File instance
upload_file.Upload() #creates the file in your Drive under the same name as the local file
#this will delete the files in the drive
def filedelete(item_file):
file_list = drive.ListFile({'q': "title contains "+"'"+item_file+"'"+" and trashed=false"}).GetList() #find the file using file name.
file_id = file_list[0]['id'] #get the file ID.
file = drive.CreateFile({'id': file_id})
file.Delete()
#this will get the paths of the clients and convert them to lists
def os_file_list(path_of_the_folder_to_sync):#the output is received in two variables: the first is the folder paths list and the second is the file paths list
global folderpaths_of_client
global filepaths_of_client
folderpaths_of_client=list()
filepaths_of_client=list()
#this will walk through all the folders and subfolders to gather the file paths
for folders,subfolders,files in os.walk(path_of_the_folder_to_sync):#Make a fuction for path!
folderpaths_of_client.append(folders[len(path_of_the_folder_to_sync):])
for file in files:
filepaths_of_client.append(folders[len(path_of_the_folder_to_sync):]+"\\"+file)
folderpaths_of_client.sort()
filepaths_of_client.sort()
return folderpaths_of_client,filepaths_of_client
def list_writer(list_you_want_to_write_into_a_text_file,nameofthedoc):#you need to give the list first and then the name of the document or textfile
with open(nameofthedoc+'.txt','w') as write_handler:
for item in list_you_want_to_write_into_a_text_file:
write_handler.write(item+'\n')#this will write the files/paths in order.
#This function takes in the document files and converts them to a list.
def list_reader(filename_of_the_textfile):#this will return a list.
try:
with open(filename_of_the_textfile,'r') as reader_handle:
tempreader=reader_handle.read()
return tempreader.split('\n')
except:
log_client('failed to open in list_reader',filename_of_the_textfile)
def log_client(string,string2optional=''):#can take in two strings ,second strings default value is None.
with open('logfile.txt','a+') as log_writer:
log_writer.write(string+' '+string2optional+'\n')
def copy_file(copyname,copypath):
shutil.copy2(copyname,copypath)
def file_download(item_to_download): #downloading the files from the drive
downloadtry=0
while True:
try:
time.sleep(2)
file_list = drive.ListFile({'q': "title contains "+"'"+item_to_download+"'"+" and trashed=false"}).GetList()#find the file using file name.
file_id = file_list[0]['id'] # get the file ID.
file = drive.CreateFile({'id': file_id})
file.GetContentFile(item_to_download) # downloads the file content and file.
file.Delete()
break
except: #skips the download of the files if the tryies exceed 3 times.
log_client('failed to download :',item_to_download)
continue
'''downloadtry+=1
if downloadtry>=10:
downloadtry=0
break'''
def file_delete(item_to_delete):#this fuction will be used to delete items
os.remove(item_to_delete)
#Syncing Part Starts here
#this part will take care of signing in
signinvar=exist_notexist()
if signinvar=='first':
first_login()
if signinvar=='not_first':
not_first_login()
#this part of the code will upload the os_file_list() files
#clientno=input('Enter the client no : ')
#path=askdirectory(title='Import the folder you want to sync')#after done the tinkter window needs to be closed.
folderPaths,filePaths= os_file_list(path)
list_writer(folderPaths,'folderpath'+clientno)#rememeber folderpath.
list_writer(filePaths,'filepath'+clientno)#remeber file path.
file_upload('folderpath'+clientno+'.txt')#this will upload the files to the drivev.
file_upload('filepath'+clientno+'.txt')#this will upload the files to the drive.
file_delete('folderpath'+clientno+'.txt')#this will delete file paths from the pc.
file_delete('filepath'+clientno+'.txt')#this will delete file paths from the pc.
#This part of the code will download the file paths from the other client.
if clientno=='1':
opp_clientno='2'
if clientno=='2':
opp_clientno='1'
#we never need to think about the oppsite client no again.
file_download('folderpath'+opp_clientno+'.txt')
file_download('filepath'+opp_clientno+'.txt')
#this part of the code will convert the downloaded files into lists
files_from_the_other_client=list_reader('filepath'+opp_clientno+'.txt')
folders_from_the_other_client=list_reader('folderpath'+opp_clientno+'.txt')
file_delete('folderpath'+opp_clientno+'.txt')
file_delete('filepath'+opp_clientno+'.txt')
#this part of the code will compare the lists from the other client and this client:
missing_files_from_this_client=list()
missing_folders_from_this_client=list()
#this will filter the files
for item in files_from_the_other_client:
if item not in filepaths_of_client:
missing_files_from_this_client.append(item)
#this will filter the folder
for item in folders_from_the_other_client:
if item not in folderpaths_of_client:
missing_folders_from_this_client.append(item)
#this part of the code will upload the filelist missing on this client.
#making a list of files that the other client needs to upload
list_writer(missing_files_from_this_client,'filestoupload'+clientno)
file_upload('filestoupload'+clientno+'.txt')
file_delete('filestoupload'+clientno+'.txt')
#this part of the code will download the uploadfilelist
file_download('filestoupload'+opp_clientno+'.txt')
files_to_upload=list_reader('filestoupload'+opp_clientno+'.txt')
file_delete('filestoupload'+opp_clientno+'.txt')
files_to_upload.sort()
#This is the part of code where folders/files will start Syncing.
for item in missing_folders_from_this_client:
if item=='':
continue
os.mkdir(path+item)
if clientno=='1':
#this part will take care of uploading
for item in files_to_upload:
if item=='':
continue
file_upload(path+item) #we might need to move the upload files to the actual path.
#this part will take care of the downloads
for item in missing_files_from_this_client:
if item=='':
continue
name_splitter=item.split('\\')
file=name_splitter[-1]
subtract_file_name=len(item)-len(file)
file_download(file)
while True:
try:
shutil.move(os.getcwd()+'\\'+file,path+item[:subtract_file_name])
log_client(os.getcwd()+'\\'+file+'\n',path+item[:subtract_file_name])
break
except:
log_client(os.getcwd()+'\\'+file+'\n',path+item[:subtract_file_name])
if clientno=='2':
for item in missing_files_from_this_client:
if item=='':
continue
name_splitter=item.split('\\')
file=name_splitter[-1]
subtract_file_name=len(item)-len(file)
file_download(file)
while True:
try:
shutil.move(os.getcwd()+'\\'+file,path+item[:subtract_file_name])
log_client(os.getcwd()+'\\'+file+'\n',path+item[:subtract_file_name])
break
except:
log_client(os.getcwd()+'\\'+file+'\n',path+item[:subtract_file_name])
#this part will take care of uploading
for item in files_to_upload:
if item=='':
continue
file_upload(path+item) #we might need to move the upload files to the actual path.
MainApp().run()
|
py | 1a336ac3ca6648c2593a0f9f94612cadcdf1cc57 | def check_evgen(myList):
if "Євген" in myList:
return True
return False
friends = ["Ярослав", "Євген"]
print(check_evgen(friends))
|
py | 1a336bf146611dcee5979ad37698975fa2683aa6 | # coding=utf-8
# Copyright 2020 The TensorFlow GAN Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Some utilities for self-attention estimators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import tensorflow as tf
from tensorflow_gan.examples.self_attention_estimator import eval_lib
import tensorflow_gan as tfgan # tf
def get_tpu_run_config_from_hparams(hparams):
"""Create a TPU-suitable RunConfig from HParams."""
tf.compat.v1.logging.info('tpu_location: %s', hparams.tpu_params.tpu_location)
tf.compat.v1.logging.info('gcp_project: %s', hparams.tpu_params.gcp_project)
tf.compat.v1.logging.info('tpu_zone: %s', hparams.tpu_params.tpu_zone)
cluster_resolver = tf.distribute.cluster_resolver.TPUClusterResolver(
tpu=hparams.tpu_params.tpu_location,
project=hparams.tpu_params.gcp_project,
zone=hparams.tpu_params.tpu_zone)
if hparams.debug_params.eval_on_tpu:
eval_training_input_configuration = tf.compat.v1.estimator.tpu.InputPipelineConfig.SLICED
else:
# InputPipelineConfig.SLICED is not supported when running on CPU.
eval_training_input_configuration = tf.compat.v1.estimator.tpu.InputPipelineConfig.PER_HOST_V1
return tf.compat.v1.estimator.tpu.RunConfig(
model_dir=hparams.model_dir,
cluster=cluster_resolver,
save_checkpoints_steps=hparams.train_steps_per_eval,
tpu_config=tf.compat.v1.estimator.tpu.TPUConfig(
iterations_per_loop=hparams.tpu_params.tpu_iterations_per_loop,
eval_training_input_configuration=eval_training_input_configuration))
def get_run_config_from_hparams(hparams):
mirrored_strategy = tf.distribute.MirroredStrategy()
return tf.estimator.RunConfig(
model_dir=hparams.model_dir,
save_checkpoints_steps=hparams.train_steps_per_eval,
train_distribute=mirrored_strategy)
def get_tpu_estimator(generator, discriminator, hparams, config):
return tfgan.estimator.TPUGANEstimator(
generator_fn=generator,
discriminator_fn=discriminator,
generator_loss_fn=tfgan.losses.wasserstein_hinge_generator_loss,
discriminator_loss_fn=tfgan.losses.wasserstein_hinge_discriminator_loss,
generator_optimizer=tf.compat.v1.train.AdamOptimizer(
hparams.generator_lr, hparams.beta1),
discriminator_optimizer=tf.compat.v1.train.AdamOptimizer(
hparams.discriminator_lr, hparams.beta1),
prepare_arguments_for_eval_metric_fn=prepare_metric_arguments,
get_eval_metric_ops_fn=functools.partial(get_metrics, hparams=hparams),
eval_on_tpu=hparams.debug_params.eval_on_tpu,
train_batch_size=hparams.train_batch_size,
eval_batch_size=hparams.eval_batch_size,
predict_batch_size=hparams.predict_batch_size,
use_tpu=hparams.debug_params.use_tpu,
config=config,
params=hparams._asdict())
def get_gpu_estimator(generator, discriminator, hparams, config):
"""Returns an Estimator object to be used for training with GPUs."""
def gpu_get_metric(gan_model):
"""A function compatible with GANEstimator's get_eval_metric_ops_fn arg."""
metrics_arguments = prepare_metric_arguments(
gan_model.generator_inputs, gan_model.generated_data,
gan_model.real_data, gan_model.discriminator_real_outputs,
gan_model.discriminator_gen_outputs)
metrics = get_metrics(hparams=hparams, **metrics_arguments)
# Generate image summaries.
real_data = gan_model.real_data
generated_data = gan_model.generated_data
real_images = (
real_data['images'] if isinstance(real_data, dict) else real_data)
gen_images = (
generated_data['images']
if isinstance(generated_data, dict) else generated_data)
metrics.update(_generator_summary_ops(gen_images, real_images))
return metrics
return tfgan.estimator.GANEstimator(
generator_fn=generator,
discriminator_fn=discriminator,
generator_loss_fn=tfgan.losses.wasserstein_hinge_generator_loss,
discriminator_loss_fn=tfgan.losses.wasserstein_hinge_discriminator_loss,
generator_optimizer=tf.compat.v1.train.AdamOptimizer(
hparams.generator_lr, hparams.beta1),
discriminator_optimizer=tf.compat.v1.train.AdamOptimizer(
hparams.discriminator_lr, hparams.beta1),
get_eval_metric_ops_fn=gpu_get_metric,
config=config,
params=hparams._asdict())
def prepare_metric_arguments(generator_inputs, generated_data, real_data,
discriminator_real_outputs,
discriminator_gen_outputs):
"""Prepares the arguments needed for get_metrics.
When training on TPUs, this function should be executed on TPU.
Args:
generator_inputs: Inputs to the generator fn.
generated_data: Output from the generator.
real_data: A sample of real data.
discriminator_real_outputs: Discriminator output on real data.
discriminator_gen_outputs: Discriminator output on generated data.
Returns:
A metric dictionary.
"""
del generator_inputs, discriminator_real_outputs, discriminator_gen_outputs
real_images = (real_data['images'] if isinstance(real_data, dict) else
real_data)
gen_images = (generated_data['images'] if isinstance(generated_data, dict)
else generated_data)
# Get logits and pools for real and generated images.
real_logits, real_pools = eval_lib.get_activations(
lambda: real_images, num_batches=1, get_logits=True)
fake_logits, fake_pools = eval_lib.get_activations(
lambda: gen_images, num_batches=1, get_logits=True)
return {
'real_logits': real_logits,
'real_pools': real_pools,
'fake_logits': fake_logits,
'fake_pools': fake_pools
}
def get_metrics(real_logits, real_pools, fake_logits, fake_pools, hparams):
"""Return metrics for SAGAN experiment on TPU, CPU, or GPU.
When training on TPUs, this function should be executed on the CPU.
Args:
real_logits: The real_logits object returned by prepare_metric_arguments.
real_pools: The real_pools object returned by prepare_metric_arguments.
fake_logits: The fake_logits object returned by prepare_metric_arguments.
fake_pools: The fake_pools object returned by prepare_metric_arguments.
hparams: An hparams object.
Returns:
A metric dictionary.
"""
del hparams
metric_dict = {
'eval/real_incscore':
tfgan.eval.classifier_score_from_logits_streaming(real_logits),
'eval/incscore':
tfgan.eval.classifier_score_from_logits_streaming(fake_logits),
'eval/fid':
tfgan.eval.frechet_classifier_distance_from_activations_streaming(
real_pools, fake_pools),
}
return metric_dict
def _generator_summary_ops(generated_images, real_images):
"""Creates a dictionary of image summaries."""
real_img_summ = tf.compat.v1.summary.image('real_images', real_images)
gen_img_summ = tf.compat.v1.summary.image('gen_images', generated_images)
real_img_grid = tf.compat.v1.summary.image(
'real_images_grid',
tfgan.eval.image_grid(
real_images[:16],
grid_shape=(4, 4),
image_shape=(128, 128),
num_channels=3))
gen_img_grid = tf.compat.v1.summary.image(
'generated_images_grid',
tfgan.eval.image_grid(
generated_images[:16],
grid_shape=(4, 4),
image_shape=(128, 128),
num_channels=3))
return {
'images/real': (real_img_summ, tf.no_op()),
'images/gen': (gen_img_summ, tf.no_op()),
'image_grid/real': (real_img_grid, tf.no_op()),
'image_grid/gen': (gen_img_grid, tf.no_op()),
}
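# --- Hedged wiring sketch (assumptions: `hparams` is the HParams object used elsewhere in
# this example package, `generator`/`discriminator` are the model functions, and
# `train_input_fn` is a standard Estimator input_fn; none of them are defined in this file):
# config = get_run_config_from_hparams(hparams)
# estimator = get_gpu_estimator(generator, discriminator, hparams, config)
# estimator.train(train_input_fn, max_steps=hparams.max_number_of_steps)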
|
py | 1a336cbbefb55612c5738bba0a5407375a8bb8c5 | """
Make the event issues on the gitlab issue tracker from gracedb.
"""
import os
import asimov
from asimov.event import Event, DescriptionException
from asimov import config
from asimov import gitlab
import numpy as np
import yaml
from git.exc import GitCommandError
import click
#from ligo.gracedb.rest import GraceDb, HTTPError
#client = GraceDb(service_url=config.get("gracedb", "url"))
#r = client.ping()
#superevent_iterator = client.superevents('O3B_CBC_CATALOG')
#superevent_ids = [superevent['superevent_id'] for superevent in superevent_iterator]
server = gitlab.gitlab.Gitlab(config.get("gitlab", "url"), private_token=config.get("gitlab", "token"))
repository = server.projects.get(config.get("olivaw", "tracking_repository"))
CALIBRATION_NOTE = """
## Calibration envelopes
The following calibration envelopes have been found.
```yaml
---
{}
---
```
"""
@click.group()
def olivaw():
"""
This is the main olivaw program which runs the DAGs for each event issue.
"""
click.echo("Running olivaw")
global rundir
rundir = os.getcwd()
def find_calibrations(time):
with open("LLO_calibs.txt") as llo_file:
data_llo = llo_file.read().split("\n")
data_llo = [datum for datum in data_llo if datum[-16:]=="FinalResults.txt"]
times_llo = {int(datum.split("GPSTime_")[1].split("_C01")[0]): datum for datum in data_llo}
with open("LHO_calibs.txt") as llo_file:
data_lho = llo_file.read().split("\n")
data_lho = [datum for datum in data_lho if datum[-16:]=="FinalResults.txt"]
times_lho = {int(datum.split("GPSTime_")[1].split("_C01")[0]): datum for datum in data_lho}
keys_llo = np.array(list(times_llo.keys()))
keys_lho = np.array(list(times_lho.keys()))
return {"H1": times_lho[keys_lho[np.argmin(np.abs(keys_lho - time))]], "L1": times_llo[keys_llo[np.argmin(np.abs(keys_llo - time))]], "V1": "/home/cbc/pe/O3/calibrationenvelopes/Virgo/V_O3a_calibrationUncertaintyEnvelope_magnitude5percent_phase35milliradians10microseconds.txt"}
@click.option("--event", "event", default=None, help="The event which the ledger should be returned for, optional.")
@olivaw.command()
def calibration(event):
gitlab_events = gitlab.find_events(repository, subset=event)
# Update existing events
for event in gitlab_events:
if "disable_repo" in event.event_object.meta:
if event.event_object.meta['disable_repo'] == True:
continue
try:
event.event_object._check_calibration()
except DescriptionException:
print(event.title)
time = event.event_object.meta['event time']
calibrations = find_calibrations(time)
print(calibrations)
# try:
for ifo, envelope in calibrations.items():
description = f"Added calibration {envelope} for {ifo}."
try:
event.event_object.repository.add_file(os.path.join(f"/home/cal/public_html/uncertainty/O3C01/{ifo}", envelope), f"C01_offline/calibration/{ifo}.dat",
commit_message=description)
except GitCommandError as e:
if "nothing to commit," in e.stderr:
pass
calibrations[ifo] = f"C01_offline/calibration/{ifo}.dat"
envelopes = yaml.dump({"calibration": calibrations})
event.add_note(CALIBRATION_NOTE.format(envelopes))
olivaw()
|
py | 1a336e35c002486c174048b67ca6b09d2a94950c | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
from recommonmark.parser import CommonMarkParser
from unittest.mock import MagicMock
from recommonmark.transform import AutoStructify
sys.path.insert(0, os.path.abspath('../../'))
sys.path.insert(0, os.path.abspath('../'))
# TODO: https://github.com/rtfd/recommonmark/issues/93
# TODO https://github.com/rtfd/recommonmark/issues/120
# This patch helps in linking markdown files within markdown files
from recommonmark.states import DummyStateMachine
# Monkey patch to fix recommonmark 0.4 doc reference issues.
orig_run_role = DummyStateMachine.run_role
def run_role(self, name, options=None, content=None):
if name == 'doc':
name = 'any'
return orig_run_role(self, name, options, content)
DummyStateMachine.run_role = run_role
# -- Project information -----------------------------------------------------
project = 'Mozhi'
copyright = '2021, Mozhi'
author = 'Mageswaran'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['recommonmark',
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
'sphinx_markdown_tables',
# 'sphinxarg.ext',
# 'm2r', # https://github.com/miyakogi/m2r/pull/55
'sphinx.ext.githubpages']
# 'sphinxcontrib.bibtex',
# 'sphinx.ext.napoleon',
# 'nbsphinx', #https://nbsphinx.readthedocs.io/en/0.6.0/
# 'sphinx_issues', # https://github.com/sloria/sphinx-issues
# 'sphinx_copybutton']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# www.sphinx-doc.org/en/stable/markdown.html
# https://github.com/sphinx-doc/sphinx/issues/7000
source_suffix = {
'.rst': 'restructuredtext',
'.txt': 'markdown',
'.md': 'markdown',
}
source_parsers = {
'.md': CommonMarkParser,
}
# The short X.Y version.
version = '0.0.1'
# The full version, including alpha/beta/rc tags.
release = '0.0.1'
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'karma_sphinx_theme'
# html_theme = 'sphinx_book_theme'
# html_theme = 'sphinx_redactor_theme'
html_css_files = [
'mozhi_style.css',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# html_theme_options = {'body_max_width': '90%'}
# Output file base name for HTML help builder.
html_theme_options = {
'navigation_depth': 3,
'includehidden': True,
'titles_only': False
}
htmlhelp_basename = 'mozhi'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
html_context = {
'css_files': [
'https://fonts.googleapis.com/css?family=Lato',
'_static/css/mozhi_style.css'
],
}
# At the bottom of conf.py
# https://recommonmark.readthedocs.io/en/latest/auto_structify.html
def setup(app):
app.add_config_value('recommonmark_config', {
'enable_auto_toc_tree' : True,
'enable_math': True,
'enable_inline_math': True,
}, True)
app.add_transform(AutoStructify)
app.add_css_file('custom.css') |
py | 1a336edda9c89c8a7b84f91d494c909d37f27618 | #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Copyright 2014 California Institute of Technology. ALL RIGHTS RESERVED.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# United States Government Sponsorship acknowledged. This software is subject to
# U.S. export control laws and regulations and has been classified as 'EAR99 NLR'
# (No [Export] License Required except when exporting to an embargoed country,
# end user, or in support of a prohibited end use). By downloading this software,
# the user agrees to comply with all applicable U.S. export laws and regulations.
# The user has the responsibility to obtain export licenses, or other export
# authority as may be required before exporting this software to any 'EAR99'
# embargoed foreign country or citizen of those countries.
#
# Author: Eric Belz
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""Verbs may appear to be an antipatterm: the methods go mutate another
objects attributes (Grammar). But that is how RDF works: metawords change
the grammar.
"""
## \namespace rdf.language.lexis.pragmatics Words with (reflexive) meaning
## (Verb)
import abc
from iscesys.Parsers.rdf.language import lexis
## A str subclass that is also an Abstract Base Class: real RDF commands are \n
## _strings_ (hence str) with __meaning__ (hence they are subclasses of
## _Verb)
class _Verb(lexis.Word):
"""_Pragamtic is an self identifying string"""
__metaclass__ = abc.ABCMeta
    ## Allow the class to identify itself from a line (given an operator).
    def line_is(self, line, grammar):
        """line_is(line, grammar) IFF line is pragmatic"""
line = line.strip()
if not line.startswith(self): # Guard
return False
## Does the line starts with the string?
subline = line.lstrip(self).strip()
return subline.startswith(grammar.operator)
    ## Act is not action--> act tells this object to go do its thing \n
## which is act on the grammar according to line.
@abc.abstractmethod
def act(self, line, grammar):
"""Abstract method must be overriden in concrete subclasses"""
## Verbs must act -- or return an empty iterable.
def sin_qua_non(self, line, grammar):
return self.act(line, grammar) or ()
## Include is the most complicated Word: it initiates a recursive \n
## call to rdf_include(), and thus, returns a list of RDFRecord objects.
class Include(_Verb):
"""Verb can identify the INCLUDE lines"""
    ## Include.act should never be called - a dynamic error will be thrown
def act(self, line, grammar):
from iscesys.Parsers.rdf.uRDF import rdf_include
src = grammar.operator.right(line)
## Sends in the grammar to the include files
return rdf_include(src, _grammar=grammar)
## ABC for any Verb that changes a gramar symbol.
class _SymbolChange(_Verb):
__metaclass__ = abc.ABCMeta
## A concrete method for an abstract class-- this changes grammar
def act(self, line, grammar):
"""<Verb>(line).act(grammar, line) --> modify grammar:
grammar.<verb> = grammar.get_value(line)
note: this could be a method of grammar that takes <pragamatic>
as input-->
self act(attr, value) # I guess this is setattr?
"""
setattr(grammar,
self.__class__.__name__.lower(),
grammar.get_value(line))
## OPERATOR keyword change's rdf.language.syntax.Grammar.operator
class Operator(_SymbolChange):
"""Change grammar's operator"""
## COMMENT keyword change's rdf.language.syntax.Grammar.comment
class Comment(_SymbolChange):
"""Change grammar's comment attribute"""
## Its complicated and may not be a good idea.
class _Affix(_Verb):
"""_Affix is an ABC
"""
__metaclass__ = abc.ABCMeta
## Change grammar's attribute that is the lower case of the class name,\n
    ## b/c the attribute is a list-- you can use getattr on grammar and overload\n
## the result's __setitem__.
def act(self, line, grammar):
"""act(grammar, line) changes grammar's affix matching
self.__class__.__name__ according to the assignment in line"""
# assignment to a list element in an unusual format:
getattr(
grammar, self.__class__.__name__.lower()
)[int(grammar)] = grammar.get_value(line)
## An _Affix that cooperates with rdf.language.syntax.Grammar.prefix
class Prefix(_Affix):
    """Prefix is an _Affix that cooperates with Grammar.prefix"""
## An _Affix that cooperates with rdf.language.syntax.Grammar.suffix
class Suffix(_Affix):
    """Suffix is an _Affix that cooperates with Grammar.suffix"""
## Reserved Verb classes - like the constants, but functional
VERBS = (Include, Operator, Comment, Prefix, Suffix)
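## Illustrative sketch of how these verbs are used (the directive shown here is
## an assumption for illustration, not taken from this module): with a grammar
## whose operator is '=', a line such as
##     OPERATOR = ::
## is recognized by Operator().line_is(line, grammar), and sin_qua_non() then
## calls act(), which rebinds grammar.operator to the value returned by
## grammar.get_value(line), so subsequent lines parse with the new operator.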
|
py | 1a33707f66c2ac082c83ac4c9e312b699380dfdf | numeros = ('zero', 'um', 'dois', 'tres', 'quatro', 'cinco',
'seis', 'sete', 'oito', 'nove', 'dez', 'onze',
           'doze', 'treze', 'catorze', 'quinze', 'dezesseis',
           'dezessete', 'dezoito', 'dezenove', 'vinte')
while True:
num = int(input('Digite um número entre 0 e 20: '))
if num <= 20:
break
print('Tente novamente. ', end='')
print(f'Você digitou o número {numeros[num]}.') |
py | 1a3371159b0c6b6570e0b1bb87b1e971eb127b36 | from setuptools import setup
import setuptools
setup(
name='cords',
version='0.2.6',
author='Krishnateja Killamsetty, Dheeraj Bhat, Rishabh Iyer',
author_email='[email protected]',
#packages=['cords', 'cords/selectionstrategies', 'cords/utils'],
url='https://github.com/decile-team/cords',
license='LICENSE.txt',
packages=setuptools.find_packages(),
description='cords is a package for data subset selection for efficient and robust machine learning.',
install_requires=[
"numpy >= 1.14.2",
"scipy >= 1.0.0",
"numba >= 0.43.0",
"tqdm >= 4.24.0",
"torch >= 1.4.0",
"apricot-select >= 0.6.0",
"sphinxcontrib-napoleon",
"sphinxcontrib-bibtex",
"sphinx-rtd-theme",
"scikit-learn",
"torchvision >= 0.5.0",
"matplotlib",
"ray[tune]"
],
)
|
py | 1a3371630a3ef7daa5994c0be57d443e6839c758 | import demistomock as demisto
from CommonServerPython import *
from CommonServerUserPython import *
from typing import Any, Tuple, Dict, List, Callable
import sqlalchemy
import pymysql
import traceback
import hashlib
import logging
from sqlalchemy.sql import text
try:
# if integration is using an older image (4.5 Server) we don't have expiringdict
from expiringdict import ExpiringDict # pylint: disable=E0401
except Exception:
pass
# This line is necessary so pymysql can be used as a drop-in replacement for MySQLdb
pymysql.install_as_MySQLdb()
GLOBAL_CACHE_ATTR = '_generic_sql_engine_cache'
DEFAULT_POOL_TTL = 600
class Client:
"""
    Client to use in the SQL databases integration. Overrides BaseClient
    and makes the connection to the DB server.
"""
def __init__(self, dialect: str, host: str, username: str, password: str, port: str,
database: str, connect_parameters: str, ssl_connect: bool, use_pool=False, pool_ttl=DEFAULT_POOL_TTL):
self.dialect = dialect
self.host = host
self.username = username
self.password = password
self.port = port
self.dbname = database
self.connect_parameters = connect_parameters
self.ssl_connect = ssl_connect
self.use_pool = use_pool
self.pool_ttl = pool_ttl
self.connection = self._create_engine_and_connect()
@staticmethod
def _convert_dialect_to_module(dialect: str) -> str:
"""
Converting a dialect to the correct string needed in order to connect the wanted dialect
:param dialect: the SQL db
:return: a key string needed for the connection
"""
if dialect == "MySQL":
module = "mysql"
elif dialect == "PostgreSQL":
module = "postgresql"
elif dialect == "Oracle":
module = "oracle"
elif dialect == "Microsoft SQL Server":
module = "mssql+pyodbc"
else:
module = str(dialect)
return module
@staticmethod
def _get_cache_string(url: str, connect_args: dict) -> str:
to_hash = url + repr(connect_args)
return hashlib.sha256(to_hash.encode('utf-8')).hexdigest()
def _get_global_cache(self) -> dict:
cache = getattr(sqlalchemy, GLOBAL_CACHE_ATTR, None)
if cache is None:
cache = ExpiringDict(100, max_age_seconds=self.pool_ttl)
setattr(sqlalchemy, GLOBAL_CACHE_ATTR, cache)
return cache
def _create_engine_and_connect(self) -> sqlalchemy.engine.base.Connection:
"""
Creating and engine according to the instance preferences and connecting
:return: a connection object that will be used in order to execute SQL queries
"""
module = self._convert_dialect_to_module(self.dialect)
port_part = ''
if self.port:
port_part = f':{self.port}'
db_preferences = f'{module}://{self.username}:{self.password}@{self.host}{port_part}/{self.dbname}'
ssl_connection = {}
if self.dialect == "Microsoft SQL Server":
db_preferences += "?driver=FreeTDS"
if self.connect_parameters and self.dialect == "Microsoft SQL Server":
db_preferences += f'&{self.connect_parameters}'
elif self.connect_parameters and self.dialect != "Microsoft SQL Server":
# a "?" was already added when the driver was defined
db_preferences += f'?{self.connect_parameters}'
if self.ssl_connect:
ssl_connection = {'ssl': {'ssl-mode': 'preferred'}}
engine: sqlalchemy.engine.Engine = None
if self.use_pool:
if 'expiringdict' not in sys.modules:
raise ValueError('Usage of connection pool is not support in this docker image')
cache = self._get_global_cache()
cache_key = self._get_cache_string(db_preferences, ssl_connection)
engine = cache.get(cache_key, None)
if engine is None: # (first time or expired) need to initialize
engine = sqlalchemy.create_engine(db_preferences, connect_args=ssl_connection)
cache[cache_key] = engine
else:
demisto.debug('Initializing engine with no pool (NullPool)')
engine = sqlalchemy.create_engine(db_preferences, connect_args=ssl_connection, poolclass=sqlalchemy.pool.NullPool)
return engine.connect()
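    # Example of the connection string assembled above (hypothetical
    # credentials, for illustration only):
    #   MySQL                -> mysql://user:pass@host:3306/dbname
    #   Microsoft SQL Server -> mssql+pyodbc://user:pass@host:1433/dbname?driver=FreeTDS
    # The optional SSL preference is passed separately via connect_args.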
def sql_query_execute_request(self, sql_query: str, bind_vars: Any) -> Tuple[Dict, List]:
"""Execute query in DB via engine
:param bind_vars: in case there are names and values - a bind_var dict, in case there are only values - list
:param sql_query: the SQL query
:return: results of query, table headers
"""
if type(bind_vars) is dict:
sql_query = text(sql_query)
result = self.connection.execute(sql_query, bind_vars)
results = result.fetchall()
headers = []
if results:
# if the table isn't empty
headers = results[0].keys()
return results, headers
def generate_default_port_by_dialect(dialect: str) -> str:
"""
In case no port was chosen, a default port will be chosen according to the SQL db type. Only return a port for
Microsoft SQL Server where it seems to be required. For the other drivers an empty port is supported.
:param dialect: sql db type
:return: default port needed for connection
"""
if dialect == "Microsoft SQL Server":
return "1433"
else:
# use default port supported by the driver
return ""
def generate_bind_vars(bind_variables_names: str, bind_variables_values: str) -> Any:
"""
    The bind variables can be given in 2 legal ways: as 2 lists - names and values, or only values.
    Each way implies a different execution path, therefore there are 2 legal return types.
    :param bind_variables_names: the names of the bind variables, must be the same length as the values list
    :param bind_variables_values: the values of the bind variables, must be the same length as the names list,
    or, in case there is no names list, of any length
    :return: a dict or a list of the bind variables
"""
bind_variables_names_list = argToList(bind_variables_names)
bind_variables_values_list = argToList(bind_variables_values)
if bind_variables_values and not bind_variables_names:
return [var for var in argToList(bind_variables_values)]
    elif len(bind_variables_names_list) == len(bind_variables_values_list):
        return dict(zip(bind_variables_names_list, bind_variables_values_list))
    else:
        raise Exception("The bind variables lists are not the same length")
def test_module(client: Client, *_) -> Tuple[str, Dict[Any, Any], List[Any]]:
"""
If the connection in the client was successful the test will return OK
if it wasn't an exception will be raised
"""
return 'ok', {}, []
def sql_query_execute(client: Client, args: dict, *_) -> Tuple[str, Dict[str, Any], List[Dict[str, Any]]]:
"""
Executes the sql query with the connection that was configured in the client
:param client: the client object with the db connection
:param args: demisto.args() including the sql query
:return: Demisto outputs
"""
try:
sql_query = str(args.get('query'))
limit = int(args.get('limit', 50))
skip = int(args.get('skip', 0))
bind_variables_names = args.get('bind_variables_names', "")
bind_variables_values = args.get('bind_variables_values', "")
bind_variables = generate_bind_vars(bind_variables_names, bind_variables_values)
result, headers = client.sql_query_execute_request(sql_query, bind_variables)
# converting an sqlalchemy object to a table
converted_table = [dict(row) for row in result]
# converting b'' and datetime objects to readable ones
table = [{str(key): str(value) for key, value in dictionary.items()} for dictionary in converted_table]
table = table[skip:skip + limit]
human_readable = tableToMarkdown(name="Query result:", t=table, headers=headers,
removeNull=True)
context = {
'Result': table,
'Query': sql_query,
'InstanceName': f'{client.dialect}_{client.dbname}'
}
entry_context: Dict = {'GenericSQL(val.Query && val.Query === obj.Query)': {'GenericSQL': context}}
return human_readable, entry_context, table
except Exception as err:
# In case there is no query executed and only an action e.g - insert, delete, update
# the result will raise an exception when we try to read the data from it
if str(err) == "This result object does not return rows. It has been closed automatically.":
human_readable = "Command executed"
return human_readable, {}, []
raise err
# list of loggers we should set to debug when running in debug_mode
# taken from: https://docs.sqlalchemy.org/en/13/core/engines.html#configuring-logging
DEBUG_LOGGERS = [
'sqlalchemy.engine',
'sqlalchemy.pool',
'sqlalchemy.dialects',
]
def main():
sql_loggers: list = [] # saves the debug loggers
try:
if is_debug_mode():
for lgr_name in DEBUG_LOGGERS:
lgr = logging.getLogger(lgr_name)
sql_loggers.append(lgr)
demisto.debug(f'setting DEBUG for logger: {repr(lgr)}')
lgr.setLevel(logging.DEBUG)
params = demisto.params()
dialect = params.get('dialect')
port = params.get('port')
if not port:
port = generate_default_port_by_dialect(dialect)
user = params.get("credentials").get("identifier")
password = params.get("credentials").get("password")
host = params.get('host')
database = params.get('dbname')
ssl_connect = params.get('ssl_connect')
connect_parameters = params.get('connect_parameters')
use_pool = params.get('use_pool', False)
pool_ttl = int(params.get('pool_ttl') or DEFAULT_POOL_TTL)
if pool_ttl <= 0:
pool_ttl = DEFAULT_POOL_TTL
command = demisto.command()
LOG(f'Command being called in SQL is: {command}')
client = Client(dialect=dialect, host=host, username=user, password=password,
port=port, database=database, connect_parameters=connect_parameters,
ssl_connect=ssl_connect, use_pool=use_pool, pool_ttl=pool_ttl)
commands: Dict[str, Callable[[Client, Dict[str, str], str], Tuple[str, Dict[Any, Any], List[Any]]]] = {
'test-module': test_module,
'query': sql_query_execute,
'sql-command': sql_query_execute
}
if command in commands:
return_outputs(*commands[command](client, demisto.args(), command))
else:
raise NotImplementedError(f'{command} is not an existing Generic SQL command')
except Exception as err:
return_error(f'Unexpected error: {str(err)} \nquery: {demisto.args().get("query")} \n{traceback.format_exc()}')
finally:
try:
if client.connection:
client.connection.close()
except Exception as ex:
            demisto.error(f'Failed closing connection: {str(ex)}')
if sql_loggers:
for lgr in sql_loggers:
demisto.debug(f'setting WARN for logger: {repr(lgr)}')
lgr.setLevel(logging.WARN)
if __name__ in ('__main__', '__builtin__', 'builtins'):
main()
|
py | 1a3371975a9989218d2bb5d082d8d66d4e4234be | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2016-2018 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Date string field."""
from __future__ import absolute_import, print_function
import arrow
from arrow.parser import ParserError
from marshmallow import fields, missing
class DateString(fields.Date):
"""ISO8601-formatted date string."""
def _serialize(self, value, attr, obj, **kwargs):
"""Serialize an ISO8601-formatted date."""
try:
return super(DateString, self)._serialize(
arrow.get(value).date(), attr, obj, **kwargs)
except ParserError:
return missing
def _deserialize(self, value, attr, data, **kwargs):
"""Deserialize an ISO8601-formatted date."""
return super(DateString, self)._deserialize(value, attr,
data, **kwargs).isoformat()
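# Illustrative usage (the schema below is an assumption for illustration, not
# part of this module):
#
#   from marshmallow import Schema
#
#   class RecordSchema(Schema):
#       date = DateString()
#
#   RecordSchema().dump({'date': '2018-03-21T10:00:00+00:00'})
#   # -> serializes the field as '2018-03-21'; unparsable input yields `missing`
#   RecordSchema().load({'date': '2018-03-21'})
#   # -> deserializes back to the ISO8601 string '2018-03-21'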
|
py | 1a337204485b77174670257e9af303b845b3560e | # -----------------------------------------------------------------------------
#
# Copyright (c) 2017 Sam Cox, Roberto Sommariva
#
# This file is part of the AtChem2 software package.
#
# This file is covered by the MIT license which can be found in the file
# LICENSE.md at the top level of the AtChem2 distribution.
#
# -----------------------------------------------------------------------------
## Plotting tool for the AtChem2 model output
## --> Python version [requires numpy & matplotlib]
##
## Acknowledgements: M. Panagi
##
## ARGUMENT:
## - directory with the model output
##
## USAGE:
## python ./tools/plot/plot-atchem2-numpy.py ./model/output/
## ---------------------------------------------- ##
from __future__ import print_function
import os, sys
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
os.chdir(sys.argv[1])
print(os.getcwd())
with open('speciesConcentrations.output') as f:
var1 = f.readline().split()
with open('environmentVariables.output') as f:
var2 = f.readline().split()
with open('photolysisRates.output') as f:
var3 = f.readline().split()
with open('photolysisRatesParameters.output') as f:
var4 = f.readline().split()
df1 = np.loadtxt('speciesConcentrations.output', skiprows=1, unpack=True)
df2 = np.loadtxt('environmentVariables.output', skiprows=1, unpack=True)
df3 = np.loadtxt('photolysisRates.output', skiprows=1, unpack=True)
df4 = np.loadtxt('photolysisRatesParameters.output', skiprows=1, unpack=True)
nc1 = df1.shape[0]
nc2 = df2.shape[0]
nc3 = df3.shape[0]
nc4 = df4.shape[0]
## ---------------------------- ##
with PdfPages('atchem2_output.pdf') as pdf:
## speciesConcentrations.output
fig = plt.figure(figsize=(11,7))
j = 1
for i in range(1,nc1):
ax = fig.add_subplot(3,2,j)
ax.plot(df1[0], df1[i], linestyle='-', color='black')
ax.set(title=var1[i], xlabel='seconds', ylabel='')
plt.tight_layout()
plt.ticklabel_format(style='sci', axis='y', useMathText=True)
if j == 6:
pdf.savefig(fig)
fig = plt.figure(figsize=(11,7))
j = 1
else:
j = j + 1
pdf.savefig(fig)
## environmentVariables.output
fig = plt.figure(figsize=(11,7))
j = 1
for i in range(1,nc2):
ax = fig.add_subplot(3,2,j)
ax.plot(df2[0], df2[i], linestyle='-', color='black')
ax.set(title=var2[i], xlabel='seconds', ylabel='')
plt.tight_layout()
plt.ticklabel_format(style='sci', axis='y', useMathText=True)
if j == 6:
pdf.savefig(fig)
fig = plt.figure(figsize=(11,7))
j = 1
else:
j = j + 1
pdf.savefig(fig)
## photolysisRates.output
fig = plt.figure(figsize=(11,7))
j = 1
for i in range(1,nc3):
ax = fig.add_subplot(3,2,j)
ax.plot(df3[0], df3[i], linestyle='-', color='black')
ax.set(title=var3[i], xlabel='seconds', ylabel='')
plt.tight_layout()
plt.ticklabel_format(style='sci', axis='y', useMathText=True)
if j == 6:
pdf.savefig(fig)
fig = plt.figure(figsize=(11,7))
j = 1
else:
j = j + 1
pdf.savefig(fig)
## photolysisRatesParameters.output
fig = plt.figure(figsize=(11,7))
j = 1
for i in range(1,nc4):
ax = fig.add_subplot(3,2,j)
ax.plot(df4[0], df4[i], linestyle='-', color='black')
ax.set(title=var4[i], xlabel='seconds', ylabel='')
plt.tight_layout()
plt.ticklabel_format(style='sci', axis='y', useMathText=True)
if j == 6:
pdf.savefig(fig)
fig = plt.figure(figsize=(11,7))
j = 1
else:
j = j + 1
pdf.savefig(fig)
## ---------------------------- ##
print("\n===> atchem2_output.pdf created in directory:", sys.argv[1], "\n\n")
|
py | 1a33723d1c4083f5a915e9670821a05b92fab4de | from __future__ import absolute_import, division, print_function
import unittest
import numpy as np
import datashape
import blaze
from blaze.datadescriptor import ddesc_as_py
from blaze.tests.common import MayBePersistentTest
from blaze import (append,
DyND_DDesc, BLZ_DDesc, HDF5_DDesc)
from blaze.py2help import skip, skipIf
import blz
from blaze.optional_packages import tables_is_here
if tables_is_here:
import tables as tb
class TestEphemeral(unittest.TestCase):
def test_create_scalar(self):
a = blaze.array(True)
self.assertTrue(isinstance(a, blaze.Array))
self.assertEqual(a.dshape, datashape.dshape('bool'))
self.assertEqual(bool(a), True)
a = blaze.array(-123456)
self.assertTrue(isinstance(a, blaze.Array))
self.assertEqual(a.dshape, datashape.dshape('int32'))
self.assertEqual(int(a), -123456)
a = blaze.array(-1.25e-10)
self.assertTrue(isinstance(a, blaze.Array))
self.assertEqual(a.dshape, datashape.dshape('float64'))
self.assertEqual(float(a), -1.25e-10)
a = blaze.array(-1.25e-10+2.5j)
self.assertTrue(isinstance(a, blaze.Array))
self.assertEqual(a.dshape, datashape.dshape('complex[float64]'))
self.assertEqual(complex(a), -1.25e-10+2.5j)
def test_create_from_numpy(self):
a = blaze.array(np.arange(3))
self.assertTrue(isinstance(a, blaze.Array))
self.assertEqual(ddesc_as_py(a.ddesc), [0, 1, 2])
def test_create(self):
# A default array (backed by DyND)
a = blaze.array([1,2,3])
self.assertTrue(isinstance(a, blaze.Array))
self.assertTrue(str(a.dshape) == "3 * int32")
self.assertEqual(ddesc_as_py(a.ddesc), [1, 2, 3])
def test_create_dshape(self):
# A default array (backed by DyND)
a = blaze.array([1,2,3], 'float64')
self.assertTrue(isinstance(a, blaze.Array))
self.assertTrue(str(a.dshape) == "3 * float64")
self.assertEqual(ddesc_as_py(a.ddesc), [1, 2, 3])
def test_create_append(self):
# A default array (backed by DyND, append not supported yet)
a = blaze.array([])
self.assertTrue(isinstance(a, blaze.Array))
self.assertRaises(ValueError, append, a, [1,2,3])
def test_create_compress(self):
# A compressed array (backed by BLZ)
ddesc = BLZ_DDesc(mode='w', bparams=blz.bparams(clevel=5))
a = blaze.array(np.arange(1,4), ddesc=ddesc)
self.assertTrue(isinstance(a, blaze.Array))
self.assertEqual(ddesc_as_py(a.ddesc), [1, 2, 3])
def test_create_iter(self):
# A simple 1D array
a = blaze.array(i for i in range(10))
self.assertTrue(isinstance(a, blaze.Array))
self.assertEqual(a.dshape, datashape.dshape('10 * int32'))
self.assertEqual(ddesc_as_py(a.ddesc), list(range(10)))
# A nested iter
a = blaze.array((i for i in range(x)) for x in range(5))
self.assertTrue(isinstance(a, blaze.Array))
self.assertEqual(a.dshape, datashape.dshape('5 * var * int32'))
self.assertEqual(ddesc_as_py(a.ddesc),
[[i for i in range(x)] for x in range(5)])
# A list of iter
a = blaze.array([range(3), (1.5*x for x in range(4)), iter([-1, 1])])
self.assertTrue(isinstance(a, blaze.Array))
self.assertEqual(a.dshape, datashape.dshape('3 * var * float64'))
self.assertEqual(ddesc_as_py(a.ddesc),
[list(range(3)),
[1.5*x for x in range(4)],
[-1, 1]])
def test_create_compress_iter(self):
# A compressed array (backed by BLZ)
ddesc = BLZ_DDesc(mode='w', bparams=blz.bparams(clevel=5))
a = blaze.array((i for i in range(10)), ddesc=ddesc)
self.assertTrue(isinstance(a, blaze.Array))
self.assertEqual(ddesc_as_py(a.ddesc), list(range(10)))
def test_create_zeros(self):
# A default array
a = blaze.zeros('10 * int64')
self.assertTrue(isinstance(a, blaze.Array))
self.assertEqual(ddesc_as_py(a.ddesc), [0]*10)
def test_create_compress_zeros(self):
# A compressed array (backed by BLZ)
ddesc = BLZ_DDesc(mode='w', bparams=blz.bparams(clevel=5))
a = blaze.zeros('10 * int64', ddesc=ddesc)
self.assertTrue(isinstance(a, blaze.Array))
self.assertEqual(ddesc_as_py(a.ddesc), [0]*10)
def test_create_ones(self):
# A default array
a = blaze.ones('10 * int64')
self.assertTrue(isinstance(a, blaze.Array))
self.assertEqual(ddesc_as_py(a.ddesc), [1]*10)
def test_create_compress_ones(self):
# A compressed array (backed by BLZ)
ddesc = BLZ_DDesc(mode='w', bparams=blz.bparams(clevel=5))
a = blaze.ones('10 * int64', ddesc=ddesc)
self.assertTrue(isinstance(a, blaze.Array))
self.assertEqual(ddesc_as_py(a.ddesc), [1]*10)
def test_create_record(self):
# A simple record array
a = blaze.array([(10, 3.5), (15, 2.25)],
dshape="var * {val: int32, flt: float32}")
self.assertEqual(ddesc_as_py(a.ddesc), [{'val': 10, 'flt': 3.5},
{'val': 15, 'flt': 2.25}])
# Test field access via attributes
aval = a.val
self.assertEqual(ddesc_as_py(aval.ddesc), [10, 15])
aflt = a.flt
self.assertEqual(ddesc_as_py(aflt.ddesc), [3.5, 2.25])
class TestBLZPersistent(MayBePersistentTest, unittest.TestCase):
disk = True
dir_ = True
def test_create(self):
ddesc = BLZ_DDesc(path=self.rootdir, mode='w')
a = blaze.array([2], 'float64', ddesc=ddesc)
self.assertTrue(isinstance(a, blaze.Array))
self.assertTrue(a.dshape.shape == (1,))
self.assertEqual(ddesc_as_py(a.ddesc), [2])
def test_append(self):
ddesc = BLZ_DDesc(path=self.rootdir, mode='w')
a = blaze.zeros('0 * float64', ddesc=ddesc)
self.assertTrue(isinstance(a, blaze.Array))
append(a, list(range(10)))
self.assertEqual(ddesc_as_py(a.ddesc), list(range(10)))
# Using a 1-dim as the internal dimension
def test_append2(self):
ddesc = BLZ_DDesc(path=self.rootdir, mode='w')
a = blaze.empty('0 * 2 * float64', ddesc=ddesc)
self.assertTrue(isinstance(a, blaze.Array))
lvals = [[i,i*2] for i in range(10)]
append(a, lvals)
self.assertEqual(ddesc_as_py(a.ddesc), lvals)
class TestHDF5Persistent(MayBePersistentTest, unittest.TestCase):
disk = True
@skipIf(not tables_is_here, 'pytables is not installed')
def test_create(self):
ddesc = HDF5_DDesc(path=self.file, datapath='/earray', mode='w')
a = blaze.array([2], 'float64', ddesc=ddesc)
self.assertTrue(isinstance(a, blaze.Array))
self.assertTrue(a.dshape.shape == (1,))
self.assertEqual(ddesc_as_py(a.ddesc), [2])
@skipIf(not tables_is_here, 'pytables is not installed')
def test_append(self):
ddesc = HDF5_DDesc(path=self.file, datapath='/earray', mode='a')
a = blaze.zeros('0 * float64', ddesc=ddesc)
self.assertTrue(isinstance(a, blaze.Array))
append(a, list(range(10)))
self.assertEqual(ddesc_as_py(a.ddesc), list(range(10)))
# Using a 1-dim as the internal dimension
@skipIf(not tables_is_here, 'pytables is not installed')
def test_append2(self):
ddesc = HDF5_DDesc(path=self.file, datapath='/earray', mode='a')
a = blaze.empty('0 * 2 * float64', ddesc=ddesc)
self.assertTrue(isinstance(a, blaze.Array))
lvals = [[i,i*2] for i in range(10)]
append(a, lvals)
self.assertEqual(ddesc_as_py(a.ddesc), lvals)
if __name__ == '__main__':
unittest.main(verbosity=2)
|
py | 1a33747b73a0202390c1ce4e17a7a142fb4d6161 | from bokeh.util.browser import view
from bokeh.document import Document
from bokeh.embed import file_html
from bokeh.resources import INLINE
from bokeh.plotting import output_file
from bokeh.models import Plot
from bokeh.models import Range1d
from bokeh.models import WheelZoomTool, PanTool, BoxZoomTool
from bokeh.models import WMTSTileSource
output_file("tile_source_example.html", title="Tile Source Example")
# set to roughly full extent of web mercator projection
x_range = Range1d(start=-200000, end=2000000)
y_range = Range1d(start=800000, end=7000000)
# create tile source from templated url
tile_options = {}
tile_options['url'] = 'http://c.tile.openstreetmap.org/{z}/{x}/{y}.png'
tile_source = WMTSTileSource(**tile_options)
# instantiate plot and add tile source
p = Plot(x_range=x_range, y_range=y_range, plot_height=800, plot_width=800)
p.add_tools(WheelZoomTool(), PanTool(), BoxZoomTool(match_aspect=True))
tile_renderer_options = {}
p.add_tile(tile_source, **tile_renderer_options)
doc = Document()
doc.add_root(p)
if __name__ == "__main__":
doc.validate()
filename = "tile_source.html"
with open(filename, "w") as f:
f.write(file_html(doc, INLINE, "Tile Source Example"))
print("Wrote %s" % filename)
view(filename)
|
py | 1a337483f669514966c955bd4492b59872066ae7 | def func():
value = "not-none"
<caret>if value is None:
return
# Is not none
# If it's not none
print(value) |
py | 1a33756038d5566ec9999aff0886243e333c4931 | import os
import sys
import h5py
import numpy as np
import pandas as pd
import tensorflow as tf
from fm_preprocessing import DeepFmData, Dataset
from nn_loss_metrics import get_config
from utils import top_ratio_hit_rate, train_sampling, calc_threshold_vs_depth
from deepFM import DeepFM
from xDeepFM import xDeepFM
from AFM import AFM
from utils import colorize
from sklearn.metrics import roc_auc_score
import warnings
warnings.filterwarnings(action='ignore')
def train_test_split(Xv,Xi,y, test_ratio=0.1, seed=None):
index = list(range(len(Xv)))
Xv = np.asarray(Xv)
Xi = np.asarray(Xi)
y = np.asarray(y)
np.random.seed(seed)
np.random.shuffle(index)
test_size = int(len(Xv)*test_ratio)
test_index = index[-test_size:]
train_index = index[:-test_size]
train_Xv = Xv[train_index]
test_Xv = Xv[test_index]
train_Xi = Xi[train_index]
test_Xi = Xi[test_index]
train_y = y[train_index]
test_y = y[test_index]
return train_Xv.tolist(), test_Xv.tolist(), train_Xi.tolist(),test_Xi.tolist(), train_y, test_y
def data_preprocess(train_data, test_data=None, label='is_y2', deepEnc = None, batch_size=128,
skew_threshold=5, val_ratio=0.2, double_process='z-score', save_h5_file=None,
seed=None):
train_y = train_data[label].values.reshape(-1,1)
train_data.drop(columns=[label],inplace=True)
# ---------------train data
if deepEnc is None:
enc = DeepFmData(skew_threshold=skew_threshold,double_process=double_process)
enc.fit(train_data,y=None)
else:
enc = deepEnc
train_feat_val, train_feat_index = enc.transform(train_data, y=None, normalize_double=True) #list of list
#-----------------val data
if val_ratio is not None:
(train_feat_val, val_feat_val,
train_feat_index, val_feat_index,
train_y,val_y ) = train_test_split(train_feat_val, train_feat_index, train_y,test_ratio=val_ratio, seed=seed)
else:
(val_feat_val, val_feat_index,val_y) =[None]*3
train_data = Dataset(train_feat_val, train_feat_index, train_y, batch_size, shuffle=True)
#---------------test_data-----------------
if test_data is not None:
test_y = test_data[label].values.reshape(-1,1)
test_data.drop(columns=[label],inplace=True)
test_feat_val, test_feat_index = enc.transform(test_data, y=None, normalize_double=True)
test_data = Dataset(test_feat_val, test_feat_index,test_y, batch_size)
else:
(test_feat_val, test_feat_index,test_y) =[None]*3
if save_h5_file is not None:
with h5py.File(save_h5_file,'w') as fw:
train = fw.create_group('train')
train.create_dataset('train_feat_val', data = np.array(train_feat_val))
train.create_dataset('train_feat_index',data = np.array(train_feat_index))
train.create_dataset('train_y', data= np.array(train_y))
val = fw.create_group('val')
val.create_dataset('val_feat_val', data =np.array(val_feat_val))
val.create_dataset('val_feat_index',data= np.array(val_feat_index))
val.create_dataset('val_y', data=np.array(val_y))
test = fw.create_group('test')
test.create_dataset('test_feat_val', data =np.array(test_feat_val))
test.create_dataset('test_feat_index',data= np.array(test_feat_index))
test.create_dataset('test_y', data=np.array(test_y))
return enc, train_data, test_data, train_feat_val, train_feat_index, train_y, val_feat_val, val_feat_index, val_y
def load_h5_data(h5file, batch_size=128, shuffle=True):
assert os.path.exists(h5file)
with h5py.File(h5file, 'r') as fr:
print('train-null', np.isnan(fr['train']['train_feat_val'].value).sum())
train_feat_val = fr['train']['train_feat_val'].value.tolist()
train_feat_index = fr['train']['train_feat_index'].value.tolist()
train_y = fr['train']['train_y'].value
train_data = Dataset(train_feat_val, train_feat_index, train_y, batch_size, shuffle=True)
val_feat_val = fr['val']['val_feat_val'].value.tolist()
val_feat_index = fr['val']['val_feat_index'].value.tolist()
val_y = fr['val']['val_y'].value
test_feat_val = fr['test']['test_feat_val'].value.tolist()
test_feat_index = fr['test']['test_feat_index'].value.tolist()
test_y = fr['test']['test_y'].value
test_data = Dataset(test_feat_val, test_feat_index,test_y, batch_size)
return train_data, test_data, train_feat_val, train_feat_index, train_y, val_feat_val, val_feat_index, val_y
if __name__ == '__main__':
import yaml,json
# pd.set_option('max_colwidth',10)
# os.environ["CUDA_VISIBLE_DEVICES"] ='0'
sess_config = get_config(frac=0.4, allow_growth=True, gpu="1")
pd.set_option('display.max_columns', 20)
BASE_PATH = os.path.dirname(os.path.abspath(__file__))
with open('./conf.yaml','r') as fp:
config = yaml.load(fp)
model_type = config['model']
config = config.get(model_type)
print(json.dumps(config,indent=2))
# config = config['deepFM'] if model_type=='deepFM' else config['xDeepFM']
data_config, params = config['data'],config['params']
print(' {} '.format(model_type).center(50,'='))
train_file = data_config['train'] #"/home/yuanyuqing163/hb_rl/data/raw/train_bj_dl_200.pkl"
test_file = data_config['test'] #"/home/yuanyuqing163/hb_rl/data/raw/val_bj_dl_200.pkl"
train_data = pd.read_pickle(train_file)
test_data = pd.read_pickle(test_file)
train_data = train_sampling(train_data, col='is_y2', method='down', pn_ratio=0.2, seed=2020)
# train_data = train_sampling(train_data, col='is_y2', method='up', pn_ratio=0.5, seed=2019)
# train_data = train_sampling(train_data, col='is_y2', method='down', pn_ratio=0.5, seed=2019)
# train_data = train_sampling(train_data, col='is_y2', method='down', pn_ratio=0.05, seed=2019)
if data_config.pop('load_cache'):
enc = DeepFmData()
enc.load(data_config['enc_file']) #'./model/DeepFmData_bjdl200.pkl'
(train_data, test_data,
train_feat_val, train_feat_index, train_y,
val_feat_val, val_feat_index, val_y) = load_h5_data(data_config['cache_file'], batch_size= params['batch_size'], shuffle=True) #'./data/bj_dl_200.h5'
else:
(enc, train_data, test_data,
train_feat_val, train_feat_index, train_y,
val_feat_val, val_feat_index, val_y) = data_preprocess(train_data, test_data,
deepEnc = None, batch_size= params['batch_size'], skew_threshold=5, val_ratio=0.2,
double_process='min-max', save_h5_file=data_config['cache_file'],label='is_y2')
enc.save(data_config['enc_file'])
print(enc._field_dim, enc._feature_dim)
params.update({'feature_size':enc._feature_dim})
params.update({'field_size':enc._field_dim})
if model_type.lower()=='deepfm':
model = DeepFM(params)
elif model_type.lower() =='xdeepfm':
model = xDeepFM(params)
elif model_type.lower() =='afm':
model = AFM(params)
else:
raise ValueError('{} not supported yet'.format(model_type))
with tf.Session(config=sess_config) as sess:
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer()) # global_step counter etc.
sys.stdout.flush()
best_hit_rate = 0
best_epoch = 0
best_loss = np.finfo('float32').max
stop_cnt = 0
if params['training_model']:
#---------------training---------------------------------
for epoch in range(params['epoch']):
print('epoch ={}'.format(epoch).center(50,'-'))
for batch, (xi, xv, y) in enumerate(train_data):
# print(xv)
step, prob = model.train(sess, xi, xv, y)
# print(prob.min(),prob.max())
if batch %1000 ==0:
train_loss, train_entropy, train_reg = model.evaluate(sess, train_feat_index, train_feat_val, train_y, batch_size=128)
print('train_loss=%.4f,\ttrain_ce=%.4f,\treg=%.4f'% (train_loss, train_entropy, train_reg))
val_loss,val_entropy, val_reg = model.evaluate(sess, val_feat_index, val_feat_val, val_y, batch_size=128)
print('val_loss=%.4f,\tval_ce=%.4f,\treg=%.4f' %(val_loss, val_entropy, val_reg))
# if epoch%10 ==0 or epoch == params['epoch']-1:
model.save(sess, model.checkpoint_dir, epoch)
prob = model.predict(sess, train_feat_index, train_feat_val, batch_size=128)
hit_rate, top_k = top_ratio_hit_rate(np.array(train_y).ravel(), prob, top_ratio=0.001) # ravel return view, flatten return copy
train_auc = roc_auc_score(np.array(train_y).ravel(), prob)
print(colorize('\nk={}, train_1/1000 ={:.4}'.format(top_k ,hit_rate),'cyan',True))
#-----------------test-----------------------------------
probs =[]
ys=[]
for xi, xv, y in test_data:
prob = model.predict(sess, xi, xv) # list of np.ndarry->array
probs.extend(prob.tolist())
ys.extend(y.tolist())
hit_rate, top_k = top_ratio_hit_rate(np.array(ys).ravel(), np.array(probs), top_ratio=0.001)
val_auc = roc_auc_score(np.array(ys).ravel(), np.array(probs))
print(colorize('k={}, test_1/1000 ={:.4}'.format(top_k ,hit_rate),'cyan',True))
print(colorize('train_auc={:.4}, val_auc={:.4}'.format(train_auc,val_auc),'cyan', True))
if hit_rate > best_hit_rate:
best_hit_rate, best_epoch = hit_rate, epoch
print(colorize('cur_best_rate ={:.4}'.format(best_hit_rate),'cyan',True))
if hit_rate>0.8:
calc_threshold_vs_depth(np.asarray(ys).ravel(), np.asarray(probs))
# early stopping
if (val_entropy+5e-5)<best_loss:
best_loss = val_entropy
stop_cnt = 0
else:
stop_cnt += 1
if stop_cnt > 20:
break
print(colorize('epoch={}, best_hit_rate={}'.format(best_epoch, best_hit_rate),'cyan',True))
else:
model.restore(sess, os.path.split(model.checkpoint_dir)[0])
probs=[]
ys =[]
for xi, xv, y in train_data:
prob = model.predict(sess, xi, xv) # np.ndarry
probs.extend(prob[0].ravel().tolist())
ys.extend(y.tolist())
hit_rate, top_k = top_ratio_hit_rate(np.array(ys).ravel(), np.array(probs).ravel(), top_ratio=0.001)
print('train-top-k={}, train-hit-rate={}'.format(top_k ,hit_rate))
probs=[]
ys=[]
for xi, xv, y in test_data:
prob = model.predict(sess, xi, xv) # np.ndarry
# print(type(prob), prob[0])
probs.extend(prob[0].ravel().tolist())
ys.extend(y.tolist())
hit_rate, top_k = top_ratio_hit_rate(np.array(ys).ravel(), np.array(probs).ravel(), top_ratio=0.001)
print('test-top-k={}, test-hit-rate={}'.format(top_k ,hit_rate)) |
py | 1a3375a12bd28412e0a76986dc77139f3034554c | # Copyright (c) 2011-2020 Eric Froemling
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# -----------------------------------------------------------------------------
"""Functionality related to player-controlled Spazzes."""
from __future__ import annotations
from typing import TYPE_CHECKING, TypeVar, overload
import ba
from bastd.actor.spaz import Spaz
if TYPE_CHECKING:
from typing import Any, Sequence, Tuple, Optional, Type
from typing_extensions import Literal
PlayerType = TypeVar('PlayerType', bound=ba.Player)
TeamType = TypeVar('TeamType', bound=ba.Team)
class PlayerSpazHurtMessage:
"""A message saying a ba.PlayerSpaz was hurt.
category: Message Classes
Attributes:
spaz
The ba.PlayerSpaz that was hurt
"""
def __init__(self, spaz: PlayerSpaz):
"""Instantiate with the given ba.Spaz value."""
self.spaz = spaz
class PlayerSpaz(Spaz):
"""A ba.Spaz subclass meant to be controlled by a ba.Player.
category: Gameplay Classes
When a PlayerSpaz dies, it delivers a ba.PlayerDiedMessage
to the current ba.Activity. (unless the death was the result of the
player leaving the game, in which case no message is sent)
When a PlayerSpaz is hurt, it delivers a ba.PlayerSpazHurtMessage
to the current ba.Activity.
"""
def __init__(self,
player: ba.Player,
color: Sequence[float] = (1.0, 1.0, 1.0),
highlight: Sequence[float] = (0.5, 0.5, 0.5),
character: str = 'Spaz',
powerups_expire: bool = True):
"""Create a spaz for the provided ba.Player.
Note: this does not wire up any controls;
you must call connect_controls_to_player() to do so.
"""
super().__init__(color=color,
highlight=highlight,
character=character,
source_player=player,
start_invincible=True,
powerups_expire=powerups_expire)
self.last_player_attacked_by: Optional[ba.Player] = None
self.last_attacked_time = 0.0
self.last_attacked_type: Optional[Tuple[str, str]] = None
self.held_count = 0
self.last_player_held_by: Optional[ba.Player] = None
self._player = player
self._drive_player_position()
@overload
def getplayer(self,
playertype: Type[PlayerType],
doraise: Literal[False] = False) -> Optional[PlayerType]:
...
@overload
def getplayer(self, playertype: Type[PlayerType],
doraise: Literal[True]) -> PlayerType:
...
def getplayer(self,
playertype: Type[PlayerType],
doraise: bool = False) -> Optional[PlayerType]:
"""Get the ba.Player associated with this Spaz.
By default this will return None if the Player no longer exists.
        If you are logically certain that the Player still exists, pass
        doraise=True to get a non-optional return type.
"""
player: Any = self._player
assert isinstance(player, playertype)
if not player.exists() and doraise:
raise ba.PlayerNotFoundError()
return player if player.exists() else None
def connect_controls_to_player(self,
enable_jump: bool = True,
enable_punch: bool = True,
enable_pickup: bool = True,
enable_bomb: bool = True,
enable_run: bool = True,
enable_fly: bool = True) -> None:
"""Wire this spaz up to the provided ba.Player.
Full control of the character is given by default
but can be selectively limited by passing False
to specific arguments.
"""
player = self.getplayer(ba.Player)
assert player
# Reset any currently connected player and/or the player we're
# wiring up.
if self._connected_to_player:
if player != self._connected_to_player:
player.reset_input()
self.disconnect_controls_from_player()
else:
player.reset_input()
player.assign_input_call('upDown', self.on_move_up_down)
player.assign_input_call('leftRight', self.on_move_left_right)
player.assign_input_call('holdPositionPress',
self._on_hold_position_press)
player.assign_input_call('holdPositionRelease',
self._on_hold_position_release)
if enable_jump:
player.assign_input_call('jumpPress', self.on_jump_press)
player.assign_input_call('jumpRelease', self.on_jump_release)
if enable_pickup:
player.assign_input_call('pickUpPress', self.on_pickup_press)
player.assign_input_call('pickUpRelease', self.on_pickup_release)
if enable_punch:
player.assign_input_call('punchPress', self.on_punch_press)
player.assign_input_call('punchRelease', self.on_punch_release)
if enable_bomb:
player.assign_input_call('bombPress', self.on_bomb_press)
player.assign_input_call('bombRelease', self.on_bomb_release)
if enable_run:
player.assign_input_call('run', self.on_run)
if enable_fly:
player.assign_input_call('flyPress', self.on_fly_press)
player.assign_input_call('flyRelease', self.on_fly_release)
self._connected_to_player = player
def disconnect_controls_from_player(self) -> None:
"""
Completely sever any previously connected
ba.Player from control of this spaz.
"""
if self._connected_to_player:
self._connected_to_player.reset_input()
self._connected_to_player = None
# Send releases for anything in case its held.
self.on_move_up_down(0)
self.on_move_left_right(0)
self._on_hold_position_release()
self.on_jump_release()
self.on_pickup_release()
self.on_punch_release()
self.on_bomb_release()
self.on_run(0.0)
self.on_fly_release()
else:
print('WARNING: disconnect_controls_from_player() called for'
' non-connected player')
def handlemessage(self, msg: Any) -> Any:
# FIXME: Tidy this up.
# pylint: disable=too-many-branches
# pylint: disable=too-many-statements
# pylint: disable=too-many-nested-blocks
if __debug__:
self._handlemessage_sanity_check()
# Keep track of if we're being held and by who most recently.
if isinstance(msg, ba.PickedUpMessage):
super().handlemessage(msg) # Augment standard behavior.
self.held_count += 1
picked_up_by = ba.playercast_o(type(self._player),
msg.node.source_player)
if picked_up_by:
self.last_player_held_by = picked_up_by
elif isinstance(msg, ba.DroppedMessage):
super().handlemessage(msg) # Augment standard behavior.
self.held_count -= 1
if self.held_count < 0:
print('ERROR: spaz held_count < 0')
# Let's count someone dropping us as an attack.
try:
picked_up_by_2 = ba.playercast_o(type(self._player),
msg.node.source_player)
except Exception:
picked_up_by_2 = None
if picked_up_by_2:
self.last_player_attacked_by = picked_up_by_2
self.last_attacked_time = ba.time()
self.last_attacked_type = ('picked_up', 'default')
elif isinstance(msg, ba.StandMessage):
super().handlemessage(msg) # Augment standard behavior.
# Our Spaz was just moved somewhere. Explicitly update
# our associated player's position in case it is being used
# for logic (otherwise it will be out of date until next step)
self._drive_player_position()
elif isinstance(msg, ba.DieMessage):
# Report player deaths to the game.
if not self._dead:
# Immediate-mode or left-game deaths don't count as 'kills'.
killed = (not msg.immediate
and msg.how is not ba.DeathType.LEFT_GAME)
activity = self._activity()
player = self.getplayer(ba.Player, doraise=False)
if not killed:
killerplayer = None
else:
# If this player was being held at the time of death,
# the holder is the killer.
if self.held_count > 0 and self.last_player_held_by:
killerplayer = self.last_player_held_by
else:
# Otherwise, if they were attacked by someone in the
# last few seconds, that person is the killer.
# Otherwise it was a suicide.
# FIXME: Currently disabling suicides in Co-Op since
# all bot kills would register as suicides; need to
# change this from last_player_attacked_by to
# something like last_actor_attacked_by to fix that.
if (self.last_player_attacked_by
and ba.time() - self.last_attacked_time < 4.0):
killerplayer = self.last_player_attacked_by
else:
# ok, call it a suicide unless we're in co-op
if (activity is not None and not isinstance(
activity.session, ba.CoopSession)):
killerplayer = player
else:
killerplayer = None
# We should never wind up with a dead-reference here;
# we want to use None in that case.
assert killerplayer is None or killerplayer
# Only report if both the player and the activity still exist.
if killed and activity is not None and player:
activity.handlemessage(
ba.PlayerDiedMessage(player, killed, killerplayer,
msg.how))
super().handlemessage(msg) # Augment standard behavior.
# Keep track of the player who last hit us for point rewarding.
elif isinstance(msg, ba.HitMessage):
source_player = msg.get_source_player(type(self._player))
if source_player:
self.last_player_attacked_by = source_player
self.last_attacked_time = ba.time()
self.last_attacked_type = (msg.hit_type, msg.hit_subtype)
super().handlemessage(msg) # Augment standard behavior.
activity = self._activity()
if activity is not None and self._player.exists():
activity.handlemessage(PlayerSpazHurtMessage(self))
else:
super().handlemessage(msg)
def _drive_player_position(self) -> None:
"""Drive our ba.Player's official position
If our position is changed explicitly, this should be called again
to instantly update the player position (otherwise it would be out
of date until the next sim step)
"""
player = self._player
if player:
assert self.node
assert player.node
self.node.connectattr('torso_position', player.node, 'position')
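# Illustrative spawn sketch (an assumption about typical calling code inside a
# ba.Activity, not taken from this module):
#
#   spaz = PlayerSpaz(player, color=player.color,
#                     highlight=player.highlight, character=player.character)
#   player.actor = spaz
#   spaz.handlemessage(ba.StandMessage(position, angle))
#   spaz.connect_controls_to_player()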
|
py | 1a33774d1a401fc9b6a4098e40e1a1540961e373 | from collections import deque
mydq =deque([10,11,12,13])
print(mydq)
mydq.appendleft(9)
print(mydq)
mydq.append(14)
print(mydq)
print(mydq.popleft())
print(mydq)
print(mydq.pop())
print(mydq)
|
py | 1a33781bbe43d6040a8fba67996f30097d05ee56 | from Client import SysAdminClient
from Client import UnpackFromProto
from ConfigTemplateRenderer import ConfigTemplateRenderer
from LazySysAdmin import LazySysAdmin
from Migrations import SysAdminMigrator
from SysAdminUser import FetchAllValues
__all__ = [
'ConfigTemplateRenderer',
'SysAdminClient',
'FetchAllValues',
'UnpackFromProto',
'SysAdminMigrator',
'LazySysAdmin',
]
|
py | 1a337a15937c9eccde248a0cb937b76b75ac1dc9 | """
Helper functions for command line utilities.
"""
import argparse
import json
import logging
import logging.handlers
import os
import sys
import threading
import time
import warnings
from smqtk.utils.base_object import SmqtkObject
from smqtk.utils.dict import merge_dict
def initialize_logging(logger, stream_level=logging.WARNING,
output_filepath=None, file_level=None):
"""
Standard logging initialization.
:param logger: Logger instance to initialize
:type logger: logging.Logger
:param stream_level: Logging level to set for the stderr stream formatter.
:type stream_level: int
:param output_filepath: Output logging from the given logger to the provided
file path. Currently, we log to that file indefinitely, i.e. no
rollover. Rollover may be added in the future if the need arises.
:type output_filepath: str
:param file_level: Logging level to output to the file. This the same as the
stream level by default.
"""
log_formatter = logging.Formatter(
"%(levelname)7s - %(asctime)s - %(name)s.%(funcName)s - %(message)s"
)
stream_handler = logging.StreamHandler()
stream_handler.setFormatter(log_formatter)
stream_handler.setLevel(stream_level)
logger.addHandler(stream_handler)
if output_filepath:
# TODO: Setup rotating part of the handler?
file_handler = logging.handlers.RotatingFileHandler(
output_filepath, mode='w', delay=True
)
file_handler.setFormatter(log_formatter)
file_handler.setLevel(file_level or stream_level)
logger.addHandler(file_handler)
    # Because there are two levels checked before a logging message is emitted:
    #   * the logging object's level
    #   * the stream handler's level
    # Set the logger itself to the minimum of the two so that no records are
    # filtered out before reaching the handlers.
logger.setLevel(min(stream_level, file_level or stream_level))
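# Illustrative sketch only (not part of the original module): a typical way this
# helper might be invoked from a script; the logger name and log file path here
# are hypothetical.
def _example_initialize_logging():
    log = logging.getLogger('smqtk')
    initialize_logging(log, stream_level=logging.INFO,
                       output_filepath='/tmp/smqtk.log',
                       file_level=logging.DEBUG)
    log.info("Logging initialized.")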
def load_config(config_path, defaults=None):
"""
Load the JSON configuration dictionary from the specified filepath.
If the given path does not point to a valid file, we return an empty
dictionary or the default dictionary if one was provided, returning False
as our second return argument.
:param config_path: Path to the (valid) JSON configuration file.
:type config_path: str
:param defaults: Optional default configuration dictionary to merge loaded
configuration into. If provided, it will be modified in place.
:type defaults: dict | None
:return: The result configuration dictionary and if we successfully loaded
a JSON dictionary from the given filepath.
:rtype: (dict, bool)
"""
if defaults is None:
defaults = {}
loaded = False
if config_path and os.path.isfile(config_path):
with open(config_path) as cf:
merge_dict(defaults, json.load(cf))
loaded = True
return defaults, loaded
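# Illustrative sketch only (not part of the original module): merging a user
# configuration over hard-coded defaults; the path and keys are hypothetical.
def _example_load_config():
    defaults = {"plugin": {"type": "default"}}
    config, loaded = load_config('/tmp/my_config.json', defaults)
    if not loaded:
        print("No configuration file found; continuing with defaults.")
    return config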
def output_config(output_path, config_dict, log=None, overwrite=False,
error_rc=1):
"""
If a valid output configuration path is provided, we output the given
configuration dictionary as JSON or error if the file already exists (when
overwrite is False) or cannot be written. We exit the program as long as
``output_path`` was given a value, with a return code of 0 if the file was
written successfully, or the supplied return code (default of 1) if the
write failed.
Specified error return code cannot be 0, which is reserved for successful
operation.
:raises ValueError: If the given error return code is 0.
:param output_path: Path to write the configuration file to.
:type output_path: str
:param config_dict: Configuration dictionary containing JSON-compliant
values.
:type config_dict: dict
:param overwrite: If we should clobber any existing file at the specified
path. We exit with the error code if this is false and a file exists at
``output_path``.
:type overwrite: bool
:param error_rc: Custom integer error return code to use instead of 1.
:type error_rc: int
    :param log: Optional logging instance. Otherwise we use a local one.
:type log: logging.Logger
"""
error_rc = int(error_rc)
if error_rc == 0:
raise ValueError("Error return code cannot be 0.")
if log is None:
log = logging.getLogger(__name__)
if output_path:
if os.path.exists(output_path) and not overwrite:
log.error("Output configuration file path already exists! (%s)",
output_path)
sys.exit(error_rc)
else:
log.info("Outputting JSON configuration to: %s", output_path)
with open(output_path, 'w') as f:
json.dump(config_dict, f, indent=4, check_circular=True,
separators=(',', ': '), sort_keys=True)
sys.exit(0)
class ProgressReporter (SmqtkObject):
"""
Helper utility for reporting the state of a loop and the rate at which
looping is occurring based on lapsed wall-time and a given reporting
interval.
Includes optional methods that are thread-safe.
TODO: Add parameter for an optionally known total number of increments.
"""
def __init__(self, log_func, interval, what_per_second="Loops"):
"""
Initialize this reporter.
:param log_func: Logging function to use.
:type log_func: (str, *args, **kwds) -> None
:param interval: Time interval to perform reporting in seconds. If no
reporting during incrementation should occur, infinity should be
passed.
:type interval: float
:param str what_per_second:
String label about what is happening or being iterated over per
second. The provided string should make sense when followed by
" per second ...".
"""
self.log_func = log_func
self.interval = float(interval)
self.what_per_second = what_per_second
self.lock = threading.RLock()
# c_last : Increment count at the time of the last report. Updated after
# report in ``increment_report``.
# c : Current Increment count, updated in ``increment_report``.
# c_delta: Delta between the increment current and previous count at the
# time of the last report. Updated at the time of reporting in
# ``increment_report``.
self.c_last = self.c = self.c_delta = 0
# t_last : Time of the last report. Updated after report in
# ``increment_report``.
# t : Current time, Updated in ``increment_report``
# t_delta: Delta between current time and the time of the last report.
# Updated in ``increment_report``.
self.t_last = self.t = self.t_delta = self.t_start = 0.0
self.started = False
def start(self):
""" Start the timing state of this reporter.
Repeated calls to this method resets the state of the reporting for
multiple uses.
This method is thread-safe.
:returns: Self
:rtype: ProgressReporter
"""
with self.lock:
self.started = True
self.c_last = self.c = self.c_delta = 0
self.t_last = self.t = self.t_start = time.time()
self.t_delta = 0.0
return self
def increment_report(self):
"""
Increment counter and time since last report, reporting if delta exceeds
the set reporting interval period.
"""
if not self.started:
raise RuntimeError("Reporter needs to be started first.")
self.c += 1
self.c_delta = self.c - self.c_last
self.t = time.time()
self.t_delta = self.t - self.t_last
        # Only report if it's been ``interval`` seconds since the last
        # report.
if self.t_delta >= self.interval:
self.report()
self.t_last = self.t
self.c_last = self.c
def increment_report_threadsafe(self):
"""
The same as ``increment_report`` but additionally acquires a lock on
resources first for thread-safety.
This version of the method is a little more costly due to the lock
acquisition.
"""
with self.lock:
self.increment_report()
def report(self):
"""
Report the current state.
Does nothing if no increments have occurred yet.
"""
if not self.started:
raise RuntimeError("Reporter needs to be started first.")
# divide-by-zero safeguard
if self.t_delta > 0 and (self.t - self.t_start) > 0:
self.log_func("%s per second %f (avg %f) "
"(%d current interval / %d total)"
% (self.what_per_second,
self.c_delta / self.t_delta,
self.c / (self.t - self.t_start),
self.c_delta,
self.c))
def report_threadsafe(self):
"""
The same as ``report`` but additionally acquires a lock on
resources first for thread-safety.
This version of the method is a little more costly due to the lock
acquisition.
"""
with self.lock:
self.report()
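# Illustrative sketch only (not part of the original module): reporting loop
# progress every two seconds; ``items`` and ``process`` are hypothetical.
def _example_progress_reporter(items, process):
    log = logging.getLogger(__name__)
    reporter = ProgressReporter(log.info, interval=2.0).start()
    for item in items:
        process(item)
        reporter.increment_report()
    # Emit a final summary regardless of the reporting interval.
    reporter.report()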
def report_progress(log, state, interval):
"""
Loop progress reporting function that logs (when in debug) loops per
second, loops in the last reporting period and total loops executed.
The ``state`` given to this function must be a list of 7 integers, initially
    all set to 0. This function will update the fields of the state as it is
    called to control when reporting should happen and what to report.
    A report can be effectively forced for a call by setting ``state[3] = 0``
or ``interval`` to ``0``.
:param log: Logger logging function to use to send reporting message to.
:type log: (str, *args, **kwargs) -> None
    :param state: Reporting state. This should be initialized to a list of 7
        zeros (floats), and then should not be modified externally from this
function.
:type state: list[float]
:param interval: Frequency in seconds that reporting messages should be
made. This should be greater than 0.
:type interval: float
"""
warnings.warn("``report_progress`` is deprecated. Please use the"
"``ProgressReporter`` class instead.",
DeprecationWarning)
# State format (c=count, t=time:
# [last_c, c, delta_c, last_t, t, delta_t, starting_t]
# [ 0, 1, 2, 3, 4, 5, 6 ]
# Starting time
if not state[6]:
state[3] = state[6] = time.time()
state[1] += 1
state[4] = time.time()
state[5] = state[4] - state[3]
if state[5] >= interval:
state[2] = state[1] - state[0]
# TODO: Could possibly to something with ncurses
# - to maintain a single line.
try:
loops_per_second = state[2] / state[5]
avg_loops_per_second = state[1] / (state[4] - state[6])
except ZeroDivisionError:
loops_per_second = 0
avg_loops_per_second = 0
log("Loops per second %f (avg %f) (%d this interval / %d total)"
% (loops_per_second,
avg_loops_per_second,
state[2], state[1]))
state[3] = state[4]
state[0] = state[1]
def basic_cli_parser(description=None, configuration_group=True):
"""
Generate an ``argparse.ArgumentParser`` with the given description and the
basic options for verbosity and configuration/generation paths.
The returned parser instance has an option for extra verbosity
    (-v/--verbose) and a group for configuration specification (-c/--config) and
    configuration generation (-g/--generate-config) if enabled (true by
    default).
:param description: Optional description string for the parser.
:type description: str
:param configuration_group: Whether or not to include the configuration
group options.
:type configuration_group: bool
:return: Argument parser instance with basic options.
:rtype: argparse.ArgumentParser
"""
parser = argparse.ArgumentParser(
description=description,
formatter_class=argparse.RawDescriptionHelpFormatter,
)
parser.add_argument('-v', '--verbose',
default=False, action='store_true',
help='Output additional debug logging.')
if configuration_group:
g_config = parser.add_argument_group('Configuration')
g_config.add_argument('-c', '--config',
metavar="PATH",
help='Path to the JSON configuration file.')
g_config.add_argument('-g', '--generate-config',
metavar="PATH",
help='Optionally generate a default '
'configuration file at the specified path. '
'If a configuration file was provided, we '
'update the default configuration with the '
'contents of the given configuration.')
return parser
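# Illustrative sketch only (not part of the original module): utilities commonly
# extend the returned parser with their own options before parsing; the
# ``--output-dir`` option here is hypothetical.
def _example_extend_parser():
    parser = basic_cli_parser("Example utility description.")
    parser.add_argument('--output-dir', metavar='PATH', default='.',
                        help='Directory to write results into.')
    return parser.parse_args([])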
def utility_main_helper(default_config, args, additional_logging_domains=(),
skip_logging_init=False, default_config_valid=False):
"""
Helper function for utilities standardizing logging initialization, CLI
parsing and configuration loading/generation.
Specific utilities should use this in their main function. This
encapsulates the following standard actions:
- using ``argparse`` parser results to drive logging initialization
(can be skipped if initialized externally)
- handling loaded configuration merger onto the default
- handling configuration generation based on given default and possibly
specified input config.
:param default_config: Function returning default configuration (JSON)
dictionary for the utility. This should take no arguments.
:type default_config: () -> dict
:param args: Parsed arguments from argparse.ArgumentParser instance as
returned from ``parser.parse_args()``.
:type args: argparse.Namespace
:param additional_logging_domains: We initialize logging on the base
        ``smqtk`` and ``__main__`` namespaces. Any additional namespaces under
which logging should be reported should be added here as an iterable.
:type additional_logging_domains: collections.abc.Iterable[str]
:param skip_logging_init: Skip initialize logging in this function because
it is done elsewhere externally.
:type skip_logging_init: bool
:param default_config_valid: Whether the default config returned from the
generator is a valid config to continue execution with or not.
:type default_config_valid: bool
:return: Loaded configuration dictionary.
:rtype: dict
"""
# noinspection PyUnresolvedReferences
config_filepath = args.config
# noinspection PyUnresolvedReferences
config_generate = args.generate_config
# noinspection PyUnresolvedReferences
verbose = args.verbose
if not skip_logging_init:
llevel = logging.INFO
if verbose:
llevel = logging.DEBUG
initialize_logging(logging.getLogger('smqtk'), llevel)
initialize_logging(logging.getLogger('__main__'), llevel)
for d in additional_logging_domains:
initialize_logging(logging.getLogger(d), llevel)
config, config_loaded = load_config(config_filepath, default_config())
output_config(config_generate, config, overwrite=True)
if not (config_loaded or default_config_valid):
raise RuntimeError("No configuration loaded (not trusting default).")
return config
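# Illustrative sketch only (not part of the original module): how the helpers
# above are typically combined in a utility's ``main``; ``default_config`` and
# the config key used are hypothetical.
def _example_main():
    def default_config():
        return {"threads": 1}
    args = basic_cli_parser("Example utility.").parse_args()
    config = utility_main_helper(default_config, args,
                                 default_config_valid=True)
    log = logging.getLogger(__name__)
    log.info("Running with %d thread(s).", config['threads'])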
|
py | 1a337b0abdbbd04ade7bbb3945d7669e4815d518 | from param import args
import sys
# sys.path.insert(0, '/R2R-EnvDrop/build')
if args.upload:
sys.path.insert(0, '/R2R-Aux/build')
else:
sys.path.insert(0, 'build')
# setup_seed(args.seed)
import torch
import os
import time
import json
import numpy as np
from collections import defaultdict
from speaker import Speaker
from utils import read_vocab,write_vocab,build_vocab,Tokenizer,padding_idx,timeSince, read_img_features, get_sync_dir
import utils
from env import R2RBatch
from agent import Seq2SeqAgent
from eval import Evaluation
from polyaxon_client.tracking import get_outputs_refs_paths
if args.train == 'validlistener' and args.upload:
refs_paths = get_outputs_refs_paths()['experiments'][0]
print(refs_paths)
load_model = os.path.join(refs_paths,args.load)
print(load_model)
import warnings
warnings.filterwarnings("ignore")
from tensorboardX import SummaryWriter
from polyaxon_client.tracking import get_outputs_path
if args.upload:
train_vocab = get_sync_dir(os.path.join(args.upload_path,args.TRAIN_VOCAB))
trainval_vocab = get_sync_dir(os.path.join(args.upload_path,args.TRAINVAL_VOCAB))
features = get_sync_dir(os.path.join(args.upload_path,args.IMAGENET_FEATURES))
output_dir = get_outputs_path()
log_dir = os.path.join(output_dir, "snap", args.name)
if not os.path.exists(log_dir):
os.makedirs(log_dir)
# sparse_obj_feat = get_sync_dir(os.path.join(args.upload_path, args.SPARSE_OBJ_FEATURES))
# dense_obj_feat1 = get_sync_dir(os.path.join(args.upload_path, args.DENSE_OBJ_FEATURES1))
# dense_obj_feat2 = get_sync_dir(os.path.join(args.upload_path, args.DENSE_OBJ_FEATURES2))
# bbox = get_sync_dir(os.path.join(args.upload_path, args.BBOX_FEATURES))
else:
train_vocab = os.path.join(args.R2R_Aux_path,args.TRAIN_VOCAB)
trainval_vocab = os.path.join(args.R2R_Aux_path,args.TRAINVAL_VOCAB)
features = os.path.join(args.R2R_Aux_path,args.IMAGENET_FEATURES)
log_dir = 'snap/%s' % args.name
if not os.path.exists(log_dir):
os.makedirs(log_dir)
# sparse_obj_feat = os.path.join(args.R2R_Aux_path, args.SPARSE_OBJ_FEATURES)
# dense_obj_feat1 = os.path.join(args.R2R_Aux_path, args.DENSE_OBJ_FEATURES1)
# dense_obj_feat2 = os.path.join(args.R2R_Aux_path, args.DENSE_OBJ_FEATURES2)
# bbox = os.path.join(args.R2R_Aux_path, args.BBOX_FEATURES)
if args.fast_train:
name, ext = os.path.splitext(features)
features = name + "-fast" + ext
feedback_method = args.feedback # teacher or sample
print(args)
def train_speaker(train_env, tok, n_iters, log_every=500, val_envs={}):
writer = SummaryWriter(logdir=log_dir)
listner = Seq2SeqAgent(train_env, "", tok, args.maxAction)
speaker = Speaker(train_env, listner, tok)
if args.fast_train:
log_every = 40
best_bleu = defaultdict(lambda: 0)
best_loss = defaultdict(lambda: 1232)
for idx in range(0, n_iters, log_every):
interval = min(log_every, n_iters - idx)
# Train for log_every interval
speaker.env = train_env
speaker.train(interval) # Train interval iters
print()
print("Iter: %d" % idx)
# Evaluation
for env_name, (env, evaluator) in val_envs.items():
            if 'train' in env_name: # Ignore the large training set for efficiency
continue
print("............ Evaluating %s ............." % env_name)
speaker.env = env
path2inst, loss, word_accu, sent_accu = speaker.valid()
path_id = next(iter(path2inst.keys()))
print("Inference: ", tok.decode_sentence(path2inst[path_id]))
print("GT: ", evaluator.gt[str(path_id)]['instructions'])
bleu_score, precisions = evaluator.bleu_score(path2inst)
# Tensorboard log
writer.add_scalar("bleu/%s" % (env_name), bleu_score, idx)
writer.add_scalar("loss/%s" % (env_name), loss, idx)
writer.add_scalar("word_accu/%s" % (env_name), word_accu, idx)
writer.add_scalar("sent_accu/%s" % (env_name), sent_accu, idx)
writer.add_scalar("bleu4/%s" % (env_name), precisions[3], idx)
# Save the model according to the bleu score
if bleu_score > best_bleu[env_name]:
best_bleu[env_name] = bleu_score
print('Save the model with %s BEST env bleu %0.4f' % (env_name, bleu_score))
speaker.save(idx, os.path.join(log_dir, 'state_dict', 'best_%s_bleu' % env_name))
if loss < best_loss[env_name]:
best_loss[env_name] = loss
print('Save the model with %s BEST env loss %0.4f' % (env_name, loss))
speaker.save(idx, os.path.join(log_dir, 'state_dict', 'best_%s_loss' % env_name))
# Screen print out
print("Bleu 1: %0.4f Bleu 2: %0.4f, Bleu 3 :%0.4f, Bleu 4: %0.4f" % tuple(precisions))
def train(train_env, tok, n_iters, log_every=100, val_envs={}, aug_env=None):
writer = SummaryWriter(logdir=log_dir)
listner = Seq2SeqAgent(train_env, "", tok, args.maxAction)
speaker = None
if args.self_train:
speaker = Speaker(train_env, listner, tok)
if args.speaker is not None:
if args.upload:
print("Load the speaker from %s." % args.speaker)
speaker.load(get_sync_dir(os.path.join(args.upload_path,args.speaker)))
else:
print("Load the speaker from %s." % args.speaker)
speaker.load(os.path.join(args.R2R_Aux_path, args.speaker))
start_iter = 0
if args.load is not None:
if args.upload:
refs_paths = get_outputs_refs_paths()['experiments'][0]
print(refs_paths)
load_model = os.path.join(refs_paths, args.load)
print(load_model)
print("LOAD THE listener from %s" % load_model)
start_iter = listner.load(load_model)
else:
print("LOAD THE listener from %s" % args.load)
start_iter = listner.load(os.path.join(args.R2R_Aux_path, args.load))
start = time.time()
best_val = {'val_seen': {"accu": 0., "state":"", 'update':False},
'val_unseen': {"accu": 0., "state":"", 'update':False}}
if args.fast_train:
log_every = 40
for idx in range(start_iter, start_iter+n_iters, log_every):
listner.logs = defaultdict(list)
interval = min(log_every, start_iter+n_iters-idx)
iter = idx + interval
# Train for log_every interval
if aug_env is None: # The default training process
listner.env = train_env
listner.train(interval, feedback=feedback_method) # Train interval iters
else:
if args.accumulate_grad:
for _ in range(interval // 2):
listner.zero_grad()
listner.env = train_env
# Train with GT data
args.ml_weight = 0.2
listner.accumulate_gradient(feedback_method)
listner.env = aug_env
# Train with Back Translation
args.ml_weight = 0.6 # Sem-Configuration
listner.accumulate_gradient(feedback_method, speaker=speaker)
listner.optim_step()
else:
for _ in range(interval // 2):
# Train with GT data
listner.env = train_env
args.ml_weight = 0.2
listner.train(1, feedback=feedback_method)
# Train with Back Translation
listner.env = aug_env
args.ml_weight = 0.6
listner.train(1, feedback=feedback_method, speaker=speaker)
# Log the training stats to tensorboard
total = max(sum(listner.logs['total']), 1)
length = max(len(listner.logs['critic_loss']), 1)
critic_loss = sum(listner.logs['critic_loss']) / total #/ length / args.batchSize
entropy = sum(listner.logs['entropy']) / total #/ length / args.batchSize
predict_loss = sum(listner.logs['us_loss']) / max(len(listner.logs['us_loss']), 1)
writer.add_scalar("loss/critic", critic_loss, idx)
writer.add_scalar("policy_entropy", entropy, idx)
writer.add_scalar("loss/unsupervised", predict_loss, idx)
writer.add_scalar("total_actions", total, idx)
writer.add_scalar("max_length", length, idx)
print("total_actions", total)
print("max_length", length)
# Run validation
loss_str = ""
for env_name, (env, evaluator) in val_envs.items():
listner.env = env
# Get validation loss under the same conditions as training
iters = None if args.fast_train or env_name != 'train' else 20 # 20 * 64 = 1280
# Get validation distance from goal under test evaluation conditions
listner.test(use_dropout=False, feedback='argmax', iters=iters)
result = listner.get_results()
score_summary, _ = evaluator.score(result)
loss_str += "%s " % env_name
for metric,val in score_summary.items():
if metric in ['success_rate']:
writer.add_scalar("%s/accuracy" % env_name, val, idx)
if env_name in best_val:
if val > best_val[env_name]['accu']:
best_val[env_name]['accu'] = val
best_val[env_name]['update'] = True
if metric in ['spl']:
writer.add_scalar("%s/spl" % env_name, val, idx)
loss_str += ', %s: %.3f' % (metric, val)
loss_str += '\n'
loss_str += '\n'
for env_name in best_val:
if best_val[env_name]['update']:
best_val[env_name]['state'] = 'Iter %d \n%s' % (iter, loss_str)
best_val[env_name]['update'] = False
file_dir = os.path.join(output_dir, "snap", args.name, "state_dict", "best_%s" % (env_name))
listner.save(idx, file_dir)
print(('%s (%d %d%%) \n%s' % (timeSince(start, float(iter)/n_iters),
iter, float(iter)/n_iters*100, loss_str)))
if iter % 1000 == 0:
print("BEST RESULT TILL NOW")
for env_name in best_val:
print(env_name, best_val[env_name]['state'])
if iter % 40000 == 0:
file_dir = os.path.join(output_dir, "snap", args.name, "state_dict", "Iter_%06d" % (iter))
listner.save(idx, file_dir)
# file_dir = os.path.join(output_dir, "snap", args.name, "state_dict", "LAST_iter%d" % (idx))
# listner.save(idx, file_dir)
def valid(train_env, tok, val_envs={}):
agent = Seq2SeqAgent(train_env, "", tok, args.maxAction)
if args.upload:
print("Loaded the listener model at iter %d from %s" % (agent.load(load_model), load_model))
else:
print("Loaded the listener model at iter %d from %s" % (agent.load(os.path.join(args.R2R_Aux_path, args.load)),
os.path.join(args.R2R_Aux_path, args.load)))
for env_name, (env, evaluator) in val_envs.items():
agent.logs = defaultdict(list)
agent.env = env
iters = None
agent.test(use_dropout=False, feedback='argmax', iters=iters)
result = agent.get_results()
if env_name != '':
score_summary, _ = evaluator.score(result)
loss_str = "Env name: %s" % env_name
for metric,val in score_summary.items():
loss_str += ', %s: %.4f' % (metric, val)
print(loss_str)
if args.submit:
json.dump(
result,
open(os.path.join(log_dir, "submit_%s.json" % env_name), 'w'),
sort_keys=True, indent=4, separators=(',', ': ')
)
def beam_valid(train_env, tok, val_envs={}):
listener = Seq2SeqAgent(train_env, "", tok, args.maxAction)
speaker = Speaker(train_env, listener, tok)
if args.speaker is not None:
print("Load the speaker from %s." % args.speaker)
speaker.load(args.speaker)
print("Loaded the listener model at iter % d" % listener.load(args.load))
final_log = ""
for env_name, (env, evaluator) in val_envs.items():
listener.logs = defaultdict(list)
listener.env = env
listener.beam_search_test(speaker)
results = listener.results
def cal_score(x, alpha, avg_speaker, avg_listener):
speaker_score = sum(x["speaker_scores"]) * alpha
if avg_speaker:
speaker_score /= len(x["speaker_scores"])
# normalizer = sum(math.log(k) for k in x['listener_actions'])
normalizer = 0.
listener_score = (sum(x["listener_scores"]) + normalizer) * (1-alpha)
if avg_listener:
listener_score /= len(x["listener_scores"])
return speaker_score + listener_score
if args.param_search:
# Search for the best speaker / listener ratio
interval = 0.01
logs = []
for avg_speaker in [False, True]:
for avg_listener in [False, True]:
for alpha in np.arange(0, 1 + interval, interval):
result_for_eval = []
for key in results:
result_for_eval.append({
"instr_id": key,
"trajectory": max(results[key]['paths'],
key=lambda x: cal_score(x, alpha, avg_speaker, avg_listener)
)['trajectory']
})
score_summary, _ = evaluator.score(result_for_eval)
for metric,val in score_summary.items():
if metric in ['success_rate']:
print("Avg speaker %s, Avg listener %s, For the speaker weight %0.4f, the result is %0.4f" %
(avg_speaker, avg_listener, alpha, val))
logs.append((avg_speaker, avg_listener, alpha, val))
tmp_result = "Env Name %s\n" % (env_name) + \
"Avg speaker %s, Avg listener %s, For the speaker weight %0.4f, the result is %0.4f\n" % max(logs, key=lambda x: x[3])
print(tmp_result)
# print("Env Name %s" % (env_name))
# print("Avg speaker %s, Avg listener %s, For the speaker weight %0.4f, the result is %0.4f" %
# max(logs, key=lambda x: x[3]))
final_log += tmp_result
print()
else:
avg_speaker = True
avg_listener = True
alpha = args.alpha
result_for_eval = []
for key in results:
result_for_eval.append({
"instr_id": key,
"trajectory": [(vp, 0, 0) for vp in results[key]['dijk_path']] + \
max(results[key]['paths'],
key=lambda x: cal_score(x, alpha, avg_speaker, avg_listener)
)['trajectory']
})
# result_for_eval = utils.add_exploration(result_for_eval)
score_summary, _ = evaluator.score(result_for_eval)
if env_name != 'test':
loss_str = "Env Name: %s" % env_name
for metric, val in score_summary.items():
if metric in ['success_rate']:
print("Avg speaker %s, Avg listener %s, For the speaker weight %0.4f, the result is %0.4f" %
(avg_speaker, avg_listener, alpha, val))
loss_str += ",%s: %0.4f " % (metric, val)
print(loss_str)
print()
if args.submit:
json.dump(
result_for_eval,
open(os.path.join(log_dir, "submit_%s.json" % env_name), 'w'),
sort_keys=True, indent=4, separators=(',', ': ')
)
print(final_log)
def setup():
torch.manual_seed(1)
torch.cuda.manual_seed(1)
# Check for vocabs
if not os.path.exists(train_vocab):
write_vocab(build_vocab(splits=['train']), train_vocab)
if not os.path.exists(trainval_vocab):
write_vocab(build_vocab(splits=['train','val_seen','val_unseen']), trainval_vocab)
def train_val():
''' Train on the training set, and validate on seen and unseen splits. '''
# args.fast_train = True
setup()
# Create a batch training environment that will also preprocess text
vocab = read_vocab(train_vocab)
tok = Tokenizer(vocab=vocab, encoding_length=args.maxInput)
feat_dict = read_img_features(features)
featurized_scans = set([key.split("_")[0] for key in list(feat_dict.keys())])
train_env = R2RBatch(feat_dict, batch_size=args.batchSize, splits=['train'], tokenizer=tok)
from collections import OrderedDict
val_env_names = ['val_unseen', 'val_seen']
if args.submit:
val_env_names.append('test')
else:
pass
#val_env_names.append('train')
if not args.beam:
val_env_names.append("train")
val_envs = OrderedDict(
((split,
(R2RBatch(feat_dict, batch_size=args.batchSize, splits=[split], tokenizer=tok),
Evaluation([split], featurized_scans, tok))
)
for split in val_env_names
)
)
if args.train == 'listener':
train(train_env, tok, args.iters, val_envs=val_envs)
elif args.train == 'validlistener':
if args.beam:
beam_valid(train_env, tok, val_envs=val_envs)
else:
valid(train_env, tok, val_envs=val_envs)
elif args.train == 'speaker':
train_speaker(train_env, tok, args.iters, val_envs=val_envs)
elif args.train == 'validspeaker':
valid_speaker(tok, val_envs)
else:
assert False
def valid_speaker(tok, val_envs):
import tqdm
listner = Seq2SeqAgent(None, "", tok, args.maxAction)
speaker = Speaker(None, listner, tok)
speaker.load(args.load)
for env_name, (env, evaluator) in val_envs.items():
if env_name == 'train':
continue
print("............ Evaluating %s ............." % env_name)
speaker.env = env
path2inst, loss, word_accu, sent_accu = speaker.valid(wrapper=tqdm.tqdm)
path_id = next(iter(path2inst.keys()))
print("Inference: ", tok.decode_sentence(path2inst[path_id]))
print("GT: ", evaluator.gt[path_id]['instructions'])
pathXinst = list(path2inst.items())
name2score = evaluator.lang_eval(pathXinst, no_metrics={'METEOR'})
score_string = " "
for score_name, score in name2score.items():
score_string += "%s_%s: %0.4f " % (env_name, score_name, score)
print("For env %s" % env_name)
print(score_string)
print("Average Length %0.4f" % utils.average_length(path2inst))
def train_val_augment():
"""
Train the listener with the augmented data
"""
setup()
# Create a batch training environment that will also preprocess text
vocab = read_vocab(train_vocab)
tok = Tokenizer(vocab=vocab, encoding_length=args.maxInput)
# Load the env img features
feat_dict = read_img_features(features)
featurized_scans = set([key.split("_")[0] for key in list(feat_dict.keys())])
# Load the augmentation data
if args.upload:
aug_path = get_sync_dir(os.path.join(args.upload_path, args.aug))
else:
        aug_path = os.path.join(args.R2R_Aux_path, args.aug)
# Create the training environment
train_env = R2RBatch(feat_dict, batch_size=args.batchSize,
splits=['train'], tokenizer=tok)
aug_env = R2RBatch(feat_dict, batch_size=args.batchSize,
splits=[aug_path], tokenizer=tok, name='aug')
# Printing out the statistics of the dataset
stats = train_env.get_statistics()
print("The training data_size is : %d" % train_env.size())
print("The average instruction length of the dataset is %0.4f." % (stats['length']))
print("The average action length of the dataset is %0.4f." % (stats['path']))
stats = aug_env.get_statistics()
print("The augmentation data size is %d" % aug_env.size())
print("The average instruction length of the dataset is %0.4f." % (stats['length']))
print("The average action length of the dataset is %0.4f." % (stats['path']))
# Setup the validation data
val_envs = {split: (R2RBatch(feat_dict, batch_size=args.batchSize, splits=[split],
tokenizer=tok), Evaluation([split], featurized_scans, tok))
for split in ['train', 'val_seen', 'val_unseen']}
# Start training
train(train_env, tok, args.iters, val_envs=val_envs, aug_env=aug_env)
if __name__ == "__main__":
if args.train in ['speaker', 'rlspeaker', 'validspeaker',
'listener', 'validlistener']:
train_val()
elif args.train == 'auglistener':
train_val_augment()
else:
assert False
|
py | 1a337c73e38da8c7ac9f40e22f9a0027cd0a050f | # coding: utf-8
"""
Sentim's Emotion APIs
An emotion recognition api that tells you the emotion of text, and not just the connotation. # noqa: E501
The version of the OpenAPI document: 1.0.0
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class ResultItem(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'index': 'int',
'emotion': 'str',
'emotion_score': 'EmotionScore'
}
attribute_map = {
'index': 'Index',
'emotion': 'Emotion',
'emotion_score': 'EmotionScore'
}
def __init__(self, index=None, emotion=None, emotion_score=None): # noqa: E501
"""ResultItem - a model defined in OpenAPI""" # noqa: E501
self._index = None
self._emotion = None
self._emotion_score = None
self.discriminator = None
if index is not None:
self.index = index
if emotion is not None:
self.emotion = emotion
if emotion_score is not None:
self.emotion_score = emotion_score
@property
def index(self):
"""Gets the index of this ResultItem. # noqa: E501
The index of the conversation or list that was classified. # noqa: E501
:return: The index of this ResultItem. # noqa: E501
:rtype: int
"""
return self._index
@index.setter
def index(self, index):
"""Sets the index of this ResultItem.
The index of the conversation or list that was classified. # noqa: E501
:param index: The index of this ResultItem. # noqa: E501
:type: int
"""
self._index = index
@property
def emotion(self):
"""Gets the emotion of this ResultItem. # noqa: E501
The classified emotion of the message. # noqa: E501
:return: The emotion of this ResultItem. # noqa: E501
:rtype: str
"""
return self._emotion
@emotion.setter
def emotion(self, emotion):
"""Sets the emotion of this ResultItem.
The classified emotion of the message. # noqa: E501
:param emotion: The emotion of this ResultItem. # noqa: E501
:type: str
"""
self._emotion = emotion
@property
def emotion_score(self):
"""Gets the emotion_score of this ResultItem. # noqa: E501
:return: The emotion_score of this ResultItem. # noqa: E501
:rtype: EmotionScore
"""
return self._emotion_score
@emotion_score.setter
def emotion_score(self, emotion_score):
"""Sets the emotion_score of this ResultItem.
:param emotion_score: The emotion_score of this ResultItem. # noqa: E501
:type: EmotionScore
"""
self._emotion_score = emotion_score
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ResultItem):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
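# Illustrative sketch only (not part of the generated module): constructing a
# result item and serializing it; the field values are hypothetical.
if __name__ == "__main__":
    _item = ResultItem(index=0, emotion="joy")
    print(_item.to_dict())
    print(_item)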
|
py | 1a337cc2312e05cfb34e7de791955bc50c07de9c | # ---------------------------------------------------------------------
# f5.BIGIP.get_interfaces
# ---------------------------------------------------------------------
# Copyright (C) 2007-2019 The NOC Project
# See LICENSE for details
# ---------------------------------------------------------------------
# Python modules
import re
from collections import defaultdict
# NOC modules
from noc.core.script.base import BaseScript
from noc.sa.interfaces.igetinterfaces import IGetInterfaces
class Script(BaseScript):
name = "f5.BIGIP.get_interfaces"
cache = True
interface = IGetInterfaces
rx_self = re.compile(r"^net self \S+ {", re.MULTILINE | re.DOTALL)
rx_self_a = re.compile(
r"^\s+address\s+(?P<address>\S+).+" r"^\s+vlan\s+(?P<vlan>\S+)", re.DOTALL | re.MULTILINE
)
def parse_kv(self, s):
r = {}
for l in s.splitlines():
k, v = l.rsplit(" ", 1)
r[k.strip()] = v
return r
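    # Illustrative note (not part of the original script): given hypothetical
    # CLI output such as "Mac Address (True) 00:11:22:33:44:55\nTag 10",
    # parse_kv returns {"Mac Address (True)": "00:11:22:33:44:55", "Tag": "10"}.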
def execute(self):
# Get self ip
addresses = defaultdict(list)
v = self.cli("list /net self")
for data in self.rx_self.split(v):
match = self.rx_self_a.search(data)
if match:
addresses[match.group("vlan")] += [match.group("address")]
# Get VLAN mappings
vlans = {} # tag -> data
trunks = {} # name -> [members]
aggregated = {} # name -> aggregated interface
current_vlan = None
current_trunk = None
lacp_interfaces = set()
interfaces = set()
v = self.cli("show /net vlan")
for h, data in self.parse_blocks(v):
if h.startswith("Net::Vlan: "):
d = self.parse_kv(data)
name = h[11:]
current_vlan = {
"name": name,
"mac": d.get("Mac Address (True)"),
"mtu": d.get("MTU"),
"tag": d.get("Tag"),
"tagged": [],
"untagged": [],
"ipv4_addresses": [a for a in addresses[name] if ":" not in a],
"ipv6_addresses": [a for a in addresses[name] if ":" in a],
}
vlans[name] = current_vlan
current_trunk = None
elif h.startswith("Net::Vlan-Member: "):
name = h[18:]
d = self.parse_kv(data)
tagged = d.get("Tagged") == "yes"
if tagged:
current_vlan["tagged"] += [name]
else:
current_vlan["untagged"] += [name]
interfaces.add(name)
elif h.startswith("Net::Trunk"):
name = data.splitlines()[0].split(" ", 1)[0]
current_trunk = {"name": name, "members": []}
trunks[name] = current_trunk
interfaces.add(name)
elif h.startswith("Net::Interface"):
if current_trunk:
for l in data.splitlines():
i = l.split(" ", 1)[0]
current_trunk["members"] += [i]
interfaces.add(i)
aggregated[i] = current_trunk["name"]
elif h.startswith("Net::LACP Status (interface: "):
name = h[29:-1]
lacp_interfaces.add(name)
# Build result
ifaces = []
tagged = defaultdict(list) # interface -> [vlans]
untagged = {} # interface -> vlan
for vlan in vlans:
# SVI
v = vlans[vlan]
enabled_afi = []
tag = int(v["tag"])
if v["ipv4_addresses"]:
enabled_afi += ["IPv4"]
if v["ipv6_addresses"]:
enabled_afi += ["IPv6"]
if enabled_afi:
iface = {
"name": v["name"],
"type": "SVI",
"mac": v["mac"],
"mtu": v["mtu"],
"admin_status": True,
"oper_status": True,
"subinterfaces": [
{
"name": v["name"],
"vlan_ids": [tag],
"enabled_afi": enabled_afi,
"ipv4_addresses": v["ipv4_addresses"],
"ipv6_addresses": v["ipv6_addresses"],
"admin_status": True,
"oper_status": True,
}
],
}
ifaces += [iface]
for i in v["tagged"]:
tagged[i] += [tag]
for i in v["untagged"]:
untagged[i] = tag
for i in interfaces:
itype = "physical" if i not in trunks else "aggregated"
iface = {
"name": i,
"type": itype,
# "mac": v["mac"],
# "mtu": v["mtu"],
"admin_status": True,
"oper_status": True,
"enabled_protocols": [],
"subinterfaces": [],
}
if i in tagged or i in untagged:
si = {
"name": i,
"enabled_afi": ["BRIDGE"],
"admin_status": True,
"oper_status": True,
}
if i in tagged:
si["tagged_vlans"] = sorted(tagged[i])
if i in untagged:
si["untagged_vlan"] = untagged[i]
iface["subinterfaces"] = [si]
if i in lacp_interfaces:
iface["enabled_protocols"] += ["LACP"]
if i in aggregated:
iface["aggregated_interface"] = aggregated[i]
ifaces += [iface]
return [{"interfaces": sorted(ifaces, key=lambda x: x["name"])}]
|
py | 1a337d7a4a50d068ddd0566dc336a3a576ed2533 | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyAzureMgmtContainerregistry(PythonPackage):
"""Microsoft Azure Container Registry Client Library for Python."""
homepage = "https://github.com/Azure/azure-sdk-for-python"
url = "https://pypi.io/packages/source/a/azure-mgmt-containerregistry/azure-mgmt-containerregistry-2.8.0.zip"
# Release candidate needed for py-azure-cli
version('3.0.0rc14', sha256='d23ce93ec5903d00f79f0ac995e16bf47197130239f7f182509add3277b73071')
version('2.8.0', sha256='b24be1050d54f3158e8be7f6ad677f0c8888dddefd09fb8391ebfc73d40173a4', preferred=True)
depends_on('py-setuptools', type='build')
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:1.999', type=('build', 'run'))
depends_on('[email protected]:1.999', type=('build', 'run'))
depends_on('py-azure-mgmt-nspkg', when='^python@:2', type=('build', 'run'))
|
py | 1a337e03c86ab43d75c5dbbddd34dcfdeeb53c3a | # Copyright (c) Nanjing University, Vision Lab.
# Jianqiang Wang ([email protected]), Zhan Ma ([email protected]); Nanjing University, Vision Lab.
# Last update: 2020.06.06
import numpy as np
import h5py
import os, sys
import torch
import torch.nn as nn
import MinkowskiEngine as ME
import MinkowskiEngine.MinkowskiFunctional as MF
from models.BasicBlock import ResNet, InceptionResNet
import time
class Encoder(nn.Module):
"""
Encoder
"""
def __init__(self, channels, block_layers, block):
nn.Module.__init__(self)
in_nchannels=1
ch = [16, 32, 64, 32, channels]
if block == 'ResNet':
self.block = ResNet
elif block == 'InceptionResNet':
self.block = InceptionResNet
self.conv0 = ME.MinkowskiConvolution(
in_channels=in_nchannels,
out_channels=ch[0],
kernel_size=3,
stride=1,
bias=True,
dimension=3)
self.down0 = ME.MinkowskiConvolution(
in_channels=ch[0],
out_channels=ch[1],
kernel_size=2,
stride=2,
bias=True,
dimension=3)
self.block0 = self.make_layer(
self.block, block_layers, ch[1])
self.conv1 = ME.MinkowskiConvolution(
in_channels=ch[1],
out_channels=ch[1],
kernel_size=3,
stride=1,
bias=True,
dimension=3)
self.down1 = ME.MinkowskiConvolution(
in_channels=ch[1],
out_channels=ch[2],
kernel_size=2,
stride=2,
bias=True,
dimension=3)
self.block1 = self.make_layer(
self.block, block_layers, ch[2])
self.conv2 = ME.MinkowskiConvolution(
in_channels=ch[2],
out_channels=ch[2],
kernel_size=3,
stride=1,
bias=True,
dimension=3)
self.down2 = ME.MinkowskiConvolution(
in_channels=ch[2],
out_channels=ch[3],
kernel_size=2,
stride=2,
bias=True,
dimension=3)
self.block2 = self.make_layer(
self.block, block_layers, ch[3])
self.conv3 = ME.MinkowskiConvolution(
in_channels=ch[3],
out_channels=ch[4],
kernel_size=3,
stride=1,
bias=True,
dimension=3)
self.relu = ME.MinkowskiReLU(inplace=True)
def make_layer(self, block, block_layers, channels):
layers = []
for i in range(block_layers):
layers.append(block(channels=channels))
return nn.Sequential(*layers)
def forward(self, x):
out0 = self.relu(self.down0(self.relu(self.conv0(x))))
out0 = self.block0(out0)
out1 = self.relu(self.down1(self.relu(self.conv1(out0))))
out1 = self.block1(out1)
out2 = self.relu(self.down2(self.relu(self.conv2(out1))))
out2 = self.block2(out2)
out2 = self.conv3(out2)
return [out2, out1, out0]
class Decoder(nn.Module):
"""
Decoder
"""
def __init__(self, channels, block_layers, block):
nn.Module.__init__(self)
out_nchannel=1
ch = [channels, 64, 32, 16]
if block == 'ResNet':
self.block = ResNet
elif block == 'InceptionResNet':
self.block = InceptionResNet
self.up0 = ME.MinkowskiGenerativeConvolutionTranspose(
in_channels=ch[0],
out_channels=ch[1],
kernel_size= 2,
stride=2,
bias=True,
dimension=3)
self.conv0 = ME.MinkowskiConvolution(
in_channels=ch[1],
out_channels=ch[1],
kernel_size= 3,
stride=1,
bias=True,
dimension=3)
self.block0 = self.make_layer(
self.block, block_layers, ch[1])
self.conv0_cls = ME.MinkowskiConvolution(
in_channels=ch[1],
out_channels=out_nchannel,
kernel_size= 3,
stride=1,
bias=True,
dimension=3)
self.up1 = ME.MinkowskiGenerativeConvolutionTranspose(
in_channels=ch[1],
out_channels=ch[2],
kernel_size= 2,
stride=2,
bias=True,
dimension=3)
self.conv1 = ME.MinkowskiConvolution(
in_channels=ch[2],
out_channels=ch[2],
kernel_size= 3,
stride=1,
bias=True,
dimension=3)
self.block1 = self.make_layer(
self.block, block_layers, ch[2])
self.conv1_cls = ME.MinkowskiConvolution(
in_channels=ch[2],
out_channels=out_nchannel,
kernel_size= 3,
stride=1,
bias=True,
dimension=3)
self.up2 = ME.MinkowskiGenerativeConvolutionTranspose(
in_channels=ch[2],
out_channels=ch[3],
kernel_size= 2,
stride=2,
bias=True,
dimension=3)
self.conv2 = ME.MinkowskiConvolution(
in_channels=ch[3],
out_channels=ch[3],
kernel_size= 3,
stride=1,
bias=True,
dimension=3)
self.block2 = self.make_layer(
self.block, block_layers, ch[3])
self.conv2_cls = ME.MinkowskiConvolution(
in_channels=ch[3],
out_channels=out_nchannel,
kernel_size= 3,
stride=1,
bias=True,
dimension=3)
self.relu = ME.MinkowskiReLU(inplace=True)
# self.relu = ME.MinkowskiELU(inplace=True)
# pruning
self.pruning = ME.MinkowskiPruning()
def make_layer(self, block, block_layers, channels):
layers = []
for i in range(block_layers):
layers.append(block(channels=channels))
return nn.Sequential(*layers)
# get target from label key or sparse tensor.
def get_target_by_key(self, out, target_key):
with torch.no_grad():
target = torch.zeros(len(out), dtype=torch.bool)
cm = out.coords_man
strided_target_key = cm.stride(
target_key, out.tensor_stride[0], force_creation=True)
# kernel size = 1
ins, outs = cm.get_kernel_map(
out.coords_key,
strided_target_key,
kernel_size=1,
region_type=1)
for curr_in in ins:
target[curr_in] = 1
return target.bool()
def get_target_by_sp_tensor(self, out, target_sp_tensor):
with torch.no_grad():
def ravel_multi_index(coords, step):
coords = coords.long()
step = step.long()
coords_sum = coords[:, 0] \
+ coords[:, 1]*step \
+ coords[:, 2]*step*step \
+ coords[:, 3]*step*step*step
return coords_sum
step = max(out.C.max(), target_sp_tensor.C.max()) + 1
out_sp_tensor_coords_1d = ravel_multi_index(out.C, step)
in_sp_tensor_coords_1d = ravel_multi_index(target_sp_tensor.C, step)
# test whether each element of a 1-D array is also present in a second array.
target = np.in1d(out_sp_tensor_coords_1d.cpu().numpy(),
in_sp_tensor_coords_1d.cpu().numpy())
return torch.Tensor(target).bool()
def get_coords_nums_by_key(self, out, target_key):
with torch.no_grad():
cm = out.coords_man
strided_target_key = cm.stride(target_key, out.tensor_stride[0], force_creation=True)
ins, outs = cm.get_kernel_map(
out.coords_key,
strided_target_key,
kernel_size=1,
region_type=1)
row_indices_per_batch = cm.get_row_indices_per_batch(out.coords_key)
coords_nums = [len(np.in1d(row_indices,ins[0]).nonzero()[0]) for _, row_indices in enumerate(row_indices_per_batch)]
# coords_nums = [len(np.intersect1d(row_indices,ins[0])) for _, row_indices in enumerate(row_indices_per_batch)]
return coords_nums
def keep_adaptive(self, out, coords_nums, rho=1.0):
with torch.no_grad():
keep = torch.zeros(len(out), dtype=torch.bool)
# get row indices per batch.
# row_indices_per_batch = out.coords_man.get_row_indices_per_batch(out.coords_key)
row_indices_per_batch = out._batchwise_row_indices
for row_indices, ori_coords_num in zip(row_indices_per_batch, coords_nums):
coords_num = min(len(row_indices), ori_coords_num*rho)# select top k points.
values, indices = torch.topk(out.F[row_indices].squeeze(), int(coords_num))
keep[row_indices[indices]]=True
return keep
def forward(self, x, target_label, adaptive, rhos=[1.0, 1.0, 1.0], training=True):
if isinstance(target_label, ME.CoordinateMapKey):
target_format = 'key'
elif isinstance(target_label, list):
if isinstance(target_label[0], ME.SparseTensor):
target_format = 'sp_tensor'
elif isinstance(target_label[0], int):
target_format = 'num'
else:
print('Target Label Format Error!')
sys.exit(0)
targets = []
out_cls = []
keeps = []
# Decode 0.
out0 = self.relu(self.conv0(self.relu(self.up0(x))))
out0 = self.block0(out0)
out0_cls = self.conv0_cls(out0)
# get target 0.
if target_format == 'key':
target0 = self.get_target_by_key(out0, target_label)
elif target_format == 'sp_tensor':
target0 = self.get_target_by_sp_tensor(out0, target_label[0])
elif target_format == 'num':
target0 = target_label[0]
targets.append(target0)
out_cls.append(out0_cls)
# get keep 0.
if adaptive:
if target_format == 'key':
coords_nums0 = self.get_coords_nums_by_key(out0, target_label)
elif target_format == 'sp_tensor':
coords_nums0 = [len(coords) for coords in target_label[0].decomposed_coordinates]
elif target_format == 'num':
coords_nums0 = [target_label[0]]
keep0 = self.keep_adaptive(out0_cls, coords_nums0, rho=rhos[0])
else:
keep0 = (out0_cls.F > 0).cpu().squeeze()
if out0_cls.F.max() < 0:
# keep at least one points.
print('===0; max value < 0', out0_cls.F.max())
_, idx = torch.topk(out0_cls.F.squeeze(), 1)
keep0[idx] = True
keeps.append(keep0)
# If training, force target shape generation, use net.eval() to disable
if training:
keep0 += target0
# Remove voxels
out0_pruned = self.pruning(out0, keep0.to(out0.device))
# Decode 1.
out1 = self.relu(self.conv1(self.relu(self.up1(out0_pruned))))
out1 = self.block1(out1)
out1_cls = self.conv1_cls(out1)
# get target 1.
if target_format == 'key':
target1 = self.get_target_by_key(out1, target_label)
elif target_format == 'sp_tensor':
target1 = self.get_target_by_sp_tensor(out1, target_label[1])
elif target_format == 'num':
target1 = target_label[1]
targets.append(target1)
out_cls.append(out1_cls)
# get keep 1.
if adaptive:
if target_format == 'key':
coords_nums1 = self.get_coords_nums_by_key(out1, target_label)
elif target_format == 'sp_tensor':
coords_nums1 = [len(coords) for coords in target_label[1].decomposed_coordinates]
elif target_format == 'num':
coords_nums1 = [target_label[1]]
keep1 = self.keep_adaptive(out1_cls, coords_nums1, rho=rhos[1])
else:
keep1 = (out1_cls.F > 0).cpu().squeeze()
if out1_cls.F.max() < 0:
# keep at least one points.
print('===1; max value < 0', out1_cls.F.max())
_, idx = torch.topk(out1_cls.F.squeeze(), 1)
keep1[idx] = True
keeps.append(keep1)
if training:
keep1 += target1
# Remove voxels
out1_pruned = self.pruning(out1, keep1.to(out1.device))
# Decode 2.
out2 = self.relu(self.conv2(self.relu(self.up2(out1_pruned))))
out2 = self.block2(out2)
out2_cls = self.conv2_cls(out2)
# get target 2.
if target_format == 'key':
target2 = self.get_target_by_key(out2, target_label)
elif target_format == 'sp_tensor':
target2 = self.get_target_by_sp_tensor(out2, target_label[2])
elif target_format == 'num':
target2 = target_label[2]
targets.append(target2)
out_cls.append(out2_cls)
# get keep 2.
if adaptive:
if target_format == 'key':
coords_nums2 = self.get_coords_nums_by_key(out2, target_label)
elif target_format == 'sp_tensor':
coords_nums2 = [len(coords) for coords in target_label[2].decomposed_coordinates]
elif target_format == 'num':
coords_nums2 = [target_label[2]]
keep2 = self.keep_adaptive(out2_cls, coords_nums2, rho=rhos[2])
else:
keep2 = (out2_cls.F > 0).cpu().squeeze()
if out2_cls.F.max() < 0:
# keep at least one points.
print('===2; max value < 0', out2_cls.F.max())
_, idx = torch.topk(out2_cls.F.squeeze(), 1)
keep2[idx] = True
keeps.append(keep2)
# Remove voxels
out2_pruned = self.pruning(out2_cls, keep2.to(out2_cls.device))
return out2_pruned, out_cls, targets, keeps
if __name__ == '__main__':
encoder = Encoder(8)
print(encoder)
decoder = Decoder(8)
print(decoder)
|
py | 1a337ebf3d2a86cd14f219f3c9b89af5616c8f7e | # Link class
class Link:
## Constructor ##
def __init__(self, text = "None", url = "None", status_code = 000): # Not the keyword 'None' so it will still print something
# Dictionary of URL-related content
self.text = text
self.url = url
self.status_code = status_code
# Trims the inside of a string (removing extra whitespace between words)
def trim_inner(self, text):
return " ".join( [string.strip() for string in text.split()] ) # Separate the indiv. words and trim them individually
## OVERLOADS ##
# String representation of the 'Link' class
def __str__(self):
# Format: status code, hyperlinked text, then url (CSV)
text = self.trim_inner(self.text) # Trim internal whitespace;
if not text: # If the string is blank,
text = "N/A" # Give it some text
return f"{self.status_code}, {text}, {self.url}"
# Relational Operators, compared by status code for sorting
# > (less than)
def __lt__(self, other):
return self.status_code < other.status_code
# >= (less than or equal to)
def __le__(self, other):
return self.status_code <= other.status_code
# == (is equal to)
def __eq__(self, other):
        return self.status_code == other.status_code
# != (is not equal to)
def __ne__(self, other):
return self.status_code != other.status_code
# < (greater than)
def __gt__(self, other):
return self.status_code > other.status_code
# <= (greater than or equal to)
def __ge__(self, other):
return self.status_code >= other.status_code
# End of Link class
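# Illustrative sketch only (not part of the original file): sorting scraped
# links by status code; the values below are hypothetical.
if __name__ == "__main__":
    links = [
        Link("Broken page", "https://example.com/missing", 404),
        Link("  Home   page ", "https://example.com/", 200),
    ]
    for link in sorted(links):
        print(link)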
|
py | 1a337ec317f25bbc9aadf3ac8dc7e07f0000d4ca | #
# Copyright 2017 XebiaLabs, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
from apigee import ApigeeClient, setup_urllib
setup_urllib()
revision_name = None
client = ApigeeClient(deployed.container.org, deployed.container)
response = client.import_shared_flow(deployed.deployable.name, deployed.file.path)
try:
print response.json()
revision_name = response.json()['revision']
print("Shared flow imported as revision number: " + revision_name)
unwrapped_deployed = unwrap(deployed)
unwrapped_deployed.setProperty("revisionNumber", revision_name)
except ValueError:
print("No JSON returned after importing the shared flow %s" % deployed.deployable.name)
if revision_name is not None:
response = client.deploy_shared_flow(deployed.deployable.name, revision_name)
print(response.text)
else:
print("The shared flow %s is not imported. Therefore we will not deploy it" % (deployed.deployable.name))
|
py | 1a337fac097b723eba3bbc03b1a8ba5d3cdb9c53 | # Copyright (C) 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper functions for audio streams."""
import array
import logging
import math
import time
import threading
import wave
import click
import sounddevice as sd
DEFAULT_AUDIO_SAMPLE_RATE = 16000
DEFAULT_AUDIO_SAMPLE_WIDTH = 2
DEFAULT_AUDIO_ITER_SIZE = 3200
DEFAULT_AUDIO_DEVICE_BLOCK_SIZE = 6400
DEFAULT_AUDIO_DEVICE_FLUSH_SIZE = 25600
def normalize_audio_buffer(buf, volume_percentage, sample_width=2):
"""Adjusts the loudness of the audio data in the given buffer.
Volume normalization is done by scaling the amplitude of the audio
in the buffer by a scale factor of 2^(volume_percentage/100)-1.
For example, 50% volume scales the amplitude by a factor of 0.414,
and 75% volume scales the amplitude by a factor of 0.681.
    For now we only support sample_width 2.
Args:
buf: byte string containing audio data to normalize.
volume_percentage: volume setting as an integer percentage (1-100).
sample_width: size of a single sample in bytes.
"""
if sample_width != 2:
raise Exception('unsupported sample width:', sample_width)
scale = math.pow(2, 1.0*volume_percentage/100)-1
# Construct array from bytes based on sample_width, multiply by scale
# and convert it back to bytes
arr = array.array('h', buf)
for idx in range(0, len(arr)):
arr[idx] = int(arr[idx]*scale)
    buf = arr.tobytes()
return buf
def align_buf(buf, sample_width):
"""In case of buffer size not aligned to sample_width pad it with 0s"""
remainder = len(buf) % sample_width
if remainder != 0:
buf += b'\0' * (sample_width - remainder)
return buf
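# Illustrative note (not part of the original module): with sample_width=2,
# align_buf(b'\x01\x02\x03', 2) pads the odd-length buffer to b'\x01\x02\x03\x00'.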
class WaveSource(object):
"""Audio source that reads audio data from a WAV file.
Reads are throttled to emulate the given sample rate and silence
is returned when the end of the file is reached.
Args:
fp: file-like stream object to read from.
sample_rate: sample rate in hertz.
sample_width: size of a single sample in bytes.
"""
def __init__(self, fp, sample_rate, sample_width):
self._fp = fp
try:
self._wavep = wave.open(self._fp, 'r')
except wave.Error as e:
logging.warning('error opening WAV file: %s, '
'falling back to RAW format', e)
self._fp.seek(0)
self._wavep = None
self._sample_rate = sample_rate
self._sample_width = sample_width
self._sleep_until = 0
def read(self, size):
"""Read bytes from the stream and block until sample rate is achieved.
Args:
size: number of bytes to read from the stream.
"""
now = time.time()
missing_dt = self._sleep_until - now
if missing_dt > 0:
time.sleep(missing_dt)
self._sleep_until = time.time() + self._sleep_time(size)
data = (self._wavep.readframes(size)
if self._wavep
else self._fp.read(size))
# When reach end of audio stream, pad remainder with silence (zeros).
if not data:
return b'\x00' * size
return data
def close(self):
"""Close the underlying stream."""
if self._wavep:
self._wavep.close()
self._fp.close()
def _sleep_time(self, size):
sample_count = size / float(self._sample_width)
sample_rate_dt = sample_count / float(self._sample_rate)
return sample_rate_dt
def start(self):
pass
def stop(self):
pass
@property
def sample_rate(self):
return self._sample_rate
class WaveSink(object):
"""Audio sink that writes audio data to a WAV file.
Args:
fp: file-like stream object to write data to.
sample_rate: sample rate in hertz.
sample_width: size of a single sample in bytes.
"""
def __init__(self, fp, sample_rate, sample_width):
self._fp = fp
self._wavep = wave.open(self._fp, 'wb')
self._wavep.setsampwidth(sample_width)
self._wavep.setnchannels(1)
self._wavep.setframerate(sample_rate)
def write(self, data):
"""Write bytes to the stream.
Args:
data: frame data to write.
"""
self._wavep.writeframes(data)
def close(self):
"""Close the underlying stream."""
self._wavep.close()
self._fp.close()
def start(self):
pass
def stop(self):
pass
def flush(self):
pass
class SoundDeviceStream(object):
"""Audio stream based on an underlying sound device.
It can be used as an audio source (read) and a audio sink (write).
Args:
sample_rate: sample rate in hertz.
sample_width: size of a single sample in bytes.
block_size: size in bytes of each read and write operation.
flush_size: size in bytes of silence data written during flush operation.
"""
def __init__(self, sample_rate, sample_width, block_size, flush_size):
if sample_width == 2:
audio_format = 'int16'
else:
raise Exception('unsupported sample width:', sample_width)
self._audio_stream = sd.RawStream(
samplerate=sample_rate, dtype=audio_format, channels=1,
blocksize=int(block_size/2), # blocksize is in number of frames.
)
self._block_size = block_size
self._flush_size = flush_size
self._sample_rate = sample_rate
def read(self, size):
"""Read bytes from the stream."""
buf, overflow = self._audio_stream.read(size)
if overflow:
logging.warning('SoundDeviceStream read overflow (%d, %d)',
size, len(buf))
return bytes(buf)
def write(self, buf):
"""Write bytes to the stream."""
underflow = self._audio_stream.write(buf)
if underflow:
logging.warning('SoundDeviceStream write underflow (size: %d)',
len(buf))
return len(buf)
def flush(self):
if self._audio_stream.active and self._flush_size > 0:
self._audio_stream.write(b'\x00' * self._flush_size)
def start(self):
"""Start the underlying stream."""
if not self._audio_stream.active:
self._audio_stream.start()
def stop(self):
"""Stop the underlying stream."""
if self._audio_stream.active:
self._audio_stream.stop()
def close(self):
"""Close the underlying stream and audio interface."""
if self._audio_stream:
self.stop()
self._audio_stream.close()
self._audio_stream = None
@property
def sample_rate(self):
return self._sample_rate
class ConversationStream(object):
"""Audio stream that supports half-duplex conversation.
A conversation is the alternation of:
- a recording operation
- a playback operation
Expected usage:
For each conversation:
- start_recording()
- read() or iter()
- stop_recording()
- start_playback()
- write()
- stop_playback()
When conversations are finished:
- close()
Args:
source: file-like stream object to read input audio bytes from.
sink: file-like stream object to write output audio bytes to.
iter_size: read size in bytes for each iteration.
sample_width: size of a single sample in bytes.
"""
def __init__(self, source, sink, iter_size, sample_width):
self._source = source
self._sink = sink
self._iter_size = iter_size
self._sample_width = sample_width
self._volume_percentage = 50
self._stop_recording = threading.Event()
self._source_lock = threading.RLock()
self._recording = False
self._playing = False
def start_recording(self):
"""Start recording from the audio source."""
self._recording = True
self._stop_recording.clear()
self._source.start()
def stop_recording(self):
"""Stop recording from the audio source."""
self._stop_recording.set()
with self._source_lock:
self._source.stop()
self._recording = False
def start_playback(self):
"""Start playback to the audio sink."""
self._playing = True
self._sink.start()
def stop_playback(self):
"""Stop playback to the audio sink."""
self._sink.flush()
self._sink.stop()
self._playing = False
@property
def recording(self):
return self._recording
@property
def playing(self):
return self._playing
@property
def volume_percentage(self):
"""The current volume setting as an integer percentage (1-100)."""
return self._volume_percentage
@volume_percentage.setter
def volume_percentage(self, new_volume_percentage):
self._volume_percentage = new_volume_percentage
def read(self, size):
"""Read bytes from the source (if currently recording).
"""
with self._source_lock:
return self._source.read(size)
def write(self, buf):
"""Write bytes to the sink (if currently playing).
"""
buf = align_buf(buf, self._sample_width)
buf = normalize_audio_buffer(buf, self.volume_percentage)
return self._sink.write(buf)
def close(self):
"""Close source and sink."""
self._source.close()
self._sink.close()
def __iter__(self):
"""Returns a generator reading data from the stream."""
while True:
if self._stop_recording.is_set():
return
yield self.read(self._iter_size)
@property
def sample_rate(self):
return self._source._sample_rate
@click.command()
@click.option('--record-time', default=5,
metavar='<record time>', show_default=True,
help='Record time in secs')
@click.option('--audio-sample-rate',
default=DEFAULT_AUDIO_SAMPLE_RATE,
metavar='<audio sample rate>', show_default=True,
help='Audio sample rate in hertz.')
@click.option('--audio-sample-width',
default=DEFAULT_AUDIO_SAMPLE_WIDTH,
metavar='<audio sample width>', show_default=True,
help='Audio sample width in bytes.')
@click.option('--audio-iter-size',
default=DEFAULT_AUDIO_ITER_SIZE,
metavar='<audio iter size>', show_default=True,
help='Size of each read during audio stream iteration in bytes.')
@click.option('--audio-block-size',
default=DEFAULT_AUDIO_DEVICE_BLOCK_SIZE,
metavar='<audio block size>', show_default=True,
help=('Block size in bytes for each audio device '
'read and write operation.'))
@click.option('--audio-flush-size',
default=DEFAULT_AUDIO_DEVICE_FLUSH_SIZE,
metavar='<audio flush size>', show_default=True,
help=('Size of silence data in bytes written '
'during flush operation.'))
def main(record_time, audio_sample_rate, audio_sample_width,
audio_iter_size, audio_block_size, audio_flush_size):
"""Helper command to test audio stream processing.
- Record <record time> seconds (5 by default) of 16-bit samples at 16 kHz.
- Play back the recorded samples.
"""
end_time = time.time() + record_time
audio_device = SoundDeviceStream(sample_rate=audio_sample_rate,
sample_width=audio_sample_width,
block_size=audio_block_size,
flush_size=audio_flush_size)
stream = ConversationStream(source=audio_device,
sink=audio_device,
iter_size=audio_iter_size,
sample_width=audio_sample_width)
samples = []
logging.basicConfig(level=logging.INFO)
logging.info('Starting audio test.')
stream.start_recording()
logging.info('Recording samples.')
while time.time() < end_time:
samples.append(stream.read(audio_block_size))
logging.info('Finished recording.')
stream.stop_recording()
stream.start_playback()
logging.info('Playing back samples.')
while len(samples):
stream.write(samples.pop(0))
logging.info('Finished playback.')
stream.stop_playback()
logging.info('audio test completed.')
stream.close()
if __name__ == '__main__':
main()
|
py | 1a33803cca8b8b48ffd5375d507a27335f4ac652 | ll_proto = \
"""
node life {
has anchor owner;
can infer.year_from_date;
}
node year {
has anchor year;
can infer.month_from_date;
}
node month {
has anchor month;
can infer.year_from_date;
can infer.week_from_date;
}
node week {
has anchor week;
can infer.month_from_date;
can infer.day_from_date, date.day_from_date;
}
node day: has anchor day;
node workette {
has name, order, date, owner, status, snooze_till;
has note, is_MIT, is_ritual;
}
edge past;
edge parent;
walker get_day {
has date;
life: take --> node::year == infer.year_from_date(date);
year: take --> node::month == infer.month_from_date(date);
month: take --> node::week == infer.week_from_date(date);
week: take --> node::day == date.day_from_date(date);
day: report here;
report false;
}
walker get_latest_day {
has before_date;
has anchor latest_day;
if(!before_date): before_date = std.time_now();
if(!latest_day): latest_day = 0;
life {
ignore --> node::year > infer.year_from_date(before_date);
take net.max(--> node::year);
}
year {
ignore node::month > infer.month_from_date(before_date);
take net.max(--> node::month)
else {
ignore here;
take <-- node::life;
}
}
month {
ignore node::week > infer.week_from_date(before_date);
take net.max(--> node::week)
else {
ignore here;
take <-- node::year == infer.year_from_date(before_date);
}
}
week {
ignore node::day > infer.day_from_date(before_date);
take net.max(--> node::day)
else {
ignore here;
take <-- node::month == infer.month_from_date(before_date);
}
}
day {
latest_day = here;
report here;
}
}
walker get_gen_day {
has date;
has anchor day_node;
if(!date): date=std.time_now();
root: take --> node::life;
life: take --> node::year == infer.year_from_date(date) else {
new = spawn here --> node::year ;
new.year = infer.year_from_date(date);
take --> node::year == infer.year_from_date(date);
}
year: take --> node::month == infer.month_from_date(date) else {
new = spawn here --> node::month;
new.month = infer.month_from_date(date);
take --> node::month == infer.month_from_date(date);
}
month: take --> node::week == infer.week_from_date(date) else {
new = spawn here --> node::week;
new.week = infer.week_from_date(date);
take --> node::week == infer.week_from_date(date);
}
week: take --> node::day == infer.day_from_date(date) else {
latest_day = spawn here walker::get_latest_day;
new = spawn here --> node::day;
new.day = infer.day_from_date(date);
if(latest_day and infer.day_from_date(date) ==
infer.day_from_date(std.time_now())) {
spawn latest_day walker::carry_forward(parent=new);
take new;
}
elif(latest_day) {
take latest_day;
}
else: take new;
}
day {
day_node = here;
take --> node::workette;
}
workette {
report here;
take --> node::workette;
}
}
walker get_sub_workettes {
report here;
workette: take --> node::workette;
}
walker carry_forward {
has parent;
day {
take --> node::workette;
}
workette {
if(here.status == 'done' or
here.status == 'eliminated') {
disengage;
}
new_workette = spawn here <-[past]- node::workette;
new_workette <-[parent]- parent;
new_workette := here;
spawn --> node::workette
walker::carry_forward(parent=new_workette);
}
}
walker gen_rand_life {
has num_workettes;
root: take --> node::life;
life {
num_workettes = 10;
num_days = rand.integer(2, 4);
for i=0 to i<num_days by i+=1 {
spawn here walker::get_gen_day(
date=rand.time("2019-01-01", "2019-12-31")
);
}
take -->;
}
year, month, week { take -->; }
day, workette {
if(num_workettes == 0): disengage;
gen_num = rand.integer(5, 8);
for i=0 to i<gen_num by i+=1 {
spawn here -[parent]-> node::workette(name=rand.sentence());
}
take --> ;
num_workettes -= 1;
}
}
walker init {
has owner;
has anchor life_node;
take (--> node::life == owner) else {
life_node = spawn here --> node::life;
life_node.owner = owner;
disengage;
}
}
"""
prog0 = \
"""
node testnode:0 {
has a, b, c;
can std.log::a,b::>c with exit;
}
walker testwalk {
testnode {
here.a = 43;
here.b = 'Yeah \\n"fools"!';
report here.b;
if(4 > 6) { std.log("a"); }
elif(5>6) { std.log("b"); }
elif(6>6) { std.log("c"); }
elif(7>6) { std.log(576); }
}
}
node life:0 {
}
node year {
has anchor year;
}
walker another {
life {
here.a = 43;
here.b = 'Yeah \\n"fools"!';
report here.b;
if("4 > 6" == "4 > 6") { std.log("a"); }
}
}
"""
prog1 = \
"""
node testnode:0 {
has a, b, c;
can std.log::a,b::>c with exit;
}
walker testwalk {
testnode {
here.a = 43;
here.b = 'Yeah \\n"fools"!';
report here.b;
if(4 > 6) { std.log("a"); }
elif(5>6) { std.log("b"); }
elif(6>6) { std.log("c"); }
elif(7>6) { std.log(576); }
}
}
node life:0 {
}
node year {
has anchor year;
}
node month {
has anchor month;
}
node week {
has anchor week;
}
node day {
has anchor day;
}
node workette {
has date, owner, status, snooze_till;
has note, is_MIT, is_ritual;
}
walker use_test {
can use.enc_question, use.enc_answer, use.qa_score;
has output;
q = use.enc_question(["How old are you?",
"which animal is the best?"]);
std.log(q);
a = use.enc_answer(["I'm 40 years old.", "Elephants rule."]);
std.log(a);
output = use.qa_score(q, a);
report output;
}
walker use_test_with_ctx {
can use.enc_question, use.enc_answer, use.qa_score, use.dist_score;
has output;
q = use.enc_question("Who are you?");
a = use.enc_answer("I am jason");
output = use.qa_score(q, a);
report output;
a = use.enc_answer("You are jon");
output = use.qa_score(q, a);
report output;
a = use.enc_answer("Who are you? You are jon");
output = use.qa_score(q, a);
report output;
a = use.enc_answer("Who are you? You are jon");
output = use.qa_score(q, a);
report output;
q1 = use.enc_question("Who are you?");
q2 = use.enc_question("Who you be?");
q3 = use.enc_question("Who I be?");
output = use.dist_score(q1, q2);
report output;
output = use.dist_score(q1, q3);
report output;
output = use.qa_score(q2, use.enc_answer("Who are you? You are jon"));
report output;
output = use.qa_score(q3, use.enc_answer("Who are you? You are jon"));
report output;
output = use.qa_score(q2, use.enc_answer("I am jason"));
report output;
output = use.qa_score(q3, use.enc_answer("I am jason"));
report output;
}
walker use_test_with_ctx2 {
can use.enc_question, use.enc_answer, use.qa_score, use.dist_score;
q1 = use.enc_question("Who are you?");
q2 = use.enc_question("Who you be?");
q3 = use.enc_question("Who I be?");
report use.dist_score(q1, q2);
report use.dist_score(q1, q3);
report use.qa_score(q2, use.enc_answer("Who are you? You are jon"));
report use.qa_score(q3, use.enc_answer("Who are you? You are jon"));
report use.qa_score(q2, use.enc_answer("I am jason"));
report use.qa_score(q3, use.enc_answer("I am jason"));
report use.qa_score(q3, use.enc_answer("I am jason","Who I be?"));
report use.qa_score(q3, use.enc_answer("I am jason Who I be?"));
}
walker use_test_single {
can use.enc_question, use.enc_answer, use.qa_score;
has output;
q = use.enc_question("Who's your daddy?");
a = use.enc_answer("I'm your father.");
output = use.qa_score(q, a);
report output;
}
walker get_day {
has date;
life: take infer.year_from_date(date);
year: take infer.month_from_date(date);
month: take infer.week_from_date(date);
week: take infer.day_from_date(date);
day: report --> ;
}
walker get_gen_day {
has date;
can infer.year_from_date;
can infer.month_from_date;
can infer.week_from_date;
can infer.day_from_date;
life: take --> node::year == infer.year_from_date(date) else {
new = spawn here --> node::year;
new.year = infer.year_from_date(date);
take --> node::year == infer.year_from_date(date);
}
year: take --> node::month == infer.month_from_date(date) else {
new = spawn here --> node::month;
new.month = infer.month_from_date(date);
take --> node::month == infer.month_from_date(date);
}
month: take --> node::week == infer.week_from_date(date) else {
new = spawn here --> node::week;
new.week = infer.week_from_date(date);
take --> node::week == infer.week_from_date(date);
}
week: take --> node::day == infer.day_from_date(date) else {
new = spawn here --> node::day;
new.day = infer.day_from_date(date);
take --> node::day == infer.day_from_date(date);
}
day: report --> ;
}
walker get_sub_workettes {
workette: report --> node::workette;
}
walker get_latest_day {
life: take year.max_outbound;
year: take month.max_outbound;
month: take week.max_outbound;
week: report day.max_outbound;
}
walker carry_forward {
has my_root;
day {
new_day = spawn here --> node::day;
my_root = new_day;
take day.outbound_nodes;
}
workette {
if(workette.status == 'done' or
workette.status == 'eliminated') {
continue;
}
childern = workette.outbound_nodes;
new_workette = spawn here --> node::workette;
parent = me.spawn_history.last(-1);
new_workette <-- parent;
take --> node::workette;
}
report me.spawn_history;
report new_day;
}
"""
edgey = \
"""
node testnode;
edge apple;
edge banana;
walker init {
root {
a = spawn here --> node::testnode ;
here -[apple]-> a;
here -[banana]-> a;
}
}
"""
edgey2 = \
"""
node testnode;
edge apple;
edge banana;
walker init {
root {
a = spawn here --> node::testnode ;
here -[apple]-> a;
here -[banana]-> a;
here !--> a;
}
}
"""
edgey2b = \
"""
node testnode;
edge apple;
edge banana;
walker init {
root {
a = spawn here --> node::testnode ;
b = spawn here --> node::testnode ;
here -[apple]-> a;
here -[banana]-> a;
here !--> a;
}
}
"""
edgey2c = \
"""
node testnode;
edge apple;
edge banana;
walker init {
root {
a = spawn here --> node::testnode ;
b = spawn here --> node::testnode ;
here -[apple]-> a;
here -[banana]-> a;
here !-[apple]-> a;
}
}
"""
edgey3 = \
"""
node testnode;
edge apple;
edge banana;
walker init {
root {
a = spawn here --> node::testnode ;
b = spawn here --> node::testnode ;
here -[apple]-> a;
here -[apple]-> a;
here -[banana]-> a;
here -[banana]-> a;
here -[banana]-> a;
here !-[apple]-> a;
}
}
"""
edgey4 = \
"""
node testnode;
edge apple;
edge banana;
walker init {
root {
a = spawn here --> node::testnode ;
here --> a;
here -[apple]-> a;
here -[banana]-> a;
here !-[generic]-> a;
}
}
"""
edgey5 = \
"""
node testnode;
edge apple;
edge banana;
walker init {
root {
a = spawn here --> node::testnode ;
here --> a;
here --> a;
here -[apple]-> a;
here -[banana]-> a;
here !-[generic]-> -[generic]->;
}
}
"""
edgey6 = \
"""
node testnode;
edge apple;
edge banana;
walker init {
root {
a = spawn here --> node::testnode ;
b = spawn here --> node::testnode ;
here -[apple]-> -[generic]->;
}
}
"""
edgey7 = \
"""
node testnode;
edge apple;
edge banana;
walker init {
root {
a = spawn here --> node::testnode ;
b = spawn here --> node::testnode ;
here --> a;
here --> a;
here -[apple]-> a;
here -[apple]-> b;
here -[banana]-> a;
here !-[generic]-> -[apple]->;
}
}
"""
edge_access = \
"""
node testnode;
edge apple {
has v1, v2;
}
edge banana {
has x1, x2;
}
walker init {
root {
a = spawn here -[apple]-> node::testnode ;
b = spawn here -[banana]-> node::testnode ;
e = -[apple]->.edge[0];
e.v1 = 7;
e = --> node::testnode .edge[1];
e.x1=8;
}
}
"""
has_assign = \
"""
node testnode {
has a=8;
}
walker init {
root {
a = spawn here --> node::testnode ;
b = spawn here --> node::testnode ;
std.log(a.a, b.a);
}
}
"""
set_get_global = \
"""
walker setter {
root {
std.set_global('globby', 59);
}
}
walker getter {
has a;
root {
a=std.get_global('globby');
std.log(std.get_global('globby'));
}
}
"""
set_get_global_dict = \
"""
walker setter {
root {
std.set_global('globby',
{ "max_bot_count": 10, "max_ans_count": 100,
"max_txn_count": 50000, "max_test_suite": 5,
"max_test_cases": 50, "export_import": true,
"analytics": true, "integration": "All"
});
}
}
walker getter {
has a;
root {
a=std.get_global('globby');
std.log(std.get_global('globby'));
report std.get_global('globby');
}
}
"""
version_label = \
"""
version: "alpha-1.0"
walker setter {
root {
std.set_global('globby', 59);
}
}
walker getter {
has a;
root {
a=std.get_global('globby');
std.log(std.get_global('globby'));
}
}
"""
sharable = \
"""
node life {
}
walker init {
root {
new = spawn here --> node::life;
take -->;
}
life {
std.out(here);
}
}
"""
basic = \
"""
node life {
}
walker init {
root {
new = spawn here --> node::life;
take -->;
}
life {
}
}
"""
visibility_builtins = \
"""
node testnode {
has yo, mama;
}
edge apple {
has v1, v2;
}
edge banana {
has x1, x2;
}
walker init {
root {
a = spawn here -[apple]-> node::testnode ;
a.yo="Yeah i said";
a.mama="Yo Mama Fool!";
b = spawn here -[banana]-> node::testnode ;
e = -[apple]->.edge[0];
e.v1 = 7;
e = --> node::testnode .edge[1];
e.x1=8;
report [a.context, b.info, e.details];
}
}
"""
spawn_ctx_edge_node = \
"""
node person: has name, age, birthday, profession;
edge friend: has meeting_place;
edge family: has kind;
walker init {
person1 = spawn here -[friend(meeting_place = "college")]->
node::person(name = "Josh", age = 32);
person2 = spawn here -[family(kind = "sister")] ->
node::person(name = "Jane", age = 30);
for i in -->{
report i.context;
report i.edge[0].context;
}
}
"""
filter_ctx_edge_node = \
"""
node person: has name, age, birthday, profession;
edge friend: has meeting_place;
edge family: has kind;
walker init {
person1 = spawn here -[friend(meeting_place = "college")]->
node::person(name = "Josh", age = 32);
person2 = spawn here -[family(kind = "sister")] ->
node::person(name = "Jane", age = 30);
report --> node::person(name=='Jane')[0].context;
report -[family(kind=="brother")]->;
}
"""
null_handleing = \
"""
node person: has name, age, birthday, profession;
walker init {
person1 = spawn here -->
node::person(name = "Josh", age = 32);
if(person1.birthday==null): report true;
else: report false;
if(person1.name==null): report true;
else: report false;
person1.name=null;
report person1.name==null;
person1.name=0;
report person1.name==null;
}
"""
bool_type_convert = \
"""
node person: has name;
walker init {
p1 = spawn here -->
node::person(name = "Josh");
p1.name = true;
report p1.name;
std.log(p1.name);
report p1.context;
}
"""
typecasts = \
"""
walker init {
a=5.6;
report (a+2);
report (a+2).int;
report (a+2).str;
report (a+2).bool;
report (a+2).int.float;
if(a.str.type == str and !(a.int.type == str)
and a.int.type == int):
report "Types comes back correct";
}
"""
typecasts_error = \
"""
walker init {
a=5.6;
report (a+2);
report (a+2).int;
report (a+2).str;
report (a+2).edge;
report ("a+2").int.float;
if(a.str.type == str and !(a.int.type == str)
and a.int.type == int):
report "Types comes back correct";
}
"""
filter_on_context = \
"""
node testnode {
has yo, mama;
}
edge apple {
has v1, v2;
}
edge banana {
has x1, x2;
}
walker init {
root {
a = spawn here -[apple]-> node::testnode ;
a.yo="Yeah i said";
a.mama="Yo Mama Fool!";
b = spawn here -[banana]-> node::testnode ;
e = -[apple]->.edge[0];
e.v1 = 7;
e = --> node::testnode .edge[1];
e.x1=8;
report [a.context.{yo}, b.info.{jid,j_type}, e.details];
}
}
"""
string_manipulation = \
"""
walker init {
a=" tEsting me ";
report a[4];
report a[4:7];
report a[3:-1];
report a.str::upper;
report a.str::lower;
report a.str::title;
report a.str::capitalize;
report a.str::swap_case;
report a.str::is_alnum;
report a.str::is_alpha;
report a.str::is_digit;
report a.str::is_title;
report a.str::is_upper;
report a.str::is_lower;
report a.str::is_space;
report a.str::count('t');
report a.str::find('i');
report a.str::split;
report a.str::split('E');
report a.str::startswith('tEs');
report a.str::endswith('me');
report a.str::replace('me', 'you');
report a.str::strip;
report a.str::strip(' t');
report a.str::lstrip;
report a.str::lstrip(' tE');
report a.str::rstrip;
report a.str::rstrip(' e');
report a.str::upper.str::is_upper;
}
"""
string_join = \
"""
walker init {
a=['test', 'me', 'now'];
report '_'.str::join(a);
}
"""
sub_list = \
"""
walker init {
a=[1,2,3,4,5,6,7,8,9];
report a[4:7];
}
"""
destroy_and_misc = \
"""
node person: has name, age, birthday, profession;
edge friend: has meeting_place;
edge family: has kind;
walker init {
person1 = spawn here -[friend(meeting_place = "college")]->
node::person(name = "Josh", age = 32);
person2 = spawn here -[family(kind = "sister")] ->
node::person(name = "Jane", age = 30);
report person1.name;
destroy person1.name;
report person1.context;
person1.name="pete";
report person1.context;
a=[1,2,3];
destroy a[1];
report a;
b={'a': 'b', 'c':'d'};
destroy b['c'];
report b;
a=[1,2,3,5,6,7,8,9];
destroy a[2:4];
report a;
a[2:4]=[45,33];
report a;
destroy a;
report a;
person1.banana=45;
report person1.context;
report 'age' in person1.context;
}
"""
arbitrary_assign_on_element = \
"""
node person: has name, age, birthday, profession;
walker init {
some = spawn here --> node::person;
some.apple = 45;
report some.context;
}
"""
try_else_stmts = \
"""
walker init {
a=null;
try {a=2/0;}
else with err {report err;}
try {a=2/0;}
else {report 'dont need err';}
try {a=2/0;}
try {a=2/0;}
report a;
try {a=2/1;}
report a;
}
"""
node_edge_same_name = \
"""
node person: has name, age, birthday, profession;
edge person: has meeting_place;
walker init {
person1 = spawn here -[person(meeting_place = "college")]->
node::person(name = "Josh", age = 32);
report -->.edge[0].context;
report -->[0].context;
}
"""
testcases = \
"""
node testnode {
has yo, mama;
}
node apple {
has v1, v2;
}
node banana {
has x1, x2;
}
graph dummy {
has anchor graph_root;
spawn {
graph_root = spawn node::testnode (yo="Hey yo!");
n1=spawn node::apple(v1="I'm apple");
n2=spawn node::banana(x1="I'm banana");
graph_root --> n1 --> n2;
}
}
walker init {
has num=4;
report here.context;
report num;
take -->;
}
test "basic test with refs"
with graph::dummy by walker::init;
test "test with refs and assert block"
with graph::dummy by walker::init {
report "ASSERT BLOCK";
}
test "test with graph ref and walker block"
with graph::dummy by walker {
report here.context;
report "IN generic walker";
take -->;
}
test "test with graph block and walker ref"
with graph {
has anchor graph_root;
spawn {
graph_root = spawn node::testnode (yo="Hey yo!");
n1=spawn node::apple(v1="I'm apple");
n2=spawn node::banana(x1="I'm banana");
graph_root --> n1 --> n2;
graph_root --> n2;
}
} by walker::init {
report "ASSERT BLOCK";
}
"""
testcase_asserts = \
"""
node testnode {
has yo, mama;
}
node apple {
has v1, v2;
}
node banana {
has x1, x2;
}
graph dummy {
has anchor graph_root;
spawn {
graph_root = spawn node::testnode (yo="Hey yo!");
n1=spawn node::apple(v1="I'm apple");
n2=spawn node::banana(x1="I'm banana");
graph_root --> n1 --> n2;
}
}
walker init {
has num=4;
report here.context;
report num;
take -->;
}
test "assert should be valid"
with graph::dummy by walker::init {
assert (num==4);
assert (here.x1=="I'm banana");
assert <--[0].v1=="I'm apple";
}
test "assert should fail"
with graph::dummy by walker::init {
assert (num==4);
assert (here.x1=="I'm banana");
assert <--[0].v1=="I'm Apple";
}
test "assert should fail, add internal except"
with graph::dummy by walker::init {
assert (num==4);
assert (here.x1=="I'm banana");
assert <--[10].v1=="I'm apple";
}
"""
report_not_to_jacset = \
"""
node testnode {
has yo, mama;
}
walker init {
spawn here --> node::testnode;
report -->;
}
"""
walker_spawn_unwrap_check = \
"""
node testnode {
has yo, mama;
}
walker print {
has anchor nd;
nd=here;
}
walker init {
report &(spawn here walker::print);
}
"""
|
py | 1a33813b2010f168bc5696fa5980830da108e284 | from SloppyCell.ReactionNetworks import *
# Modifications to SBML...
# Removed function LD, because it used 'ceil' which is not something we can deal
# with
# Replaced variable value_of_LD with light (more descriptive name)
# Replaced calls to LD with light
# Removed timeOfDay and dayLength variables
net = IO.from_SBML_file('BIOMD055-noceil.xml', 'base')
net.compile()
# Set up a network that will switch light on/off at 12 hour intervals.
net1212 = net.copy('net_1212')
net1212.set_var_ic('light', 1)
net1212.add_parameter('turntime', 12, is_constant=False)
net1212.add_event('light_switch', 'gt(time, turntime)', {'light': '1-light',
'turntime': '12+time'})
mutant_net = net1212.copy('cca1lhy')
mutant_net.set_var_ic('p1', net.get_var_ic('p1')/1000)
# Run to the limit cycle
traj = Dynamics.integrate(net1212, [0, 24*10])
net1212.set_var_ics(traj.get_var_vals_index(-1))
# Go to limit cycle
traj = Dynamics.integrate(mutant_net, [0, 24*10])
mutant_net.set_var_ics(traj.get_var_vals_index(-1))
net_12L_12D_12L_D = net1212.copy('net_12L_12D_12L_D')
net_12L_12D_12L_D.remove_component('light_switch')
net_12L_12D_12L_D.remove_component('turntime')
net_12L_12D_12L_D.set_var_ic('light', 1)
net_12L_12D_12L_D.add_event('off_12', 'gt(time, 12)', {'light': 0})
net_12L_12D_12L_D.add_event('on_24', 'gt(time, 24)', {'light': 1})
net_12L_12D_12L_D.add_event('off_36', 'gt(time, 36)', {'light': 0})
# Run for twelve more hours to get to the dark part of the cycle
traj = Dynamics.integrate(net1212, [0, 12])
net1212.set_var_ics(traj.get_var_vals_index(-1))
net_12D_L = net1212.copy('net_12D_L')
net_12D_L.remove_component('light_switch')
net_12D_L.remove_component('turntime')
net_12D_L.set_var_ic('light', 0)
net_12D_L.add_event('on_12', 'gt(time, 12)', {'light': 1})
mutant_12L_12D_12L_D = mutant_net.copy('mutant_12L_12D_12L_D')
mutant_12L_12D_12L_D.remove_component('light_switch')
mutant_12L_12D_12L_D.remove_component('turntime')
mutant_12L_12D_12L_D.set_var_ic('light', 1)
mutant_12L_12D_12L_D.add_event('off_12', 'gt(time, 12)', {'light': 0})
mutant_12L_12D_12L_D.add_event('on_24', 'gt(time, 24)', {'light': 1})
mutant_12L_12D_12L_D.add_event('off_36', 'gt(time, 36)', {'light': 0})
trajm = Dynamics.integrate(mutant_12L_12D_12L_D, [0, 96])
# Run for twelve more hours to get to the dark part of the cycle
traj = Dynamics.integrate(mutant_net, [0, 12])
mutant_net.set_var_ics(traj.get_var_vals_index(-1))
mutant_12D_L = mutant_net.copy('mutant_12D_L')
mutant_12D_L.remove_component('light_switch')
mutant_12D_L.remove_component('turntime')
mutant_12D_L.set_var_ic('light', 0)
mutant_12D_L.add_event('on_12', 'gt(time, 12)', {'light': 1})
networks = [net_12L_12D_12L_D, net_12D_L, mutant_12L_12D_12L_D, mutant_12D_L]
int_times = [(0, 96), (0, 96), (0, 48), (0,48)]
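# Illustrative sketch (an assumption, not part of the original script): the two
# parallel lists above are presumably consumed together, e.g.
#     for network, (t_start, t_stop) in zip(networks, int_times):
#         traj = Dynamics.integrate(network, [t_start, t_stop])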
|
py | 1a33824603a62ff243a7fec94c576dd447a75625 | # -*- coding: utf-8 -*-
"""
pygments.lexers.graph
~~~~~~~~~~~~~~~~~~~~~
Lexers for graph query languages.
:copyright: Copyright 2006-2017 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, include, bygroups, using, this
from pygments.token import Keyword, Punctuation, Comment, Operator, Name,\
String, Number, Whitespace
__all__ = ['CypherLexer']
class CypherLexer(RegexLexer):
"""
For `Cypher Query Language
<http://docs.neo4j.org/chunked/milestone/cypher-query-lang.html>`_
For the Cypher version in Neo4J 2.0
.. versionadded:: 2.0
"""
name = 'Cypher'
aliases = ['cypher']
filenames = ['*.cyp', '*.cypher']
flags = re.MULTILINE | re.IGNORECASE
tokens = {
'root': [
include('comment'),
include('keywords'),
include('clauses'),
include('relations'),
include('strings'),
include('whitespace'),
include('barewords'),
],
'comment': [
(r'^.*//.*\n', Comment.Single),
],
'keywords': [
(r'(create|order|match|limit|set|skip|start|return|with|where|'
r'delete|foreach|not|by)\b', Keyword),
],
'clauses': [
# TODO: many missing ones, see http://docs.neo4j.org/refcard/2.0/
(r'(all|any|as|asc|create|create\s+unique|delete|'
r'desc|distinct|foreach|in|is\s+null|limit|match|none|'
r'order\s+by|return|set|skip|single|start|union|where|with)\b',
Keyword),
],
'relations': [
(r'(-\[)(.*?)(\]->)', bygroups(Operator, using(this), Operator)),
(r'(<-\[)(.*?)(\]-)', bygroups(Operator, using(this), Operator)),
(r'(-\[)(.*?)(\]-)', bygroups(Operator, using(this), Operator)),
(r'-->|<--|\[|\]', Operator),
(r'<|>|<>|=|<=|=>|\(|\)|\||:|,|;', Punctuation),
(r'[.*{}]', Punctuation),
],
'strings': [
(r'"(?:\\[tbnrf\'"\\]|[^\\"])*"', String),
(r'`(?:``|[^`])+`', Name.Variable),
],
'whitespace': [
(r'\s+', Whitespace),
],
'barewords': [
(r'[a-z]\w*', Name),
(r'\d+', Number),
],
}
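# Minimal usage sketch (illustrative, not part of the original module; the
# sample query is made up): the lexer can be exercised with the standard
# Pygments API.
if __name__ == '__main__':
    from pygments import highlight
    from pygments.formatters import TerminalFormatter
    sample = 'MATCH (a)-[r:KNOWS]->(b) WHERE a.name = "Alice" RETURN b'
    print(highlight(sample, CypherLexer(), TerminalFormatter()))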
|
py | 1a3382c22d78006a66b6190c084b5132d8cda969 | """
This is a setup.py script generated by py2applet
Usage:
python setup.py py2app
"""
from setuptools import setup
APP = ['main.py']
DATA_FILES = []
OPTIONS = {'includes':['PySide2', 'PyQt5']}
setup(
app=APP,
data_files=DATA_FILES,
options={'py2app': OPTIONS},
setup_requires=['py2app'],
)
|
py | 1a338468a8fad917b3ce739b429fbc33e1305562 | # Copyright (c) 2017-2019 Datasud.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.conf import settings
from django.core.exceptions import ValidationError
from django import forms
from django.forms.models import ModelChoiceIterator
from django.utils import timezone
from idgo_admin.models import Category
from idgo_admin.models import Dataset
from idgo_admin.models import DataType
from idgo_admin.models import Granularity
from idgo_admin.models import License
from idgo_admin.models import Organisation
from idgo_admin.models import Support
import re
from taggit.forms import TagField
from taggit.forms import TagWidget
CKAN_URL = settings.CKAN_URL
DEFAULT_CONTACT_EMAIL = settings.DEFAULT_CONTACT_EMAIL
DEFAULT_PLATFORM_NAME = settings.DEFAULT_PLATFORM_NAME
DOMAIN_NAME = settings.DOMAIN_NAME
GEONETWORK_URL = settings.GEONETWORK_URL
TODAY = timezone.now().date().strftime('%d/%m/%Y')
# Definition of DatatypeField
# ===========================
class DatatypeModelIterator(ModelChoiceIterator):
def __iter__(self):
if self.field.empty_label is not None:
yield ("", self.field.empty_label)
queryset = self.queryset
if not queryset._prefetch_related_lookups:
queryset = queryset.iterator()
for obj in queryset:
if obj.slug == 'donnees-moissonnees':
continue
yield self.choice(obj)
class DatatypeModelMultipleChoiceField(forms.ModelMultipleChoiceField):
iterator = DatatypeModelIterator
class DatatypeField(DatatypeModelMultipleChoiceField):
def __init__(self, *args, **kwargs):
kwargs.setdefault('label', "Type de données")
kwargs.setdefault('required', False)
kwargs.setdefault('widget', forms.CheckboxSelectMultiple())
kwargs.setdefault('queryset', DataType.objects.all())
super().__init__(*args, **kwargs)
# ====================
# Dataset editing form
# ====================
class DatasetForm(forms.ModelForm):
class Meta(object):
model = Dataset
fields = (
'broadcaster_email',
'broadcaster_name',
'categories',
'data_type',
'date_creation',
'date_modification',
'date_publication',
'description',
'geocover',
'granularity',
'keywords',
'license',
'organisation',
'owner_email',
'owner_name',
'published',
'support',
'thumbnail',
'update_frequency',
'title',
'slug')
title = forms.CharField(
label="Titre*",
required=True,
widget=forms.Textarea(
attrs={
'placeholder': "Titre de votre jeu de données",
'rows': 1,
},
),
)
slug = forms.CharField(
label="URL du jeu de données",
required=False,
max_length=100,
widget=forms.TextInput(
attrs={
'addon_before': '{}/dataset/'.format(CKAN_URL),
'addon_before_class': 'input-group-addon',
'addon_after': '<button class="btn btn-default" type="button" />',
'addon_after_class': 'input-group-btn',
'autocomplete': 'off',
'readonly': True,
'placeholder': '',
},
),
)
description = forms.CharField(
label="Description",
required=False,
widget=forms.Textarea(
attrs={
'placeholder': "Vous pouvez utiliser le langage Markdown ici",
},
),
)
class CustomClearableFileInput(forms.ClearableFileInput):
template_name = 'idgo_admin/widgets/file_drop_zone.html'
thumbnail = forms.FileField(
label="Illustration",
required=False,
widget=CustomClearableFileInput(
attrs={
'value': None,
'max_size_info': 1048576,
},
),
)
keywords = TagField(
label="Liste de mots-clés",
required=False,
widget=TagWidget(
attrs={
'autocomplete': 'off',
'class': 'typeahead',
'placeholder': "Utilisez la virgule comme séparateur",
},
),
)
categories = forms.ModelMultipleChoiceField(
label="Catégories (sélectionnez dans la liste ci-dessous une ou plusieurs catégories)",
required=False,
queryset=Category.objects.all(),
widget=forms.CheckboxSelectMultiple(),
)
date_creation = forms.DateField(
label="Date de création",
required=False,
widget=forms.DateInput(
attrs={
'autocomplete': 'off',
'class': 'datepicker',
'placeholder': "{0} (par défaut)".format(TODAY),
},
),
)
date_modification = forms.DateField(
label="Date de dernière modification",
required=False,
widget=forms.DateInput(
attrs={
'autocomplete': 'off',
'class': 'datepicker',
'placeholder': "{0} (par défaut)".format(TODAY),
},
),
)
date_publication = forms.DateField(
label="Date de publication",
required=False,
widget=forms.DateInput(
attrs={
'autocomplete': 'off',
'class': 'datepicker',
'placeholder': "{0} (par défaut)".format(TODAY),
},
),
)
update_frequency = forms.ChoiceField(
label="Fréquence de mise à jour",
required=False,
choices=Dataset.FREQUENCY_CHOICES,
)
geocover = forms.ChoiceField(
label="Couverture géographique",
required=False,
choices=Dataset.GEOCOVER_CHOICES,
)
granularity = forms.ModelChoiceField(
label="Granularité de la couverture territoriale",
empty_label="Sélectionnez une valeur",
required=False,
queryset=Granularity.objects.all().order_by('order'),
)
organisation = forms.ModelChoiceField(
label="Organisation à laquelle est rattaché ce jeu de données*",
empty_label="Sélectionnez une organisation",
required=True,
queryset=Organisation.objects.all(),
)
license = forms.ModelChoiceField(
label="Licence*",
empty_label="Sélectionnez une licence",
required=True,
queryset=License.objects.all(),
)
support = forms.ModelChoiceField(
label="Support technique",
empty_label="Aucun",
required=False,
queryset=Support.objects.all(),
)
data_type = DatatypeField(
# See the definition above
)
owner_name = forms.CharField(
label="Nom du producteur",
required=False,
)
owner_email = forms.EmailField(
label="Adresse e-mail du producteur",
required=False,
error_messages={
'invalid': "L'adresse e-mail est invalide.",
},
)
broadcaster_name = forms.CharField(
label="Nom du diffuseur",
required=False,
)
broadcaster_email = forms.EmailField(
label="Adresse e-mail du diffuseur",
required=False,
error_messages={
'invalid': "L'adresse e-mail est invalide.",
},
)
published = forms.BooleanField(
label="Publier le jeu de données",
required=False,
initial=True,
)
def __init__(self, *args, **kwargs):
self.include_args = kwargs.pop('include', {})
super().__init__(*args, **kwargs)
instance = kwargs.get('instance', None)
owner = instance \
and instance.editor or self.include_args.get('user')
self.fields['organisation'].queryset = Organisation.objects.filter(
liaisonscontributeurs__user=owner,
liaisonscontributeurs__validated_on__isnull=False)
self.fields['owner_name'].initial = owner.get_full_name()
self.fields['owner_name'].widget.attrs['placeholder'] = \
'{} (valeur par défaut)'.format(owner.get_full_name())
self.fields['owner_email'].initial = owner.email
self.fields['owner_email'].widget.attrs['placeholder'] = \
'{} (valeur par défaut)'.format(owner.email)
self.fields['broadcaster_name'].widget.attrs['placeholder'] = \
instance and instance.support and instance.support.name or DEFAULT_PLATFORM_NAME
self.fields['broadcaster_email'].widget.attrs['placeholder'] = \
instance and instance.support and instance.support.email or DEFAULT_CONTACT_EMAIL
if instance and instance.thumbnail:
self.fields['thumbnail'].widget.attrs['value'] = instance.thumbnail.url
if not instance:
self.fields['granularity'].initial = 'indefinie'
def clean(self):
title = self.cleaned_data.get('title')
if not re.match(r'^[a-z0-9\-]{1,100}$', self.cleaned_data.get('slug')):
self.add_error('slug', (
"Seuls les caractères alphanumériques et le tiret sont "
"autorisés (100 caractères maximum)."))
raise ValidationError('KeywordsError')
if self.include_args['identification']:
dataset = Dataset.objects.get(id=self.include_args['id'])
if title != dataset.title and Dataset.objects.filter(title=title).exists():
self.add_error('title', 'Ce nom est réservé.')
raise ValidationError("Dataset '{0}' already exists".format(title))
if not self.include_args['identification'] \
and Dataset.objects.filter(title=title).exists():
self.add_error('title', 'Le jeu de données "{0}" existe déjà'.format(title))
raise ValidationError("Dataset '{0}' already exists".format(title))
kwords = self.cleaned_data.get('keywords')
if kwords:
for w in kwords:
if len(w) < 2:
self.add_error('keywords', "La taille minimum pour un mot clé est de 2 caractères. ")
raise ValidationError("KeywordsError")
regex = r'^[a-zA-Z0-9áàâäãåçéèêëíìîïñóòôöõúùûüýÿæœÁÀÂÄÃÅÇÉÈÊËÍÌÎÏÑÓÒÔÖÕÚÙÛÜÝŸÆŒ\._\-\s]*$'
if not re.match(regex, w):
self.add_error('keywords', "Les mots-clés ne peuvent pas contenir de caractères spéciaux. ")
raise ValidationError('KeywordsError')
return self.cleaned_data
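# Usage sketch (illustrative comments only; the view wiring is an assumption):
# the form expects an 'include' mapping carrying the current user and the
# edit-mode flags read in __init__ and clean(), e.g.
#     form = DatasetForm(request.POST or None,
#                        include={'user': request.user,
#                                 'identification': False,
#                                 'id': None})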
|
py | 1a3385bd9f04e73f8b69648643b8a7357f3c599b | """
PyTorch Edgecortix backend
"""
# pylint: disable=unused-argument,missing-docstring
import os
import torch
import backend
import ip_runtime
import torchvision
import torchvision.transforms as transforms
import tvm
from tvm import relay
from tvm.relay import mera
from tqdm import tqdm
from PIL import Image
from torch.utils.data import DataLoader, Dataset
class CalibrationDataset(Dataset):
def __init__(self, root, files, transform):
with open(files, 'r') as f:
self.files = [os.path.join(root, fn.strip()) for fn in f.readlines()]
self.transform = transform
def __getitem__(self, idx):
image = Image.open(self.files[idx]).convert('RGB')
image = self.transform(image)
return image
def __len__(self):
return len(self.files)
class BackendEdgecortix(backend.Backend):
def __init__(self, dataset_path, dataset_calibration_list):
super(BackendEdgecortix, self).__init__()
self.sess = None
self.model = None
self.iprt = None
self.device = "cpu"
self.dataset_path = dataset_path
self.dataset_calibration_list = dataset_calibration_list
def version(self):
return ""
def name(self):
return "edgecortix"
def image_format(self):
return "NHWC"
def quantize_model(self, transform, quantization_backend='fbgemm'):
print(torch.backends.quantized.supported_engines)
print(quantization_backend)
if quantization_backend not in torch.backends.quantized.supported_engines:
raise RuntimeError("Quantized backend not supported ")
torch.backends.quantized.engine = quantization_backend
self.model.cpu()
self.model.eval()
self.model.fuse_model()
dataset = CalibrationDataset(root=self.dataset_path, files=self.dataset_calibration_list, transform=transform)
dataloader = DataLoader(dataset, batch_size=1)
self.model.qconfig = torch.quantization.get_default_qconfig(quantization_backend)
torch.quantization.prepare(self.model, inplace=True)
for x in tqdm(dataloader):
self.model(x)
torch.quantization.convert(self.model, inplace=True)
def compile_model(self, input_shape, torch_input_shape, output_dir, config):
inputs = [("input0", input_shape)]
input_layout = self.image_format()
with torch.no_grad():
traced_model = torch.jit.trace(self.model, torch.rand(torch_input_shape)).eval()
mod, params = relay.frontend.from_pytorch(traced_model, inputs, layout=input_layout)
with mera.build_config(target="IP", **config):
mera.build(mod, params, output_dir=output_dir, host_arch="x86", layout=input_layout)
def load(self, model_path, inputs=None, outputs=None):
arch = {"arch": 400, "scheduler_config": {"mode": "Slow"}}
if model_path == 'torchvision-resnet50':
self.model = torchvision.models.quantization.resnet50(pretrained=True, progress=True, quantize=False)
self.model.eval()
transform = transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])
input_shape = (1, 224, 224, 3) # NHWC
torch_input_shape = (1, 3, 224, 224) # NCHW
elif model_path == 'torchvision-mobilenetv2':
self.model = torchvision.models.quantization.mobilenet.mobilenet_v2(pretrained=True, progress=True, quantize=False)
self.model.eval()
transform = transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])
input_shape = (1, 224, 224, 3) # NHWC
torch_input_shape = (1, 3, 224, 224) # NCHW
arch = {
"arch": 401,
"scheduler_config": {"mode": "Slow"}
}
else:
raise RuntimeError("Preset model not available: ", model_path)
ec_dir = "./ec-" + model_path
if not os.path.exists(ec_dir):
self.quantize_model(transform)
self.compile_model(input_shape, torch_input_shape, ec_dir, arch)
self.iprt = ip_runtime.IPRuntime()
self.iprt.Setup(ec_dir)
# dummy
self.inputs = ["input"]
self.outputs = ["output"]
return self
def predict(self, feed):
key=[key for key in feed.keys()][0]
output_ip = torch.from_numpy(self.iprt.Run(feed[key])[0])
return [output_ip]
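# Usage sketch (illustrative comments only; the paths and the input array are
# placeholders): the backend is driven through load() and predict(), e.g.
#     backend = BackendEdgecortix('/path/to/imagenet', '/path/to/cal_list.txt')
#     backend.load('torchvision-resnet50')
#     result = backend.predict({'input': preprocessed_image})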
|
py | 1a3386078ba0eae54aecf6c889dcaaa0433bfb1e | # kafka_install.py
# TODO: capture/pipe all output of these commands somewhere they can be used for debugging. add more confirmation output.
# resources:
# https://www.digitalocean.com/community/tutorials/initial-server-setup-with-ubuntu-18-04
# https://askubuntu.com/questions/94060/run-adduser-non-interactively
# https://www.digitalocean.com/community/tutorials/ufw-essentials-common-firewall-rules-and-commands
# https://askubuntu.com/questions/746413/trying-to-install-java-8-unable-to-locate-package-openjdk-8-jre
# https://www.digitalocean.com/community/tutorials/how-to-install-apache-kafka-on-ubuntu-18-04
# https://security.stackexchange.com/questions/45712/how-secure-is-nopasswd-in-passwordless-sudo-mode
# https://kafka-python.readthedocs.io/en/master/install.html
# https://help.ubuntu.com/lts/serverguide/firewall.html
import sys,os,subprocess,socket
from uuid import uuid4
_,install_command = sys.argv
ip_addresses = []
admin_user = ''
if install_command == 'install_root':
# add new user=ADMIN_USER and user=kafka
subprocess.call(['adduser','--disabled-password','--gecos','""',admin_user])
subprocess.call(['adduser','--disabled-password','--gecos','""','kafka'])
# grant ADMIN_USER and kafka sudo privileges
subprocess.call(['usermod','-aG','sudo',admin_user])
subprocess.call(['usermod','-aG','sudo','kafka'])
# setup firewall and let applications be managed by name using builtin ufw
subprocess.call(['ufw','app','list'])
subprocess.call(['ufw','allow','OpenSSH'])
subprocess.call(['ufw','--force','enable'])
subprocess.call(['ufw','status'])
# setup ssh access for user=ADMIN_USER and user=kafka
subprocess.call(['rsync --archive --chown='+admin_user+':'+admin_user+' ~/.ssh /home/'+admin_user],shell=True)
subprocess.call(['rsync --archive --chown=kafka:kafka ~/.ssh /home/kafka'],shell=True)
# allow user=ADMIN_USER and user=kafka to execute sudo commands without password prompt
fn = '/etc/sudoers'
f = open(fn,'r')
s = f.read()
f.close()
s = s +'\n'+ admin_user+' ALL=(ALL) NOPASSWD:ALL'
s = s +'\n'+ 'kafka ALL=(ALL) NOPASSWD:ALL'
f = open(fn,'w')
f.write(s)
f.close()
elif install_command == 'install_kafka':
# install openjdk 8
subprocess.call(['sudo','add-apt-repository','ppa:openjdk-r/ppa','-y'])
subprocess.call(['sudo','apt-get','update'])
subprocess.call(['sudo','apt-get','install','openjdk-8-jre','-y'])
# download, extract, install
subprocess.call(['mkdir','/home/kafka/Downloads'])
subprocess.call(['curl','https://www.apache.org/dist/kafka/2.1.1/kafka_2.11-2.1.1.tgz','-o','/home/kafka/Downloads/kafka.tgz'])
subprocess.call(['mkdir','/home/kafka/kafka'])
os.chdir('/home/kafka/kafka')
subprocess.call(['tar','-xvzf','/home/kafka/Downloads/kafka.tgz','--strip','1'])
# set kafka configs
fn = '/home/kafka/kafka/config/server.properties'
f = open(fn,'r')
s = f.read()
f.close()
s = s +'\n'+ 'delete.topic.enable=true'
f = open(fn,'w')
f.write(s)
f.close()
# set zookeeper unit definition
fn = '/etc/systemd/system/zookeeper.service'
subprocess.call(['sudo','touch',fn])
subprocess.call(['sudo','chmod','777',fn])
unit_definition = "[Unit]\nRequires=network.target remote-fs.target\nAfter=network.target remote-fs.target\n\n[Service]\nType=simple\nUser=kafka\nExecStart=/home/kafka/kafka/bin/zookeeper-server-start.sh /home/kafka/kafka/config/zookeeper.properties\nExecStop=/home/kafka/kafka/bin/zookeeper-server-stop.sh\nRestart=on-abnormal\n\n[Install]\nWantedBy=multi-user.target"
f = open(fn,'w')
f.write(unit_definition)
f.close()
# set kafka unit definition
fn = '/etc/systemd/system/kafka.service'
subprocess.call(['sudo','touch',fn])
subprocess.call(['sudo','chmod','777',fn])
unit_definition = "[Unit]\nRequires=zookeeper.service\nAfter=zookeeper.service\n\n[Service]\nType=simple\nUser=kafka\nExecStart=/bin/sh -c '/home/kafka/kafka/bin/kafka-server-start.sh /home/kafka/kafka/config/server.properties > /home/kafka/kafka/kafka.log 2>&1'\nExecStop=/home/kafka/kafka/bin/kafka-server-stop.sh\nRestart=on-abnormal\n\n[Install]\nWantedBy=multi-user.target"
f = open(fn,'w')
f.write(unit_definition)
f.close()
# prepare network for running kafka and zookeeper
ip_address = socket.gethostbyname(socket.gethostname())
subprocess.call(['sudo','hostnamectl','set-hostname',ip_address])
subprocess.call(['sudo','ufw','allow','9092'])
subprocess.call(['sudo','ufw','allow','2181'])
# start kafka and check
subprocess.call(['sudo','systemctl','start','kafka'])
subprocess.call(['sudo','journalctl','-u','kafka'])
# enable on boot
#subprocess.call(['sudo','systemctl','enable','kafka']) DOES NOT WORK.
# install pip3
subprocess.call(['sudo','apt-get','-y','install','python3-pip'])
# install kafka-python with optionals
subprocess.call(['sudo','pip3','install','kafka-python'])
subprocess.call(['sudo','pip3','install','lz4'])
subprocess.call(['sudo','pip3','install','crc32c'])
# assign unique broker_id and zookeeper connection params
fn = '/home/kafka/kafka/config/server.properties'
f = open(fn,'r')
s = f.read()
f.close()
broker_id = int(uuid4())
s = s.replace('broker.id=0','broker.id='+str(broker_id))
sources = []
for ip_address in ip_addresses:
source = ip_address+':2181'
sources.append(source)
sources_str = ','.join(sources)
s = s.replace('zookeeper.connect=localhost:2181','zookeeper.connect='+sources_str)
f = open(fn,'w')
f.write(s)
f.close()
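# Invocation sketch (illustrative; fill in admin_user and ip_addresses above
# before running):
#     python3 kafka_install.py install_root    # first, as root on a fresh host
#     python3 kafka_install.py install_kafka   # then, as the kafka user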
|
py | 1a33861f76580cc46cedf0c7bbdc8bfcd60cd6d7 | class Solution(object):
def twoSum(self, nums, target):
"""
:type nums: List[int]
:type target: int
:rtype: List[int]
"""
dic = {}
for i in range(len(nums)):
if nums[i] in dic:
return [dic[nums[i]], i]
else:
dic[target-nums[i]] = i
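# Tiny usage sketch (illustrative, not part of the original solution):
if __name__ == '__main__':
    # 2 + 7 == 9, so the indices of 2 and 7 are returned.
    assert Solution().twoSum([2, 7, 11, 15], 9) == [0, 1]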
|
py | 1a3386dccbaff834cf133f997727059f49331b83 | tabuada=1
while tabuada <=10:
n=1
print("Tabuada %d" %tabuada)
while n<=10:
print("%d x %d = %d" %(tabuada, n, tabuada*n))
n=n+1
tabuada=tabuada+1
|
py | 1a3389d0db1109cd810463ebe7937038b17c6de0 | import torch
import torch.nn as nn
from models.TransBTS.Transformer import TransformerModel
from models.TransBTS.PositionalEncoding import FixedPositionalEncoding,LearnedPositionalEncoding
from models.TransBTS.Unet_skipconnection import Unet
class TransformerBTS(nn.Module):
def __init__(
self,
img_dim,
patch_dim,
num_channels,
embedding_dim,
num_heads,
num_layers,
hidden_dim,
dropout_rate=0.0,
attn_dropout_rate=0.0,
conv_patch_representation=True,
positional_encoding_type="learned",
):
super(TransformerBTS, self).__init__()
assert embedding_dim % num_heads == 0
assert img_dim % patch_dim == 0
self.img_dim = img_dim
self.embedding_dim = embedding_dim
self.num_heads = num_heads
self.patch_dim = patch_dim
self.num_channels = num_channels
self.dropout_rate = dropout_rate
self.attn_dropout_rate = attn_dropout_rate
self.conv_patch_representation = conv_patch_representation
self.num_patches = int((img_dim // patch_dim) ** 3)
self.seq_length = self.num_patches
self.flatten_dim = 128 * num_channels
self.linear_encoding = nn.Linear(self.flatten_dim, self.embedding_dim)
if positional_encoding_type == "learned":
self.position_encoding = LearnedPositionalEncoding(
self.seq_length, self.embedding_dim, self.seq_length
)
elif positional_encoding_type == "fixed":
self.position_encoding = FixedPositionalEncoding(
self.embedding_dim,
)
self.pe_dropout = nn.Dropout(p=self.dropout_rate)
self.transformer = TransformerModel(
embedding_dim,
num_layers,
num_heads,
hidden_dim,
self.dropout_rate,
self.attn_dropout_rate,
)
self.pre_head_ln = nn.LayerNorm(embedding_dim)
if self.conv_patch_representation:
self.conv_x = nn.Conv3d(
128,
self.embedding_dim,
kernel_size=3,
stride=1,
padding=1
)
self.Unet = Unet(in_channels=4, base_channels=16, num_classes=4)
self.bn = nn.BatchNorm3d(128)
self.relu = nn.ReLU(inplace=True)
def encode(self, x):
if self.conv_patch_representation:
# combine embedding with conv patch distribution
x1_1, x2_1, x3_1, x = self.Unet(x)
x = self.bn(x)
x = self.relu(x)
x = self.conv_x(x)
x = x.permute(0, 2, 3, 4, 1).contiguous()
x = x.view(x.size(0), -1, self.embedding_dim)
else:
x = self.Unet(x)
x = self.bn(x)
x = self.relu(x)
x = (
x.unfold(2, 2, 2)
.unfold(3, 2, 2)
.unfold(4, 2, 2)
.contiguous()
)
x = x.view(x.size(0), x.size(1), -1, 8)
x = x.permute(0, 2, 3, 1).contiguous()
x = x.view(x.size(0), -1, self.flatten_dim)
x = self.linear_encoding(x)
x = self.position_encoding(x)
x = self.pe_dropout(x)
# apply transformer
x, intmd_x = self.transformer(x)
x = self.pre_head_ln(x)
return x1_1, x2_1, x3_1, x, intmd_x
def decode(self, x):
raise NotImplementedError("Should be implemented in child class!!")
def forward(self, x, auxillary_output_layers=[1, 2, 3, 4]):
x1_1, x2_1, x3_1, encoder_output, intmd_encoder_outputs = self.encode(x)
decoder_output = self.decode(
x1_1, x2_1, x3_1, encoder_output, intmd_encoder_outputs, auxillary_output_layers
)
if auxillary_output_layers is not None:
auxillary_outputs = {}
for i in auxillary_output_layers:
val = str(2 * i - 1)
_key = 'Z' + str(i)
auxillary_outputs[_key] = intmd_encoder_outputs[val]
return decoder_output
return decoder_output
def _get_padding(self, padding_type, kernel_size):
assert padding_type in ['SAME', 'VALID']
if padding_type == 'SAME':
_list = [(k - 1) // 2 for k in kernel_size]
return tuple(_list)
return tuple(0 for _ in kernel_size)
def _reshape_output(self, x):
x = x.view(
x.size(0),
int(self.img_dim / self.patch_dim),
int(self.img_dim / self.patch_dim),
int(self.img_dim / self.patch_dim),
self.embedding_dim,
)
x = x.permute(0, 4, 1, 2, 3).contiguous()
return x
class BTS(TransformerBTS):
def __init__(
self,
img_dim,
patch_dim,
num_channels,
num_classes,
embedding_dim,
num_heads,
num_layers,
hidden_dim,
dropout_rate=0.0,
attn_dropout_rate=0.0,
conv_patch_representation=True,
positional_encoding_type="learned",
):
super(BTS, self).__init__(
img_dim=img_dim,
patch_dim=patch_dim,
num_channels=num_channels,
embedding_dim=embedding_dim,
num_heads=num_heads,
num_layers=num_layers,
hidden_dim=hidden_dim,
dropout_rate=dropout_rate,
attn_dropout_rate=attn_dropout_rate,
conv_patch_representation=conv_patch_representation,
positional_encoding_type=positional_encoding_type,
)
self.num_classes = num_classes
self.Softmax = nn.Softmax(dim=1)
self.Enblock8_1 = EnBlock1(in_channels=self.embedding_dim)
self.Enblock8_2 = EnBlock2(in_channels=self.embedding_dim // 4)
self.DeUp4 = DeUp_Cat(in_channels=self.embedding_dim//4, out_channels=self.embedding_dim//8)
self.DeBlock4 = DeBlock(in_channels=self.embedding_dim//8)
self.DeUp3 = DeUp_Cat(in_channels=self.embedding_dim//8, out_channels=self.embedding_dim//16)
self.DeBlock3 = DeBlock(in_channels=self.embedding_dim//16)
self.DeUp2 = DeUp_Cat(in_channels=self.embedding_dim//16, out_channels=self.embedding_dim//32)
self.DeBlock2 = DeBlock(in_channels=self.embedding_dim//32)
self.endconv = nn.Conv3d(self.embedding_dim // 32, 4, kernel_size=1)
def decode(self, x1_1, x2_1, x3_1, x, intmd_x, intmd_layers=[1, 2, 3, 4]):
assert intmd_layers is not None, "pass the intermediate layers for MLA"
encoder_outputs = {}
all_keys = []
for i in intmd_layers:
val = str(2 * i - 1)
_key = 'Z' + str(i)
all_keys.append(_key)
encoder_outputs[_key] = intmd_x[val]
all_keys.reverse()
x8 = encoder_outputs[all_keys[0]]
x8 = self._reshape_output(x8)
x8 = self.Enblock8_1(x8)
x8 = self.Enblock8_2(x8)
y4 = self.DeUp4(x8, x3_1) # (1, 64, 32, 32, 32)
y4 = self.DeBlock4(y4)
y3 = self.DeUp3(y4, x2_1) # (1, 32, 64, 64, 64)
y3 = self.DeBlock3(y3)
y2 = self.DeUp2(y3, x1_1) # (1, 16, 128, 128, 128)
y2 = self.DeBlock2(y2)
y = self.endconv(y2) # (1, 4, 128, 128, 128)
y = self.Softmax(y)
return y
class EnBlock1(nn.Module):
def __init__(self, in_channels):
super(EnBlock1, self).__init__()
self.bn1 = nn.BatchNorm3d(512 // 4)
self.relu1 = nn.ReLU(inplace=True)
self.bn2 = nn.BatchNorm3d(512 // 4)
self.relu2 = nn.ReLU(inplace=True)
self.conv1 = nn.Conv3d(in_channels, in_channels // 4, kernel_size=3, padding=1)
self.conv2 = nn.Conv3d(in_channels // 4, in_channels // 4, kernel_size=3, padding=1)
def forward(self, x):
x1 = self.conv1(x)
x1 = self.bn1(x1)
x1 = self.relu1(x1)
x1 = self.conv2(x1)
x1 = self.bn2(x1)
x1 = self.relu2(x1)
return x1
class EnBlock2(nn.Module):
def __init__(self, in_channels):
super(EnBlock2, self).__init__()
self.conv1 = nn.Conv3d(in_channels, in_channels, kernel_size=3, padding=1)
self.bn1 = nn.BatchNorm3d(512 // 4)
self.relu1 = nn.ReLU(inplace=True)
self.bn2 = nn.BatchNorm3d(512 // 4)
self.relu2 = nn.ReLU(inplace=True)
self.conv2 = nn.Conv3d(in_channels, in_channels, kernel_size=3, padding=1)
def forward(self, x):
x1 = self.conv1(x)
x1 = self.bn1(x1)
x1 = self.relu1(x1)
x1 = self.conv2(x1)
x1 = self.bn2(x1)
x1 = self.relu2(x1)
x1 = x1 + x
return x1
class DeUp_Cat(nn.Module):
def __init__(self, in_channels, out_channels):
super(DeUp_Cat, self).__init__()
self.conv1 = nn.Conv3d(in_channels, out_channels, kernel_size=1)
self.conv2 = nn.ConvTranspose3d(out_channels, out_channels, kernel_size=2, stride=2)
self.conv3 = nn.Conv3d(out_channels*2, out_channels, kernel_size=1)
def forward(self, x, prev):
x1 = self.conv1(x)
y = self.conv2(x1)
# y = y + prev
y = torch.cat((prev, y), dim=1)
y = self.conv3(y)
return y
class DeBlock(nn.Module):
def __init__(self, in_channels):
super(DeBlock, self).__init__()
self.bn1 = nn.BatchNorm3d(in_channels)
self.relu1 = nn.ReLU(inplace=True)
self.conv1 = nn.Conv3d(in_channels, in_channels, kernel_size=3, padding=1)
self.conv2 = nn.Conv3d(in_channels, in_channels, kernel_size=3, padding=1)
self.bn2 = nn.BatchNorm3d(in_channels)
self.relu2 = nn.ReLU(inplace=True)
def forward(self, x):
x1 = self.conv1(x)
x1 = self.bn1(x1)
x1 = self.relu1(x1)
x1 = self.conv2(x1)
x1 = self.bn2(x1)
x1 = self.relu2(x1)
x1 = x1 + x
return x1
def TransBTS(dataset='brats', _conv_repr=True, _pe_type="learned"):
if dataset.lower() == 'brats':
img_dim = 128
num_classes = 4
num_channels = 4
patch_dim = 8
aux_layers = [1, 2, 3, 4]
model = BTS(
img_dim,
patch_dim,
num_channels,
num_classes,
embedding_dim=512,
num_heads=8,
num_layers=4,
hidden_dim=4096,
dropout_rate=0.1,
attn_dropout_rate=0.1,
conv_patch_representation=_conv_repr,
positional_encoding_type=_pe_type,
)
return aux_layers, model
if __name__ == '__main__':
with torch.no_grad():
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
cuda0 = torch.device('cuda:0')
x = torch.rand((1, 4, 128, 128, 128), device=cuda0)
_, model = TransBTS(dataset='brats', _conv_repr=True, _pe_type="learned")
model.cuda()
y = model(x)
print(y.shape)
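        # If the decoder shape comments above hold, this should print
        # torch.Size([1, 4, 128, 128, 128]) (4-class softmax at full resolution).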
|
py | 1a338a0cc69934641d9fa196fcee001f8f29878d | import os
import random
import numpy as np
import time
from openravepy import *
import data_filepaths
from potential_point import PotentialPoint
from workspace_generation_utils import WorkspaceParams
class OpenraveManager(object):
def __init__(self, segment_validity_step, potential_points):
# env_path = os.path.abspath(
# os.path.expanduser('~/ModelBasedDDPG/config/widowx_env.xml'))
env_path = os.path.join(os.getcwd(), 'data', 'config', 'widowx_env.xml')
self.env = Environment()
self.env.StopSimulation()
self.env.Load(env_path) # load a simple scene
self.robot = self.env.GetRobots()[0] # load the robot
self.links_names = [l.GetName() for l in self.robot.GetLinks()]
        self.robot.SetActiveDOFs(range(1, 5))  # keep the first joint out of the active DOFs
# set the color
color = np.array([33, 213, 237])
for link in self.robot.GetLinks():
for geom in link.GetGeometries():
geom.SetDiffuseColor(color)
self.objects = []
self.segment_validity_step = segment_validity_step
# translate the potential to list of (unprocessed_point, link, coordinate)
self.potential_points = potential_points
self.joint_safety = 0.0001
self.loaded_params_path = None
self.loaded_params = None
def load_params(self, workspace_params, params_path):
if self.loaded_params_path is not None and self.loaded_params_path == params_path:
# already loaded
return
with self.env:
for i in range(workspace_params.number_of_obstacles):
body = RaveCreateKinBody(self.env, '')
body.SetName('box{}'.format(i))
body.InitFromBoxes(np.array([[0, 0, 0, workspace_params.sides_x[i], 0.01, workspace_params.sides_z[i]]]),
True)
self.env.Add(body, True)
transformation_matrix = np.eye(4)
translation = np.array([
workspace_params.centers_position_x[i], 0.0, workspace_params.centers_position_z[i]])
theta = workspace_params.y_axis_rotation[i]
rotation_matrix = np.array([
[np.cos(theta), 0.0, np.sin(theta)], [0.0, 1.0, 0.0], [-np.sin(theta), 0.0, np.cos(theta)]
])
transformation_matrix[:3, -1] = translation
transformation_matrix[:3, :3] = rotation_matrix
body.SetTransform(transformation_matrix)
self.objects.append(body)
self.loaded_params_path = params_path
self.loaded_params = workspace_params
def remove_objects(self):
with self.env:
while len(self.objects):
body = self.objects.pop()
self.env.Remove(body)
self.loaded_params_path = None
self.loaded_params = None
def set_params(self, params_path):
loaded = self.loaded_params_path
if loaded is None:
self.load_params(WorkspaceParams.load_from_file(params_path), params_path)
return True
else:
if loaded != params_path:
self.remove_objects()
self.load_params(WorkspaceParams.load_from_file(params_path), params_path)
return True
return False
def get_number_of_joints(self):
return self.robot.GetDOF()
def get_joint_bounds(self):
return self.robot.GetDOFLimits()
def get_random_joints(self, fixed_positions_dictionary=None):
joint_bounds = self.get_joint_bounds()
result = []
for i in range(self.get_number_of_joints()):
if fixed_positions_dictionary is not None and i in fixed_positions_dictionary:
result.append(fixed_positions_dictionary[i])
else:
result.append(random.uniform(joint_bounds[0][i], joint_bounds[1][i]))
result = self.truncate_joints(result)
return tuple(result)
def truncate_joints(self, joints):
bounds = self.get_joint_bounds()
res = list(joints)
for i, j in enumerate(joints):
lower = bounds[0][i] + self.joint_safety
res[i] = max(res[i], lower)
upper = bounds[1][i] - self.joint_safety
res[i] = min(res[i], upper)
return tuple(res)
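    # Example (illustrative numbers): with bounds [-1.0, 1.0] on a joint and
    # joint_safety=0.0001, an input of 1.5 is clipped to 0.9999 and -2.0 to -0.9999.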
def is_valid(self, joints):
self.robot.SetDOFValues(joints, [0, 1, 2, 3, 4])
res = not self.robot.CheckSelfCollision()
if self.objects is not None:
for item in self.objects:
res = res and not self.env.CheckCollision(self.robot, item)
return res
def plan(self, start_joints, goal_joints, max_planner_iterations):
with self.env:
if not self.is_valid(start_joints) or not self.is_valid(goal_joints):
return None
self.robot.SetDOFValues(start_joints, [0, 1, 2, 3, 4])
manipprob = interfaces.BaseManipulation(self.robot) # create the interface for basic manipulation programs
try:
items_per_trajectory_step = 10
active_joints = self.robot.GetActiveDOF()
# call motion planner with goal joint angles
traj = manipprob.MoveActiveJoints(goal=goal_joints[1:], execute=False, outputtrajobj=True, maxtries=1,
maxiter=max_planner_iterations)
# found plan, if not an exception is thrown and caught below
traj = list(traj.GetWaypoints(0, traj.GetNumWaypoints()))
assert len(traj) % items_per_trajectory_step == 0
# take only the joints values and add the 0 joint.
traj = [[0.0] + traj[x:x + items_per_trajectory_step][:active_joints] for x in
xrange(0, len(traj), items_per_trajectory_step)]
# assert validity
if self.get_last_valid_in_trajectory(traj) != traj[-1]:
return None
# plan found and validated!
return traj
except Exception, e:
print str(e)
return None
def check_segment_validity(self, start_joints, end_joints):
steps = self.partition_segment(start_joints, end_joints)
random.shuffle(steps)
for step in steps:
if not self.is_valid(step):
return False
return True
def partition_segment(self, start_joints, end_joints):
# partition the segment between start joints to end joints
current = np.array(start_joints)
next = np.array(end_joints)
difference = next - current
difference_norm = np.linalg.norm(difference)
step_size = self.segment_validity_step
if difference_norm < step_size:
# if smaller than allowed step just append the next step
return [tuple(end_joints)]
else:
scaled_step = (step_size / difference_norm) * difference
steps = []
for alpha in range(int(np.floor(difference_norm / step_size))):
processed_step = current + (1 + alpha) * scaled_step
steps.append(processed_step)
# we probably have a leftover section, append it to res
last_step_difference = np.linalg.norm(steps[-1] - next)
if last_step_difference > 0.0:
steps.append(next)
# append to list of configuration points to test validity
return [tuple(s) for s in steps]
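    # Worked example (illustrative numbers): with segment_validity_step=0.01 and a
    # joint-space move of norm 0.035, the loop emits points at distances 0.01, 0.02
    # and 0.03 along the segment, and the 0.005 leftover is covered by appending
    # end_joints itself.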
def get_last_valid_in_trajectory(self, trajectory):
for i in range(len(trajectory)-1):
if not self.check_segment_validity(trajectory[i], trajectory[i+1]):
return trajectory[i]
return trajectory[-1]
def get_initialized_viewer(self):
if self.env.GetViewer() is None:
self.env.SetViewer('qtcoin')
# set camera
camera_transform = np.eye(4)
theta = -np.pi / 2
rotation_matrix = np.array([
[1.0, 0.0, 0.0], [0.0, np.cos(theta), -np.sin(theta)], [0.0, np.sin(theta), np.cos(theta)]
])
camera_transform[:3, :3] = rotation_matrix
camera_transform[:3, 3] = np.array([0.0, -1.0, 0.25])
time.sleep(1)
viewer = self.env.GetViewer()
viewer.SetCamera(camera_transform)
return viewer
@staticmethod
def get_manager_for_workspace(workspace_id, config):
directory = os.path.abspath(os.path.expanduser(config['data']['directory']))
workspace_dir = os.path.join(directory, workspace_id)
potential_points = PotentialPoint.from_config(config)
openrave_manager = OpenraveManager(config['data']['joint_segment_validity_step'], potential_points)
        params_path = data_filepaths.get_workspace_params_path(workspace_dir)
        workspace_params = WorkspaceParams.load_from_file(params_path)
        # load_params expects the params path as well; it is used to detect whether
        # this workspace is already loaded
        openrave_manager.load_params(workspace_params, params_path)
return openrave_manager, workspace_dir
def get_links_poses(self, joints):
self.robot.SetDOFValues(joints, [0, 1, 2, 3, 4])
poses = self.robot.GetLinkTransformations()
result = {
link_name: tuple(poses[i][[0, 2], -1])
            for i, link_name in enumerate(self.links_names)
}
return result
def get_links_poses_array(self, joints):
poses = self.get_links_poses(joints)
return [poses[link_name] for link_name in self.links_names]
def get_potential_points_poses(self, joints, post_process=True):
self.robot.SetDOFValues(joints, [0, 1, 2, 3, 4])
link_transform = self.robot.GetLinkTransformations()
result = {p.tuple: np.matmul(link_transform[p.link], p.coordinate) for p in self.potential_points}
if post_process:
result = {k: (result[k][0], result[k][2]) for k in result}
return result
def get_target_pose(self, joints):
# target is the last potential
return self.get_potential_points_poses(joints)[self.potential_points[-1].tuple]
@staticmethod
def _post_process_jacobian(j, is_numeric=False):
return j[[0, 2], 1 if is_numeric else 0:].transpose()
def get_links_jacobians(self, joints, modeling_links=None):
if modeling_links is None:
modeling_links = self.links_names
self.robot.SetDOFValues(joints, [0, 1, 2, 3, 4])
poses = self.robot.GetLinkTransformations()
return {
link_name: self._post_process_jacobian(self.robot.CalculateActiveJacobian(i, poses[i][:3, 3]))
for i, link_name in enumerate(self.links_names) if link_name in modeling_links
}
def get_potential_points_jacobians(self, joints):
potential_points_poses = self.get_potential_points_poses(joints, post_process=False)
self.robot.SetDOFValues(joints, [0, 1, 2, 3, 4])
return {
p.tuple: self._post_process_jacobian(
self.robot.CalculateActiveJacobian(p.link, potential_points_poses[p.tuple])
)
for p in self.potential_points
}
# def get_links_numeric_jacobians(self, joints, modeling_links=None, epsilon=0.0001):
# if modeling_links is None:
# modeling_links = self.links_names
# res = {link_name: np.zeros((3, len(joints))) for link_name in modeling_links}
# bounds = self.get_joint_bounds()
# for i in range(len(joints)):
# local_j = [j for j in joints]
# new_value = local_j[i] + epsilon
# use_upper = new_value < bounds[1][i]
# if use_upper:
# local_j[i] += epsilon
# self.robot.SetDOFValues(local_j, [0, 1, 2, 3, 4])
# transforms = self.robot.GetLinkTransformations()
# p1 = {link_name: transforms[i][:3, 3]
# for i, link_name in enumerate(self.links_names) if link_name in modeling_links}
# local_j = [j for j in joints]
# new_value = local_j[i] - epsilon
# use_lower = new_value > bounds[0][i]
# if use_lower:
# local_j[i] -= epsilon
# self.robot.SetDOFValues(local_j, [0, 1, 2, 3, 4])
# transforms = self.robot.GetLinkTransformations()
# p2 = {link_name: transforms[i][:3, 3]
# for i, link_name in enumerate(self.links_names) if link_name in modeling_links}
# for link_name in modeling_links:
# local_res = (p1[link_name]-p2[link_name]) / (use_lower*epsilon + use_upper*epsilon)
# res[link_name][:, i] = local_res
# return {link_name: self._post_process_jacobian(res[link_name], is_numeric=True) for link_name in res}
# def get_target_jacobian(self, joints):
# return self.get_links_jacobians(joints, self.links_names[-1])
# if __name__ == "__main__":
# m = OpenraveManager(0.01)
#
#
# # joints0 = [0.0]*5
# # poses0 = m.get_links_poses(joints0)
# # print m.is_valid(joints0)
# # joints1 = [0.0]*4 + [1.5]
# # poses1 = m.get_links_poses(joints1)
# # print m.is_valid(joints1)
# # m.get_links_poses_array(joints0)
# #
# # joints = [0.1 + 0.2*i for i in range(5)]
# # poses = m.get_links_poses(joints)
# # numeric_jacobians = m.get_links_numeric_jacobians(joints)
# # for i in range(7):
# # m.robot.SetDOFValues(joints, [0, 1, 2, 3, 4])
# # print i
# # print m.links_names[i]
# # print 'pose:'
# # print poses[m.links_names[i]]
# # print 'local jacobian:'
# # print m.robot.CalculateJacobian(i, joints)
# # print 'active jacobian at 0'
# # print m.robot.CalculateActiveJacobian(i, [0.0, 0.0, 0.0])
# # print 'active jacobian at pose'
# # p = m.robot.GetLinkTransformations()[i][:3,3]
# # print m.robot.CalculateActiveJacobian(i, p)
# # print 'numerical jacobian'
# # print numeric_jacobians[m.links_names[i]]
# # print ''
#
#
#
#
#
# # transformed = []
# # for x_corner in [-x_length / 2.0, x_length / 2.0]:
# # for y_corner in [-y_length / 2.0, y_length / 2.0]:
# # for z_corner in [-z_length / 2.0, z_length / 2.0]:
# # corner = np.array([x_corner,y_corner,z_corner,1.0])
# # print 'corner {}'.format(corner)
# # print 'transform:'
# # transformed_corner = np.matmul(body.GetTransform(), corner)
# # transformed.append(transformed_corner)
# # print transformed_corner
#
# from mpl_toolkits.mplot3d import Axes3D
# import matplotlib.pyplot as plt
#
# fig = plt.figure()
# ax = fig.add_subplot(111, projection='3d')
# ax.scatter([t[0] for t in transformed], [t[1] for t in transformed], [t[2] for t in transformed])
# ax.set_xlabel('X Label')
# ax.set_ylabel('Y Label')
# ax.set_zlabel('Z Label')
#
# plt.show()
#
# print 'here'
if __name__ == "__main__":
potential_points = [PotentialPoint(t) for t in [(4, 0.0, 0.0), (5, 0.0, 0.0)]]
m = OpenraveManager(0.01, potential_points)
joints0 = [0.0] * 5
res1 = m.get_potential_points_poses(joints0)
res2 = m.get_links_poses(joints0)
print res1[potential_points[0].tuple] == res2[m.links_names[potential_points[0].link]]
print res1[potential_points[1].tuple] == res2[m.links_names[potential_points[1].link]]
res3 = m.get_potential_points_jacobians(joints0)
res4 = m.get_links_jacobians(joints0)
print res3[potential_points[0].tuple] == res4[m.links_names[potential_points[0].link]]
print res3[potential_points[1].tuple] == res4[m.links_names[potential_points[1].link]]
joints0 = [0.0] * 5
joints0[2] = np.pi/4
res1 = m.get_potential_points_poses(joints0)
res2 = m.get_links_poses(joints0)
print res1[potential_points[0].tuple] == res2[m.links_names[potential_points[0].link]]
print res1[potential_points[1].tuple] == res2[m.links_names[potential_points[1].link]]
res3 = m.get_potential_points_jacobians(joints0)
res4 = m.get_links_jacobians(joints0)
print res3[potential_points[0].tuple] == res4[m.links_names[potential_points[0].link]]
print res3[potential_points[1].tuple] == res4[m.links_names[potential_points[1].link]] |
py | 1a338bdbc8f3176504ff25fc8947351914531f4d | # Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import base64
import sys
import testtools
from tempest.api.compute import base
from tempest import clients
from tempest.common.utils import data_utils
from tempest import config
from tempest import exceptions
from tempest import test
CONF = config.CONF
class ServersNegativeTestJSON(base.BaseV2ComputeTest):
def setUp(self):
super(ServersNegativeTestJSON, self).setUp()
try:
self.client.wait_for_server_status(self.server_id, 'ACTIVE')
except Exception:
self.__class__.server_id = self.rebuild_server(self.server_id)
def tearDown(self):
self.server_check_teardown()
super(ServersNegativeTestJSON, self).tearDown()
@classmethod
def resource_setup(cls):
super(ServersNegativeTestJSON, cls).resource_setup()
cls.client = cls.servers_client
cls.alt_os = clients.Manager(cls.isolated_creds.get_alt_creds())
cls.alt_client = cls.alt_os.servers_client
resp, server = cls.create_test_server(wait_until='ACTIVE')
cls.server_id = server['id']
@test.attr(type=['negative', 'gate'])
def test_server_name_blank(self):
# Create a server with name parameter empty
self.assertRaises(exceptions.BadRequest,
self.create_test_server,
name='')
@test.attr(type=['negative', 'gate'])
def test_personality_file_contents_not_encoded(self):
# Use an unencoded file when creating a server with personality
file_contents = 'This is a test file.'
person = [{'path': '/etc/testfile.txt',
'contents': file_contents}]
self.assertRaises(exceptions.BadRequest,
self.create_test_server,
personality=person)
@test.attr(type=['negative', 'gate'])
def test_create_with_invalid_image(self):
# Create a server with an unknown image
self.assertRaises(exceptions.BadRequest,
self.create_test_server,
image_id=-1)
@test.attr(type=['negative', 'gate'])
def test_create_with_invalid_flavor(self):
# Create a server with an unknown flavor
self.assertRaises(exceptions.BadRequest,
self.create_test_server,
flavor=-1,)
@test.attr(type=['negative', 'gate'])
def test_invalid_access_ip_v4_address(self):
# An access IPv4 address must match a valid address pattern
IPv4 = '1.1.1.1.1.1'
self.assertRaises(exceptions.BadRequest,
self.create_test_server, accessIPv4=IPv4)
@test.attr(type=['negative', 'gate'])
def test_invalid_ip_v6_address(self):
# An access IPv6 address must match a valid address pattern
IPv6 = 'notvalid'
self.assertRaises(exceptions.BadRequest,
self.create_test_server, accessIPv6=IPv6)
@testtools.skipUnless(CONF.compute_feature_enabled.resize,
'Resize not available.')
@test.attr(type=['negative', 'gate'])
def test_resize_nonexistent_server(self):
# Resize a non-existent server
nonexistent_server = data_utils.rand_uuid()
self.assertRaises(exceptions.NotFound,
self.client.resize,
nonexistent_server, self.flavor_ref)
@testtools.skipUnless(CONF.compute_feature_enabled.resize,
'Resize not available.')
@test.attr(type=['negative', 'gate'])
def test_resize_server_with_non_existent_flavor(self):
# Resize a server with non-existent flavor
nonexistent_flavor = data_utils.rand_uuid()
self.assertRaises(exceptions.BadRequest, self.client.resize,
self.server_id, flavor_ref=nonexistent_flavor)
@testtools.skipUnless(CONF.compute_feature_enabled.resize,
'Resize not available.')
@test.attr(type=['negative', 'gate'])
def test_resize_server_with_null_flavor(self):
# Resize a server with null flavor
self.assertRaises(exceptions.BadRequest, self.client.resize,
self.server_id, flavor_ref="")
@test.attr(type=['negative', 'gate'])
def test_reboot_non_existent_server(self):
# Reboot a non existent server
nonexistent_server = data_utils.rand_uuid()
self.assertRaises(exceptions.NotFound, self.client.reboot,
nonexistent_server, 'SOFT')
@testtools.skipUnless(CONF.compute_feature_enabled.pause,
'Pause is not available.')
@test.attr(type=['negative', 'gate'])
def test_pause_paused_server(self):
# Pause a paused server.
self.client.pause_server(self.server_id)
self.client.wait_for_server_status(self.server_id, 'PAUSED')
self.assertRaises(exceptions.Conflict,
self.client.pause_server,
self.server_id)
self.client.unpause_server(self.server_id)
@test.attr(type=['negative', 'gate'])
def test_rebuild_reboot_deleted_server(self):
# Rebuild and Reboot a deleted server
_, server = self.create_test_server()
self.client.delete_server(server['id'])
self.client.wait_for_server_termination(server['id'])
self.assertRaises(exceptions.NotFound,
self.client.rebuild,
server['id'], self.image_ref_alt)
self.assertRaises(exceptions.NotFound, self.client.reboot,
server['id'], 'SOFT')
@test.attr(type=['negative', 'gate'])
def test_rebuild_non_existent_server(self):
# Rebuild a non existent server
nonexistent_server = data_utils.rand_uuid()
meta = {'rebuild': 'server'}
new_name = data_utils.rand_name('server')
file_contents = 'Test server rebuild.'
personality = [{'path': '/etc/rebuild.txt',
'contents': base64.b64encode(file_contents)}]
self.assertRaises(exceptions.NotFound,
self.client.rebuild,
nonexistent_server,
self.image_ref_alt,
name=new_name, meta=meta,
personality=personality,
adminPass='rebuild')
@test.attr(type=['negative', 'gate'])
def test_create_numeric_server_name(self):
server_name = 12345
self.assertRaises(exceptions.BadRequest,
self.create_test_server,
name=server_name)
@test.attr(type=['negative', 'gate'])
def test_create_server_name_length_exceeds_256(self):
# Create a server with name length exceeding 256 characters
server_name = 'a' * 256
self.assertRaises(exceptions.BadRequest,
self.create_test_server,
name=server_name)
@test.attr(type=['negative', 'gate'])
def test_create_with_invalid_network_uuid(self):
# Pass invalid network uuid while creating a server
networks = [{'fixed_ip': '10.0.1.1', 'uuid': 'a-b-c-d-e-f-g-h-i-j'}]
self.assertRaises(exceptions.BadRequest,
self.create_test_server,
networks=networks)
@test.attr(type=['negative', 'gate'])
def test_create_with_non_existent_keypair(self):
# Pass a non-existent keypair while creating a server
key_name = data_utils.rand_name('key')
self.assertRaises(exceptions.BadRequest,
self.create_test_server,
key_name=key_name)
@test.attr(type=['negative', 'gate'])
def test_create_server_metadata_exceeds_length_limit(self):
# Pass really long metadata while creating a server
metadata = {'a': 'b' * 260}
self.assertRaises(exceptions.OverLimit,
self.create_test_server,
meta=metadata)
@test.attr(type=['negative', 'gate'])
def test_update_name_of_non_existent_server(self):
# Update name of a non-existent server
server_name = data_utils.rand_name('server')
new_name = data_utils.rand_name('server') + '_updated'
self.assertRaises(exceptions.NotFound, self.client.update_server,
server_name, name=new_name)
@test.attr(type=['negative', 'gate'])
def test_update_server_set_empty_name(self):
# Update name of the server to an empty string
server_name = data_utils.rand_name('server')
new_name = ''
self.assertRaises(exceptions.BadRequest, self.client.update_server,
server_name, name=new_name)
@test.attr(type=['negative', 'gate'])
def test_update_server_of_another_tenant(self):
# Update name of a server that belongs to another tenant
new_name = self.server_id + '_new'
self.assertRaises(exceptions.NotFound,
self.alt_client.update_server, self.server_id,
name=new_name)
@test.attr(type=['negative', 'gate'])
def test_update_server_name_length_exceeds_256(self):
# Update name of server exceed the name length limit
new_name = 'a' * 256
self.assertRaises(exceptions.BadRequest,
self.client.update_server,
self.server_id,
name=new_name)
@test.attr(type=['negative', 'gate'])
def test_delete_non_existent_server(self):
# Delete a non existent server
nonexistent_server = data_utils.rand_uuid()
self.assertRaises(exceptions.NotFound, self.client.delete_server,
nonexistent_server)
@test.attr(type=['negative', 'gate'])
def test_delete_a_server_of_another_tenant(self):
# Delete a server that belongs to another tenant
self.assertRaises(exceptions.NotFound,
self.alt_client.delete_server,
self.server_id)
@test.attr(type=['negative', 'gate'])
def test_delete_server_pass_negative_id(self):
# Pass an invalid string parameter to delete server
self.assertRaises(exceptions.NotFound, self.client.delete_server, -1)
@test.attr(type=['negative', 'gate'])
def test_delete_server_pass_id_exceeding_length_limit(self):
# Pass a server ID that exceeds length limit to delete server
self.assertRaises(exceptions.NotFound, self.client.delete_server,
sys.maxint + 1)
@test.attr(type=['negative', 'gate'])
def test_create_with_nonexistent_security_group(self):
# Create a server with a nonexistent security group
security_groups = [{'name': 'does_not_exist'}]
self.assertRaises(exceptions.BadRequest,
self.create_test_server,
security_groups=security_groups)
@test.attr(type=['negative', 'gate'])
def test_get_non_existent_server(self):
# Get a non existent server details
nonexistent_server = data_utils.rand_uuid()
self.assertRaises(exceptions.NotFound, self.client.get_server,
nonexistent_server)
@test.attr(type=['negative', 'gate'])
def test_stop_non_existent_server(self):
# Stop a non existent server
nonexistent_server = data_utils.rand_uuid()
self.assertRaises(exceptions.NotFound, self.servers_client.stop,
nonexistent_server)
@testtools.skipUnless(CONF.compute_feature_enabled.pause,
'Pause is not available.')
@test.attr(type=['negative', 'gate'])
def test_pause_non_existent_server(self):
# pause a non existent server
nonexistent_server = data_utils.rand_uuid()
self.assertRaises(exceptions.NotFound, self.client.pause_server,
nonexistent_server)
@testtools.skipUnless(CONF.compute_feature_enabled.pause,
'Pause is not available.')
@test.attr(type=['negative', 'gate'])
def test_unpause_non_existent_server(self):
# unpause a non existent server
nonexistent_server = data_utils.rand_uuid()
self.assertRaises(exceptions.NotFound, self.client.unpause_server,
nonexistent_server)
@testtools.skipUnless(CONF.compute_feature_enabled.pause,
'Pause is not available.')
@test.attr(type=['negative', 'gate'])
def test_unpause_server_invalid_state(self):
# unpause an active server.
self.assertRaises(exceptions.Conflict,
self.client.unpause_server,
self.server_id)
@testtools.skipUnless(CONF.compute_feature_enabled.suspend,
'Suspend is not available.')
@test.attr(type=['negative', 'gate'])
def test_suspend_non_existent_server(self):
# suspend a non existent server
nonexistent_server = data_utils.rand_uuid()
self.assertRaises(exceptions.NotFound, self.client.suspend_server,
nonexistent_server)
@testtools.skipUnless(CONF.compute_feature_enabled.suspend,
'Suspend is not available.')
@test.attr(type=['negative', 'gate'])
def test_suspend_server_invalid_state(self):
# suspend a suspended server.
resp, _ = self.client.suspend_server(self.server_id)
self.assertEqual(202, resp.status)
self.client.wait_for_server_status(self.server_id, 'SUSPENDED')
self.assertRaises(exceptions.Conflict,
self.client.suspend_server,
self.server_id)
self.client.resume_server(self.server_id)
@testtools.skipUnless(CONF.compute_feature_enabled.suspend,
'Suspend is not available.')
@test.attr(type=['negative', 'gate'])
def test_resume_non_existent_server(self):
# resume a non existent server
nonexistent_server = data_utils.rand_uuid()
self.assertRaises(exceptions.NotFound, self.client.resume_server,
nonexistent_server)
@testtools.skipUnless(CONF.compute_feature_enabled.suspend,
'Suspend is not available.')
@test.attr(type=['negative', 'gate'])
def test_resume_server_invalid_state(self):
# resume an active server.
self.assertRaises(exceptions.Conflict,
self.client.resume_server,
self.server_id)
@test.attr(type=['negative', 'gate'])
def test_get_console_output_of_non_existent_server(self):
# get the console output for a non existent server
nonexistent_server = data_utils.rand_uuid()
self.assertRaises(exceptions.NotFound,
self.client.get_console_output,
nonexistent_server, 10)
@test.attr(type=['negative', 'gate'])
def test_force_delete_nonexistent_server_id(self):
# force-delete a non existent server
nonexistent_server = data_utils.rand_uuid()
self.assertRaises(exceptions.NotFound,
self.client.force_delete_server,
nonexistent_server)
@test.attr(type=['negative', 'gate'])
def test_restore_nonexistent_server_id(self):
# restore-delete a non existent server
nonexistent_server = data_utils.rand_uuid()
self.assertRaises(exceptions.NotFound,
self.client.restore_soft_deleted_server,
nonexistent_server)
@test.attr(type=['negative', 'gate'])
def test_restore_server_invalid_state(self):
# we can only restore-delete a server in 'soft-delete' state
self.assertRaises(exceptions.Conflict,
self.client.restore_soft_deleted_server,
self.server_id)
@testtools.skipUnless(CONF.compute_feature_enabled.shelve,
'Shelve is not available.')
@test.attr(type=['negative', 'gate'])
def test_shelve_non_existent_server(self):
# shelve a non existent server
nonexistent_server = data_utils.rand_uuid()
self.assertRaises(exceptions.NotFound, self.client.shelve_server,
nonexistent_server)
@testtools.skipUnless(CONF.compute_feature_enabled.shelve,
'Shelve is not available.')
@test.attr(type=['negative', 'gate'])
def test_shelve_shelved_server(self):
# shelve a shelved server.
resp, server = self.client.shelve_server(self.server_id)
self.assertEqual(202, resp.status)
offload_time = CONF.compute.shelved_offload_time
if offload_time >= 0:
self.client.wait_for_server_status(self.server_id,
'SHELVED_OFFLOADED',
extra_timeout=offload_time)
else:
self.client.wait_for_server_status(self.server_id,
'SHELVED')
resp, server = self.client.get_server(self.server_id)
image_name = server['name'] + '-shelved'
params = {'name': image_name}
resp, images = self.images_client.list_images(params)
self.assertEqual(1, len(images))
self.assertEqual(image_name, images[0]['name'])
self.assertRaises(exceptions.Conflict,
self.client.shelve_server,
self.server_id)
self.client.unshelve_server(self.server_id)
@testtools.skipUnless(CONF.compute_feature_enabled.shelve,
'Shelve is not available.')
@test.attr(type=['negative', 'gate'])
def test_unshelve_non_existent_server(self):
# unshelve a non existent server
nonexistent_server = data_utils.rand_uuid()
self.assertRaises(exceptions.NotFound, self.client.unshelve_server,
nonexistent_server)
@testtools.skipUnless(CONF.compute_feature_enabled.shelve,
'Shelve is not available.')
@test.attr(type=['negative', 'gate'])
def test_unshelve_server_invalid_state(self):
# unshelve an active server.
self.assertRaises(exceptions.Conflict,
self.client.unshelve_server,
self.server_id)
|
py | 1a338bde9d436a8b758a69549097b5770f4fd955 | # Generated by Django 3.0.4 on 2020-06-29 12:21
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('mission', '0006_auto_20200625_1554'),
('piece', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='piece',
name='vehicule',
),
migrations.AddField(
model_name='piece',
name='modele',
field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to='mission.Modele'),
preserve_default=False,
),
]
|
py | 1a338c0688204cbe4e74e187d276ce9f9ccbc839 | # Copyright (c) 2017 Yingxin Cheng
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
class WFException(Exception):
def __init__(self, msg, e=None):
assert isinstance(msg, str)
if e:
assert isinstance(e, Exception)
self.e = e
super(WFException, self).__init__(msg)
def __str__(self):
return self._to_str(0)
def _to_str(self, indent):
ret = "\n%s%s" % ("> "*indent, self.args[0])
if self.e:
if isinstance(self.e, WFException):
ret += self.e._to_str(indent+1)
else:
ret += "\n%s%r" % ("> "*(indent+1), self.e)
return ret
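# Minimal usage sketch (hypothetical messages), showing how nested WFExceptions
# render as an indented "> " chain when converted to a string:
#
#   inner = ValueError("bad value")
#   mid = WFException("failed to parse step", inner)
#   print(WFException("workflow aborted", mid))
#
# prints (roughly):
#
#   workflow aborted
#   > failed to parse step
#   > > ValueError('bad value')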
|
py | 1a338c8e4c0da0ec101189e69989df0c33e4c04c | # coding: utf-8
"""
Selling Partner API for FBA Inventory
The Selling Partner API for FBA Inventory lets you programmatically retrieve information about inventory in Amazon's fulfillment network. # noqa: E501
OpenAPI spec version: v1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class InventorySummary(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'asin': 'str',
'fn_sku': 'str',
'seller_sku': 'str',
'condition': 'str',
'inventory_details': 'InventoryDetails',
'last_updated_time': 'datetime',
'product_name': 'str',
'total_quantity': 'int'
}
attribute_map = {
'asin': 'asin',
'fn_sku': 'fnSku',
'seller_sku': 'sellerSku',
'condition': 'condition',
'inventory_details': 'inventoryDetails',
'last_updated_time': 'lastUpdatedTime',
'product_name': 'productName',
'total_quantity': 'totalQuantity'
}
def __init__(self, asin=None, fn_sku=None, seller_sku=None, condition=None, inventory_details=None, last_updated_time=None, product_name=None, total_quantity=None): # noqa: E501
"""InventorySummary - a model defined in Swagger""" # noqa: E501
self._asin = None
self._fn_sku = None
self._seller_sku = None
self._condition = None
self._inventory_details = None
self._last_updated_time = None
self._product_name = None
self._total_quantity = None
self.discriminator = None
if asin is not None:
self.asin = asin
if fn_sku is not None:
self.fn_sku = fn_sku
if seller_sku is not None:
self.seller_sku = seller_sku
if condition is not None:
self.condition = condition
if inventory_details is not None:
self.inventory_details = inventory_details
if last_updated_time is not None:
self.last_updated_time = last_updated_time
if product_name is not None:
self.product_name = product_name
if total_quantity is not None:
self.total_quantity = total_quantity
@property
def asin(self):
"""Gets the asin of this InventorySummary. # noqa: E501
The Amazon Standard Identification Number (ASIN) of an item. # noqa: E501
:return: The asin of this InventorySummary. # noqa: E501
:rtype: str
"""
return self._asin
@asin.setter
def asin(self, asin):
"""Sets the asin of this InventorySummary.
The Amazon Standard Identification Number (ASIN) of an item. # noqa: E501
:param asin: The asin of this InventorySummary. # noqa: E501
:type: str
"""
self._asin = asin
@property
def fn_sku(self):
"""Gets the fn_sku of this InventorySummary. # noqa: E501
Amazon's fulfillment network SKU identifier. # noqa: E501
:return: The fn_sku of this InventorySummary. # noqa: E501
:rtype: str
"""
return self._fn_sku
@fn_sku.setter
def fn_sku(self, fn_sku):
"""Sets the fn_sku of this InventorySummary.
Amazon's fulfillment network SKU identifier. # noqa: E501
:param fn_sku: The fn_sku of this InventorySummary. # noqa: E501
:type: str
"""
self._fn_sku = fn_sku
@property
def seller_sku(self):
"""Gets the seller_sku of this InventorySummary. # noqa: E501
The seller SKU of the item. # noqa: E501
:return: The seller_sku of this InventorySummary. # noqa: E501
:rtype: str
"""
return self._seller_sku
@seller_sku.setter
def seller_sku(self, seller_sku):
"""Sets the seller_sku of this InventorySummary.
The seller SKU of the item. # noqa: E501
:param seller_sku: The seller_sku of this InventorySummary. # noqa: E501
:type: str
"""
self._seller_sku = seller_sku
@property
def condition(self):
"""Gets the condition of this InventorySummary. # noqa: E501
The condition of the item as described by the seller (for example, New Item). # noqa: E501
:return: The condition of this InventorySummary. # noqa: E501
:rtype: str
"""
return self._condition
@condition.setter
def condition(self, condition):
"""Sets the condition of this InventorySummary.
The condition of the item as described by the seller (for example, New Item). # noqa: E501
:param condition: The condition of this InventorySummary. # noqa: E501
:type: str
"""
self._condition = condition
@property
def inventory_details(self):
"""Gets the inventory_details of this InventorySummary. # noqa: E501
:return: The inventory_details of this InventorySummary. # noqa: E501
:rtype: InventoryDetails
"""
return self._inventory_details
@inventory_details.setter
def inventory_details(self, inventory_details):
"""Sets the inventory_details of this InventorySummary.
:param inventory_details: The inventory_details of this InventorySummary. # noqa: E501
:type: InventoryDetails
"""
self._inventory_details = inventory_details
@property
def last_updated_time(self):
"""Gets the last_updated_time of this InventorySummary. # noqa: E501
The date and time that any quantity was last updated. # noqa: E501
:return: The last_updated_time of this InventorySummary. # noqa: E501
:rtype: datetime
"""
return self._last_updated_time
@last_updated_time.setter
def last_updated_time(self, last_updated_time):
"""Sets the last_updated_time of this InventorySummary.
The date and time that any quantity was last updated. # noqa: E501
:param last_updated_time: The last_updated_time of this InventorySummary. # noqa: E501
:type: datetime
"""
self._last_updated_time = last_updated_time
@property
def product_name(self):
"""Gets the product_name of this InventorySummary. # noqa: E501
The localized language product title of the item within the specific marketplace. # noqa: E501
:return: The product_name of this InventorySummary. # noqa: E501
:rtype: str
"""
return self._product_name
@product_name.setter
def product_name(self, product_name):
"""Sets the product_name of this InventorySummary.
The localized language product title of the item within the specific marketplace. # noqa: E501
:param product_name: The product_name of this InventorySummary. # noqa: E501
:type: str
"""
self._product_name = product_name
@property
def total_quantity(self):
"""Gets the total_quantity of this InventorySummary. # noqa: E501
The total number of units in an inbound shipment or in Amazon fulfillment centers. # noqa: E501
:return: The total_quantity of this InventorySummary. # noqa: E501
:rtype: int
"""
return self._total_quantity
@total_quantity.setter
def total_quantity(self, total_quantity):
"""Sets the total_quantity of this InventorySummary.
The total number of units in an inbound shipment or in Amazon fulfillment centers. # noqa: E501
:param total_quantity: The total_quantity of this InventorySummary. # noqa: E501
:type: int
"""
self._total_quantity = total_quantity
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(InventorySummary, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, InventorySummary):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
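# Minimal usage sketch (field values are illustrative only):
#
#   summary = InventorySummary(asin='B00EXAMPLE', seller_sku='SKU-1', total_quantity=5)
#   summary.to_dict()   # -> {'asin': 'B00EXAMPLE', 'seller_sku': 'SKU-1', 'total_quantity': 5, ...}
#   print(summary)      # pretty-printed dict via to_str() / __repr__()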
|
py | 1a338d2f67cff9e423fb9e2a8d208cefb793cb86 | import argparse
import time
import statistics
import numpy as np
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
import brainflow
from brainflow.board_shim import BoardShim, BrainFlowInputParams, BoardIds, LogLevels
from brainflow.data_filter import DataFilter, FilterTypes, AggOperations
def main():
parser = argparse.ArgumentParser()
# use docs to check which parameters are required for specific board, e.g. for Cyton - set serial port
parser.add_argument('--ip-port', type=int, help='ip port', required=False, default=0)
parser.add_argument('--ip-protocol', type=int, help='ip protocol, check IpProtocolType enum', required=False,
default=0)
parser.add_argument('--ip-address', type=str, help='ip address', required=False, default='')
parser.add_argument('--serial-port', type=str, help='serial port', required=False, default='')
parser.add_argument('--mac-address', type=str, help='mac address', required=False, default='')
parser.add_argument('--other-info', type=str, help='other info', required=False, default='')
parser.add_argument('--streamer-params', type=str, help='other info', required=False, default='')
parser.add_argument('--board-id', type=int, help='board id, check docs to get a list of supported boards',
required=True)
parser.add_argument('--log', action='store_true')
parser.add_argument('--run-time', type=int, help='run time for one iteration in sec', required=True)
parser.add_argument('--num-iters', type=int, help='number of iterations', default=1)
parser.add_argument('--channels', type=str, help='channels to plot in format 0,1,2 by default plot all channels',
default=None)
parser.add_argument('--config-file', type=str, help='file with strings to send to device', default=None)
args = parser.parse_args()
params = BrainFlowInputParams()
params.ip_port = args.ip_port
params.serial_port = args.serial_port
params.mac_address = args.mac_address
params.other_info = args.other_info
params.ip_address = args.ip_address
params.ip_protocol = args.ip_protocol
if (args.log):
BoardShim.enable_dev_board_logger()
else:
BoardShim.disable_board_logger()
# for streaming board need to use master board id
master_board_id = args.board_id
if args.board_id == BoardIds.STREAMING_BOARD.value:
master_board_id = int(params.other_info)
board = BoardShim(args.board_id, params)
board.prepare_session()
if args.config_file:
with open(args.config_file) as file:
lines = file.readlines()
for line in lines:
board.config_board(line)
buffer_size = int(BoardShim.get_sampling_rate(master_board_id) * args.run_time * 1.2) # + 20% for safety
if master_board_id in (
BoardIds.CYTON_BOARD.value, BoardIds.CYTON_WIFI_BOARD.value, BoardIds.GANGLION_WIFI_BOARD.value):
bytes_per_package = 33
    elif master_board_id in (BoardIds.CYTON_DAISY_BOARD.value, BoardIds.CYTON_DAISY_WIFI_BOARD.value):
bytes_per_package = 66
elif master_board_id == BoardIds.SYNTHETIC_BOARD.value:
bytes_per_package = 104
elif master_board_id == BoardIds.NOVAXR_BOARD.value:
bytes_per_package = 72
else:
raise ValueError('unsupported board')
timestamp_channel = BoardShim.get_timestamp_channel(master_board_id)
package_num_channel = BoardShim.get_package_num_channel(master_board_id)
try:
        for i in range(args.num_iters):
            # reset the package-counter position for every iteration so package loss
            # is computed per run rather than carried over from the previous one
            cur_id = 0
# wait for an input
input('Press Enter to continue...')
BoardShim.log_message(LogLevels.LEVEL_INFO.value, '\nRunning iteration %d/%d\n' % (i, args.num_iters))
# start stream and get data
board.start_stream(buffer_size, args.streamer_params)
time.sleep(args.run_time)
board.stop_stream()
data = board.get_board_data()
if data.shape[1] == 0:
BoardShim.log_message(LogLevels.LEVEL_WARN.value, '\nNo data received!\n')
continue
# calculate some metrics
total_bytes_received = bytes_per_package * data.shape[1]
            packages_per_sec = float(data.shape[1]) / float(args.run_time)
timestamp_array = data[timestamp_channel]
time_diff_array = list()
for j in range(0, timestamp_array.size - 1):
time_diff_array.append(timestamp_array[j + 1] - timestamp_array[j])
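            # equivalent to the successive differences np.diff(timestamp_array);
            # mean/std of these inter-package intervals are reported below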
package_num_array = data[package_num_channel]
lost_packages = 0
expected = package_num_array[0]
while cur_id < package_num_array.size:
if expected == 256:
expected = 0
if package_num_array[cur_id] != expected:
BoardShim.log_message(LogLevels.LEVEL_WARN.value,
'package loss detected: position %d package_num value %d expected value %d' % (
cur_id, package_num_array[cur_id], expected))
lost_packages = lost_packages + 1
else:
cur_id = cur_id + 1
expected = expected + 1
package_loss = (lost_packages / data.shape[1]) * 100
# provide results for iteration
BoardShim.log_message(LogLevels.LEVEL_INFO.value, '\nResults:\n')
BoardShim.log_message(LogLevels.LEVEL_INFO.value, 'package loss percent %f' % package_loss)
BoardShim.log_message(LogLevels.LEVEL_INFO.value,
'average time delta %f' % statistics.mean(time_diff_array))
BoardShim.log_message(LogLevels.LEVEL_INFO.value,
'std deviation of time delta %f' % statistics.pstdev(time_diff_array))
BoardShim.log_message(LogLevels.LEVEL_INFO.value, 'total packages received %d' % data.shape[1])
BoardShim.log_message(LogLevels.LEVEL_INFO.value, 'packages per sec %f' % packages_per_sec)
BoardShim.log_message(LogLevels.LEVEL_INFO.value, 'total bytes received %d' % total_bytes_received)
# plot data
eeg_channels = BoardShim.get_eeg_channels(master_board_id)
emg_channels = BoardShim.get_emg_channels(master_board_id)
total_channels = list()
if args.channels is not None:
selected_channels = [int(x) for x in args.channels.split(',')]
temp_channels = eeg_channels
for ch in emg_channels:
if ch not in temp_channels:
temp_channels.append(ch)
for j in range(len(temp_channels)):
if j in selected_channels:
total_channels.append(temp_channels[j])
else:
# for cyton/ganglion eeg_channels and emg_channels are the same array because we can not split it
# for novaxr its 2 different arrays, join them
total_channels = eeg_channels
for ch in emg_channels:
if ch not in total_channels:
total_channels.append(ch)
total_channels.append(timestamp_channel)
columns = list()
for j in range(len(total_channels) - 1):
columns.append('channel_%d' % (int(total_channels[j]) - 1))
columns.append('timestamp')
df = pd.DataFrame(np.transpose(data))
df.to_csv('all_data_%d.csv' % i)
df_to_plot = df[total_channels]
df_to_plot.columns = columns
df_to_plot.to_csv('selected_data_%d.csv' % i)
df_to_plot.plot(subplots=True, x='timestamp', style='.-')
plt.show()
finally:
# release session in the end
board.release_session()
if __name__ == "__main__":
main()
|
py | 1a338d7ef8dc76432275ae187092bc4cc23be4a1 | # coding: utf-8
"""
Japanese WordNet APIs
Return all WordNet words.<BR />[Endpoint] https://api.apitore.com/api/40 # noqa: E501
OpenAPI spec version: 0.0.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import swagger_client
from swagger_client.models.link_words_entity import LinkWordsEntity # noqa: E501
from swagger_client.rest import ApiException
class TestLinkWordsEntity(unittest.TestCase):
"""LinkWordsEntity unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testLinkWordsEntity(self):
"""Test LinkWordsEntity"""
# FIXME: construct object with mandatory attributes with example values
# model = swagger_client.models.link_words_entity.LinkWordsEntity() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
|
py | 1a338ec9fbd5c066d0ee3ea542bc637bbaeec20c | from common_fixtures import * # NOQA
TEST_SERVICE_OPT_IMAGE = 'ibuildthecloud/helloworld'
TEST_SERVICE_OPT_IMAGE_LATEST = TEST_SERVICE_OPT_IMAGE + ':latest'
TEST_SERVICE_OPT_IMAGE_UUID = 'docker:' + TEST_SERVICE_OPT_IMAGE_LATEST
LB_IMAGE_UUID = "docker:sangeetha/testlbsd:latest"
logger = logging.getLogger(__name__)
if_compose_data_files = pytest.mark.skipif(
not os.environ.get('CATTLE_TEST_DATA_DIR'),
reason='Docker compose files directory location not set')
def test_rancher_compose_service(client,
rancher_cli_container,
socat_containers):
vol_container = client.create_container(imageUuid=TEST_IMAGE_UUID,
name=random_str(),
labels={"c1": "vol"}
)
vol_container = client.wait_success(vol_container)
volume_in_host = "/test/container"
volume_in_container = "/test/vol1"
docker_vol_value = volume_in_host + ":" + volume_in_container + ":ro"
cap_add = ["CHOWN"]
cap_drop = ["KILL"]
restart_policy = {"maximumRetryCount": 10, "name": "on-failure"}
dns_search = ['1.2.3.4']
dns_name = ['1.2.3.4']
domain_name = "rancher.io"
host_name = "test"
user = "root"
command = ["sleep", "9000"]
env_var = {"TEST_FILE": "/etc/testpath.conf"}
memory = 8000000
cpu_set = "0"
cpu_shares = 400
# Not including "dataVolumesFrom": [vol_container.id] since it is not
# implemented yet
launch_config = {"imageUuid": TEST_SERVICE_OPT_IMAGE_UUID,
"command": command,
"dataVolumes": [docker_vol_value],
"environment": env_var,
"capAdd": cap_add,
"capDrop": cap_drop,
"dnsSearch": dns_search,
"dns": dns_name,
"privileged": True,
"domainName": domain_name,
"stdinOpen": True,
"tty": True,
"memory": memory,
"cpuSet": cpu_set,
"cpuShares": cpu_shares,
"restartPolicy": restart_policy,
"directory": "/",
"hostname": host_name,
"user": user,
"labels":
{"io.rancher.scheduler.affinity:container_label":
"c1=vol"}
}
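    # Note: the scheduler affinity label above asks Rancher to co-locate this
    # service's containers with the earlier vol_container, which carries the
    # label c1=vol.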
scale = 1
service, env = create_env_and_svc(client, launch_config,
scale)
launch_rancher_compose(client, env)
rancher_envs = client.list_stack(name=env.name+"rancher").data
assert len(rancher_envs) == 1
rancher_env = rancher_envs[0]
rancher_service = get_rancher_compose_service(
client, rancher_env.id, service)
check_container_in_service(client, rancher_service)
container_list = get_service_container_list(client, rancher_service)
dns_name.append(RANCHER_DNS_SERVER)
dns_search.append(rancher_env.name+"."+RANCHER_DNS_SEARCH)
dns_search.append(
rancher_service.name+"."+rancher_env.name+"."+RANCHER_DNS_SEARCH)
dns_search.append(RANCHER_DNS_SEARCH)
for c in container_list:
print(c)
docker_client = get_docker_client(c.hosts[0])
inspect = docker_client.inspect_container(c.externalId)
assert docker_vol_value in inspect["HostConfig"]["Binds"]
# assert inspect["HostConfig"]["VolumesFrom"] == \
# [vol_container.externalId]
assert inspect["HostConfig"]["PublishAllPorts"] is False
assert inspect["HostConfig"]["Privileged"] is True
assert inspect["Config"]["OpenStdin"] is True
assert inspect["Config"]["Tty"] is True
assert inspect["HostConfig"]["Dns"] == dns_name
assert inspect["HostConfig"]["DnsSearch"] == dns_search
assert inspect["Config"]["Hostname"] == host_name
assert inspect["Config"]["Domainname"] == domain_name
assert inspect["Config"]["User"] == user
assert inspect["HostConfig"]["CapAdd"] == cap_add
assert inspect["HostConfig"]["CapDrop"] == cap_drop
# assert inspect["Config"]["Cpuset"] == cpu_set
# No support for restart
assert inspect["HostConfig"]["RestartPolicy"]["Name"] == ""
assert \
inspect["HostConfig"]["RestartPolicy"]["MaximumRetryCount"] == 0
assert inspect["Config"]["Cmd"] == command
assert inspect["HostConfig"]["Memory"] == memory
assert "TEST_FILE=/etc/testpath.conf" in inspect["Config"]["Env"]
assert inspect["HostConfig"]["CpuShares"] == cpu_shares
delete_all(client, [env, rancher_env])
def test_rancher_compose_service_option_2(client,
rancher_cli_container,
socat_containers):
hosts = client.list_host(kind='docker', removed_null=True, state="active").data
ulimit = {"hard": 1024, "name": "cpu", "soft": 1024}
ulimit_inspect = {"Hard": 1024, "Name": "cpu", "Soft": 1024}
ipcMode = "host"
sysctls = {"net.ipv4.ip_forward": "1"}
dev_opts = {
'/dev/null': {
'readIops': 2000,
'writeIops': 3000,
'readBps': 4000,
'writeBps': 200,
}
}
cpu_shares = 400
blkio_weight = 1000
cpu_period = 10000
cpu_quota = 20000
cpu_set = "0"
cpu_setmems = "0"
dns_opt = ["abc"]
group_add = ["root"]
kernel_memory = 6000000
memory_reservation = 5000000
memory_swap = -1
memory_swappiness = 100
oom_killdisable = True
oom_scoreadj = 100
read_only = True
shm_size = 1024
stop_signal = "SIGTERM"
uts = "host"
memory = 8000000
dev_opts_inspect = {u"Path": "/dev/null",
u"Rate": 400}
cgroup_parent = "xyz"
extraHosts = ["host1:10.1.1.1", "host2:10.2.2.2"]
tmp_fs = {"/tmp": "rw"}
security_opt = ["label=user:USER", "label=role:ROLE"]
launch_config = {"imageUuid": TEST_SERVICE_OPT_IMAGE_UUID,
"extraHosts": extraHosts,
"privileged": True,
"cpuShares": cpu_shares,
"blkioWeight": blkio_weight,
"blkioDeviceOptions": dev_opts,
"cgroupParent": cgroup_parent,
"cpuShares": cpu_shares,
"cpuPeriod": cpu_period,
"cpuQuota": cpu_quota,
"cpuSet": cpu_set,
"cpuSetMems": cpu_setmems,
"dnsOpt": dns_opt,
"groupAdd": group_add,
"kernelMemory": kernel_memory,
"memory": memory,
"memoryReservation": memory_reservation,
"memorySwap": memory_swap,
"memorySwappiness": memory_swappiness,
"oomKillDisable": oom_killdisable,
"oomScoreAdj": oom_scoreadj,
"readOnly": read_only,
"securityOpt": security_opt,
"shmSize": shm_size,
"stopSignal": stop_signal,
"sysctls": sysctls,
"tmpfs": tmp_fs,
"ulimits": [ulimit],
"ipcMode": ipcMode,
"uts": uts,
"requestedHostId": hosts[0].id
}
scale = 2
service, env = create_env_and_svc(client, launch_config,
scale)
launch_rancher_compose(client, env)
rancher_envs = client.list_stack(name=env.name+"rancher").data
assert len(rancher_envs) == 1
rancher_env = rancher_envs[0]
rancher_service = get_rancher_compose_service(
client, rancher_env.id, service)
check_container_in_service(client, rancher_service)
container_list = get_service_container_list(client, rancher_service)
for c in container_list:
docker_client = get_docker_client(c.hosts[0])
inspect = docker_client.inspect_container(c.externalId)
assert inspect["HostConfig"]["ExtraHosts"] == extraHosts
assert inspect["HostConfig"]["BlkioWeight"] == blkio_weight
dev_opts_inspect["Path"] = "/dev/null"
dev_opts_inspect["Rate"] = 4000
assert \
inspect["HostConfig"]["BlkioDeviceReadBps"] == [dev_opts_inspect]
dev_opts_inspect["Path"] = "/dev/null"
dev_opts_inspect["Rate"] = 200
assert \
inspect["HostConfig"]["BlkioDeviceWriteBps"] == [dev_opts_inspect]
dev_opts_inspect["Path"] = "/dev/null"
dev_opts_inspect["Rate"] = 2000
assert \
inspect["HostConfig"]["BlkioDeviceReadIOps"] == [dev_opts_inspect]
dev_opts_inspect["Path"] = "/dev/null"
dev_opts_inspect["Rate"] = 3000
assert \
inspect["HostConfig"]["BlkioDeviceWriteIOps"] == [dev_opts_inspect]
assert inspect["HostConfig"]["CpuShares"] == cpu_shares
assert inspect["HostConfig"]["CgroupParent"] == cgroup_parent
assert inspect["HostConfig"]["CpuPeriod"] == cpu_period
assert inspect["HostConfig"]["CpuQuota"] == cpu_quota
assert inspect["HostConfig"]["CpusetCpus"] == cpu_set
# Bug - 6700
"""
assert inspect["HostConfig"]["CpusetMems"] == cpu_setmems
assert inspect["HostConfig"]["KernelMemory"] == kernel_memory
"""
assert inspect["HostConfig"]["MemoryReservation"] == memory_reservation
assert inspect["HostConfig"]["MemorySwap"] == memory_swap
assert inspect["HostConfig"]["MemorySwappiness"] == memory_swappiness
assert inspect["HostConfig"]["OomKillDisable"]
assert inspect["HostConfig"]["OomScoreAdj"] == oom_scoreadj
assert inspect["HostConfig"]["ReadonlyRootfs"]
assert inspect["HostConfig"]["SecurityOpt"] == security_opt
assert inspect["HostConfig"]["Tmpfs"] == tmp_fs
assert inspect["HostConfig"]["ShmSize"] == shm_size
assert inspect["Config"]["StopSignal"] == stop_signal
assert inspect["HostConfig"]["Ulimits"] == [ulimit_inspect]
assert inspect["HostConfig"]["IpcMode"] == ipcMode
assert inspect["HostConfig"]["UTSMode"] == uts
assert inspect["HostConfig"]["DnsOptions"] == dns_opt
assert inspect["HostConfig"]["GroupAdd"] == group_add
delete_all(client, [env])
@pytest.mark.skipif(True, reason='not implemented yet')
def test_rancher_compose_services_port_and_link_options(
client, rancher_cli_container, socat_containers):
hosts = client.list_host(kind='docker', removed_null=True, state="active").data
host = hosts[0]
link_host = hosts[1]
link_name = "WEB1"
link_port = 80
exposed_port = 9999
link_container = client.create_container(
imageUuid=LB_IMAGE_UUID,
environment={'CONTAINER_NAME': link_name},
name=random_str(),
requestedHostId=host.id
)
link_container = client.wait_success(link_container)
launch_config = {"imageUuid": SSH_IMAGE_UUID,
"ports": [str(exposed_port)+":22/tcp"],
"instanceLinks": {
link_name:
link_container.id},
"requestedHostId": link_host.id,
}
service, env = create_env_and_svc(client, launch_config, 1)
launch_rancher_compose(client, env)
rancher_envs = client.list_stack(name=env.name+"rancher").data
assert len(rancher_envs) == 1
rancher_env = rancher_envs[0]
rancher_service = get_rancher_compose_service(
client, rancher_env.id, service)
container_name = get_container_name(rancher_env, rancher_service, 1)
containers = client.list_container(name=container_name, state="running").data
assert len(containers) == 1
con = containers[0]
validate_exposed_port_and_container_link(client, con, link_name,
link_port, exposed_port)
delete_all(client, [env, rancher_env, link_container])
def test_rancher_compose_lbservice(client,
rancher_cli_container):
port = "7900"
# Add LB service and do not activate services
service_scale = 2
lb_scale = 1
env, service, lb_service = create_env_with_svc_and_lb(
client, service_scale, lb_scale, port)
# Add another target to LB service
launch_config = {"imageUuid": WEB_IMAGE_UUID}
service_name = random_str()
service1 = client.create_service(name=service_name,
stackId=env.id,
launchConfig=launch_config,
scale=2)
service1 = client.wait_success(service1)
assert service1.state == "inactive"
service = activate_svc(client, service)
service1 = activate_svc(client, service1)
# Set LB targets
port_rules = lb_service.lbConfig.portRules
protocol = "http"
target_port = "80"
service_id = service1.id
port_rule = {"sourcePort": port, "protocol": protocol,
"serviceId": service_id, "targetPort": target_port}
port_rules.append(port_rule)
lb_service = client.update(lb_service,
lbConfig=create_lb_config(port_rules))
launch_rancher_compose(client, env)
rancher_envs = client.list_stack(name=env.name+"rancher").data
assert len(rancher_envs) == 1
rancher_env = rancher_envs[0]
rancher_lb_service = get_rancher_compose_service(
client, rancher_env.id, lb_service)
client.wait_success(rancher_lb_service)
rancher_service = get_rancher_compose_service(
client, rancher_env.id, service)
client.wait_success(rancher_service)
rancher_service1 = get_rancher_compose_service(
client, rancher_env.id, service1)
client.wait_success(rancher_service1)
validate_lb_service(client, rancher_lb_service, port,
[rancher_service, rancher_service1])
delete_all(client, [env, rancher_env])
def test_rancher_compose_lbservice_internal(client,
rancher_cli_container):
port = "7911"
con_port = "7912"
# Deploy container in same network to test accessibility of internal LB
hosts = client.list_host(kind='docker', removed_null=True, state="active").data
assert len(hosts) > 0
host = hosts[0]
client_con = client.create_container(
name=random_str(), imageUuid=SSH_IMAGE_UUID,
ports=[con_port+":22/tcp"], requestedHostId=host.id)
client_con = client.wait_success(client_con, 120)
assert client_con.state == "running"
# Add an internal LB service and do not activate services
service_scale = 2
lb_scale = 1
env, service, lb_service = create_env_with_svc_and_lb(
client, service_scale, lb_scale, port, internal=True)
# Add another target to LB service
launch_config = {"imageUuid": WEB_IMAGE_UUID}
service_name = random_str()
service1 = client.create_service(name=service_name,
stackId=env.id,
launchConfig=launch_config,
scale=2)
service1 = client.wait_success(service1)
assert service1.state == "inactive"
service = activate_svc(client, service)
service1 = activate_svc(client, service1)
# Set LB targets
port_rules = lb_service.lbConfig.portRules
protocol = "http"
target_port = "80"
service_id = service1.id
port_rule = {"sourcePort": port, "protocol": protocol,
"serviceId": service_id, "targetPort": target_port}
port_rules.append(port_rule)
lb_service = client.update(lb_service,
lbConfig=create_lb_config(port_rules))
launch_rancher_compose(client, env)
rancher_envs = client.list_stack(name=env.name+"rancher").data
assert len(rancher_envs) == 1
rancher_env = rancher_envs[0]
rancher_lb_service = get_rancher_compose_service(
client, rancher_env.id, lb_service)
client.wait_success(rancher_lb_service)
rancher_service = get_rancher_compose_service(
client, rancher_env.id, service)
client.wait_success(rancher_service)
rancher_service1 = get_rancher_compose_service(
client, rancher_env.id, service1)
client.wait_success(rancher_service1)
time.sleep(20)
validate_internal_lb(client, rancher_lb_service,
[rancher_service, rancher_service1],
host, con_port, port)
    # Check that the port on the host where the LB agent is running is not accessible
lb_containers = get_service_container_list(
client, rancher_lb_service)
assert len(lb_containers) == lb_service.scale
for lb_con in lb_containers:
host = client.by_id('host', lb_con.hosts[0].id)
assert check_for_no_access(host, port)
delete_all(client, [env, rancher_env])
def test_rancher_compose_service_links(client,
rancher_cli_container):
port = "7901"
service_scale = 1
consumed_service_scale = 2
env, service, consumed_service = create_env_with_2_svc(
client, service_scale, consumed_service_scale, port)
service_link = {"serviceId": consumed_service.id, "ports": ["80"]}
service.addservicelink(serviceLink=service_link)
service = client.wait_success(service, 120)
# Launch env using docker compose
launch_rancher_compose(client, env)
rancher_envs = client.list_stack(name=env.name+"rancher").data
assert len(rancher_envs) == 1
rancher_env = rancher_envs[0]
rancher_service = get_rancher_compose_service(
client, rancher_env.id, service)
rancher_consumed_service = get_rancher_compose_service(
client, rancher_env.id, consumed_service)
client.wait_success(rancher_service)
client.wait_success(rancher_consumed_service)
validate_add_service_link(client, rancher_service,
rancher_consumed_service)
validate_linked_service(client, rancher_service,
[rancher_consumed_service], port)
delete_all(client, [env, rancher_env])
def test_rancher_compose_dns_services(client,
rancher_cli_container):
port = "7902"
rancher_compose_dns_services(client, port,
rancher_cli_container)
def test_rancher_compose_dns_services_cross_stack(client,
rancher_cli_container):
port = "7903"
rancher_compose_dns_services(client, port,
rancher_cli_container, True)
def test_rancher_compose_external_services(client,
rancher_cli_container):
port = "7904"
service_scale = 1
env, service, ext_service, con_list = create_env_with_ext_svc(
client, service_scale, port)
service_link = {"serviceId": ext_service.id}
service.addservicelink(serviceLink=service_link)
# Launch env using docker compose
launch_rancher_compose(client, env)
rancher_envs = client.list_stack(name=env.name+"rancher").data
assert len(rancher_envs) == 1
rancher_env = rancher_envs[0]
rancher_service = get_rancher_compose_service(
client, rancher_env.id, service)
rancher_ext_service = get_rancher_compose_service(
client, rancher_env.id, ext_service)
client.wait_success(con_list[0])
client.wait_success(con_list[1])
client.wait_success(rancher_service)
client.wait_success(rancher_ext_service)
validate_add_service_link(client, rancher_service,
rancher_ext_service)
validate_external_service(client, rancher_service,
[rancher_ext_service],
port, con_list)
delete_all(client, [env, rancher_env])
def test_rancher_compose_lbservice_host_routing(client,
rancher_cli_container):
port1 = "7906"
service_scale = 2
lb_scale = 1
service_count = 3
port_rules = []
port_rule = {"hostname": "www.abc1.com",
"path": "/service1.html",
"serviceId": 0,
"sourcePort": port1,
"targetPort": "80",
"protocol": "http"
}
port_rules.append(port_rule)
port_rule = {"hostname": "www.abc2.com",
"path": "/service2.html",
"serviceId": 0,
"sourcePort": port1,
"targetPort": "80",
"protocol": "http"
}
port_rules.append(port_rule)
port_rule = {"hostname": "www.abc1.com",
"path": "/service1.html",
"serviceId": 1,
"sourcePort": port1,
"targetPort": "80",
"protocol": "http"
}
port_rules.append(port_rule)
port_rule = {"hostname": "www.abc2.com",
"path": "/service2.html",
"serviceId": 1,
"sourcePort": port1,
"targetPort": "80",
"protocol": "http"
}
port_rules.append(port_rule)
port_rule = {"hostname": "www.abc1.com",
"path": "/name.html",
"serviceId": 2,
"sourcePort": port1,
"targetPort": "80",
"protocol": "http"
}
port_rules.append(port_rule)
port_rule = {"hostname": "www.abc2.com",
"path": "/name.html",
"serviceId": 2,
"sourcePort": port1,
"targetPort": "80",
"protocol": "http"
}
port_rules.append(port_rule)
env, services, lb_service = create_env_with_multiple_svc_and_lb(
client, service_scale, lb_scale, [port1], service_count, port_rules)
launch_rancher_compose(client, env)
rancher_envs = client.list_stack(name=env.name+"rancher").data
assert len(rancher_envs) == 1
rancher_env = rancher_envs[0]
rancher_lb_service = get_rancher_compose_service(client, rancher_env.id,
lb_service)
client.wait_success(rancher_lb_service)
rancher_service = get_rancher_compose_service(
client, rancher_env.id, services[0])
client.wait_success(rancher_service)
rancher_service1 = get_rancher_compose_service(
client, rancher_env.id, services[1])
client.wait_success(rancher_service1)
rancher_service2 = get_rancher_compose_service(
client, rancher_env.id, services[2])
client.wait_success(rancher_service2)
validate_lb_service(client,
rancher_lb_service, port1,
[rancher_service, rancher_service1],
"www.abc1.com", "/service1.html")
validate_lb_service(client,
rancher_lb_service, port1,
[rancher_service, rancher_service1],
"www.abc2.com", "/service2.html")
validate_lb_service(client,
rancher_lb_service, port1, [rancher_service2],
"www.abc1.com", "/name.html")
validate_lb_service(client,
rancher_lb_service, port1, [rancher_service2],
"www.abc2.com", "/name.html")
validate_lb_service_for_no_access(client, rancher_lb_service, port1,
"www.abc1.com",
"/service2.html")
validate_lb_service_for_no_access(client, rancher_lb_service, port1,
"www.abc2.com",
"/service1.html")
delete_all(client, [env, rancher_env])
def test_rancher_compose_lbservice_multiple_port(client,
rancher_cli_container):
port1 = "7907"
port2 = "7908"
service_scale = 2
lb_scale = 1
service_count = 2
port_rules = []
port_rule = {"hostname": "www.abc1.com",
"path": "/service1.html",
"serviceId": 0,
"sourcePort": port1,
"targetPort": "80",
"protocol": "http"
}
port_rules.append(port_rule)
port_rule = {"hostname": "www.abc2.com",
"path": "/service3.html",
"serviceId": 1,
"sourcePort": port2,
"targetPort": "81",
"protocol": "http"
}
port_rules.append(port_rule)
env, services, lb_service = create_env_with_multiple_svc_and_lb(
client, service_scale, lb_scale, [port1, port2],
service_count, port_rules)
launch_rancher_compose(client, env)
rancher_envs = client.list_stack(name=env.name+"rancher").data
assert len(rancher_envs) == 1
rancher_env = rancher_envs[0]
rancher_lb_service = get_rancher_compose_service(client, rancher_env.id,
lb_service)
client.wait_success(rancher_lb_service)
rancher_service = get_rancher_compose_service(
client, rancher_env.id, services[0])
client.wait_success(rancher_service)
rancher_service1 = get_rancher_compose_service(
client, rancher_env.id, services[1])
client.wait_success(rancher_service1)
validate_lb_service(client,
rancher_lb_service, port1, [rancher_service],
"www.abc1.com", "/service1.html")
validate_lb_service(client,
rancher_lb_service, port2, [rancher_service1],
"www.abc2.com", "/service3.html")
delete_all(client, [env, rancher_env])
def test_rancher_compose_external_services_hostname(client,
rancher_cli_container):
port = "7904"
service_scale = 1
env, service, ext_service, con_list = create_env_with_ext_svc(
client, service_scale, port, True)
service_link = {"serviceId": ext_service.id}
service.addservicelink(serviceLink=service_link)
# Launch env using docker compose
launch_rancher_compose(client, env)
rancher_envs = client.list_stack(name=env.name+"rancher").data
assert len(rancher_envs) == 1
rancher_env = rancher_envs[0]
rancher_service = get_rancher_compose_service(
client, rancher_env.id, service)
rancher_ext_service = get_rancher_compose_service(
client, rancher_env.id, ext_service)
client.wait_success(rancher_service)
client.wait_success(rancher_ext_service)
validate_add_service_link(client, rancher_service,
rancher_ext_service)
validate_external_service_for_hostname(client, rancher_service,
[rancher_ext_service], port)
delete_all(client, [env, rancher_env])
def rancher_compose_dns_services(client, port,
rancher_cli_container,
cross_linking=False):
service_scale = 1
consumed_service_scale = 2
env, service, consumed_service, consumed_service1, dns = \
create_env_with_2_svc_dns(
client, service_scale, consumed_service_scale, port, cross_linking)
service_link = {"serviceId": dns.id}
service.addservicelink(serviceLink=service_link)
service_link = {"serviceId": consumed_service.id}
dns.addservicelink(serviceLink=service_link)
service_link = {"serviceId": consumed_service1.id}
dns.addservicelink(serviceLink=service_link)
# Launch dns env using docker compose
launch_rancher_compose(client, env)
rancher_envs = client.list_stack(name=env.name+"rancher").data
assert len(rancher_envs) == 1
rancher_env = rancher_envs[0]
# Launch envs using docker compose
if cross_linking:
# Launch Consumed Service2
env_con = get_env(client, consumed_service)
env_con = env_con.activateservices()
env_con = client.wait_success(env_con, 120)
assert env_con.state == "active"
con_service1_id = env_con.id
# Launch Consumed Service1
env_con1 = get_env(client, consumed_service1)
env_con1 = env_con1.activateservices()
env_con1 = client.wait_success(env_con1, 120)
assert env_con1.state == "active"
con_service2_id = env_con1.id
else:
con_service1_id = rancher_env.id
con_service2_id = rancher_env.id
rancher_consumed_service = get_rancher_compose_service(
client, con_service1_id, consumed_service)
rancher_consumed_service1 = get_rancher_compose_service(
client, con_service2_id, consumed_service1)
rancher_dns = get_rancher_compose_service(
client, rancher_env.id, dns)
rancher_service = get_rancher_compose_service(
client, rancher_env.id, service)
client.wait_success(rancher_dns)
client.wait_success(rancher_consumed_service)
client.wait_success(rancher_consumed_service1)
client.wait_success(rancher_service)
validate_add_service_link(client, rancher_service,
rancher_dns)
validate_add_service_link(client, rancher_dns,
rancher_consumed_service)
validate_add_service_link(client, rancher_dns,
rancher_consumed_service1)
validate_dns_service(client, rancher_service,
[rancher_consumed_service, rancher_consumed_service1],
port, rancher_dns.name)
to_delete = [env, rancher_env]
if cross_linking:
to_delete.append(env_con)
to_delete.append(env_con1)
delete_all(client, to_delete)
def get_rancher_compose_service(client, rancher_env_id, service):
rancher_services = client.list_service(name=service.name,
stackId=rancher_env_id,
removed_null=True).data
assert len(rancher_services) == 1
rancher_service = rancher_services[0]
print(service.kind)
if service.kind != 'externalService' and service.kind != 'dnsService':
assert rancher_service.scale == service.scale
rancher_service = client.wait_success(rancher_service, 120)
return rancher_service
|
py | 1a338ed6ad97456fc4f0c0079a85ea4efde99ee8 | ## MadLib
"""
Create a "MadLib" that asks the user for various pieces of
information. Store the information as strings. Once the
information has been collected, output a story using the
stored information. You can find a template if you don't want to
make one: http://www.madlibs.com/
"""
"""
This one is quite a step up. It involves a few new concepts, such
as storing user input.
First of all, I can see that we're going to want to repeatedly
ask a user for input, and store their answers somewhere. For this
task, we could use an array like FruitLoops, but let's not fill
it up yet:
"""
info_list = []
"""
Side note: this isn't really an array. It's a list. I lied.
Python is weird and does lots of strange stuff that other
languages cry about so it's not worth getting caught up on.
For our purposes, a list can be dynamically added to, which
is what we want.
Now let's define a function to get some input from the user and
put it in the info_list:
"""
def get_input_from_user(prompt):
answer = input(prompt)
info_list.append(answer)
"""
Python's input() function handles most of the heavy lifting here.
Put simply, the code above allows us to pass a prompt to the
function, which it will then ask the user, and add the reply
to our list of information.
Next, we need a story of sorts. We can make up whatever we want.
I'm going to mark out a basic one here, with placeholders for
the words we want our user to fill in:
    This morning, I <past tense action> and then got up and
had a shower. In the shower, an <animal> flew in the window
and scared me so bad that I screamed for my <relative>.
Next, let's get our user to fill in our placeholders:
"""
get_input_from_user("Please give me a past tense action: ")
get_input_from_user("Please give me an animal: ")
get_input_from_user("Please give me a relative: ")
"""
Now that we've got those stored, let's construct our story string
and print it back out. We're going to use f-formatting here,
which allows us to use curly brackets around variable names
inside a string to save us doing lots of string + string = string
"""
my_story = f"This morning, I {info_list[0]} and then got up "\
f"and had a shower. In the shower, a"\
f" {info_list[1]} flew in the window and scared me "\
f"so bad that I screamed for my {info_list[2]}."
"""
Note the trailing backslashes which concatenate the lines of text.
Now, we just print it out:
"""
print(my_story)
"""
Side note: For general purposes, it's nice to not have to think
about which index you're looking for in a data structure if
you're just iterating over them, as above. We can, when using
iterable objects, define an iterator:
"""
info_iter = iter(info_list)
"""
And instead repeatedly call next() on it to achieve the same
result as above without hard-coding any particular values in:
"""
my_iterated_story = f"This morning, I {next(info_iter)} and "\
f"then got up and had a shower. In the "\
f"shower, a {next(info_iter)} flew in the "\
f"window and scared me so bad that I "\
f"screamed for my {next(info_iter)}"
|
py | 1a338ef571120eea0b3772b63be1ddefeeb2124d | from __future__ import unicode_literals
from django.test import TestCase
from modelcluster.forms import ClusterForm, transientmodelformset_factory, childformset_factory
from tests.models import NewsPaper, Article, Author, Band, BandMember, Album, Song
class TransientFormsetTest(TestCase):
BandMembersFormset = transientmodelformset_factory(BandMember, exclude=['band'], extra=3, can_delete=True)
def test_can_create_formset(self):
beatles = Band(name='The Beatles', members=[
BandMember(name='John Lennon'),
BandMember(name='Paul McCartney'),
])
band_members_formset = self.BandMembersFormset(queryset=beatles.members.all())
self.assertEqual(5, len(band_members_formset.forms))
self.assertEqual('John Lennon', band_members_formset.forms[0].instance.name)
def test_incoming_formset_data(self):
beatles = Band(name='The Beatles', members=[
BandMember(name='George Harrison'),
])
band_members_formset = self.BandMembersFormset({
'form-TOTAL_FORMS': 3,
'form-INITIAL_FORMS': 1,
'form-MAX_NUM_FORMS': 1000,
'form-0-name': 'John Lennon',
'form-0-id': '',
'form-1-name': 'Paul McCartney',
'form-1-id': '',
'form-2-name': '',
'form-2-id': '',
}, queryset=beatles.members.all())
self.assertTrue(band_members_formset.is_valid())
members = band_members_formset.save(commit=False)
self.assertEqual(2, len(members))
self.assertEqual('John Lennon', members[0].name)
# should not exist in the database yet
self.assertFalse(BandMember.objects.filter(name='John Lennon').exists())
def test_save_commit_false(self):
john = BandMember(name='John Lennon')
paul = BandMember(name='Paul McCartney')
ringo = BandMember(name='Richard Starkey')
beatles = Band(name='The Beatles', members=[
john, paul, ringo
])
beatles.save()
john_id, paul_id, ringo_id = john.id, paul.id, ringo.id
self.assertTrue(john_id)
self.assertTrue(paul_id)
band_members_formset = self.BandMembersFormset({
'form-TOTAL_FORMS': 5,
'form-INITIAL_FORMS': 3,
'form-MAX_NUM_FORMS': 1000,
'form-0-name': 'John Lennon',
'form-0-DELETE': 'form-0-DELETE',
'form-0-id': john_id,
'form-1-name': 'Paul McCartney',
'form-1-id': paul_id,
'form-2-name': 'Ringo Starr', # changing data of an existing record
'form-2-id': ringo_id,
'form-3-name': '',
'form-3-id': '',
'form-4-name': 'George Harrison', # Adding a record
'form-4-id': '',
}, queryset=beatles.members.all())
self.assertTrue(band_members_formset.is_valid())
updated_members = band_members_formset.save(commit=False)
self.assertEqual(2, len(updated_members))
self.assertEqual('Ringo Starr', updated_members[0].name)
self.assertEqual(ringo_id, updated_members[0].id)
# should not be updated in the db yet
self.assertEqual('Richard Starkey', BandMember.objects.get(id=ringo_id).name)
self.assertEqual('George Harrison', updated_members[1].name)
self.assertFalse(updated_members[1].id) # no ID yet
def test_save_commit_true(self):
john = BandMember(name='John Lennon')
paul = BandMember(name='Paul McCartney')
ringo = BandMember(name='Richard Starkey')
beatles = Band(name='The Beatles', members=[
john, paul, ringo
])
beatles.save()
john_id, paul_id, ringo_id = john.id, paul.id, ringo.id
self.assertTrue(john_id)
self.assertTrue(paul_id)
band_members_formset = self.BandMembersFormset({
'form-TOTAL_FORMS': 4,
'form-INITIAL_FORMS': 3,
'form-MAX_NUM_FORMS': 1000,
'form-0-name': 'John Lennon',
'form-0-DELETE': 'form-0-DELETE',
'form-0-id': john_id,
'form-1-name': 'Paul McCartney',
'form-1-id': paul_id,
'form-2-name': 'Ringo Starr', # changing data of an existing record
'form-2-id': ringo_id,
'form-3-name': '',
'form-3-id': '',
}, queryset=beatles.members.all())
self.assertTrue(band_members_formset.is_valid())
updated_members = band_members_formset.save()
self.assertEqual(1, len(updated_members))
self.assertEqual('Ringo Starr', updated_members[0].name)
self.assertEqual(ringo_id, updated_members[0].id)
self.assertFalse(BandMember.objects.filter(id=john_id).exists())
self.assertEqual('Paul McCartney', BandMember.objects.get(id=paul_id).name)
self.assertEqual(beatles.id, BandMember.objects.get(id=paul_id).band_id)
self.assertEqual('Ringo Starr', BandMember.objects.get(id=ringo_id).name)
self.assertEqual(beatles.id, BandMember.objects.get(id=ringo_id).band_id)
class ChildFormsetTest(TestCase):
def test_can_create_formset(self):
beatles = Band(name='The Beatles', members=[
BandMember(name='John Lennon'),
BandMember(name='Paul McCartney'),
])
BandMembersFormset = childformset_factory(Band, BandMember, extra=3)
band_members_formset = BandMembersFormset(instance=beatles)
self.assertEqual(5, len(band_members_formset.forms))
self.assertEqual('John Lennon', band_members_formset.forms[0].instance.name)
def test_empty_formset(self):
BandMembersFormset = childformset_factory(Band, BandMember, extra=3)
band_members_formset = BandMembersFormset()
self.assertEqual(3, len(band_members_formset.forms))
def test_save_commit_false(self):
john = BandMember(name='John Lennon')
paul = BandMember(name='Paul McCartney')
ringo = BandMember(name='Richard Starkey')
beatles = Band(name='The Beatles', members=[
john, paul, ringo
])
beatles.save()
john_id, paul_id, ringo_id = john.id, paul.id, ringo.id
BandMembersFormset = childformset_factory(Band, BandMember, extra=3)
band_members_formset = BandMembersFormset({
'form-TOTAL_FORMS': 5,
'form-INITIAL_FORMS': 3,
'form-MAX_NUM_FORMS': 1000,
'form-0-name': 'John Lennon',
'form-0-DELETE': 'form-0-DELETE',
'form-0-id': john_id,
'form-1-name': 'Paul McCartney',
'form-1-id': paul_id,
'form-2-name': 'Ringo Starr', # changing data of an existing record
'form-2-id': ringo_id,
'form-3-name': '',
'form-3-id': '',
'form-4-name': 'George Harrison', # adding a record
'form-4-id': '',
}, instance=beatles)
self.assertTrue(band_members_formset.is_valid())
updated_members = band_members_formset.save(commit=False)
# updated_members should only include the items that have been changed and not deleted
self.assertEqual(2, len(updated_members))
self.assertEqual('Ringo Starr', updated_members[0].name)
self.assertEqual(ringo_id, updated_members[0].id)
self.assertEqual('George Harrison', updated_members[1].name)
self.assertEqual(None, updated_members[1].id)
# Changes should not be committed to the db yet
self.assertTrue(BandMember.objects.filter(name='John Lennon', id=john_id).exists())
self.assertEqual('Richard Starkey', BandMember.objects.get(id=ringo_id).name)
self.assertFalse(BandMember.objects.filter(name='George Harrison').exists())
beatles.members.commit()
# this should create/update/delete database entries
self.assertEqual('Ringo Starr', BandMember.objects.get(id=ringo_id).name)
self.assertTrue(BandMember.objects.filter(name='George Harrison').exists())
self.assertFalse(BandMember.objects.filter(name='John Lennon').exists())
def test_child_updates_without_ids(self):
john = BandMember(name='John Lennon')
beatles = Band(name='The Beatles', members=[
john
])
beatles.save()
john_id = john.id
paul = BandMember(name='Paul McCartney')
beatles.members.add(paul)
BandMembersFormset = childformset_factory(Band, BandMember, extra=3)
band_members_formset = BandMembersFormset({
'form-TOTAL_FORMS': 2,
'form-INITIAL_FORMS': 2,
'form-MAX_NUM_FORMS': 1000,
'form-0-name': 'John Lennon',
'form-0-id': john_id,
'form-1-name': 'Paul McCartney', # NB no way to know programmatically that this form corresponds to the 'paul' object
'form-1-id': '',
}, instance=beatles)
self.assertTrue(band_members_formset.is_valid())
band_members_formset.save(commit=False)
self.assertEqual(2, beatles.members.count())
def test_max_num_ignored_in_validation_when_validate_max_false(self):
BandMembersFormset = childformset_factory(Band, BandMember, max_num=2)
band_members_formset = BandMembersFormset({
'form-TOTAL_FORMS': 3,
'form-INITIAL_FORMS': 1,
'form-MAX_NUM_FORMS': 1000,
'form-0-name': 'John Lennon',
'form-0-id': '',
'form-1-name': 'Paul McCartney',
'form-1-id': '',
'form-2-name': 'Ringo Starr',
'form-2-id': '',
})
self.assertTrue(band_members_formset.is_valid())
def test_max_num_fail_validation(self):
BandMembersFormset = childformset_factory(Band, BandMember, max_num=2, validate_max=True)
band_members_formset = BandMembersFormset({
'form-TOTAL_FORMS': 3,
'form-INITIAL_FORMS': 1,
'form-MAX_NUM_FORMS': 1000,
'form-0-name': 'John Lennon',
'form-0-id': '',
'form-1-name': 'Paul McCartney',
'form-1-id': '',
'form-2-name': 'Ringo Starr',
'form-2-id': '',
})
self.assertFalse(band_members_formset.is_valid())
self.assertEqual(band_members_formset.non_form_errors()[0], "Please submit 2 or fewer forms.")
def test_max_num_pass_validation(self):
BandMembersFormset = childformset_factory(Band, BandMember, max_num=2, validate_max=True)
band_members_formset = BandMembersFormset({
'form-TOTAL_FORMS': 2,
'form-INITIAL_FORMS': 1,
'form-MAX_NUM_FORMS': 1000,
'form-0-name': 'John Lennon',
'form-0-id': '',
'form-1-name': 'Paul McCartney',
'form-1-id': '',
})
self.assertTrue(band_members_formset.is_valid())
def test_min_num_ignored_in_validation_when_validate_max_false(self):
BandMembersFormset = childformset_factory(Band, BandMember, min_num=2)
band_members_formset = BandMembersFormset({
'form-TOTAL_FORMS': 1,
'form-INITIAL_FORMS': 1,
'form-MAX_NUM_FORMS': 1000,
'form-0-name': 'John Lennon',
'form-0-id': '',
})
self.assertTrue(band_members_formset.is_valid())
def test_min_num_fail_validation(self):
BandMembersFormset = childformset_factory(Band, BandMember, min_num=2, validate_min=True)
band_members_formset = BandMembersFormset({
'form-TOTAL_FORMS': 1,
'form-INITIAL_FORMS': 1,
'form-MAX_NUM_FORMS': 1000,
'form-0-name': 'John Lennon',
'form-0-id': '',
})
self.assertFalse(band_members_formset.is_valid())
self.assertEqual(band_members_formset.non_form_errors()[0], "Please submit 2 or more forms.")
def test_min_num_pass_validation(self):
BandMembersFormset = childformset_factory(Band, BandMember, min_num=2, validate_min=True)
band_members_formset = BandMembersFormset({
'form-TOTAL_FORMS': 2,
'form-INITIAL_FORMS': 1,
'form-MAX_NUM_FORMS': 1000,
'form-0-name': 'John Lennon',
'form-0-id': '',
'form-1-name': 'Paul McCartney',
'form-1-id': '',
})
self.assertTrue(band_members_formset.is_valid())
class ChildFormsetWithM2MTest(TestCase):
def setUp(self):
self.james_joyce = Author.objects.create(name='James Joyce')
self.charles_dickens = Author.objects.create(name='Charles Dickens')
self.paper = NewsPaper.objects.create(title='the daily record')
self.article = Article.objects.create(
paper=self.paper,
title='Test article',
authors=[self.james_joyce],
)
ArticleFormset = childformset_factory(NewsPaper, Article, exclude=['categories', 'tags'], extra=3)
self.formset = ArticleFormset({
'form-TOTAL_FORMS': 1,
'form-INITIAL_FORMS': 1,
'form-MAX_NUM_FORMS': 10,
'form-0-id': self.article.id,
'form-0-title': self.article.title,
'form-0-authors': [self.james_joyce.id, self.charles_dickens.id],
}, instance=self.paper)
ArticleTagsFormset = childformset_factory(NewsPaper, Article, exclude=['categories', 'authors'], extra=3)
self.tags_formset = ArticleTagsFormset({
'form-TOTAL_FORMS': 1,
'form-INITIAL_FORMS': 1,
'form-MAX_NUM_FORMS': 10,
'form-0-id': self.article.id,
'form-0-title': self.article.title,
'form-0-tags': 'tag1, tagtwo',
}, instance=self.paper)
def test_save_with_commit_false(self):
self.assertTrue(self.formset.is_valid())
saved_articles = self.formset.save(commit=False)
updated_article = saved_articles[0]
# in memory
self.assertIn(self.james_joyce, updated_article.authors.all())
self.assertIn(self.charles_dickens, updated_article.authors.all())
# in db
db_article = Article.objects.get(id=self.article.id)
self.assertIn(self.james_joyce, db_article.authors.all())
self.assertNotIn(self.charles_dickens, db_article.authors.all())
def test_save_with_commit_true(self):
self.assertTrue(self.formset.is_valid())
saved_articles = self.formset.save(commit=True)
updated_article = saved_articles[0]
# in db
db_article = Article.objects.get(id=self.article.id)
self.assertIn(self.james_joyce, db_article.authors.all())
self.assertIn(self.charles_dickens, db_article.authors.all())
# in memory
self.assertIn(self.james_joyce, updated_article.authors.all())
self.assertIn(self.charles_dickens, updated_article.authors.all())
def test_tags_save_with_commit_false(self):
self.assertTrue(self.tags_formset.is_valid())
saved_articles = self.tags_formset.save(commit=False)
updated_article = saved_articles[0]
# in memory
self.assertIn('tag1', [t.slug for t in updated_article.tags.all()])
self.assertIn('tagtwo', [t.slug for t in updated_article.tags.all()])
# in db
db_article = Article.objects.get(id=self.article.id)
self.assertNotIn('tag1', [t.slug for t in db_article.tags.all()])
self.assertNotIn('tagtwo', [t.slug for t in db_article.tags.all()])
def test_tags_save_with_commit_true(self):
self.assertTrue(self.tags_formset.is_valid())
saved_articles = self.tags_formset.save(commit=True)
updated_article = saved_articles[0]
# in db
db_article = Article.objects.get(id=self.article.id)
self.assertIn('tag1', [t.slug for t in db_article.tags.all()])
self.assertIn('tagtwo', [t.slug for t in db_article.tags.all()])
# in memory
self.assertIn('tag1', [t.slug for t in updated_article.tags.all()])
self.assertIn('tagtwo', [t.slug for t in updated_article.tags.all()])
class OrderedFormsetTest(TestCase):
def test_saving_formset_preserves_order(self):
AlbumsFormset = childformset_factory(Band, Album, extra=3, can_order=True)
beatles = Band(name='The Beatles')
albums_formset = AlbumsFormset({
'form-TOTAL_FORMS': 2,
'form-INITIAL_FORMS': 0,
'form-MAX_NUM_FORMS': 1000,
'form-0-name': 'With The Beatles',
'form-0-id': '',
'form-0-ORDER': '2',
'form-1-name': 'Please Please Me',
'form-1-id': '',
'form-1-ORDER': '1',
}, instance=beatles)
self.assertTrue(albums_formset.is_valid())
albums_formset.save(commit=False)
album_names = [album.name for album in beatles.albums.all()]
self.assertEqual(['Please Please Me', 'With The Beatles'], album_names)
class NestedChildFormsetTest(TestCase):
def test_can_create_formset(self):
beatles = Band(name='The Beatles', albums=[
Album(name='Please Please Me', songs=[
Song(name='I Saw Her Standing There'),
Song(name='Misery')
])
])
AlbumsFormset = childformset_factory(Band, Album, form=ClusterForm, extra=3)
albums_formset = AlbumsFormset(instance=beatles)
self.assertEqual(4, len(albums_formset.forms))
self.assertEqual('Please Please Me', albums_formset.forms[0].instance.name)
self.assertEqual(5, len(albums_formset.forms[0].formsets['songs'].forms))
self.assertEqual(
'I Saw Her Standing There',
albums_formset.forms[0].formsets['songs'].forms[0].instance.name
)
def test_empty_formset(self):
AlbumsFormset = childformset_factory(Band, Album, form=ClusterForm, extra=3)
albums_formset = AlbumsFormset()
self.assertEqual(3, len(albums_formset.forms))
self.assertEqual(3, len(albums_formset.forms[0].formsets['songs'].forms))
def test_save_commit_false(self):
first_song = Song(name='I Saw Her Standing There')
second_song = Song(name='Mystery')
album = Album(name='Please Please Me', songs=[first_song, second_song])
beatles = Band(name='The Beatles', albums=[album])
beatles.save()
first_song_id, second_song_id = first_song.id, second_song.id
AlbumsFormset = childformset_factory(Band, Album, form=ClusterForm, extra=3)
albums_formset = AlbumsFormset({
'form-TOTAL_FORMS': 1,
'form-INITIAL_FORMS': 1,
'form-MAX_NUM_FORMS': 1000,
'form-0-name': 'Please Please Me',
'form-0-id': album.id,
'form-0-songs-TOTAL_FORMS': 4,
'form-0-songs-INITIAL_FORMS': 2,
'form-0-songs-MAX_NUM_FORMS': 1000,
'form-0-songs-0-name': 'I Saw Her Standing There',
'form-0-songs-0-DELETE': 'form-0-songs-0-DELETE',
'form-0-songs-0-id': first_song_id,
'form-0-songs-1-name': 'Misery', # changing data of an existing record
'form-0-songs-1-id': second_song_id,
'form-0-songs-2-name': '',
'form-0-songs-2-id': '',
'form-0-songs-3-name': 'Chains', # adding a record
'form-0-songs-3-id': '',
}, instance=beatles)
self.assertTrue(albums_formset.is_valid())
updated_albums = albums_formset.save(commit=False)
        # updated_albums should only include the items that have been changed and not deleted
self.assertEqual(1, len(updated_albums))
self.assertEqual('Please Please Me', updated_albums[0].name)
self.assertEqual(2, updated_albums[0].songs.count())
self.assertEqual('Misery', updated_albums[0].songs.first().name)
self.assertEqual(second_song_id, updated_albums[0].songs.first().id)
self.assertEqual('Chains', updated_albums[0].songs.all()[1].name)
self.assertEqual(None, updated_albums[0].songs.all()[1].id)
# Changes should not be committed to the db yet
self.assertTrue(Song.objects.filter(name='I Saw Her Standing There', id=first_song_id).exists())
self.assertEqual('Mystery', Song.objects.get(id=second_song_id).name)
self.assertFalse(Song.objects.filter(name='Chains').exists())
beatles.albums.first().songs.commit()
# this should create/update/delete database entries
self.assertEqual('Misery', Song.objects.get(id=second_song_id).name)
self.assertTrue(Song.objects.filter(name='Chains').exists())
self.assertFalse(Song.objects.filter(name='I Saw Her Standing There').exists())
def test_child_updates_without_ids(self):
first_song = Song(name='I Saw Her Standing There')
album = Album(name='Please Please Me', songs=[first_song])
beatles = Band(name='The Beatles', albums=[album])
beatles.save()
first_song_id = first_song.id
second_song = Song(name='Misery')
album.songs.add(second_song)
AlbumsFormset = childformset_factory(Band, Album, form=ClusterForm, extra=3)
albums_formset = AlbumsFormset({
'form-TOTAL_FORMS': 1,
'form-INITIAL_FORMS': 1,
'form-MAX_NUM_FORMS': 1000,
'form-0-name': 'Please Please Me',
'form-0-id': album.id,
'form-0-songs-TOTAL_FORMS': 2,
'form-0-songs-INITIAL_FORMS': 2,
'form-0-songs-MAX_NUM_FORMS': 1000,
'form-0-songs-0-name': 'I Saw Her Standing There',
'form-0-songs-0-id': first_song_id,
'form-0-songs-1-name': 'Misery',
'form-0-songs-1-id': '',
}, instance=beatles)
self.assertTrue(albums_formset.is_valid())
albums_formset.save(commit=False)
self.assertEqual(2, beatles.albums.first().songs.count())
|
py | 1a3390b76957c8496ab885c592d01bb6b859e51d | import xarray as xr
import numpy as np
import pandas as pd
def distrib_run_build_beam_pointing_vector(dat: list):
"""
Convenience function for mapping build_beam_pointing_vectors across cluster. Assumes that you are mapping this
function with a list of data.
distrib functions also return a processing status array, here a beamwise array = 2, which states that all
processed beams are at the 'beamvector' status level
Parameters
----------
dat
[hdng, bpa, tiltangle, tx_vecs, rx_vecs, tstmp, tx_reversed, rx_reversed]
Returns
-------
list
[relative azimuth, beam pointing angle, processing_status]
"""
ans = build_beam_pointing_vectors(dat[0], dat[1], dat[2], dat[3][0], dat[3][1], dat[4], dat[5])
# return processing status = 2 for all affected soundings
processing_status = xr.DataArray(np.full_like(dat[1], 2, dtype=np.uint8),
coords={'time': dat[1].coords['time'], 'beam': dat[1].coords['beam']},
dims=['time', 'beam'])
ans.append(processing_status)
return ans
def build_beam_pointing_vectors(hdng: xr.DataArray, bpa: xr.DataArray, tiltangle: xr.DataArray, tx_vecs: xr.DataArray,
rx_vecs: xr.DataArray, tx_reversed: bool = False, rx_reversed: bool = False):
"""
Beam pointing vector is the beam specific vector that arises from the intersection of the tx ping and rx cone
of sensitivity. Points at that area. Is in the geographic coordinate system, built using the tx/rx at time of
ping/receive.
Two components are returned. Relative azimuth, the angle relative to vessel heading that points at the beam
endpoint. Beam pointing angle, the roll corrected angle relative to the horizontal that points down at the beam
endpoint.
Parameters
----------
hdng
2d (time, beam) heading in degrees at ping time for each beam
bpa
2d (time, beam) receiver beam pointing angle
tiltangle
2d (time, beam) transmitter tiltangle on ping
tx_vecs
2 dim (time, xyz) representing tx 3d orientation in space across time
rx_vecs
3 dim (time, beam, xyz) representing rx 3d orientation in space across time/beam
tx_reversed
if true, the transmitter was installed 180° offset in yaw (i.e. backwards)
rx_reversed
if true, the receiver was installed 180° offset in yaw (i.e. backwards)
Returns
-------
xr.DataArray
2dim (time, beam), beam-wise beam azimuth values relative to vessel heading at time of ping
xr.DataArray
2 dim (time, beam) values for beampointingangle at each beam
"""
# main vec (primary head) is accessed using the primary system selection
rx_angle = np.deg2rad(bpa)
tx_angle = np.deg2rad(tiltangle)
if tx_reversed:
tx_angle = -tx_angle
if rx_reversed:
rx_angle = -rx_angle
beamvecs = construct_array_relative_beamvector(tx_vecs, rx_vecs, tx_angle, rx_angle)
rotgeo = return_array_geographic_rotation(tx_vecs, rx_vecs)
bv_geo = build_geographic_beam_vectors(rotgeo, beamvecs)
rel_azimuth = compute_relative_azimuth(bv_geo, hdng)
new_pointing_angle = compute_geo_beam_pointing_angle(bv_geo, rx_angle)
return [rel_azimuth, new_pointing_angle]
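# Illustrative usage sketch (not part of the original module); the shapes named
# below are assumptions, chosen only to show how the inputs fit together:
#
#   hdng      : (time, beam) heading in degrees at ping time
#   bpa       : (time, beam) receiver beam pointing angles in degrees
#   tiltangle : (time, beam) transmitter tilt angles in degrees
#   tx_vecs   : (time, xyz) transmitter orientation vectors
#   rx_vecs   : (time, beam, xyz) receiver orientation vectors
#
#   rel_az, pointing_angle = build_beam_pointing_vectors(hdng, bpa, tiltangle,
#                                                        tx_vecs, rx_vecs)
#
# rel_az is in radians relative to the vessel heading; pointing_angle is in
# radians relative to the vertical, signed positive to starboard.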
def construct_array_relative_beamvector(maintx: xr.DataArray, mainrx: xr.DataArray, tx_angle: xr.DataArray,
rx_angle: xr.DataArray):
"""
Given the orientation vectors representing the transmitter/receiver at time of ping/receive (maintx, mainrx) and
the TX/RX steering angles (tx_angle, rx_angle), determine new 3d beam vector components at the midpoint between
the TX and RX. This would be the 'actual' array relative beam vector.
This is a simplification of the actual scenario, adding error in the xyz due to the difference in path length/
direction of the actual ray from tx-seafloor and seafloor-rx and this co-located assumption (tx-seafloor and
rx-seafloor are the same is the assumption)
x = +FORWARD, y=+STARBOARD, z=+DOWN
Returns:
3d beam vector in co-located array ref frame. Of shape (xyz, time, beam), with 10 times and 200 beams,
beamvecs shape would be (3, 10, 200)
| <xarray.DataArray 'tiltangle' (xyz: 3, time: 10, beam: 200)>
| dask.array<concatenate, shape=(3, 10, 200), dtype=float64, chunksize=(1, 10, 200), chunktype=numpy.ndarray>
| Coordinates:
| * time (time) float64 1.496e+09 1.496e+09 ...
| * beam (beam) int32 0 1 2 3 4 5 6 7 8 ... 194 195 196 197 198 199 200
| * xyz (xyz) object 'x' 'y' 'z'
Parameters
----------
maintx
orientation vector for transmitter at time of transmit, 2dim of shape (time, xyz)
mainrx
orientation vector for receiver at time of receive, 2dim of shape (time, xyz)
tx_angle
transmitter tiltangle for each ping time
rx_angle
receiver beam pointing angle for each ping time
Returns
-------
xr.DataArray
3d beam vector in co-located array ref frame
"""
# delta - alignment angle between tx/rx vecs
delt = np.arccos(xr.dot(maintx, mainrx, dims=['xyz'])) - np.pi / 2
ysub1 = -np.sin(rx_angle)
# solve for components of 3d beam vector
ysub1 = ysub1 / np.cos(delt)
ysub2 = np.sin(tx_angle) * np.tan(delt)
radial = np.sqrt((ysub1 + ysub2) ** 2 + np.sin(tx_angle) ** 2)
x = np.sin(tx_angle)
y = ysub1 + ysub2
z = np.sqrt(1 - radial ** 2)
# generate new dataarray object for beam vectors
newx, _ = xr.broadcast(x, y) # broadcast to duplicate x along beam dimension
beamvecs = xr.concat([newx, y, z], pd.Index(list('xyz'), name='xyz'))
return beamvecs
def return_array_geographic_rotation(maintx: xr.DataArray, mainrx: xr.DataArray):
"""
Use the transmitter/receiver array orientations to build a rotation matrix between the geographic/array rel
reference frame.
Returns rotation matrices at each time/beam, of shape (beam, rot_i, time, xyz)
| <xarray.DataArray 'getitem-82dd48467b1f4e8b4f56bbe5e841cc9f' (beam: 182, rot_i: 3, time: 2, xyz: 3)>
| dask.array<transpose, shape=(182, 3, 2, 3), dtype=float64, chunksize=(182, 3, 2, 1), chunktype=numpy.ndarray>
| Coordinates:
| * rot_i (rot_i) int32 0 1 2
| * time (time) float64 1.496e+09 1.496e+09
| * beam (beam) int32 0 1 2 3 4 5 6 7 8 ... 174 175 176 177 178 179 180 181
| * xyz (xyz) <U1 'x' 'y' 'z'
Parameters
----------
maintx
orientation vector for transmitter at time of transmit, 2dim of shape (time, xyz)
mainrx
orientation vector for receiver at time of receive, 2dim of shape (time, xyz)
Returns
-------
xr.DataArray
rotation matrices at each time/beam, of shape (beam, rot_i, time, xyz)
"""
# build rotation matrix for going from locally level to geographic coord sys
x_prime = maintx
z_prime = cross(x_prime, mainrx, 'xyz')
y_prime = cross(z_prime, x_prime, 'xyz')
rotgeo = xr.concat([x_prime, y_prime, z_prime], pd.Index([0, 1, 2], name='rot_j')).T
# to do the dot product correctly, you need to align the right dimension in both matrices by giving
# them the same name (xyz for rotgeo and bv_geo in this case)
rotgeo = rotgeo.rename({'xyz': 'rot_i'})
rotgeo.coords['rot_i'] = [0, 1, 2]
rotgeo = rotgeo.rename({'rot_j': 'xyz'})
rotgeo.coords['xyz'] = ['x', 'y', 'z']
return rotgeo
def cross(a: xr.DataArray, b: xr.DataArray, spatial_dim: str, output_dtype: np.dtype = None):
"""
Xarray-compatible cross product. Compatible with dask, parallelization uses a.dtype as output_dtype
Parameters
----------
a
xarray DataArray object with a spatial_dim
b
xarray DataArray object with a spatial_dim
spatial_dim
dimension name to be mulitplied through
output_dtype
dtype of output
Returns
-------
xr.DataArray
cross product of a and b along spatial_dim
"""
for d in (a, b):
if spatial_dim not in d.dims:
raise ValueError('dimension {} not in {}'.format(spatial_dim, d))
if d.sizes[spatial_dim] != 3:
            raise ValueError('dimension {} does not have length 3 in {}'.format(spatial_dim, d))
if output_dtype is None:
output_dtype = a.dtype
c = xr.apply_ufunc(np.cross, a, b,
input_core_dims=[[spatial_dim], [spatial_dim]],
output_core_dims=[[spatial_dim]],
dask='parallelized', output_dtypes=[output_dtype]
)
return c
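# Minimal usage sketch for cross() (illustrative only; the arrays below are
# invented for this example and do not come from this module):
#
#   a = xr.DataArray(np.random.rand(10, 3), dims=['time', 'xyz'])
#   b = xr.DataArray(np.random.rand(10, 3), dims=['time', 'xyz'])
#   c = cross(a, b, 'xyz')   # same dims as the inputs, with the cross product
#                            # taken along the 'xyz' dimension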
def build_geographic_beam_vectors(rotgeo: xr.DataArray, beamvecs: xr.DataArray):
"""
Apply rotation matrix to bring transducer rel. beam vectors to geographic ref frame
Parameters
----------
rotgeo
rotation matrices at each time/beam, of shape (beam, rot_i, time, xyz), see return_array_geographic_rotation
beamvecs
3d beam vector in co-located array ref frame (xyz, time, beam), see construct_array_relative_beamvector
Returns
-------
xr.DataArray
beam vectors in geographic ref frame, of shape (time, beam, bv_xyz)
"""
bv_geo = xr.dot(rotgeo, beamvecs, dims='xyz')
bv_geo = bv_geo.rename({'rot_i': 'bv_xyz'})
bv_geo.coords['bv_xyz'] = ['x', 'y', 'z']
bv_geo = bv_geo.transpose('time', 'beam', 'bv_xyz')
return bv_geo
def compute_relative_azimuth(bv_geo: xr.DataArray, heading: xr.DataArray):
"""
Compute the relative azimuth from array to end of beam vector in geographic ref frame
Parameters
----------
bv_geo
beam vectors in geographic ref frame, of shape (time, beam, bv_xyz), see build_geographic_beam_vectors
heading
1 dim array of heading values, coords=time
Returns
-------
xr.DataArray
2dim (time, beam), beam-wise beam azimuth values relative to vessel heading at time of ping
"""
# derive azimuth/angle from the newly created geographic beam vectors
bv_azimuth = np.rad2deg(np.arctan2(bv_geo.sel(bv_xyz='y'), bv_geo.sel(bv_xyz='x')))
rel_azimuth = np.deg2rad((bv_azimuth - heading + 360) % 360)
return rel_azimuth
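# Worked example with made-up numbers: if a beam vector sits at a geographic
# azimuth of 10 degrees and the vessel heading is 350 degrees, then
# (10 - 350 + 360) % 360 = 20, i.e. the beam points 20 degrees to starboard of
# the bow, and the value returned here is np.deg2rad(20) ~= 0.349 radians.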
def compute_geo_beam_pointing_angle(bv_geo: xr.DataArray, rx_angle: xr.DataArray):
"""
Build new beam pointing angle (rel to the vertical) and with the correct sign (+ to starboard) in the geographic
ref frame.
Parameters
----------
bv_geo
beam vectors in geographic ref frame, of shape (time, beam, bv_xyz), see build_geographic_beam_vectors
rx_angle
receiver beam pointing angle for each ping time
Returns
-------
xr.DataArray
2 dim (time, beam) values for beampointingangle at each beam
"""
bvangle_divisor = np.sqrt(np.square(bv_geo.sel(bv_xyz='x')) + np.square(bv_geo.sel(bv_xyz='y')))
# new pointing angle is equal to pi/2 - depression angle (depression angle relative to horiz, pointing
# angle is the incidence angle relative to vertical)
new_pointing_angle = (np.pi / 2) - np.arctan(bv_geo.sel(bv_xyz='z') / bvangle_divisor)
# flip the sign where the azimuth is pointing to port, allows us to maintain which side the angle is on
newindx = np.ones_like(new_pointing_angle)
newindx = np.negative(newindx, out=newindx, where=rx_angle < 0)
new_pointing_angle = new_pointing_angle * newindx
return new_pointing_angle
|
py | 1a33916ba2b0b36da85ddc7aea53583052ffe49d | #! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! http://waf.googlecode.com/git/docs/wafbook/single.html#_obtaining_the_waf_file
import re,shutil,os,sys,string,shlex
from waflib.Configure import conf
from waflib.TaskGen import feature,after_method,before_method
from waflib import Build,Utils
FC_FRAGMENT=' program main\n end program main\n'
FC_FRAGMENT2=' PROGRAM MAIN\n END\n'
def fc_flags(conf):
v=conf.env
v['FC_SRC_F']=[]
v['FC_TGT_F']=['-c','-o']
v['FCINCPATH_ST']='-I%s'
v['FCDEFINES_ST']='-D%s'
if not v['LINK_FC']:v['LINK_FC']=v['FC']
v['FCLNK_SRC_F']=[]
v['FCLNK_TGT_F']=['-o']
v['FCFLAGS_fcshlib']=['-fpic']
v['LINKFLAGS_fcshlib']=['-shared']
v['fcshlib_PATTERN']='lib%s.so'
v['fcstlib_PATTERN']='lib%s.a'
v['FCLIB_ST']='-l%s'
v['FCLIBPATH_ST']='-L%s'
v['FCSTLIB_ST']='-l%s'
v['FCSTLIBPATH_ST']='-L%s'
v['FCSTLIB_MARKER']='-Wl,-Bstatic'
v['FCSHLIB_MARKER']='-Wl,-Bdynamic'
v['SONAME_ST']='-Wl,-h,%s'
def check_fortran(self,*k,**kw):
self.check_cc(fragment=FC_FRAGMENT,compile_filename='test.f',features='fc fcprogram',msg='Compiling a simple fortran app')
def check_fc(self,*k,**kw):
kw['compiler']='fc'
if not'compile_mode'in kw:
kw['compile_mode']='fc'
if not'type'in kw:
kw['type']='fcprogram'
if not'compile_filename'in kw:
kw['compile_filename']='test.f90'
if not'code'in kw:
kw['code']=FC_FRAGMENT
return self.check(*k,**kw)
def fortran_modifier_darwin(conf):
v=conf.env
v['FCFLAGS_fcshlib']=['-fPIC','-compatibility_version','1','-current_version','1']
v['LINKFLAGS_fcshlib']=['-dynamiclib']
v['fcshlib_PATTERN']='lib%s.dylib'
v['FRAMEWORKPATH_ST']='-F%s'
v['FRAMEWORK_ST']='-framework %s'
v['LINKFLAGS_fcstlib']=[]
v['FCSHLIB_MARKER']=''
v['FCSTLIB_MARKER']=''
v['SONAME_ST']=''
def fortran_modifier_win32(conf):
v=conf.env
v['fcprogram_PATTERN']=v['fcprogram_test_PATTERN']='%s.exe'
v['fcshlib_PATTERN']='%s.dll'
v['implib_PATTERN']='lib%s.dll.a'
v['IMPLIB_ST']='-Wl,--out-implib,%s'
v['FCFLAGS_fcshlib']=[]
v.append_value('FCFLAGS_fcshlib',['-DDLL_EXPORT'])
v.append_value('LINKFLAGS',['-Wl,--enable-auto-import'])
def fortran_modifier_cygwin(conf):
fortran_modifier_win32(conf)
v=conf.env
v['fcshlib_PATTERN']='cyg%s.dll'
v.append_value('LINKFLAGS_fcshlib',['-Wl,--enable-auto-image-base'])
v['FCFLAGS_fcshlib']=[]
def check_fortran_dummy_main(self,*k,**kw):
if not self.env.CC:
self.fatal('A c compiler is required for check_fortran_dummy_main')
lst=['MAIN__','__MAIN','_MAIN','MAIN_','MAIN']
lst.extend([m.lower()for m in lst])
lst.append('')
self.start_msg('Detecting whether we need a dummy main')
for main in lst:
kw['fortran_main']=main
try:
self.check_cc(fragment='int %s() { return 0; }\n'%(main or'test'),features='c fcprogram',mandatory=True)
if not main:
self.env.FC_MAIN=-1
self.end_msg('no')
else:
self.env.FC_MAIN=main
self.end_msg('yes %s'%main)
break
except self.errors.ConfigurationError:
pass
else:
self.end_msg('not found')
self.fatal('could not detect whether fortran requires a dummy main, see the config.log')
GCC_DRIVER_LINE=re.compile('^Driving:')
POSIX_STATIC_EXT=re.compile('\S+\.a')
POSIX_LIB_FLAGS=re.compile('-l\S+')
def is_link_verbose(self,txt):
assert isinstance(txt,str)
for line in txt.splitlines():
if not GCC_DRIVER_LINE.search(line):
if POSIX_STATIC_EXT.search(line)or POSIX_LIB_FLAGS.search(line):
return True
return False
def check_fortran_verbose_flag(self,*k,**kw):
self.start_msg('fortran link verbose flag')
for x in['-v','--verbose','-verbose','-V']:
try:
self.check_cc(features='fc fcprogram_test',fragment=FC_FRAGMENT2,compile_filename='test.f',linkflags=[x],mandatory=True)
except self.errors.ConfigurationError:
pass
else:
if self.is_link_verbose(self.test_bld.err)or self.is_link_verbose(self.test_bld.out):
self.end_msg(x)
break
else:
self.end_msg('failure')
self.fatal('Could not obtain the fortran link verbose flag (see config.log)')
self.env.FC_VERBOSE_FLAG=x
return x
LINKFLAGS_IGNORED=[r'-lang*',r'-lcrt[a-zA-Z0-9\.]*\.o',r'-lc$',r'-lSystem',r'-libmil',r'-LIST:*',r'-LNO:*']
if os.name=='nt':
LINKFLAGS_IGNORED.extend([r'-lfrt*',r'-luser32',r'-lkernel32',r'-ladvapi32',r'-lmsvcrt',r'-lshell32',r'-lmingw',r'-lmoldname'])
else:
LINKFLAGS_IGNORED.append(r'-lgcc*')
RLINKFLAGS_IGNORED=[re.compile(f)for f in LINKFLAGS_IGNORED]
def _match_ignore(line):
for i in RLINKFLAGS_IGNORED:
if i.match(line):
return True
return False
def parse_fortran_link(lines):
final_flags=[]
for line in lines:
if not GCC_DRIVER_LINE.match(line):
_parse_flink_line(line,final_flags)
return final_flags
SPACE_OPTS=re.compile('^-[LRuYz]$')
NOSPACE_OPTS=re.compile('^-[RL]')
def _parse_flink_line(line,final_flags):
lexer=shlex.shlex(line,posix=True)
lexer.whitespace_split=True
t=lexer.get_token()
tmp_flags=[]
while t:
def parse(token):
if _match_ignore(token):
pass
elif token.startswith('-lkernel32')and sys.platform=='cygwin':
tmp_flags.append(token)
elif SPACE_OPTS.match(token):
t=lexer.get_token()
if t.startswith('P,'):
t=t[2:]
for opt in t.split(os.pathsep):
tmp_flags.append('-L%s'%opt)
elif NOSPACE_OPTS.match(token):
tmp_flags.append(token)
elif POSIX_LIB_FLAGS.match(token):
tmp_flags.append(token)
else:
pass
t=lexer.get_token()
return t
t=parse(t)
final_flags.extend(tmp_flags)
return final_flags
def check_fortran_clib(self,autoadd=True,*k,**kw):
if not self.env.FC_VERBOSE_FLAG:
self.fatal('env.FC_VERBOSE_FLAG is not set: execute check_fortran_verbose_flag?')
self.start_msg('Getting fortran runtime link flags')
try:
self.check_cc(fragment=FC_FRAGMENT2,compile_filename='test.f',features='fc fcprogram_test',linkflags=[self.env.FC_VERBOSE_FLAG])
except:
self.end_msg(False)
if kw.get('mandatory',True):
conf.fatal('Could not find the c library flags')
else:
out=self.test_bld.err
flags=parse_fortran_link(out.splitlines())
self.end_msg('ok (%s)'%' '.join(flags))
self.env.LINKFLAGS_CLIB=flags
return flags
return[]
def getoutput(conf,cmd,stdin=False):
try:
if stdin:
stdin=Utils.subprocess.PIPE
else:
stdin=None
p=Utils.subprocess.Popen(cmd,stdin=stdin,stdout=Utils.subprocess.PIPE,stderr=Utils.subprocess.PIPE)
if stdin:
p.stdin.write('\n')
stdout,stderr=p.communicate()
except:
conf.fatal('could not determine the compiler version %r'%cmd)
else:
if not isinstance(stdout,str):
stdout=stdout.decode(sys.stdout.encoding)
if not isinstance(stderr,str):
stderr=stderr.decode(sys.stdout.encoding)
return stdout,stderr
ROUTINES_CODE="""\
subroutine foobar()
return
end
subroutine foo_bar()
return
end
"""
MAIN_CODE="""
void %(dummy_func_nounder)s(void);
void %(dummy_func_under)s(void);
int %(main_func_name)s() {
%(dummy_func_nounder)s();
%(dummy_func_under)s();
return 0;
}
"""
def link_main_routines_tg_method(self):
def write_test_file(task):
task.outputs[0].write(task.generator.code)
bld=self.bld
bld(rule=write_test_file,target='main.c',code=MAIN_CODE%self.__dict__)
bld(rule=write_test_file,target='test.f',code=ROUTINES_CODE)
bld(features='fc fcstlib',source='test.f',target='test')
bld(features='c fcprogram',source='main.c',target='app',use='test')
def mangling_schemes():
for u in['_','']:
for du in['','_']:
for c in["lower","upper"]:
yield(u,du,c)
def mangle_name(u,du,c,name):
return getattr(name,c)()+u+(name.find('_')!=-1 and du or'')
def check_fortran_mangling(self,*k,**kw):
if not self.env.CC:
self.fatal('A c compiler is required for link_main_routines')
if not self.env.FC:
self.fatal('A fortran compiler is required for link_main_routines')
if not self.env.FC_MAIN:
self.fatal('Checking for mangling requires self.env.FC_MAIN (execute "check_fortran_dummy_main" first?)')
self.start_msg('Getting fortran mangling scheme')
for(u,du,c)in mangling_schemes():
try:
self.check_cc(compile_filename=[],features='link_main_routines_func',msg='nomsg',errmsg='nomsg',mandatory=True,dummy_func_nounder=mangle_name(u,du,c,"foobar"),dummy_func_under=mangle_name(u,du,c,"foo_bar"),main_func_name=self.env.FC_MAIN)
except self.errors.ConfigurationError:
pass
else:
self.end_msg("ok ('%s', '%s', '%s-case')"%(u,du,c))
self.env.FORTRAN_MANGLING=(u,du,c)
break
else:
self.end_msg(False)
self.fatal('mangler not found')
return(u,du,c)
def set_lib_pat(self):
self.env['fcshlib_PATTERN']=self.env['pyext_PATTERN']
conf(fc_flags)
conf(check_fortran)
conf(check_fc)
conf(fortran_modifier_darwin)
conf(fortran_modifier_win32)
conf(fortran_modifier_cygwin)
conf(check_fortran_dummy_main)
conf(is_link_verbose)
conf(check_fortran_verbose_flag)
conf(check_fortran_clib)
feature('link_main_routines_func')(link_main_routines_tg_method)
before_method('process_source')(link_main_routines_tg_method)
conf(check_fortran_mangling)
feature('pyext')(set_lib_pat)
before_method('propagate_uselib_vars','apply_link')(set_lib_pat) |
py | 1a33919d51bdd94fd39a3213f06cc2a4e389ab01 | # -*- coding: utf-8 -*-
#
# This file is part of tofbot, a friendly IRC bot.
# You may redistribute it under the Simplified BSD License.
# If we meet some day, and you think this stuff is worth it,
# you can buy us a beer in return.
#
# Copyright (c) 2011 Etienne Millon <[email protected]>
# Martin Kirchgessner <[email protected]>
from toflib import Plugin
from toflib import distance
class PluginDassin(Plugin):
def handle_msg(self, msg_text, chan, nick):
ete = [ "tu sais"
, "je n'ai jamais été aussi heureux que ce matin-là"
, "nous marchions sur une plage"
, "un peu comme celle-ci"
, "c'était l'automne"
, "un automne où il faisait beau"
, "une saison qui n'existe que dans le nord de l'amérique"
, "là-bas on l'appelle l'été indien"
, "mais c'était tout simplement le nôtre"
, "avec ta robe longue"
, "tu ressemblais à une aquarelle de marie laurencin"
, "et je me souviens"
, "je me souviens très bien de ce que je t'ai dit ce matin-là"
, "il y a un an"
, "il y a un siècle"
, "il y a une éternité"
, "on ira"
, "où tu voudras, quand tu voudras"
, "et l'on s'aimera encore"
, "lorsque l'amour sera mort"
, "toute la vie"
, "sera pareille à ce matin"
, "aux couleurs de l'été indien"
]
colline = [ "je l'ai vue près d'un laurier"
, "elle gardait ses blanches brebis"
, "quand j'ai demandé d'où venait sa peau fraîche elle m'a dit"
, "c'est de rouler dans la rosée qui rend les bergères jolies"
, "mais quand j'ai dit qu'avec elle"
, "je voudrais y rouler aussi"
, "elle m'a dit"
, "elle m'a dit d'aller siffler là-haut sur la colline"
, "de l'attendre avec un petit bouquet d'églantines"
, "j'ai cueilli des fleurs"
, "et j'ai sifflé tant que j'ai pu"
, "j'ai attendu, attendu"
, "elle n'est jamais venue"
, "zay zay zay zay"
]
        # Rone - Bora (vocal edit, a text by Alain Damasio), re-cut line by line so it "tofs" properly
bora = [ "il n'y pas de secret",
"pas de secrets",
"il y a une vérité",
"simple, sobre, crue, quoi",
"la horde du contrevent",
"tu la réussiras uniquement si tu t'isoles",
"si tu t'isoles quoi",
"tu comprends ce que ça veut dire isole ?",
"isola",
"l'ile quoi",
"tu crées ton ile et tu l'effaces au maximum",
"il faut que les gens soient extrêmement loin de toi",
"mais loin parce que ton univers sera vaste",
"sera immense",
"sera énorme",
"énorme univers",
"énorme puissance d'univers",
"caracole il existe en toi complètement",
"comme strochnis",
"qu'il soit toi",
"que pietro della rocca tu le deviennes",
"et la croute aussi",
"et tous l'univers"
"et tout le vent",
"tu vis complètement la dedans",
"c'est ca qu'il faut",
"y a que ca qu'il faut",
"tu restes collé au vent",
"collé au vent",
"collé au vent, toi",
"et que tu te battes",
"que tu ne fasses aucune concessison sur le reste",
"tu oublies tout",
"t'es pas consultant, t'es rien",
"le consulting c'est d'la merde",
"la seule chose qui a d'la valeur",
"c'est quand t'es capable de faire un chapitre comme celui-là",
"ça, ça restera, ça mérite que tu vives",
"tu peux vivre pour écrire ça",
"ça mérite que tu vives",
"là t'es pas né pour rien",
"t'es nécessaire",
"t'es pas surnuméraire",
"t'es pas superflu",
"là t'as une nécessité quand t'écris ça",
"la nécessité d'être",
"et c'est ça qu'il faut tenir mec",
"c'est ça qu'il faut putain de tenir",
"lâches pas le morceau",
"t'fais pas enculer",
"t'fais pas disperser",
"t'fais pas fragmenter",
"fais pas de concession",
"y'a pas de concession avec la vie",
"y'a pas de concession",
"tu vis et faut vivre à fond"
]
oizo = ["coucou","tu veux voir ma bite ?"]
hell = ["hell", "cook"]
chuck = ["nope", "it's just Chuck Testa !"]
hibernatus = [ "j'ai tout visité en 2 secondes"
, "Pékin, Tokyo, la Joconde"
, "j'ai fait tous les jobs possibles"
, "plombier, pute et belle fille"
, "j'ai sodomisé un louveteau"
, "avec le manche d'un marteau"
, "j'ai grandi à Harlem"
, "avec Paul Préboist et Vandel"
, "j'ai braqué le CIO"
, "pour m'acheter le Figaro"
, "j'ai buté ma grand-mére"
, "parce que je ne savais pas quoi faire"
, "j'ai aussi buté Diana"
, "mais pour de l'argent cette fois"
, "j'ai été chez un psy"
, "pour lui dire que j'étais guérie"
, "j'ai aussi mangé du dauphin"
, "flipper était pas si malin"
, "j'ai fais la Star Academy"
, "pour chanter avec Fiori"
, "j'ai inventé la bouffe congelée"
, "et j'me ferai cryogéniser"
, "j'ai déjà vu Hibernatus"
, "j'ai le Dvd dans mon anus"
, "j'suis déjà allée partout"
, "j'ai tout ramené, je connais tout"
, "j'ai pas besoin d'en apprendre plus"
, "j'ai le dvd dans mon anus"
, "j'suis déjà allée partout"
, "j'ai tout ramené, je connais tout"
, "j'ai pas besoin d'en apprendre plus"
, "j'ai le dvd dans mon anus"
]
songs = [oizo, ete, colline, bora, hell, hibernatus, chuck]
searched = msg_text.lower()
minDist = 9999999
best = ""
        for song in songs:
            # Compare every line except the last one: the reply is the line that
            # follows the closest match, so the final line has no follow-up.
            for i, line in enumerate(song[:-1]):
                dist = distance(line, searched)
                if dist < minDist:
                    best = song[i + 1]
                    minDist = dist
if len(best) > 3 and minDist < (len(searched)/3):
self.say(best)
|
py | 1a3391d0450547866c34f6d433903ef7a6e11b30 | from cConstants import cEPAConstants
from cEnum import eEPA
import cPlot3D
class cPlotFrame(cPlot3D.cPlotFrame):
def __init__(self, iParent, **kwargs):
cPlot3D.cPlotFrame.__init__(self, iParent, **kwargs)
def initPanel(self, *args, **kwargs):
self.m_PlotPanel = cPlotPanel(self, **kwargs)
class cPlotPanel(cPlot3D.cPlotPanel):
def __init__(self, iParent, iXAxisItem=eEPA.evaluation, iYAxisItem=eEPA.potency, iZAxisItem=eEPA.activity, iPlotType=eEPA.fundamental, **kwargs):
cPlot3D.cPlotPanel.__init__(self, iParent, **kwargs)
self.m_XAxisItem = iXAxisItem
self.m_YAxisItem = iYAxisItem
self.m_ZAxisItem = iZAxisItem
        self.m_PlotType = iPlotType
def getSentimentEPAIndex(self, iEPA, iSentiment):
return iEPA + (cEPAConstants.m_Dimensions * iSentiment)
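        # Assumption: each sample lays out the EPA dimensions contiguously per sentiment
        # target, so the flat index is iEPA + m_Dimensions * iSentiment (e.g. the values
        # for "self" followed by the values for "other").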
# Axis items are the enumerations of the elements in eEPA, so they're basically numbers
    def setAxis(self, iXAxisItem, iYAxisItem, iZAxisItem):
self.m_XAxisItem = iXAxisItem
self.m_YAxisItem = iYAxisItem
self.m_ZAxisItem = iZAxisItem
def plotEPA(self, iLearnerSamples, iSimulatorSamples):
self.clearAxes()
if (0 < len(iLearnerSamples)):
# Learner's sentiments on self and other, green and pink respectively
self.plotScatter(
iLearnerSamples[self.getSentimentEPAIndex(self.m_XAxisItem, cEPAConstants.m_SelfMultiplier)],
iLearnerSamples[self.getSentimentEPAIndex(self.m_YAxisItem, cEPAConstants.m_SelfMultiplier)],
iLearnerSamples[self.getSentimentEPAIndex(self.m_ZAxisItem, cEPAConstants.m_SelfMultiplier)],
iAutoScaling=False, iRedraw=False, iUpdate=False, marker="o", s=50, c="green", alpha=1, animated=False)
self.plotScatter(
iLearnerSamples[self.getSentimentEPAIndex(self.m_XAxisItem, cEPAConstants.m_OtherMultiplier)],
iLearnerSamples[self.getSentimentEPAIndex(self.m_YAxisItem, cEPAConstants.m_OtherMultiplier)],
iLearnerSamples[self.getSentimentEPAIndex(self.m_ZAxisItem, cEPAConstants.m_OtherMultiplier)],
iAutoScaling=False, iRedraw=False, iUpdate=False, marker="o", s=50, c="pink", alpha=1, animated=False)
if (0 < len(iSimulatorSamples)):
# Simulator's sentiments on self and other, goldenrod and blue respectively
self.plotScatter(
iSimulatorSamples[self.getSentimentEPAIndex(self.m_XAxisItem, cEPAConstants.m_SelfMultiplier)],
iSimulatorSamples[self.getSentimentEPAIndex(self.m_YAxisItem, cEPAConstants.m_SelfMultiplier)],
iSimulatorSamples[self.getSentimentEPAIndex(self.m_ZAxisItem, cEPAConstants.m_SelfMultiplier)],
iAutoScaling=False, iRedraw=False, iUpdate=False, marker="o", s=50, c="goldenrod", alpha=1, animated=False)
self.plotScatter(
iSimulatorSamples[self.getSentimentEPAIndex(self.m_XAxisItem, cEPAConstants.m_OtherMultiplier)],
iSimulatorSamples[self.getSentimentEPAIndex(self.m_YAxisItem, cEPAConstants.m_OtherMultiplier)],
iSimulatorSamples[self.getSentimentEPAIndex(self.m_ZAxisItem, cEPAConstants.m_OtherMultiplier)],
iAutoScaling=False, iRedraw=False, iUpdate=False, marker="o", s=50, c="blue", alpha=1, animated=False)
self.m_Axes.set_xlabel(cEPAConstants.m_EPALabels[self.m_XAxisItem])
self.m_Axes.set_ylabel(cEPAConstants.m_EPALabels[self.m_YAxisItem])
self.m_Axes.set_zlabel(cEPAConstants.m_EPALabels[self.m_ZAxisItem])
self.redrawAxes()
|
py | 1a339208f4dbcfeb1e8baeafe6e55d76ce32f1aa | """Test the permission utils."""
from homeassistant.auth.permissions import util
def test_test_all():
"""Test if we can test the all group."""
for val in (
None,
{},
{'all': None},
{'all': {}},
):
assert util.test_all(val, 'read') is False
for val in (
True,
{'all': True},
{'all': {'read': True}},
):
assert util.test_all(val, 'read') is True
|
py | 1a33922d1b24e06dc8b0c8f281ac109d5c3240e9 | import setuptools
import os
import stat
from setuptools.command.install import install
from distutils import log
with open("README.md", "r") as fh:
long_description = fh.read()
"""
The below code is taken from https://github.com/Uberi/speech_recognition
See README.md for licence information
"""
FILES_TO_MARK_EXECUTABLE = ["flac-linux-x86", "flac-linux-x86_64", "flac-mac", "flac-win32.exe"]
class InstallWithExtraSteps(install):
def run(self):
install.run(self) # do the original install steps
# mark the FLAC executables as executable by all users (this fixes occasional issues when file permissions get messed up)
for output_path in self.get_outputs():
if os.path.basename(output_path) in FILES_TO_MARK_EXECUTABLE:
log.info("setting executable permissions on {}".format(output_path))
stat_info = os.stat(output_path)
os.chmod(
output_path,
stat_info.st_mode |
stat.S_IRUSR | stat.S_IXUSR | # owner can read/execute
stat.S_IRGRP | stat.S_IXGRP | # group can read/execute
stat.S_IROTH | stat.S_IXOTH # everyone else can read/execute
)
"""
Below is DanSpeech licence
"""
with open('requirements.txt') as f:
requirements = f.read().splitlines()
setuptools.setup(
name="danspeech",
version="1.0.3",
author="Rasmus Arpe Fogh Jensen, Martin Carsten Nielsen",
author_email="[email protected], [email protected],",
description="Speech recognition for Danish",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/danspeech/danspeech",
packages=setuptools.find_packages(),
include_package_data=True,
install_requires=requirements,
license_file="LICENCE.txt",
classifiers=[
"Programming Language :: Python :: 3",
'Development Status :: 5 - Production/Stable',
"Operating System :: OS Independent",
],
)
|
py | 1a33926904995e57dbe36a4aaf3eb5662ae6959c | import argparse
import sys
import torch
import torch.nn as nn
from torch.autograd import Variable
from torch.utils.tensorboard import SummaryWriter
from sklearn.model_selection import train_test_split
import utils
from utils import Dataset
from model import LinearNet
import matplotlib.pyplot as plt
import imageio
import numpy as np
def selection(data):
namelist = list()
for x in data.columns.values:
namelist.append(x)
return namelist
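# Note: selection() simply collects the DataFrame's column names into a plain list.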
def variable_check(data, variable):
if variable not in selection(data):
print('[INFO] Missing variable!')
sys.exit(0)
else:
pass
# index = dict((y, x) for x, y in enumerate(selection(data)))
# if variable is not None:
# try:
# var = index[variable]
# except KeyError:
# print("Variable is empty or not found!")
# sys.exit(0)
# else:
# print(f"Variable '{variable}:{var}' is exist.")
# pass
def gpu_dataset(X, Y):
X_tensor = torch.FloatTensor(X).cuda()
y_tensor = torch.FloatTensor(Y).cuda()
X_train, X_test, y_train, y_test = train_test_split(X_tensor, y_tensor,
test_size=0.2,
random_state=0)
x, y = Variable(X_train), Variable(y_train)
return x, y
def cpu_dataset(X, Y):
X_tensor = torch.FloatTensor(X)
y_tensor = torch.FloatTensor(Y)
X_train, X_test, y_train, y_test = train_test_split(X_tensor, y_tensor,
test_size=0.2,
random_state=0)
x, y = X_train, y_train
return x, y
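# Note: both gpu_dataset and cpu_dataset keep only the training split; the held-out
# X_test / y_test produced by train_test_split are currently discarded.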
def train():
input_dir, var1, var2, adam, device = opt.input, opt.var1, opt.var2, opt.adam, opt.device
data = Dataset(input_dir).data
device = utils.select_device(device, batch_size=opt.batch_size)
for i in (var1, var2):
variable_check(data, i)
use_cuda = torch.cuda.is_available()
X_reshape = data[var1].values.reshape(-1, 1)
y_reshape = data[var2].values.reshape(-1, 1)
if use_cuda:
x, y = gpu_dataset(X_reshape, y_reshape)
else:
x, y = cpu_dataset(X_reshape, y_reshape)
# Initialize model
net = LinearNet(n_feature=x.size(1), n_output=y.size(1)).to(device)
if adam:
# optimizer using Adam
optimizer = torch.optim.Adam(net.parameters(), lr=0.001)
else:
# optimizer using SGD
optimizer = torch.optim.SGD(net.parameters(), lr=0.001)
loss_func = nn.MSELoss()
batch_size = opt.batch_size
n_epochs = opt.epoch
batch_no = len(x) // batch_size
train_loss = 0
train_loss_min = np.Inf
if use_cuda:
for epoch in range(n_epochs):
for i in range(batch_no):
                start = i * batch_size
                end = start + batch_size
                optimizer.zero_grad()
                # Train on the current mini-batch rather than the full dataset
                prediction = net(x[start:end])
                loss = loss_func(prediction, y[start:end])
                loss.backward()
                optimizer.step()
                values, labels = torch.max(prediction, 1)
                num_right = np.sum(labels.cpu().data.numpy() == y[start:end].cpu().data.numpy())
train_loss += loss.item()*batch_size
train_loss = train_loss / len(x)
if train_loss <= train_loss_min:
print("Validation loss decreased ({:6f} ===> {:6f}). Saving the model...".format(train_loss_min,train_loss))
torch.save(net.state_dict(), "regression_model.pt")
train_loss_min = train_loss
if epoch % 50 == 0:
print('')
print("Epoch: {} \tTrain Loss: {} \tTrain Accuracy: {}".format(epoch+1, train_loss,num_right / len(y[start:end]) ))
print('Training Ended! ')
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--epoch', type=int, default=10, help='epoch value')
parser.add_argument('--batch-size', type=int, default=8)
parser.add_argument('--input', type=str, default='./example/data.csv', help='*.csv path')
parser.add_argument('--var1', type=str, default='H', help='independent variable')
parser.add_argument('--var2', type=str, default='VUB', help='dependent variable')
parser.add_argument('--adam', action='store_true', default=True, help='use adam optimizer')
parser.add_argument('--device', default='0', help='device id (i.e. 0 or 0,1 or cpu)')
opt = parser.parse_args()
print(opt)
# device = utils.select_device(opt.device, batch_size=opt.batch_size)
# print('Start Tensorboard with "tensorboard --logdir=runs", view at http://localhost:6006/')
# tb_writer = SummaryWriter(comment=opt.name)
train()
|
py | 1a33936ccc24bd830e1ed06f2f474c2436b8582d | """
Copyright BOOSTRY Co., Ltd.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
SPDX-License-Identifier: Apache-2.0
"""
from app import config
class TestV2TokenABI:
"""
Test Case for v2.token_abi.~~
"""
    # API under test
apiurl_base = '/v2/ABI'
    # <Normal case 1>
    # Fetch the StraightBond ABI
def test_straightbondabi_normal(self, client, session, shared_contract):
config.BOND_TOKEN_ENABLED = True
apiurl = self.apiurl_base + '/StraightBond'
query_string = ''
resp = client.simulate_get(apiurl, query_string=query_string)
assert resp.status_code == 200
assert resp.json['meta'] == {'code': 200, 'message': 'OK'}
assert resp.json['data'] is not None
    # <Normal case 2>
    # Fetch the Share ABI
def test_shareabi_normal(self, client, session, shared_contract):
config.SHARE_TOKEN_ENABLED = True
apiurl = self.apiurl_base + '/Share'
query_string = ''
resp = client.simulate_get(apiurl, query_string=query_string)
assert resp.status_code == 200
assert resp.json['meta'] == {'code': 200, 'message': 'OK'}
assert resp.json['data'] is not None
    # <Normal case 3>
    # Fetch the Membership ABI
def test_membershipabi_normal(self, client, session, shared_contract):
config.MEMBERSHIP_TOKEN_ENABLED = True
apiurl = self.apiurl_base + '/Membership'
query_string = ''
resp = client.simulate_get(apiurl, query_string=query_string)
assert resp.status_code == 200
assert resp.json['meta'] == {'code': 200, 'message': 'OK'}
assert resp.json['data'] is not None
    # <Normal case 4>
    # Fetch the Coupon ABI
def test_couponabi_normal(self, client, session, shared_contract):
config.COUPON_TOKEN_ENABLED = True
apiurl = self.apiurl_base + '/Coupon'
query_string = ''
resp = client.simulate_get(apiurl, query_string=query_string)
assert resp.status_code == 200
assert resp.json['meta'] == {'code': 200, 'message': 'OK'}
assert resp.json['data'] is not None
    # <Error case 1>
    # ABI that does not exist
def test_error_1(self, client, session, shared_contract):
apiurl = self.apiurl_base + '/Unknown'
query_string = ''
resp = client.simulate_get(apiurl, query_string=query_string)
assert resp.status_code == 404
    # <Error case 2>
    # StraightBond ABI with ENABLED=false
def test_error_2(self, client, session, shared_contract):
config.BOND_TOKEN_ENABLED = False
apiurl = self.apiurl_base + '/StraightBond'
query_string = ''
resp = client.simulate_get(apiurl, query_string=query_string)
assert resp.status_code == 404
assert resp.json['meta'] == {
'code': 10,
'message': 'Not Supported',
'description': 'method: GET, url: /v2/ABI/StraightBond'
}
    # <Error case 3>
    # Share ABI with ENABLED=false
def test_error_3(self, client, session, shared_contract):
config.SHARE_TOKEN_ENABLED = False
apiurl = self.apiurl_base + '/Share'
query_string = ''
resp = client.simulate_get(apiurl, query_string=query_string)
assert resp.status_code == 404
assert resp.json['meta'] == {
'code': 10,
'message': 'Not Supported',
'description': 'method: GET, url: /v2/ABI/Share'
}
    # <Error case 4>
    # Membership ABI with ENABLED=false
def test_error_4(self, client, session, shared_contract):
config.MEMBERSHIP_TOKEN_ENABLED = False
apiurl = self.apiurl_base + '/Membership'
query_string = ''
resp = client.simulate_get(apiurl, query_string=query_string)
assert resp.status_code == 404
assert resp.json['meta'] == {
'code': 10,
'message': 'Not Supported',
'description': 'method: GET, url: /v2/ABI/Membership'
}
    # <Error case 5>
    # Coupon ABI with ENABLED=false
def test_error_5(self, client, session, shared_contract):
config.COUPON_TOKEN_ENABLED = False
apiurl = self.apiurl_base + '/Coupon'
query_string = ''
resp = client.simulate_get(apiurl, query_string=query_string)
assert resp.status_code == 404
assert resp.json['meta'] == {
'code': 10,
'message': 'Not Supported',
'description': 'method: GET, url: /v2/ABI/Coupon'
} |
py | 1a33943d2cf0f6c01fc1fd72edefaa54e0e682d5 | import tensorflow as tf
from tensorflow.keras.layers import Layer, Dense, Reshape, Embedding, Concatenate, Conv2D
from tensorflow.keras.models import Model
import numpy as np
class SelfAttention(Model):
def __init__(self, d_model, spatial_dims, positional_encoding=True, name="self_attention"):
'''
d_model : number of output channels
spatial_dim : spatial dimensions of input tensor (x , y)
if positional_encoding: depth must correspond to input channel number
adapted from: https://www.tensorflow.org/tutorials/text/transformer
'''
super().__init__(name=name)
self.d_model = d_model
self.spatial_dims=spatial_dims
self.spatial_dim = np.prod(spatial_dims)
self.wq = Dense(self.d_model, name=name+"_q")
self.wk = Dense(self.d_model, name=name+"_k")
self.wv = Dense(self.d_model, name=name+"_w")
self.positional_encoding=positional_encoding
if positional_encoding:
self.pos_embedding = Embedding(self.spatial_dim, d_model, name=name+"pos_enc") # TODO test other positional encoding. in particular that encodes X and Y
def call(self, x):
'''
x : tensor with shape (batch_size, y, x, channels)
'''
shape = tf.shape(x)
batch_size = shape[0]
#spatial_dims = shape[1:-1]
#spatial_dim = tf.reduce_prod(spatial_dims)
depth_dim = shape[3]
if self.positional_encoding:
x_index = tf.range(self.spatial_dim, dtype=tf.int32)
pos_emb = self.pos_embedding(x_index) # (spa_dim, d_model)
pos_emb = tf.reshape(pos_emb, (self.spatial_dims[0], self.spatial_dims[1], self.d_model)) #for broadcasting purpose
x = x + pos_emb # broadcast
q = self.wq(x) # (batch_size, *spa_dims, d_model)
k = self.wk(x) # (batch_size, *spa_dims, d_model)
v = self.wv(x) # (batch_size, *spa_dims, d_model)
q = tf.reshape(q, (batch_size, -1, depth_dim)) # (batch_size, spa_dim, d_model)
k = tf.reshape(k, (batch_size, -1, depth_dim))
v = tf.reshape(v, (batch_size, -1, depth_dim))
# scaled_attention.shape == (batch_size, spa_dims, depth)
# attention_weights.shape == (batch_size, spa_dims, spa_dims)
scaled_attention, attention_weights = scaled_dot_product_attention(q, k, v)
output = tf.reshape(scaled_attention, (batch_size, self.spatial_dims[0], self.spatial_dims[1], self.d_model))
tf.identity(attention_weights, name=self.name+"_attention_weights")
return output, attention_weights
def compute_output_shape(self, input_shape):
return input_shape[:-1]+(self.d_model,), (input_shape[0],self.spatial_dim,self.spatial_dim)
def scaled_dot_product_attention(q, k, v):
"""Calculate the attention weights.
q, k, v must have matching leading dimensions.
k, v must have matching penultimate dimension, i.e.: seq_len_k = seq_len_v.
The mask has different shapes depending on its type(padding or look ahead)
but it must be broadcastable for addition.
Args:
q: query shape == (..., seq_len_q, depth)
k: key shape == (..., seq_len_k, depth)
v: value shape == (..., seq_len_v, depth_v)
Returns:
output, attention_weights
from : https://www.tensorflow.org/tutorials/text/transformer
"""
matmul_qk = tf.matmul(q, k, transpose_b=True) # (..., seq_len_q, seq_len_k)
# scale matmul_qk
dk = tf.cast(tf.shape(k)[-1], tf.float32)
scaled_attention_logits = matmul_qk / tf.math.sqrt(dk)
# softmax is normalized on the last axis (seq_len_k) so that the scores
# add up to 1.
attention_weights = tf.nn.softmax(scaled_attention_logits, axis=-1) # (..., seq_len_q, seq_len_k)
output = tf.matmul(attention_weights, v) # (..., seq_len_q, depth_v)
return output, attention_weights
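# Minimal usage sketch (not part of the original module): run the attention block on a
# dummy 8x8 feature map with 16 channels; the channel count must equal d_model because
# positional_encoding=True adds the learned position embedding directly to the input.
if __name__ == "__main__":
    dummy = tf.random.normal((2, 8, 8, 16))            # (batch, y, x, channels)
    attention = SelfAttention(d_model=16, spatial_dims=(8, 8))
    output, weights = attention(dummy)
    print(output.shape, weights.shape)                 # (2, 8, 8, 16) (2, 64, 64)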
|
py | 1a3394fbcd18af63b1e119a2eb658cdb948d16e3 | #!/usr/bin/env python
# Copyright (C) 2013-2014 Reinhard Stampp
# Copyright (C) 2014-2016 Sascha Kopp
# This file is part of fortrace - http://fortrace.fbi.h-da.de
# See the file 'docs/LICENSE' for copying permission.
from distutils.core import setup
setup(
name='fortrace',
version='1.0',
packages=['fortrace', 'fortrace.attacks', 'fortrace.botnet', 'fortrace.botnet.net', 'fortrace.botnet.net.meta', 'fortrace.botnet.net.proto',
'fortrace.core', 'fortrace.generator', 'fortrace.botnet.common', 'fortrace.botnet.core', 'fortrace.botnet.core.bmoncomponents',
'fortrace.utility', 'fortrace.application', 'fortrace.inputDevice', 'fortrace.botnet.bots',
'fortrace.botnet.bots.hellobot', 'fortrace.botnet.bots.mariposa', 'fortrace.botnet.bots.zeus'],
package_dir={'fortrace': '../src/fortrace'},
package_data={'fortrace': ['../utility/conf/*']},
url='fortrace.fbi.h-da.de',
license='',
author='Reinhard Stampp, Sascha Kopp',
author_email='[email protected], [email protected]',
description='Python bindings for fortrace.'
)
|
py | 1a339596cca0f0f6ea3073d53304fdaef2225104 | # coding: utf-8
# Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class Application(object):
"""
The application type contains the audit summary information and the definition of the application.
"""
def __init__(self, **kwargs):
"""
Initializes a new Application object with values from keyword arguments.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param key:
The value to assign to the key property of this Application.
:type key: str
:param model_type:
The value to assign to the model_type property of this Application.
:type model_type: str
:param model_version:
The value to assign to the model_version property of this Application.
:type model_version: str
:param name:
The value to assign to the name property of this Application.
:type name: str
:param description:
The value to assign to the description property of this Application.
:type description: str
:param application_version:
The value to assign to the application_version property of this Application.
:type application_version: int
:param object_status:
The value to assign to the object_status property of this Application.
:type object_status: int
:param identifier:
The value to assign to the identifier property of this Application.
:type identifier: str
:param parent_ref:
The value to assign to the parent_ref property of this Application.
:type parent_ref: oci.data_integration.models.ParentReference
:param object_version:
The value to assign to the object_version property of this Application.
:type object_version: int
:param dependent_object_metadata:
The value to assign to the dependent_object_metadata property of this Application.
:type dependent_object_metadata: list[oci.data_integration.models.PatchObjectMetadata]
:param published_object_metadata:
The value to assign to the published_object_metadata property of this Application.
:type published_object_metadata: dict(str, PatchObjectMetadata)
:param source_application_info:
The value to assign to the source_application_info property of this Application.
:type source_application_info: oci.data_integration.models.SourceApplicationInfo
:param time_patched:
The value to assign to the time_patched property of this Application.
:type time_patched: datetime
:param metadata:
The value to assign to the metadata property of this Application.
:type metadata: oci.data_integration.models.ObjectMetadata
:param key_map:
The value to assign to the key_map property of this Application.
:type key_map: dict(str, str)
"""
self.swagger_types = {
'key': 'str',
'model_type': 'str',
'model_version': 'str',
'name': 'str',
'description': 'str',
'application_version': 'int',
'object_status': 'int',
'identifier': 'str',
'parent_ref': 'ParentReference',
'object_version': 'int',
'dependent_object_metadata': 'list[PatchObjectMetadata]',
'published_object_metadata': 'dict(str, PatchObjectMetadata)',
'source_application_info': 'SourceApplicationInfo',
'time_patched': 'datetime',
'metadata': 'ObjectMetadata',
'key_map': 'dict(str, str)'
}
self.attribute_map = {
'key': 'key',
'model_type': 'modelType',
'model_version': 'modelVersion',
'name': 'name',
'description': 'description',
'application_version': 'applicationVersion',
'object_status': 'objectStatus',
'identifier': 'identifier',
'parent_ref': 'parentRef',
'object_version': 'objectVersion',
'dependent_object_metadata': 'dependentObjectMetadata',
'published_object_metadata': 'publishedObjectMetadata',
'source_application_info': 'sourceApplicationInfo',
'time_patched': 'timePatched',
'metadata': 'metadata',
'key_map': 'keyMap'
}
self._key = None
self._model_type = None
self._model_version = None
self._name = None
self._description = None
self._application_version = None
self._object_status = None
self._identifier = None
self._parent_ref = None
self._object_version = None
self._dependent_object_metadata = None
self._published_object_metadata = None
self._source_application_info = None
self._time_patched = None
self._metadata = None
self._key_map = None
@property
def key(self):
"""
Gets the key of this Application.
Generated key that can be used in API calls to identify application.
:return: The key of this Application.
:rtype: str
"""
return self._key
@key.setter
def key(self, key):
"""
Sets the key of this Application.
Generated key that can be used in API calls to identify application.
:param key: The key of this Application.
:type: str
"""
self._key = key
@property
def model_type(self):
"""
Gets the model_type of this Application.
The object type.
:return: The model_type of this Application.
:rtype: str
"""
return self._model_type
@model_type.setter
def model_type(self, model_type):
"""
Sets the model_type of this Application.
The object type.
:param model_type: The model_type of this Application.
:type: str
"""
self._model_type = model_type
@property
def model_version(self):
"""
Gets the model_version of this Application.
The object's model version.
:return: The model_version of this Application.
:rtype: str
"""
return self._model_version
@model_version.setter
def model_version(self, model_version):
"""
Sets the model_version of this Application.
The object's model version.
:param model_version: The model_version of this Application.
:type: str
"""
self._model_version = model_version
@property
def name(self):
"""
Gets the name of this Application.
Free form text without any restriction on permitted characters. Name can have letters, numbers, and special characters. The value is editable and is restricted to 1000 characters.
:return: The name of this Application.
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""
Sets the name of this Application.
Free form text without any restriction on permitted characters. Name can have letters, numbers, and special characters. The value is editable and is restricted to 1000 characters.
:param name: The name of this Application.
:type: str
"""
self._name = name
@property
def description(self):
"""
Gets the description of this Application.
Detailed description for the object.
:return: The description of this Application.
:rtype: str
"""
return self._description
@description.setter
def description(self, description):
"""
Sets the description of this Application.
Detailed description for the object.
:param description: The description of this Application.
:type: str
"""
self._description = description
@property
def application_version(self):
"""
Gets the application_version of this Application.
The application's version.
:return: The application_version of this Application.
:rtype: int
"""
return self._application_version
@application_version.setter
def application_version(self, application_version):
"""
Sets the application_version of this Application.
The application's version.
:param application_version: The application_version of this Application.
:type: int
"""
self._application_version = application_version
@property
def object_status(self):
"""
Gets the object_status of this Application.
The status of an object that can be set to value 1 for shallow references across objects, other values reserved.
:return: The object_status of this Application.
:rtype: int
"""
return self._object_status
@object_status.setter
def object_status(self, object_status):
"""
Sets the object_status of this Application.
The status of an object that can be set to value 1 for shallow references across objects, other values reserved.
:param object_status: The object_status of this Application.
:type: int
"""
self._object_status = object_status
@property
def identifier(self):
"""
Gets the identifier of this Application.
Value can only contain upper case letters, underscore, and numbers. It should begin with upper case letter or underscore. The value can be modified.
:return: The identifier of this Application.
:rtype: str
"""
return self._identifier
@identifier.setter
def identifier(self, identifier):
"""
Sets the identifier of this Application.
Value can only contain upper case letters, underscore, and numbers. It should begin with upper case letter or underscore. The value can be modified.
:param identifier: The identifier of this Application.
:type: str
"""
self._identifier = identifier
@property
def parent_ref(self):
"""
Gets the parent_ref of this Application.
:return: The parent_ref of this Application.
:rtype: oci.data_integration.models.ParentReference
"""
return self._parent_ref
@parent_ref.setter
def parent_ref(self, parent_ref):
"""
Sets the parent_ref of this Application.
:param parent_ref: The parent_ref of this Application.
:type: oci.data_integration.models.ParentReference
"""
self._parent_ref = parent_ref
@property
def object_version(self):
"""
Gets the object_version of this Application.
The version of the object that is used to track changes in the object instance.
:return: The object_version of this Application.
:rtype: int
"""
return self._object_version
@object_version.setter
def object_version(self, object_version):
"""
Sets the object_version of this Application.
The version of the object that is used to track changes in the object instance.
:param object_version: The object_version of this Application.
:type: int
"""
self._object_version = object_version
@property
def dependent_object_metadata(self):
"""
Gets the dependent_object_metadata of this Application.
A list of dependent objects in this patch.
:return: The dependent_object_metadata of this Application.
:rtype: list[oci.data_integration.models.PatchObjectMetadata]
"""
return self._dependent_object_metadata
@dependent_object_metadata.setter
def dependent_object_metadata(self, dependent_object_metadata):
"""
Sets the dependent_object_metadata of this Application.
A list of dependent objects in this patch.
:param dependent_object_metadata: The dependent_object_metadata of this Application.
:type: list[oci.data_integration.models.PatchObjectMetadata]
"""
self._dependent_object_metadata = dependent_object_metadata
@property
def published_object_metadata(self):
"""
Gets the published_object_metadata of this Application.
A list of objects that are published or unpublished in this patch.
:return: The published_object_metadata of this Application.
:rtype: dict(str, PatchObjectMetadata)
"""
return self._published_object_metadata
@published_object_metadata.setter
def published_object_metadata(self, published_object_metadata):
"""
Sets the published_object_metadata of this Application.
A list of objects that are published or unpublished in this patch.
:param published_object_metadata: The published_object_metadata of this Application.
:type: dict(str, PatchObjectMetadata)
"""
self._published_object_metadata = published_object_metadata
@property
def source_application_info(self):
"""
Gets the source_application_info of this Application.
:return: The source_application_info of this Application.
:rtype: oci.data_integration.models.SourceApplicationInfo
"""
return self._source_application_info
@source_application_info.setter
def source_application_info(self, source_application_info):
"""
Sets the source_application_info of this Application.
:param source_application_info: The source_application_info of this Application.
:type: oci.data_integration.models.SourceApplicationInfo
"""
self._source_application_info = source_application_info
@property
def time_patched(self):
"""
Gets the time_patched of this Application.
The date and time the application was patched, in the timestamp format defined by RFC3339.
:return: The time_patched of this Application.
:rtype: datetime
"""
return self._time_patched
@time_patched.setter
def time_patched(self, time_patched):
"""
Sets the time_patched of this Application.
The date and time the application was patched, in the timestamp format defined by RFC3339.
:param time_patched: The time_patched of this Application.
:type: datetime
"""
self._time_patched = time_patched
@property
def metadata(self):
"""
Gets the metadata of this Application.
:return: The metadata of this Application.
:rtype: oci.data_integration.models.ObjectMetadata
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""
Sets the metadata of this Application.
:param metadata: The metadata of this Application.
:type: oci.data_integration.models.ObjectMetadata
"""
self._metadata = metadata
@property
def key_map(self):
"""
Gets the key_map of this Application.
A key map. If provided, key is replaced with generated key. This structure provides mapping between user provided key and generated key.
:return: The key_map of this Application.
:rtype: dict(str, str)
"""
return self._key_map
@key_map.setter
def key_map(self, key_map):
"""
Sets the key_map of this Application.
A key map. If provided, key is replaced with generated key. This structure provides mapping between user provided key and generated key.
:param key_map: The key_map of this Application.
:type: dict(str, str)
"""
self._key_map = key_map
def __repr__(self):
return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
|
py | 1a33965c63a8df5ca0d8d0f254dda8b9f3dcd1ce | """Procedures to build trajectories for algorithms in the HMC family.
To propose a new state, algorithms in the HMC family generally proceed by [1]_:
1. Sampling a trajectory starting from the initial point;
2. Sampling a new state from this sampled trajectory.
Step (1) ensures that the process is reversible and thus that detailed balance
is respected. The traditional implementation of HMC does not sample a
trajectory, but instead takes a fixed number of steps in the same direction and
flips the momentum of the last state.
We distinguish here between two different methods to sample trajectories: static
and dynamic sampling. In the static setting we sample trajectories with a fixed
number of steps, while in the dynamic setting the total number of steps is
determined by a dynamic termination criterion. Traditional HMC falls in the
former category, NUTS in the latter.
There are also two methods to sample proposals from these trajectories. In the
static setting we first build the trajectory and then sample a proposal from
this trajectory. In the progressive setting we update the proposal as the
trajectory is being sampled. While the former is faster, we risk saturating the
memory by keeping states that will subsequently be discarded.
References
----------
.. [1]: Betancourt, Michael. "A conceptual introduction to Hamiltonian Monte Carlo." arXiv preprint arXiv:1701.02434 (2017).
"""
from typing import Callable, NamedTuple, Tuple
import jax
import jax.numpy as jnp
from blackjax.inference.hmc.integrators import IntegratorState
from blackjax.inference.hmc.proposal import (
Proposal,
progressive_biased_sampling,
progressive_uniform_sampling,
proposal_generator,
)
from blackjax.types import PRNGKey, PyTree
class Trajectory(NamedTuple):
leftmost_state: IntegratorState
rightmost_state: IntegratorState
momentum_sum: PyTree
num_states: int
def append_to_trajectory(trajectory: Trajectory, state: IntegratorState) -> Trajectory:
"""Append a state to the (right of the) trajectory to form a new trajectory."""
momentum_sum = jax.tree_util.tree_multimap(
jnp.add, trajectory.momentum_sum, state.momentum
)
return Trajectory(
trajectory.leftmost_state, state, momentum_sum, trajectory.num_states + 1
)
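# The running momentum_sum is maintained so that the generalized U-turn criterion can be
# evaluated against the momenta at both ends of the trajectory (see the uturn_check_fn
# calls further down in this module).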
def reorder_trajectories(
direction: int, trajectory: Trajectory, new_trajectory: Trajectory
) -> Tuple[Trajectory, Trajectory]:
"""Order the two trajectories depending on the direction."""
return jax.lax.cond(
direction > 0,
lambda _: (
trajectory,
new_trajectory,
),
lambda _: (
new_trajectory,
trajectory,
),
operand=None,
)
def merge_trajectories(left_trajectory: Trajectory, right_trajectory: Trajectory):
momentum_sum = jax.tree_util.tree_multimap(
jnp.add, left_trajectory.momentum_sum, right_trajectory.momentum_sum
)
return Trajectory(
left_trajectory.leftmost_state,
right_trajectory.rightmost_state,
momentum_sum,
left_trajectory.num_states + right_trajectory.num_states,
)
# -------------------------------------------------------------------
# Integration
#
# Generating samples by choosing a direction and running the integrator
# several times along this direction. Distinct from sampling.
# -------------------------------------------------------------------
def static_integration(
integrator: Callable,
step_size: float,
num_integration_steps: int,
direction: int = 1,
) -> Callable:
"""Generate a trajectory by integrating several times in one direction."""
directed_step_size = direction * step_size
def integrate(initial_state: IntegratorState) -> IntegratorState:
def one_step(state, _):
state = integrator(state, directed_step_size)
return state, state
last_state, _ = jax.lax.scan(
one_step, initial_state, jnp.arange(num_integration_steps)
)
return last_state
return integrate
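# Usage sketch (an assumption, not part of the original module): given a symplectic
# integrator built elsewhere in blackjax, a fixed-length trajectory sampler could be
# created and applied to an IntegratorState roughly as
#   integrate = static_integration(integrator, step_size=1e-3, num_integration_steps=10)
#   last_state = integrate(initial_state)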
class DynamicIntegrationState(NamedTuple):
step: int
proposal: Proposal
trajectory: Trajectory
termination_state: NamedTuple
def dynamic_progressive_integration(
integrator: Callable,
kinetic_energy: Callable,
update_termination_state: Callable,
is_criterion_met: Callable,
divergence_threshold: float,
):
"""Integrate a trajectory and update the proposal sequentially in one direction
until the termination criterion is met.
Parameters
----------
integrator
The symplectic integrator used to integrate the hamiltonian trajectory.
kinetic_energy
Function to compute the current value of the kinetic energy.
update_termination_state
Updates the state of the termination mechanism.
is_criterion_met
Determines whether the termination criterion has been met.
divergence_threshold
Value of the difference of energy between two consecutive states above which we say a transition is divergent.
"""
_, generate_proposal = proposal_generator(kinetic_energy, divergence_threshold)
sample_proposal = progressive_uniform_sampling
def integrate(
rng_key: PRNGKey,
initial_state: IntegratorState,
direction: int,
termination_state,
max_num_steps: int,
step_size,
initial_energy,
):
"""Integrate the trajectory starting from `initial_state` and update
the proposal sequentially until the termination criterion is met.
Parameters
----------
rng_key
Key used by JAX's random number generator.
initial_state
The initial state from which we start expanding the trajectory.
direction int in {-1, 1}
The direction in which to expand the trajectory.
termination_state
The state that keeps track of the information needed for the termination criterion.
max_num_steps
The maximum number of integration steps. The expansion will stop
when this number is reached if the termination criterion has not
been met.
step_size
The step size of the symplectic integrator.
initial_energy
            Initial energy H0 of the HMC step (not to be confused with the initial energy of the subtree)
"""
def do_keep_integrating(loop_state):
"""Decide whether we should continue integrating the trajectory"""
_, integration_state, (is_diverging, has_terminated) = loop_state
return (
(integration_state.step < max_num_steps)
& ~has_terminated
& ~is_diverging
)
def add_one_state(loop_state):
rng_key, integration_state, _ = loop_state
step, proposal, trajectory, termination_state = integration_state
rng_key, proposal_key = jax.random.split(rng_key)
new_state = integrator(trajectory.rightmost_state, direction * step_size)
new_proposal, is_diverging = generate_proposal(initial_energy, new_state)
# At step 0, we always accept the proposal, since we
# take one step to get the leftmost state of the tree.
(new_trajectory, sampled_proposal) = jax.lax.cond(
step == 0,
lambda _: (
                    Trajectory(new_state, new_state, new_state.momentum, 1),
new_proposal,
),
lambda _: (
append_to_trajectory(trajectory, new_state),
sample_proposal(proposal_key, proposal, new_proposal),
),
operand=None,
)
new_termination_state = update_termination_state(
termination_state, new_trajectory.momentum_sum, new_state.momentum, step
)
has_terminated = is_criterion_met(
new_termination_state, new_trajectory.momentum_sum, new_state.momentum
)
new_integration_state = DynamicIntegrationState(
step + 1,
sampled_proposal,
new_trajectory,
new_termination_state,
)
return (rng_key, new_integration_state, (is_diverging, has_terminated))
proposal_placeholder, _ = generate_proposal(initial_energy, initial_state)
trajectory_placeholder = Trajectory(
initial_state, initial_state, initial_state.momentum, 0
)
integration_state_placeholder = DynamicIntegrationState(
0,
proposal_placeholder,
trajectory_placeholder,
termination_state,
)
_, integration_state, (is_diverging, has_terminated) = jax.lax.while_loop(
do_keep_integrating,
add_one_state,
(rng_key, integration_state_placeholder, (False, False)),
)
step, proposal, trajectory, termination_state = integration_state
        # In the while_loop we always extend the trajectory to the right, so swap the
        # endpoints here if we were actually integrating in the negative direction.
new_trajectory = jax.lax.cond(
direction > 0,
lambda _: trajectory,
lambda _: Trajectory(
trajectory.rightmost_state,
trajectory.leftmost_state,
trajectory.momentum_sum,
trajectory.num_states,
),
operand=None,
)
return (
proposal,
new_trajectory,
termination_state,
is_diverging,
has_terminated,
)
return integrate
def dynamic_recursive_integration(
integrator: Callable,
kinetic_energy: Callable,
uturn_check_fn: Callable,
divergence_threshold: float,
use_robust_uturn_check: bool = False,
):
"""Integrate a trajectory and update the proposal recursively in Python
until the termination criterion is met.
This is the implementation of Algorithm 6 from [1] with multinomial sampling.
    The implementation here is mostly for validating the progressive implementation
    to make sure the two are equivalent. The recursive implementation should not
    be used for actual sampling, as it cannot be jitted and is thus likely to be slow.
Parameters
----------
integrator
The symplectic integrator used to integrate the hamiltonian trajectory.
kinetic_energy
Function to compute the current value of the kinetic energy.
uturn_check_fn
Determines whether the termination criterion has been met.
divergence_threshold
Value of the difference of energy between two consecutive states above which we say a transition is divergent.
use_robust_uturn_check
        Bool to indicate whether to perform an additional U-turn check between two trajectories.
References
----------
.. [1]: Hoffman, Matthew D., and Andrew Gelman. "The No-U-Turn sampler: adaptively setting path lengths in Hamiltonian Monte Carlo." J. Mach. Learn. Res. 15.1 (2014): 1593-1623.
"""
_, generate_proposal = proposal_generator(kinetic_energy, divergence_threshold)
sample_proposal = progressive_uniform_sampling
def buildtree_integrate(
rng_key: PRNGKey,
initial_state: IntegratorState,
direction: int,
tree_depth: int,
step_size,
initial_energy: float,
):
"""Integrate the trajectory starting from `initial_state` and update
the proposal recursively with tree doubling until the termination criterion is met.
        The function `buildtree_integrate` calls itself for tree_depth > 0, thus invoking
the recursive scheme that builds a trajectory by doubling a binary tree.
Parameters
----------
rng_key
Key used by JAX's random number generator.
initial_state
The initial state from which we start expanding the trajectory.
direction int in {-1, 1}
The direction in which to expand the trajectory.
tree_depth
The depth of the binary tree doubling.
step_size
The step size of the symplectic integrator.
initial_energy
            Initial energy H0 of the HMC step (not to be confused with the initial energy of the subtree)
"""
if tree_depth == 0:
# Base case - take one leapfrog step in the direction v.
next_state = integrator(initial_state, direction * step_size)
new_proposal, is_diverging = generate_proposal(initial_energy, next_state)
trajectory = Trajectory(next_state, next_state, next_state.momentum, 1)
return (
rng_key,
new_proposal,
trajectory,
is_diverging,
False,
)
else:
(
rng_key,
proposal,
trajectory,
is_diverging,
is_turning,
) = buildtree_integrate(
rng_key,
initial_state,
direction,
tree_depth - 1,
step_size,
initial_energy,
)
# Note that is_diverging and is_turning is inplace updated
if ~is_diverging & ~is_turning:
start_state = jax.lax.cond(
direction > 0,
lambda _: trajectory.rightmost_state,
lambda _: trajectory.leftmost_state,
operand=None,
)
(
rng_key,
new_proposal,
new_trajectory,
is_diverging,
is_turning,
) = buildtree_integrate(
rng_key,
start_state,
direction,
tree_depth - 1,
step_size,
initial_energy,
)
left_trajectory, right_trajectory = reorder_trajectories(
direction, trajectory, new_trajectory
)
trajectory = merge_trajectories(left_trajectory, right_trajectory)
if ~is_turning:
is_turning = uturn_check_fn(
trajectory.leftmost_state.momentum,
trajectory.rightmost_state.momentum,
trajectory.momentum_sum,
)
if use_robust_uturn_check & (tree_depth - 1 > 0):
momentum_sum_left = jax.tree_util.tree_multimap(
jnp.add,
left_trajectory.momentum_sum,
right_trajectory.leftmost_state.momentum,
)
is_turning_left = uturn_check_fn(
left_trajectory.leftmost_state.momentum,
right_trajectory.leftmost_state.momentum,
momentum_sum_left,
)
momentum_sum_right = jax.tree_util.tree_multimap(
jnp.add,
left_trajectory.rightmost_state.momentum,
right_trajectory.momentum_sum,
)
is_turning_right = uturn_check_fn(
left_trajectory.rightmost_state.momentum,
right_trajectory.rightmost_state.momentum,
momentum_sum_right,
)
is_turning = is_turning | is_turning_left | is_turning_right
rng_key, proposal_key = jax.random.split(rng_key)
proposal = sample_proposal(proposal_key, proposal, new_proposal)
return (
rng_key,
proposal,
trajectory,
is_diverging,
is_turning,
)
return buildtree_integrate
# -------------------------------------------------------------------
# Sampling
#
# Sampling a trajectory by choosing a direction at random and integrating
# the trajectory in this direction. In the simplest case we perform one
# integration step, but can also perform several as is the case in the
# NUTS algorithm.
# -------------------------------------------------------------------
class DynamicExpansionState(NamedTuple):
step: int
proposal: Proposal
trajectory: Trajectory
termination_state: NamedTuple
def dynamic_multiplicative_expansion(
trajectory_integrator: Callable,
uturn_check_fn: Callable,
step_size: float,
max_num_expansions: int = 10,
rate: int = 2,
) -> Callable:
"""Sample a trajectory and update the proposal sequentially
until the termination criterion is met.
The trajectory is sampled with the following procedure:
1. Pick a direction at random;
    2. Integrate `num_steps` steps in this direction;
3. If the integration has stopped prematurely, do not update the proposal;
4. Else if the trajectory is performing a U-turn, return current proposal;
    5. Else update the proposal, multiply `num_steps` by `rate` and repeat from (1).
Parameters
----------
trajectory_integrator
A function that runs the symplectic integrators and returns a new proposal
and the integrated trajectory.
uturn_check_fn
Function used to check the U-Turn criterion.
step_size
The step size used by the symplectic integrator.
max_num_expansions
The maximum number of trajectory expansions until the proposal is
returned.
rate
The rate of the geometrical expansion. Typically 2 in NUTS, this is why
the literature often refers to "tree doubling".
"""
proposal_sampler = progressive_biased_sampling
def expand(
rng_key: PRNGKey,
initial_expansion_state: DynamicExpansionState,
initial_energy: float,
):
def do_keep_expanding(loop_state) -> bool:
"""Determine whether we need to keep expanding the trajectory."""
_, expansion_state, (is_diverging, is_turning) = loop_state
return (
(expansion_state.step < max_num_expansions)
& ~is_diverging
& ~is_turning
)
def expand_once(loop_state):
"""Expand the current trajectory.
            At each step we draw a direction at random and build a subtrajectory,
            starting from the leftmost or rightmost point of the current trajectory,
            that is as long as the current trajectory, thereby doubling its total length.
Once that is done, possibly update the current proposal with that of
the subtrajectory.
"""
# Q: Should this function be aware of all the elements that need to
# be passed downstream?
rng_key, expansion_state, _ = loop_state
step, proposal, trajectory, termination_state = expansion_state
rng_key, direction_key, trajectory_key, proposal_key = jax.random.split(
rng_key, 4
)
# create new subtrajectory that is twice as long as the current
# trajectory.
direction = jnp.where(jax.random.bernoulli(direction_key), 1, -1)
start_state = jax.lax.cond(
direction > 0,
lambda _: trajectory.rightmost_state,
lambda _: trajectory.leftmost_state,
operand=None,
)
(
new_proposal,
new_trajectory,
termination_state,
is_diverging,
is_turning_subtree,
) = trajectory_integrator(
trajectory_key,
start_state,
direction,
termination_state,
rate ** step,
step_size,
initial_energy,
)
left_trajectory, right_trajectory = reorder_trajectories(
direction, trajectory, new_trajectory
)
merged_trajectory = merge_trajectories(left_trajectory, right_trajectory)
# update the proposal
# we reject proposals coming from diverging or turning subtrajectories,
            # but accumulate the average acceptance probability across the entire trajectory
def update_sum_log_p_accept(inputs):
_, proposal, new_proposal = inputs
return Proposal(
proposal.state,
proposal.energy,
proposal.weight,
jnp.logaddexp(
proposal.sum_log_p_accept, new_proposal.sum_log_p_accept
),
)
sampled_proposal = jax.lax.cond(
is_diverging | is_turning_subtree,
update_sum_log_p_accept,
lambda x: proposal_sampler(*x),
operand=(proposal_key, proposal, new_proposal),
)
is_turning = uturn_check_fn(
merged_trajectory.leftmost_state.momentum,
merged_trajectory.rightmost_state.momentum,
merged_trajectory.momentum_sum,
)
new_state = DynamicExpansionState(
step + 1, sampled_proposal, merged_trajectory, termination_state
)
info = (is_diverging, is_turning_subtree | is_turning)
return (rng_key, new_state, info)
_, expansion_state, (is_diverging, is_turning) = jax.lax.while_loop(
do_keep_expanding,
expand_once,
(rng_key, initial_expansion_state, (False, False)),
)
return expansion_state, (is_diverging, is_turning)
return expand
|
py | 1a339671dcaf77f061cecb094042cdd070aa14bd | # Copyright (C) 2020 GreenWaves Technologies, SAS
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import numpy as np
from quantization.quantization_record_base import QuantizationRecordBase
def pad(params,
in_tensors,
qrec: QuantizationRecordBase,
details=None):
del qrec, details
if params.pad_type == "zero":
return [np.pad(in_tensors[0], params.padding.numpy_pad_shape(params.in_dims[0]),
'constant', constant_values=0)]
raise NotImplementedError()
|
py | 1a3398d4f42ef1660fa40152efe1fcef0f9720c2 | from __future__ import unicode_literals
from moto.core.exceptions import RESTError
class EmrError(RESTError):
code = 400
|
py | 1a3398f48cc3dfc5109e46d335bd6c262496a526 | # function for bubble sort
def Bubble_Sort(list):
for i in range(0, len(list) - 1):
for j in range(0, len(list) - i - 1):
# do swapping
if list[j] > list[j + 1]:
list[j], list[j + 1] = list[j + 1], list[j]
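# Each pass moves the largest remaining element to the end of the list, which is why the
# inner loop can shrink by i on every pass; overall the sort runs in O(n^2) time.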
# function to print list
def Print_list(list):
for i in range(0, len(list)):
print(list[i], end = " ")
print()
list = [2, 4, 3, 1, 6, 8, 4]
Bubble_Sort(list)
Print_list(list)
# Output
# 1 2 3 4 4 6 8
|
py | 1a33998fab6579a0abcc4021db0fd51a1265b127 | # model settings
model = dict(
type='CascadeRCNN',
num_stages=3,
pretrained='modelzoo://resnet50',
backbone=dict(
type='IPN_kite',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
with_att=False,
style='pytorch'),
neck=dict(
type='kiteFPN',
in_channels=[256, 256, 256, 256, 512, 512, 512, 1024, 1024, 2048],
out_channels=256,
with_att=False,
num_outs=10),
rpn_head=dict(
type='RPNHead',
in_channels=256,
feat_channels=256,
anchor_scales=[8],
anchor_ratios=[0.5, 1.0, 2.0],
anchor_strides=[4,4/0.8333333,4/0.66666666,4/0.5,8/0.8333333,8/0.6666666,8/0.5,16/0.666666666,16/0.5,32/0.5],
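        # one stride per feature level; the ten values appear to correspond to the ten
        # kiteFPN outputs configured above (num_outs=10) -- an inference, not documented here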
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0],
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)),
bbox_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', out_size=7, sample_num=2),
out_channels=256,
featmap_strides=[4,4/0.8333333,4/0.66666666,4/0.5,8/0.8333333,8/0.6666666,8/0.5,16/0.666666666,16/0.5,32/0.5],),
bbox_head=[
dict(
type='SharedFCBBoxHead',
num_fcs=2,
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=81,
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2],
reg_class_agnostic=True,
loss_cls=dict(
type='CrossEntropyLoss',
use_sigmoid=False,
loss_weight=1.0),
loss_bbox=dict(
type='SmoothL1Loss',
beta=1.0,
loss_weight=1.0)),
dict(
type='SharedFCBBoxHead',
num_fcs=2,
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=81,
target_means=[0., 0., 0., 0.],
target_stds=[0.05, 0.05, 0.1, 0.1],
reg_class_agnostic=True,
loss_cls=dict(
type='CrossEntropyLoss',
use_sigmoid=False,
loss_weight=1.0),
loss_bbox=dict(
type='SmoothL1Loss',
beta=1.0,
loss_weight=1.0)),
dict(
type='SharedFCBBoxHead',
num_fcs=2,
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=81,
target_means=[0., 0., 0., 0.],
target_stds=[0.033, 0.033, 0.067, 0.067],
reg_class_agnostic=True,
loss_cls=dict(
type='CrossEntropyLoss',
use_sigmoid=False,
loss_weight=1.0),
loss_bbox=dict(
type='SmoothL1Loss',
beta=1.0,
loss_weight=1.0))
])
# model training and testing settings
train_cfg = dict(
rpn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
allowed_border=0,
pos_weight=-1,
debug=False),
rpn_proposal=dict(
nms_across_levels=False,
nms_pre=2000,
nms_post=2000,
max_num=2000,
nms_thr=0.7,
min_bbox_size=0),
rcnn=[
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0.5,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
pos_weight=-1,
debug=False),
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.6,
neg_iou_thr=0.6,
min_pos_iou=0.6,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
pos_weight=-1,
debug=False),
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.7,
min_pos_iou=0.7,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
pos_weight=-1,
debug=False)
],
stage_loss_weights=[1, 0.5, 0.25])
test_cfg = dict(
rpn=dict(
nms_across_levels=False,
nms_pre=1000,
nms_post=1000,
max_num=1000,
nms_thr=0.7,
min_bbox_size=0),
rcnn=dict(
score_thr=0.05, nms=dict(type='nms', iou_thr=0.5), max_per_img=100),
keep_all_stages=False)
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
data = dict(
imgs_per_gpu=4,
workers_per_gpu=2,
train=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_train2017.json',
img_prefix=data_root + 'train2017/',
img_scale=(1333, 800),
img_norm_cfg=img_norm_cfg,
size_divisor=32,
flip_ratio=0.5,
with_mask=False,
with_crowd=True,
with_label=True),
val=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
img_scale=(1333, 800),
img_norm_cfg=img_norm_cfg,
size_divisor=32,
flip_ratio=0,
with_mask=False,
with_crowd=True,
with_label=True),
test=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
img_scale=(1333, 800),
img_norm_cfg=img_norm_cfg,
size_divisor=32,
flip_ratio=0,
with_mask=False,
with_label=False,
test_mode=True))
# optimizer
optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=1.0 / 3,
step=[8, 11])
checkpoint_config = dict(interval=1)
# yapf:disable
log_config = dict(
interval=50,
hooks=[
dict(type='TextLoggerHook'),
# dict(type='TensorboardLoggerHook')
])
# yapf:enable
# runtime settings
total_epochs = 12
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/cascade_rcnn_r50_fpn_1x_kite_woatt'
load_from = None#'./ckp/cascade_rcnn_r50_fpn_1x_20190501-3b6211ab.pth'
resume_from = None
workflow = [('train', 1)]
|
py | 1a339a46892fd0b443ef841cf869c1620c88b44d | import os
from PIL import Image, ImageDraw, ImageColor, ImageOps
from skimage.feature import hog
import numpy as np
def sliding_window(image, stepSize, windowSize):
for y in range(0, image.size[1], stepSize):
for x in range(0, image.size[0], stepSize):
# If the current crop would be outside of the image, skip it.
# Else, PIL will add a black part of the image, which will confuse the white percentage threshold and try to classify
# a box which isn't part of the original image.
            if (x + windowSize[0]) > image.size[0] or (y + windowSize[1]) > image.size[1]:
continue
yield (x, y, image.crop([x, y, x + windowSize[1], y + windowSize[0]]))
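# Usage sketch (an assumption, not part of the original module): scan an image in 20x20
# crops with a 10-pixel step, e.g.
#   for x, y, window in sliding_window(img, stepSize=10, windowSize=(20, 20)):
#       ...  # classify `window` and mark hits with draw_red_square(x, y, img)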
def draw_red_square(x, y, target_image):
draw = ImageDraw.Draw(target_image)
draw.rectangle((x,y) + (x + 20, y + 20), outline="#ff0000")
return target_image
def create_dump_folder_for_images():
if os.path.exists("./dump"):
return
print('Creating dump directory for output images')
try:
os.mkdir("./dump")
print("Successfully created dump folder")
except OSError:
print("Could not create a dump folder. Please create one in the same path as this file")
def get_image_as_array(filepath, use_hog, expand_inverted):
img = Image.open(filepath)
img = img.convert(mode="L")
    img = img.resize((20, 20))  # resize() returns a new image; it does not modify in place
return convert_image_to_array(img, use_hog, expand_inverted)
# General function for converting an image into a flat array representation.
# Optionally inverts the image and/or extracts HOG features from it.
# The result is flattened into a 1-D float array.
# Returns an empty array if the (possibly inverted) image contains no non-zero pixels,
# i.e. a completely white image when inversion is enabled.
def convert_image_to_array(img, use_hog, expand_inverted):
if expand_inverted:
img = ImageOps.invert(img)
if use_hog:
img = hog(img, orientations=8, pixels_per_cell=(4, 4), cells_per_block=(4, 4), block_norm='L2', feature_vector=True)
list_image = np.array(img, dtype=float).flatten()
if list_image.max() == 0:
return []
return list_image
# Returns the percentage of the image consisting of completely white spots.
# This is used to set a threshold for which windows should be considered.
def get_percentage_of_white(img):
list_image = np.array(img, dtype=float).flatten()
numberOfWhite = np.count_nonzero(list_image == 255.)
return numberOfWhite/400 |
py | 1a339b7496c6e6b86c038b9be98e6c1128adac84 | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_config import cfg
from heat.common import template_format
from heat.engine import environment
from heat.engine import resource as res
from heat.engine import stack as parser
from heat.engine import template as templatem
from heat.objects import raw_template as raw_template_object
from heat.objects import resource as resource_objects
from heat.objects import stack as stack_object
from heat.objects import sync_point as sync_point_object
from heat.rpc import worker_client
from heat.tests import common
from heat.tests.engine import tools
from heat.tests import utils
@mock.patch.object(worker_client.WorkerClient, 'check_resource')
class StackConvergenceCreateUpdateDeleteTest(common.HeatTestCase):
def setUp(self):
super(StackConvergenceCreateUpdateDeleteTest, self).setUp()
cfg.CONF.set_override('convergence_engine', True)
self.stack = None
@mock.patch.object(parser.Stack, 'mark_complete')
def test_converge_empty_template(self, mock_mc, mock_cr):
empty_tmpl = templatem.Template.create_empty_template()
stack = parser.Stack(utils.dummy_context(), 'empty_tmpl_stack',
empty_tmpl, convergence=True)
stack.store()
stack.converge_stack(template=stack.t, action=stack.CREATE)
self.assertFalse(mock_cr.called)
mock_mc.assert_called_once_with(stack.current_traversal)
def test_conv_wordpress_single_instance_stack_create(self, mock_cr):
stack = tools.get_stack('test_stack', utils.dummy_context(),
convergence=True)
stack.store() # usually, stack is stored before converge is called
stack.converge_stack(template=stack.t, action=stack.CREATE)
self.assertIsNone(stack.ext_rsrcs_db)
self.assertEqual('Dependencies([((1, True), None)])',
repr(stack.convergence_dependencies))
stack_db = stack_object.Stack.get_by_id(stack.context, stack.id)
self.assertIsNotNone(stack_db.current_traversal)
self.assertIsNotNone(stack_db.raw_template_id)
self.assertIsNone(stack_db.prev_raw_template_id)
self.assertEqual(stack_db.convergence, True)
self.assertEqual({'edges': [[[1, True], None]]}, stack_db.current_deps)
leaves = stack.convergence_dependencies.leaves()
expected_calls = []
for rsrc_id, is_update in leaves:
expected_calls.append(
mock.call.worker_client.WorkerClient.check_resource(
stack.context, rsrc_id, stack.current_traversal,
{'input_data': {}},
is_update, None))
self.assertEqual(expected_calls, mock_cr.mock_calls)
def test_conv_string_five_instance_stack_create(self, mock_cr):
stack = tools.get_stack('test_stack', utils.dummy_context(),
template=tools.string_template_five,
convergence=True)
stack.store()
stack.converge_stack(template=stack.t, action=stack.CREATE)
self.assertIsNone(stack.ext_rsrcs_db)
self.assertEqual('Dependencies(['
'((1, True), (3, True)), '
'((2, True), (3, True)), '
'((3, True), (4, True)), '
'((3, True), (5, True))])',
repr(stack.convergence_dependencies))
stack_db = stack_object.Stack.get_by_id(stack.context, stack.id)
self.assertIsNotNone(stack_db.current_traversal)
self.assertIsNotNone(stack_db.raw_template_id)
self.assertIsNone(stack_db.prev_raw_template_id)
self.assertEqual(stack_db.convergence, True)
self.assertEqual(sorted([[[3, True], [5, True]], # C, A
[[3, True], [4, True]], # C, B
[[1, True], [3, True]], # E, C
[[2, True], [3, True]]]), # D, C
sorted(stack_db.current_deps['edges']))
# check if needed_by is stored properly
expected_needed_by = {'A': [3], 'B': [3],
'C': [1, 2],
'D': [], 'E': []}
rsrcs_db = resource_objects.Resource.get_all_by_stack(
stack_db._context, stack_db.id
)
self.assertEqual(5, len(rsrcs_db))
for rsrc_name, rsrc_obj in rsrcs_db.items():
self.assertEqual(sorted(expected_needed_by[rsrc_name]),
sorted(rsrc_obj.needed_by))
self.assertEqual(stack_db.raw_template_id,
rsrc_obj.current_template_id)
# check if sync_points were stored
for entity_id in [5, 4, 3, 2, 1, stack_db.id]:
sync_point = sync_point_object.SyncPoint.get_by_key(
stack_db._context, entity_id, stack_db.current_traversal, True
)
self.assertIsNotNone(sync_point)
self.assertEqual(stack_db.id, sync_point.stack_id)
leaves = stack.convergence_dependencies.leaves()
expected_calls = []
for rsrc_id, is_update in leaves:
expected_calls.append(
mock.call.worker_client.WorkerClient.check_resource(
stack.context, rsrc_id, stack.current_traversal,
{'input_data': {}},
is_update, None))
self.assertEqual(expected_calls, mock_cr.mock_calls)
def _mock_convg_db_update_requires(self, key_id=False):
"""Updates requires column of resources.
Required for testing the generation of convergence dependency graph
on an update.
"""
requires = dict()
for rsrc_id, is_update in self.stack.convergence_dependencies:
if is_update:
reqs = self.stack.convergence_dependencies.requires((
rsrc_id, is_update))
requires[rsrc_id] = list({id for id, is_update in reqs})
rsrcs_db = resource_objects.Resource.get_all_by_stack(
self.stack.context, self.stack.id, key_id=key_id)
for rsrc_id, rsrc in rsrcs_db.items():
if rsrc.id in requires:
rsrcs_db[rsrc_id].requires = requires[rsrc.id]
return rsrcs_db
def test_conv_string_five_instance_stack_update(self, mock_cr):
stack = tools.get_stack('test_stack', utils.dummy_context(),
template=tools.string_template_five,
convergence=True)
stack.store()
# create stack
stack.converge_stack(template=stack.t, action=stack.CREATE)
curr_stack_db = stack_object.Stack.get_by_id(stack.context, stack.id)
curr_stack = parser.Stack.load(curr_stack_db._context,
stack=curr_stack_db)
# update stack with new template
t2 = template_format.parse(tools.string_template_five_update)
template2 = templatem.Template(
t2, env=environment.Environment({'KeyName2': 'test2'}))
# on our previous create_complete, worker would have updated the
# rsrc.requires. Mock the same behavior here.
self.stack = stack
with mock.patch.object(
parser.Stack, '_db_resources_get',
side_effect=self._mock_convg_db_update_requires):
curr_stack.converge_stack(template=template2, action=stack.UPDATE)
self.assertIsNotNone(curr_stack.ext_rsrcs_db)
self.assertEqual('Dependencies(['
'((3, False), (1, False)), '
'((3, False), (2, False)), '
'((4, False), (3, False)), '
'((4, False), (4, True)), '
'((5, False), (3, False)), '
'((5, False), (5, True)), '
'((6, True), (8, True)), '
'((7, True), (8, True)), '
'((8, True), (4, True)), '
'((8, True), (5, True))])',
repr(curr_stack.convergence_dependencies))
stack_db = stack_object.Stack.get_by_id(curr_stack.context,
curr_stack.id)
self.assertIsNotNone(stack_db.raw_template_id)
self.assertIsNotNone(stack_db.current_traversal)
self.assertIsNotNone(stack_db.prev_raw_template_id)
self.assertEqual(True, stack_db.convergence)
self.assertEqual(sorted([[[7, True], [8, True]],
[[8, True], [5, True]],
[[8, True], [4, True]],
[[6, True], [8, True]],
[[3, False], [2, False]],
[[3, False], [1, False]],
[[5, False], [3, False]],
[[5, False], [5, True]],
[[4, False], [3, False]],
[[4, False], [4, True]]]),
sorted(stack_db.current_deps['edges']))
'''
To visualize:
G(7, True) H(6, True)
\ /
\ / B(4, False) A(5, False)
\ / / \ / /
\ / / /
F(8, True) / / \ /
/ \ / / C(3, False)
/ \ / / \
/ / \ /
/ / \ / / \
B(4, True) A(5, True) D(2, False) E(1, False)
Leaves are at the bottom
'''
# check if needed_by are stored properly
# For A & B:
# needed_by=C, F
expected_needed_by = {'A': [3, 8], 'B': [3, 8],
'C': [1, 2],
'D': [], 'E': [],
'F': [6, 7],
'G': [], 'H': []}
rsrcs_db = resource_objects.Resource.get_all_by_stack(
stack_db._context, stack_db.id
)
self.assertEqual(8, len(rsrcs_db))
for rsrc_name, rsrc_obj in rsrcs_db.items():
self.assertEqual(sorted(expected_needed_by[rsrc_name]),
sorted(rsrc_obj.needed_by))
# check if sync_points are created for forward traversal
# [F, H, G, A, B, Stack]
for entity_id in [8, 7, 6, 5, 4, stack_db.id]:
sync_point = sync_point_object.SyncPoint.get_by_key(
stack_db._context, entity_id, stack_db.current_traversal, True
)
self.assertIsNotNone(sync_point)
self.assertEqual(stack_db.id, sync_point.stack_id)
# check if sync_points are created for cleanup traversal
# [A, B, C, D, E]
for entity_id in [5, 4, 3, 2, 1]:
sync_point = sync_point_object.SyncPoint.get_by_key(
stack_db._context, entity_id, stack_db.current_traversal, False
)
self.assertIsNotNone(sync_point)
self.assertEqual(stack_db.id, sync_point.stack_id)
leaves = stack.convergence_dependencies.leaves()
expected_calls = []
for rsrc_id, is_update in leaves:
expected_calls.append(
mock.call.worker_client.WorkerClient.check_resource(
stack.context, rsrc_id, stack.current_traversal,
{'input_data': {}},
is_update, None))
leaves = curr_stack.convergence_dependencies.leaves()
for rsrc_id, is_update in leaves:
expected_calls.append(
mock.call.worker_client.WorkerClient.check_resource(
curr_stack.context, rsrc_id, curr_stack.current_traversal,
{'input_data': {}},
is_update, None))
self.assertEqual(expected_calls, mock_cr.mock_calls)
def test_conv_empty_template_stack_update_delete(self, mock_cr):
stack = tools.get_stack('test_stack', utils.dummy_context(),
template=tools.string_template_five,
convergence=True)
stack.store()
# create stack
stack.converge_stack(template=stack.t, action=stack.CREATE)
# update stack with new template
template2 = templatem.Template.create_empty_template(
version=stack.t.version)
curr_stack_db = stack_object.Stack.get_by_id(stack.context, stack.id)
curr_stack = parser.Stack.load(curr_stack_db._context,
stack=curr_stack_db)
# on our previous create_complete, worker would have updated the
# rsrc.requires. Mock the same behavior here.
self.stack = stack
with mock.patch.object(
parser.Stack, '_db_resources_get',
side_effect=self._mock_convg_db_update_requires):
curr_stack.converge_stack(template=template2, action=stack.DELETE)
self.assertIsNotNone(curr_stack.ext_rsrcs_db)
self.assertEqual('Dependencies(['
'((3, False), (1, False)), '
'((3, False), (2, False)), '
'((4, False), (3, False)), '
'((5, False), (3, False))])',
repr(curr_stack.convergence_dependencies))
stack_db = stack_object.Stack.get_by_id(curr_stack.context,
curr_stack.id)
self.assertIsNotNone(stack_db.current_traversal)
self.assertIsNotNone(stack_db.prev_raw_template_id)
self.assertEqual(sorted([[[3, False], [2, False]],
[[3, False], [1, False]],
[[5, False], [3, False]],
[[4, False], [3, False]]]),
sorted(stack_db.current_deps['edges']))
expected_needed_by = {'A': [3], 'B': [3],
'C': [1, 2],
'D': [], 'E': []}
rsrcs_db = resource_objects.Resource.get_all_by_stack(
stack_db._context, stack_db.id
)
self.assertEqual(5, len(rsrcs_db))
for rsrc_name, rsrc_obj in rsrcs_db.items():
self.assertEqual(sorted(expected_needed_by[rsrc_name]),
sorted(rsrc_obj.needed_by))
# check if sync_points are created for cleanup traversal
# [A, B, C, D, E, Stack]
for entity_id in [5, 4, 3, 2, 1, stack_db.id]:
is_update = False
if entity_id == stack_db.id:
is_update = True
sync_point = sync_point_object.SyncPoint.get_by_key(
stack_db._context, entity_id, stack_db.current_traversal,
is_update)
self.assertIsNotNone(sync_point, 'entity %s' % entity_id)
self.assertEqual(stack_db.id, sync_point.stack_id)
leaves = stack.convergence_dependencies.leaves()
expected_calls = []
for rsrc_id, is_update in leaves:
expected_calls.append(
mock.call.worker_client.WorkerClient.check_resource(
stack.context, rsrc_id, stack.current_traversal,
{'input_data': {}},
is_update, None))
leaves = curr_stack.convergence_dependencies.leaves()
for rsrc_id, is_update in leaves:
expected_calls.append(
mock.call.worker_client.WorkerClient.check_resource(
curr_stack.context, rsrc_id, curr_stack.current_traversal,
{'input_data': {}},
is_update, None))
self.assertEqual(expected_calls, mock_cr.mock_calls)
def test_mark_complete_purges_db(self, mock_cr):
stack = tools.get_stack('test_stack', utils.dummy_context(),
template=tools.string_template_five,
convergence=True)
stack.store()
stack.purge_db = mock.Mock()
stack.mark_complete(stack.current_traversal)
self.assertTrue(stack.purge_db.called)
@mock.patch.object(raw_template_object.RawTemplate, 'delete')
def test_purge_db_deletes_previous_template(self, mock_tmpl_delete,
mock_cr):
stack = tools.get_stack('test_stack', utils.dummy_context(),
template=tools.string_template_five,
convergence=True)
stack.prev_raw_template_id = 10
stack.purge_db()
self.assertTrue(mock_tmpl_delete.called)
@mock.patch.object(raw_template_object.RawTemplate, 'delete')
def test_purge_db_does_not_delete_previous_template_when_stack_fails(
self, mock_tmpl_delete, mock_cr):
stack = tools.get_stack('test_stack', utils.dummy_context(),
template=tools.string_template_five,
convergence=True)
stack.status = stack.FAILED
stack.purge_db()
self.assertFalse(mock_tmpl_delete.called)
def test_purge_db_deletes_sync_points(self, mock_cr):
stack = tools.get_stack('test_stack', utils.dummy_context(),
template=tools.string_template_five,
convergence=True)
stack.store()
stack.purge_db()
rows = sync_point_object.SyncPoint.delete_all_by_stack_and_traversal(
stack.context, stack.id, stack.current_traversal)
self.assertEqual(0, rows)
@mock.patch.object(stack_object.Stack, 'delete')
def test_purge_db_deletes_stack_for_deleted_stack(self, mock_stack_delete,
mock_cr):
stack = tools.get_stack('test_stack', utils.dummy_context(),
template=tools.string_template_five,
convergence=True)
stack.store()
stack.state_set(stack.DELETE, stack.COMPLETE, 'test reason')
stack.purge_db()
self.assertTrue(mock_stack_delete.called)
def test_get_best_existing_db_resource(self, mock_cr):
stack = tools.get_stack('test_stack', utils.dummy_context(),
template=tools.string_template_five,
convergence=True)
stack.store()
stack.prev_raw_template_id = 2
stack.t.id = 1
dummy_res = stack.resources['A']
a_res_2 = res.Resource('A', dummy_res.t, stack)
a_res_2.current_template_id = 2
a_res_2.id = 2
a_res_3 = res.Resource('A', dummy_res.t, stack)
a_res_3.current_template_id = 3
a_res_3.id = 3
a_res_1 = res.Resource('A', dummy_res.t, stack)
a_res_1.current_template_id = 1
a_res_1.id = 1
existing_res = {2: a_res_2,
3: a_res_3,
1: a_res_1}
stack.ext_rsrcs_db = existing_res
best_res = stack._get_best_existing_rsrc_db('A')
# should return resource with template id 1 which is current template
self.assertEqual(a_res_1.id, best_res.id)
# no resource with current template id as 1
existing_res = {2: a_res_2,
3: a_res_3}
stack.ext_rsrcs_db = existing_res
best_res = stack._get_best_existing_rsrc_db('A')
# should return resource with template id 2 which is prev template
self.assertEqual(a_res_2.id, best_res.id)
class TestConvgStackRollback(common.HeatTestCase):
def setUp(self):
super(TestConvgStackRollback, self).setUp()
self.ctx = utils.dummy_context()
self.stack = tools.get_stack('test_stack_rollback', self.ctx,
template=tools.string_template_five,
convergence=True)
def test_trigger_rollback_uses_old_template_if_available(self):
# create a template and assign to stack as previous template
t = template_format.parse(tools.wp_template)
prev_tmpl = templatem.Template(t)
prev_tmpl.store(context=self.ctx)
self.stack.prev_raw_template_id = prev_tmpl.id
# mock failure
self.stack.action = self.stack.UPDATE
self.stack.status = self.stack.FAILED
self.stack.store()
# mock converge_stack()
self.stack.converge_stack = mock.Mock()
        # call trigger_rollback
self.stack.rollback()
# Make sure stack converge is called with previous template
self.assertTrue(self.stack.converge_stack.called)
self.assertIsNone(self.stack.prev_raw_template_id)
call_args, call_kwargs = self.stack.converge_stack.call_args
template_used_for_rollback = call_args[0]
self.assertEqual(prev_tmpl.id, template_used_for_rollback.id)
def test_trigger_rollback_uses_empty_template_if_prev_tmpl_not_available(
self):
# mock create failure with no previous template
self.stack.prev_raw_template_id = None
self.stack.action = self.stack.CREATE
self.stack.status = self.stack.FAILED
self.stack.store()
# mock converge_stack()
self.stack.converge_stack = mock.Mock()
# call trigger_rollback
self.stack.rollback()
# Make sure stack converge is called with empty template
self.assertTrue(self.stack.converge_stack.called)
call_args, call_kwargs = self.stack.converge_stack.call_args
template_used_for_rollback = call_args[0]
self.assertEqual({}, template_used_for_rollback['resources'])
class TestConvgComputeDependencies(common.HeatTestCase):
def setUp(self):
super(TestConvgComputeDependencies, self).setUp()
self.ctx = utils.dummy_context()
self.stack = tools.get_stack('test_stack_convg', self.ctx,
template=tools.string_template_five,
convergence=True)
def _fake_db_resources(self, stack):
db_resources = {}
i = 0
for rsrc_name in ['E', 'D', 'C', 'B', 'A']:
i += 1
rsrc = mock.MagicMock()
rsrc.id = i
rsrc.name = rsrc_name
rsrc.current_template_id = stack.prev_raw_template_id
db_resources[i] = rsrc
db_resources[3].requires = [4, 5]
db_resources[1].requires = [3]
db_resources[2].requires = [3]
return db_resources
def test_dependencies_create_stack_without_mock(self):
self.stack.store()
self.current_resources = self.stack._update_or_store_resources()
self.stack._compute_convg_dependencies(self.stack.ext_rsrcs_db,
self.stack.dependencies,
self.current_resources)
self.assertEqual('Dependencies(['
'((1, True), (3, True)), '
'((2, True), (3, True)), '
'((3, True), (4, True)), '
'((3, True), (5, True))])',
repr(self.stack._convg_deps))
def test_dependencies_update_same_template(self):
t = template_format.parse(tools.string_template_five)
tmpl = templatem.Template(t)
self.stack.t = tmpl
self.stack.t.id = 2
self.stack.prev_raw_template_id = 1
db_resources = self._fake_db_resources(self.stack)
curr_resources = {res.name: res for id, res in db_resources.items()}
self.stack._compute_convg_dependencies(db_resources,
self.stack.dependencies,
curr_resources)
self.assertEqual('Dependencies(['
'((1, False), (1, True)), '
'((1, True), (3, True)), '
'((2, False), (2, True)), '
'((2, True), (3, True)), '
'((3, False), (1, False)), '
'((3, False), (2, False)), '
'((3, False), (3, True)), '
'((3, True), (4, True)), '
'((3, True), (5, True)), '
'((4, False), (3, False)), '
'((4, False), (4, True)), '
'((5, False), (3, False)), '
'((5, False), (5, True))])',
repr(self.stack._convg_deps))
def test_dependencies_update_new_template(self):
t = template_format.parse(tools.string_template_five_update)
tmpl = templatem.Template(t)
self.stack.t = tmpl
self.stack.t.id = 2
self.stack.prev_raw_template_id = 1
db_resources = self._fake_db_resources(self.stack)
curr_resources = {res.name: res for id, res in db_resources.items()}
# 'H', 'G', 'F' are part of new template
i = len(db_resources)
for new_rsrc in ['H', 'G', 'F']:
i += 1
rsrc = mock.MagicMock()
rsrc.name = new_rsrc
rsrc.id = i
curr_resources[new_rsrc] = rsrc
self.stack._compute_convg_dependencies(db_resources,
self.stack.dependencies,
curr_resources)
self.assertEqual('Dependencies(['
'((3, False), (1, False)), '
'((3, False), (2, False)), '
'((4, False), (3, False)), '
'((4, False), (4, True)), '
'((5, False), (3, False)), '
'((5, False), (5, True)), '
'((6, True), (8, True)), '
'((7, True), (8, True)), '
'((8, True), (4, True)), '
'((8, True), (5, True))])',
repr(self.stack._convg_deps))
def test_dependencies_update_replace_rollback(self):
t = template_format.parse(tools.string_template_five)
tmpl = templatem.Template(t)
self.stack.t = tmpl
self.stack.t.id = 1
self.stack.prev_raw_template_id = 2
db_resources = self._fake_db_resources(self.stack)
# previous resource E still exists in db.
db_resources[1].current_template_id = 1
# resource that replaced E
res = mock.MagicMock()
res.id = 6
res.name = 'E'
res.requires = [3]
res.replaces = 1
res.current_template_id = 2
db_resources[6] = res
curr_resources = {res.name: res for id, res in db_resources.items()}
# best existing resource
curr_resources['E'] = db_resources[1]
self.stack._compute_convg_dependencies(db_resources,
self.stack.dependencies,
curr_resources)
self.assertEqual('Dependencies(['
'((1, False), (1, True)), '
'((1, False), (6, False)), '
'((1, True), (3, True)), '
'((2, False), (2, True)), '
'((2, True), (3, True)), '
'((3, False), (1, False)), '
'((3, False), (2, False)), '
'((3, False), (3, True)), '
'((3, False), (6, False)), '
'((3, True), (4, True)), '
'((3, True), (5, True)), '
'((4, False), (3, False)), '
'((4, False), (4, True)), '
'((5, False), (3, False)), '
'((5, False), (5, True))])',
repr(self.stack._convg_deps))
def test_dependencies_update_delete(self):
tmpl = templatem.Template.create_empty_template(
version=self.stack.t.version)
self.stack.t = tmpl
self.stack.t.id = 2
self.stack.prev_raw_template_id = 1
db_resources = self._fake_db_resources(self.stack)
curr_resources = {res.name: res for id, res in db_resources.items()}
self.stack._compute_convg_dependencies(db_resources,
self.stack.dependencies,
curr_resources)
self.assertEqual('Dependencies(['
'((3, False), (1, False)), '
'((3, False), (2, False)), '
'((4, False), (3, False)), '
'((5, False), (3, False))])',
repr(self.stack._convg_deps))
|
py | 1a339b77d5c52bba09a0c5ceba952b874fc44098 | from unittest.mock import patch
from django.core.management import call_command
from django.db.utils import OperationalError
from django.test import TestCase
class CommandTests(TestCase):
def test_wait_for_db_ready(self):
'''Test waiting for db when db is available'''
with patch('django.db.utils.ConnectionHandler.__getitem__') as gi:
gi.return_value = True
call_command('wait_for_db')
self.assertEqual(gi.call_count, 1)
@patch('time.sleep', return_value=None)
def test_wait_for_db(self, ts):
'''Test waiting for db'''
with patch('django.db.utils.ConnectionHandler.__getitem__') as gi:
gi.side_effect = [OperationalError] * 5 + [True]
call_command('wait_for_db')
self.assertEqual(gi.call_count, 6)
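# Illustrative sketch, not part of this test module: a management command consistent with
# the behaviour exercised above. The module path (core/management/commands/wait_for_db.py)
# and the one-second retry interval are assumptions for illustration only.
#     import time
#     from django.db import connections
#     from django.db.utils import OperationalError
#     from django.core.management.base import BaseCommand
#     class Command(BaseCommand):
#         """Pause execution until the default database is available."""
#         def handle(self, *args, **options):
#             self.stdout.write('Waiting for database...')
#             db_conn = None
#             while not db_conn:
#                 try:
#                     db_conn = connections['default']
#                 except OperationalError:
#                     self.stdout.write('Database unavailable, waiting 1 second...')
#                     time.sleep(1)
#             self.stdout.write(self.style.SUCCESS('Database available!'))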
|
py | 1a339bd8d6ee09a176b6af5e24691f5e4e8f347b | #!/usr/bin/env python3
import subprocess
import re
import sys
RESULT_RE = re.compile(r'(T|F|[^ |])')
BASIC_PROPOSITION_RE = re.compile(r'([A-Za-z]+)')
REPLACEMENTS = {'~': r'\neg', '&': r'\wedge', '|': r'\vee', '<->': r'\leftrightarrow', '->': r'\rightarrow'}
wresult = subprocess.check_output(['hatt', '-e', sys.argv[1]])
result = [line.decode('UTF-8') for line in wresult.splitlines()]
del result[1] #row of --------
header_cols = re.findall(r'([A-Za-z] )+?| (.*)', result.pop(0))
expression = header_cols.pop()[1]
for k, v in REPLACEMENTS.items():
expression = expression.replace(k, v)
propositions = ['$' + x[0].strip() + '$' for x in header_cols]
print(str.format("\\begin{{tabular}}{{|{0}|c|}}", len(propositions) * 'c'))
print(r"\hline")
print(' & '.join(propositions), '& $', expression, r'$ \\')
print(r"\hline")
for line in result:
print(' & '.join(RESULT_RE.findall(line)), r'\\')
print(r"\hline")
print(r"\end{tabular}")
|
py | 1a339cc2238d50bcb8311fc9e264d74228b73e21 | # -*- coding: utf-8 -*-
############################ Copyrights and license ############################
# #
# Copyright 2012 Vincent Jacques <[email protected]> #
# Copyright 2012 Zearin <[email protected]> #
# Copyright 2013 AKFish <[email protected]> #
# Copyright 2013 Vincent Jacques <[email protected]> #
# Copyright 2013 martinqt <[email protected]> #
# Copyright 2014 Vincent Jacques <[email protected]> #
# Copyright 2016 Jannis Gebauer <[email protected]> #
# Copyright 2016 Peter Buckley <[email protected]> #
# Copyright 2018 sfdye <[email protected]> #
# #
# This file is part of PyGithub. #
# http://pygithub.readthedocs.io/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
################################################################################
import datetime
import github.GithubObject
import github.PaginatedList
import github.NamedUser
import github.Label
import six
class Milestone(github.GithubObject.CompletableGithubObject):
"""
This class represents Milestones. The reference can be found here http://developer.github.com/v3/issues/milestones/
"""
def __repr__(self):
return self.get__repr__({"number": self._number.value, "title": self._title.value})
@property
def closed_issues(self):
"""
:type: integer
"""
self._completeIfNotSet(self._closed_issues)
return self._closed_issues.value
@property
def created_at(self):
"""
:type: datetime.datetime
"""
self._completeIfNotSet(self._created_at)
return self._created_at.value
@property
def creator(self):
"""
:type: :class:`github.NamedUser.NamedUser`
"""
self._completeIfNotSet(self._creator)
return self._creator.value
@property
def description(self):
"""
:type: string
"""
self._completeIfNotSet(self._description)
return self._description.value
@property
def due_on(self):
"""
:type: datetime.datetime
"""
self._completeIfNotSet(self._due_on)
return self._due_on.value
@property
def id(self):
"""
:type: integer
"""
self._completeIfNotSet(self._id)
return self._id.value
@property
def labels_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._labels_url)
return self._labels_url.value
@property
def number(self):
"""
:type: integer
"""
self._completeIfNotSet(self._number)
return self._number.value
@property
def open_issues(self):
"""
:type: integer
"""
self._completeIfNotSet(self._open_issues)
return self._open_issues.value
@property
def state(self):
"""
:type: string
"""
self._completeIfNotSet(self._state)
return self._state.value
@property
def title(self):
"""
:type: string
"""
self._completeIfNotSet(self._title)
return self._title.value
@property
def updated_at(self):
"""
:type: datetime.datetime
"""
self._completeIfNotSet(self._updated_at)
return self._updated_at.value
@property
def url(self):
"""
:type: string
"""
self._completeIfNotSet(self._url)
return self._url.value
def delete(self):
"""
:calls: `DELETE /repos/:owner/:repo/milestones/:number <http://developer.github.com/v3/issues/milestones>`_
:rtype: None
"""
headers, data = self._requester.requestJsonAndCheck(
"DELETE",
self.url
)
def edit(self, title, state=github.GithubObject.NotSet, description=github.GithubObject.NotSet, due_on=github.GithubObject.NotSet):
"""
:calls: `PATCH /repos/:owner/:repo/milestones/:number <http://developer.github.com/v3/issues/milestones>`_
:param title: string
:param state: string
:param description: string
:param due_on: date
:rtype: None
"""
assert isinstance(title, (str, six.text_type)), title
assert state is github.GithubObject.NotSet or isinstance(state, (str, six.text_type)), state
assert description is github.GithubObject.NotSet or isinstance(description, (str, six.text_type)), description
assert due_on is github.GithubObject.NotSet or isinstance(due_on, datetime.date), due_on
post_parameters = {
"title": title,
}
if state is not github.GithubObject.NotSet:
post_parameters["state"] = state
if description is not github.GithubObject.NotSet:
post_parameters["description"] = description
if due_on is not github.GithubObject.NotSet:
post_parameters["due_on"] = due_on.strftime("%Y-%m-%d")
headers, data = self._requester.requestJsonAndCheck(
"PATCH",
self.url,
input=post_parameters
)
self._useAttributes(data)
def get_labels(self):
"""
:calls: `GET /repos/:owner/:repo/milestones/:number/labels <http://developer.github.com/v3/issues/labels>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Label.Label`
"""
return github.PaginatedList.PaginatedList(
github.Label.Label,
self._requester,
self.url + "/labels",
None
)
@property
def _identity(self):
return self.number
def _initAttributes(self):
self._closed_issues = github.GithubObject.NotSet
self._created_at = github.GithubObject.NotSet
self._creator = github.GithubObject.NotSet
self._description = github.GithubObject.NotSet
self._due_on = github.GithubObject.NotSet
self._id = github.GithubObject.NotSet
self._labels_url = github.GithubObject.NotSet
self._number = github.GithubObject.NotSet
self._open_issues = github.GithubObject.NotSet
self._state = github.GithubObject.NotSet
self._title = github.GithubObject.NotSet
self._updated_at = github.GithubObject.NotSet
self._url = github.GithubObject.NotSet
def _useAttributes(self, attributes):
if "closed_issues" in attributes: # pragma no branch
self._closed_issues = self._makeIntAttribute(attributes["closed_issues"])
if "created_at" in attributes: # pragma no branch
self._created_at = self._makeDatetimeAttribute(attributes["created_at"])
if "creator" in attributes: # pragma no branch
self._creator = self._makeClassAttribute(github.NamedUser.NamedUser, attributes["creator"])
if "description" in attributes: # pragma no branch
self._description = self._makeStringAttribute(attributes["description"])
if "due_on" in attributes: # pragma no branch
self._due_on = self._makeDatetimeAttribute(attributes["due_on"])
if "id" in attributes: # pragma no branch
self._id = self._makeIntAttribute(attributes["id"])
if "labels_url" in attributes: # pragma no branch
self._labels_url = self._makeStringAttribute(attributes["labels_url"])
if "number" in attributes: # pragma no branch
self._number = self._makeIntAttribute(attributes["number"])
if "open_issues" in attributes: # pragma no branch
self._open_issues = self._makeIntAttribute(attributes["open_issues"])
if "state" in attributes: # pragma no branch
self._state = self._makeStringAttribute(attributes["state"])
if "title" in attributes: # pragma no branch
self._title = self._makeStringAttribute(attributes["title"])
if "updated_at" in attributes: # pragma no branch
self._updated_at = self._makeDatetimeAttribute(attributes["updated_at"])
if "url" in attributes: # pragma no branch
self._url = self._makeStringAttribute(attributes["url"])
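# Illustrative usage sketch, not part of the library; the access token and repository
# name below are placeholders:
#     from github import Github
#     repo = Github("<access-token>").get_repo("owner/repository")
#     milestone = repo.get_milestone(number=1)
#     milestone.edit(milestone.title, state="closed")
#     for label in milestone.get_labels():
#         print(label.name)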
|
py | 1a339d1a62f359b41cf1774a492fa625f7ab0cb9 | # ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
VERSION = "1.8.0b1"
|
py | 1a339d87327f2b1abe2b8fcedae8da55c3106705 | ###############################################################################
#
# ChartStock - A class for writing the Excel XLSX Stock charts.
#
# Copyright 2013-2019, John McNamara, [email protected]
#
from . import chart
class ChartStock(chart.Chart):
"""
A class for writing the Excel XLSX Stock charts.
"""
###########################################################################
#
# Public API.
#
###########################################################################
def __init__(self, options=None):
"""
Constructor.
"""
super(ChartStock, self).__init__()
if options is None:
options = {}
self.show_crosses = 0
self.hi_low_lines = {}
self.date_category = True
# Override and reset the default axis values.
self.x_axis['defaults']['num_format'] = 'dd/mm/yyyy'
self.x2_axis['defaults']['num_format'] = 'dd/mm/yyyy'
# Set the available data label positions for this chart type.
self.label_position_default = 'right'
self.label_positions = {
'center': 'ctr',
'right': 'r',
'left': 'l',
'above': 't',
'below': 'b',
# For backward compatibility.
'top': 't',
'bottom': 'b'}
self.set_x_axis({})
self.set_x2_axis({})
###########################################################################
#
# Private API.
#
###########################################################################
def _write_chart_type(self, args):
# Override the virtual superclass method with a chart specific method.
# Write the c:stockChart element.
self._write_stock_chart(args)
###########################################################################
#
# XML methods.
#
###########################################################################
def _write_stock_chart(self, args):
# Write the <c:stockChart> element.
# Overridden to add hi_low_lines().
if args['primary_axes']:
series = self._get_primary_axes_series()
else:
series = self._get_secondary_axes_series()
if not len(series):
return
# Add default formatting to the series data.
self._modify_series_formatting()
self._xml_start_tag('c:stockChart')
# Write the series elements.
for data in series:
self._write_ser(data)
# Write the c:dropLines element.
self._write_drop_lines()
# Write the c:hiLowLines element.
if args.get('primary_axes'):
self._write_hi_low_lines()
# Write the c:upDownBars element.
self._write_up_down_bars()
# Write the c:axId elements
self._write_axis_ids(args)
self._xml_end_tag('c:stockChart')
def _modify_series_formatting(self):
# Add default formatting to the series data.
index = 0
for series in self.series:
if index % 4 != 3:
if not series['line']['defined']:
series['line'] = {'width': 2.25,
'none': 1,
'defined': 1}
if series['marker'] is None:
if index % 4 == 2:
series['marker'] = {'type': 'dot', 'size': 3}
else:
series['marker'] = {'type': 'none'}
index += 1
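# Illustrative usage sketch, not part of this module; the filename, sheet layout and cell
# ranges are placeholders. A stock chart typically takes high, low and close series, in
# that order:
#     import xlsxwriter
#     workbook = xlsxwriter.Workbook('stock_chart.xlsx')
#     worksheet = workbook.add_worksheet()
#     chart = workbook.add_chart({'type': 'stock'})
#     chart.add_series({'categories': '=Sheet1!$A$2:$A$6', 'values': '=Sheet1!$B$2:$B$6'})
#     chart.add_series({'categories': '=Sheet1!$A$2:$A$6', 'values': '=Sheet1!$C$2:$C$6'})
#     chart.add_series({'categories': '=Sheet1!$A$2:$A$6', 'values': '=Sheet1!$D$2:$D$6'})
#     worksheet.insert_chart('F2', chart)
#     workbook.close()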
|
py | 1a339dad0a8892d326e33c1c6e578403d662f0f0 | from PIL import Image
import os
os.chdir("/Users/Joan/Documents/python/rex1168.github.io")
def change_size(job_name='thumbnails'):
jobs = {"thumbnails":
{'source': './static/thumbnail', 'target': './static/thumbnail-small'},
'course-covers':
{'source': './static/img/course_cover', 'target': './static/img/course_cover-small'}
}
# select job
job = jobs[job_name]
source = job['source']
target = job['target']
basewidth = 300
for root, dirs, filenames in os.walk(source):
path = os.path.join(target, root.split('/')[-1] if root.replace("/", "") != source.replace("/", "") else "")
if not os.path.isdir(path):
os.mkdir(path)
for fn in filenames:
extension = fn.split('.')[-1].lower()
if extension in ['jpg', 'png', 'jpeg']:
save_path = os.path.join(path, fn)
if not os.path.isfile(save_path):
img = Image.open(os.path.join(root, fn))
wpercent = (basewidth/float(img.size[0]))
hsize = int((float(img.size[1])*float(wpercent)))
img = img.resize((basewidth, hsize), Image.ANTIALIAS)
img.save(save_path)
print(save_path)
def reduce_icon(to_height=64):
for name in os.listdir('./static/img/icon'):
if name.split('.')[-1].lower() in ['jpg', 'png']:
im = Image.open("./static/img/icon/%s" % name)
w, h = im.size
h_ = h / to_height
w /= h_
im = im.resize((int(w), to_height), Image.ANTIALIAS)
im.save("./static/img/icon/%s" % name)
def reduce_single(path, to_height=256):
im = Image.open(path)
w, h = im.size
h_ = h / to_height
w /= h_
im = im.resize((int(w), to_height), Image.ANTIALIAS)
im.save(path)
if __name__ == "__main__":
job_name = ['thumbnails', 'course-covers'][1]
change_size(job_name)
# reduce_icon()
# reduce_single(path='static/img/description/more_update.png', to_height=130)
|
py | 1a339db0fc264095507bb7ddeb15a7dd997feacc | # """Assignment 03: Using inverse kinematics
# """
import json
import os
from compas_fab.backends import RosClient
from compas_fab.robots import Configuration
from compas.geometry import Frame
from compas.geometry import Point
from compas.geometry import Vector
from compas_fab.utilities import write_data_to_json
# This function defines the inputs of your assignment, you get a compas_fab.robots.Robot and a Frame
# and are expected to return ONE valid configuration to reach that frame
def calculate_ik(robot, frame):
# 1. define a valid start configuration for your frames
start_configuration = robot.zero_configuration()
# 2. use inverse kinematics to find out a valid configuration
configuration = robot.inverse_kinematics(frame, start_configuration)
# print("Found configuration", configuration)
return configuration
def store_configurations(configurations):
# 3. store all found configurations in a JSON file
here = os.path.dirname(__file__)
path = os.path.abspath(os.path.join(here, "json_file.json"))
    configuration_json = []
for configuration in configurations:
configuration_json.append(configuration.data)
write_data_to_json(configuration_json, path)
# pass
# Use the following to test from the command line
# Or copy solution_viewer.ghx next to the folder where you created assignment_03.py to visualize the same in Grasshopper
if __name__ == '__main__':
frame_list = [
Frame(Point(0.084, 0.319, 0.175), Vector(0.000, 0.000, -1.000), Vector(0.000, 1.000, 0.000)),
Frame(Point(0.152, 0.317, 0.175), Vector(0.000, 0.000, -1.000), Vector(0.000, 1.000, 0.000)),
Frame(Point(0.220, 0.315, 0.175), Vector(0.000, 0.000, -1.000), Vector(0.000, 1.000, 0.000)),
Frame(Point(0.288, 0.313, 0.175), Vector(0.000, 0.000, -1.000), Vector(0.000, 1.000, 0.000)),
Frame(Point(0.357, 0.310, 0.175), Vector(0.000, 0.000, -1.000), Vector(0.000, 1.000, 0.000)),
Frame(Point(0.425, 0.308, 0.175), Vector(0.000, 0.000, -1.000), Vector(0.000, 1.000, 0.000)),
Frame(Point(0.493, 0.306, 0.175), Vector(0.000, 0.000, -1.000), Vector(0.000, 1.000, 0.000)),
Frame(Point(0.561, 0.303, 0.175), Vector(0.000, 0.000, -1.000), Vector(0.000, 1.000, 0.000)),
Frame(Point(0.629, 0.301, 0.175), Vector(0.000, 0.000, -1.000), Vector(0.000, 1.000, 0.000)),
Frame(Point(0.698, 0.299, 0.175), Vector(0.000, 0.000, -1.000), Vector(0.000, 1.000, 0.000)),
Frame(Point(0.766, 0.297, 0.175), Vector(0.000, 0.000, -1.000), Vector(0.000, 1.000, 0.000))
]
# Loads the robot from ROS
with RosClient('localhost') as client:
robot = client.load_robot()
# And call our assignment functions for each frame in the example
configurations = []
for frame in frame_list:
configuration = calculate_ik(robot, frame)
configurations.append(configuration)
print("Found configuration", configuration)
store_configurations(configurations)
|
py | 1a339de73094f900208a629352e752356e0ddb23 | class ESDocsException(Exception):
pass
class InvalidFieldLookup(ESDocsException):
pass
class MissingSerializer(ESDocsException):
pass
|
py | 1a339e34b16e7807bfd04e79e892b782448377c5 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .inventory_item_properties import InventoryItemProperties
class VirtualMachineInventoryItem(InventoryItemProperties):
"""The VM inventory item.
Variables are only populated by the server, and will be ignored when
sending a request.
All required parameters must be populated in order to send to Azure.
:param managed_resource_id: Gets or sets the tracked resource id
corresponding to the inventory resource.
:type managed_resource_id: str
:param mo_ref_id: Gets or sets the MoRef (Managed Object Reference) ID for
the inventory item.
:type mo_ref_id: str
:param mo_name: Gets or sets the vCenter Managed Object name for the
inventory item.
:type mo_name: str
:ivar provisioning_state: Gets or sets the provisioning state.
:vartype provisioning_state: str
:param inventory_type: Required. Constant filled by server.
:type inventory_type: str
"""
_validation = {
'provisioning_state': {'readonly': True},
'inventory_type': {'required': True},
}
_attribute_map = {
'managed_resource_id': {'key': 'managedResourceId', 'type': 'str'},
'mo_ref_id': {'key': 'moRefId', 'type': 'str'},
'mo_name': {'key': 'moName', 'type': 'str'},
'provisioning_state': {'key': 'provisioningState', 'type': 'str'},
'inventory_type': {'key': 'inventoryType', 'type': 'str'},
}
def __init__(self, **kwargs):
super(VirtualMachineInventoryItem, self).__init__(**kwargs)
self.inventory_type = 'VirtualMachine'
|
py | 1a339f59e9926f0dc803bbd185a82ffcdf1505dc | from __future__ import unicode_literals
from mopidy_easywebradio import Extension, frontend as frontend_lib
def test_get_default_config():
ext = Extension()
config = ext.get_default_config()
assert '[easywebradio]' in config
assert 'enabled = true' in config
def test_get_config_schema():
ext = Extension()
schema = ext.get_config_schema()
# TODO Test the content of your config schema
#assert 'username' in schema
#assert 'password' in schema
# TODO Write more tests
|
py | 1a339fa8aec678ec4b7f1ed273eab849851ae9c7 | import requests
from PyKartRider import jsonInfo
from PyKartRider import apiKey
class UserNotFoundException(Exception):
"""
A class for exception when user is not found
"""
pass
class UnKnownException(Exception):
"""
A class for exception when unknown error happened
"""
pass
def get_user_id(username):
"""
A function that retrieves user id by username
:param username: the username to retrieve id from
:return: tuple type of (id, level)
"""
r = requests.get(("https://api.nexon.co.kr/kart/v1.0/users/nickname/" + username).encode("utf-8"),
headers={'Authorization': apiKey.key})
if r.status_code == 200:
user_id = r.json()['accessId']
user_level = r.json()['level']
return user_id, user_level
elif r.status_code == 404:
raise UserNotFoundException
else:
raise UnKnownException
def get_user_name(user_id):
"""
A function that retrieves username by id
:param user_id: the user id to retrive name from
:return: tupe type of (username, level)
"""
r = requests.get(("https://api.nexon.co.kr/kart/v1.0/users/" + user_id).encode("utf-8"),
headers={'Authorization': apiKey.key})
if r.status_code == 200:
user_id = r.json()['name']
user_level = r.json()['level']
return user_id, user_level
elif r.status_code == 404:
raise UserNotFoundException
else:
raise UnKnownException
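# Illustrative usage sketch (requires a valid Nexon API key in apiKey.key; the nickname
# below is a placeholder):
#     user_id, level = get_user_id("SomeNickname")
#     name, level = get_user_name(user_id)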
|
py | 1a33a02dbf4ff1e625503a86ffcf45ed3a34bb4a | class Node:
# Constructor to initialize data
# If data is not given by user,its taken as None
def __init__(self, data=None, next=None, prev=None):
self.data = data
self.next = next
self.prev = prev
# method for setting the data field of the node
def set_data(self, data):
self.data = data
# method for getting the data field of the node
def get_data(self):
return self.data
# method for setting the next field of the node
def set_next(self, next):
self.next = next
# method for getting the next field of the node
def get_next(self):
return self.next
# returns true if the node points to another node
def has_next(self):
return self.next != None
# method for setting the next field of the node
def setPrev(self, prev):
self.prev = prev
# method for getting the next field of the node
def getPrev(self):
return self.prev
# returns true if the node points to another node
def hasPrev(self):
return self.prev != None
# __str__ returns string equivalent of Object
def __str__(self):
return "Node[Data = %s]" % (self.data,)
class DoubleLinkedList:
def __init__(self):
self.head = None
self.tail = None
def fwd_print(self):
current = self.head
if current == None:
print("No elements")
return False
while (current != None):
print (current.data)
current = current.next
return True
def insert(self,data):
if self.head is None:
self.head = self.tail = Node(data)
return True
current = self.head
while current.next is not None:
current = current.next
current.next = Node(data,None,current)
self.tail = current.next
def delete(self,data):
        # The empty-list check must come before dereferencing self.head.
        if self.head is None:
            print("List is Empty...")
            return False
        if self.head.data == data:
            self.head = self.head.next
            if self.head is not None:
                self.head.prev = None
            else:
                self.tail = None
            return True
if self.tail.data == data:
self.tail = self.tail.prev
self.tail.next = None
return True
current = self.head
while current is not None:
if current.data == data:
current.prev.set_next(current.next)
current.next.setPrev(current.prev)
return True
current = current.next
return False
def insertAtBeginning(self,data):
if self.head is None:
self.head = self.tail = Node(data)
else:
self.head = Node(data,self.head,None)
def getNode(self,index):
current = self.head
if current == None:
return None
i = 0
while current.next is not None and i < index:
current = current.next
if current == None:
break
i += 1
return current
def insertAtGivenPosition(self,index,data):
if self.head == None or index == 0:
self.insertAtBeginning(data)
        elif index > 0:
temp = self.getNode(index)
if temp == None or temp.get_next() == None:
self.insert(data)
else:
newNode = Node(data,temp.get_next(),temp)
temp.get_next().setPrev(newNode)
temp.set_next(newNode)
if __name__ == '__main__':
# Initializing list
l = DoubleLinkedList()
# Inserting Values
l.insert(1)
l.insert(2)
l.insert(3)
l.insert(4)
l.insertAtBeginning(45)
print("animesh ",l.getNode(0))
l.fwd_print()
|
py | 1a33a04f933752029aa898624dc4a15bca6452d9 | """Tests that GPIO pins can be written to and read from"""
from user_inputs_settings import USER_INPUT_1, USER_INPUT_2, USER_INPUT_3 # pylint: disable=W0403
class UserInputs(object):
"""Tests that values can be written and read from a GPIO pin"""
def __init__(self):
self.gpio = None
if isinstance(self, UserInputOne):
self.gpio = USER_INPUT_1
elif isinstance(self, UserInputTwo):
self.gpio = USER_INPUT_2
elif isinstance(self, UserInputThree):
self.gpio = USER_INPUT_3
def test_high(self):
"""Tests that high values can be written and read from the GPIO pin"""
self.gpio.write(True)
if self.gpio.read() is True:
return True
return False
def test_low(self):
"""Tests that low values can be written and read from the GPIO pin"""
self.gpio.write(False)
if self.gpio.read() is False:
return True
return False
class UserInputOne(UserInputs):
"""UserInput 1 class"""
pass
class UserInputTwo(UserInputs):
"""UserInput 2 class"""
pass
class UserInputThree(UserInputs):
"""UserInput 3 class"""
pass
|
py | 1a33a085304f925333314f348e46dfbdf4e956bf | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.11.3
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kubernetes.client
from kubernetes.client.rest import ApiException
from kubernetes.client.models.v1_ceph_fs_volume_source import V1CephFSVolumeSource
class TestV1CephFSVolumeSource(unittest.TestCase):
""" V1CephFSVolumeSource unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testV1CephFSVolumeSource(self):
"""
Test V1CephFSVolumeSource
"""
# FIXME: construct object with mandatory attributes with example values
#model = kubernetes.client.models.v1_ceph_fs_volume_source.V1CephFSVolumeSource()
pass
if __name__ == '__main__':
unittest.main()
|
py | 1a33a0acd42e77b7676f86dccf278cff360915f4 | # coding: iso-8859-15
import py
import random
from pypy.objspace.std.listobject import W_ListObject, SizeListStrategy,\
IntegerListStrategy, ObjectListStrategy
from pypy.interpreter.error import OperationError
from rpython.rlib.rarithmetic import is_valid_int
class TestW_ListObject(object):
def test_is_true(self):
w = self.space.wrap
w_list = W_ListObject(self.space, [])
assert self.space.is_true(w_list) == False
w_list = W_ListObject(self.space, [w(5)])
assert self.space.is_true(w_list) == True
w_list = W_ListObject(self.space, [w(5), w(3)])
assert self.space.is_true(w_list) == True
def test_len(self):
w = self.space.wrap
w_list = W_ListObject(self.space, [])
assert self.space.eq_w(self.space.len(w_list), w(0))
w_list = W_ListObject(self.space, [w(5)])
assert self.space.eq_w(self.space.len(w_list), w(1))
w_list = W_ListObject(self.space, [w(5), w(3), w(99)]*111)
assert self.space.eq_w(self.space.len(w_list), w(333))
def test_getitem(self):
w = self.space.wrap
w_list = W_ListObject(self.space, [w(5), w(3)])
assert self.space.eq_w(self.space.getitem(w_list, w(0)), w(5))
assert self.space.eq_w(self.space.getitem(w_list, w(1)), w(3))
assert self.space.eq_w(self.space.getitem(w_list, w(-2)), w(5))
assert self.space.eq_w(self.space.getitem(w_list, w(-1)), w(3))
self.space.raises_w(self.space.w_IndexError,
self.space.getitem, w_list, w(2))
self.space.raises_w(self.space.w_IndexError,
self.space.getitem, w_list, w(42))
self.space.raises_w(self.space.w_IndexError,
self.space.getitem, w_list, w(-3))
def test_getitems(self):
w = self.space.wrap
from pypy.objspace.std.listobject import make_range_list
r = make_range_list(self.space, 1,1,7)
l = [w(1),w(2),w(3),w(4),w(5),w(6),w(7)]
l2 = r.getitems()
for i in range(7):
assert self.space.eq_w(l[i], l2[i])
def test_getitems_fixedsize(self):
w = self.space.wrap
from pypy.objspace.std.listobject import make_range_list
rangelist = make_range_list(self.space, 1,1,7)
emptylist = W_ListObject(self.space, [])
intlist = W_ListObject(self.space, [w(1),w(2),w(3),w(4),w(5),w(6),w(7)])
strlist = W_ListObject(self.space, [w('1'),w('2'),w('3'),w('4'),w('5'),w('6'),w('7')])
floatlist = W_ListObject(self.space, [w(1.0),w(2.0),w(3.0),w(4.0),w(5.0),w(6.0),w(7.0)])
objlist = W_ListObject(self.space, [w(1),w('2'),w(3.0),w(4),w(5),w(6),w(7)])
emptylist_copy = emptylist.getitems_fixedsize()
assert emptylist_copy == []
rangelist_copy = rangelist.getitems_fixedsize()
intlist_copy = intlist.getitems_fixedsize()
strlist_copy = strlist.getitems_fixedsize()
floatlist_copy = floatlist.getitems_fixedsize()
objlist_copy = objlist.getitems_fixedsize()
for i in range(7):
assert self.space.eq_w(rangelist_copy[i], rangelist.getitem(i))
assert self.space.eq_w(intlist_copy[i], intlist.getitem(i))
assert self.space.eq_w(strlist_copy[i], strlist.getitem(i))
assert self.space.eq_w(floatlist_copy[i], floatlist.getitem(i))
assert self.space.eq_w(objlist_copy[i], objlist.getitem(i))
emptylist_copy = emptylist.getitems_unroll()
assert emptylist_copy == []
rangelist_copy = rangelist.getitems_unroll()
intlist_copy = intlist.getitems_unroll()
strlist_copy = strlist.getitems_unroll()
floatlist_copy = floatlist.getitems_unroll()
objlist_copy = objlist.getitems_unroll()
for i in range(7):
assert self.space.eq_w(rangelist_copy[i], rangelist.getitem(i))
assert self.space.eq_w(intlist_copy[i], intlist.getitem(i))
assert self.space.eq_w(strlist_copy[i], strlist.getitem(i))
assert self.space.eq_w(floatlist_copy[i], floatlist.getitem(i))
assert self.space.eq_w(objlist_copy[i], objlist.getitem(i))
def test_random_getitem(self):
w = self.space.wrap
s = list('qedx387tn3uixhvt 7fh387fymh3dh238 dwd-wq.dwq9')
w_list = W_ListObject(self.space, map(w, s))
keys = range(-len(s)-5, len(s)+5)
choices = keys + [None]*12
stepchoices = [None, None, None, 1, 1, -1, -1, 2, -2,
len(s)-1, len(s), len(s)+1,
-len(s)-1, -len(s), -len(s)+1]
for i in range(40):
keys.append(slice(random.choice(choices),
random.choice(choices),
random.choice(stepchoices)))
random.shuffle(keys)
for key in keys:
try:
expected = s[key]
except IndexError:
self.space.raises_w(self.space.w_IndexError,
self.space.getitem, w_list, w(key))
else:
w_result = self.space.getitem(w_list, w(key))
assert self.space.unwrap(w_result) == expected
def test_iter(self):
w = self.space.wrap
w_list = W_ListObject(self.space, [w(5), w(3), w(99)])
w_iter = self.space.iter(w_list)
assert self.space.eq_w(self.space.next(w_iter), w(5))
assert self.space.eq_w(self.space.next(w_iter), w(3))
assert self.space.eq_w(self.space.next(w_iter), w(99))
py.test.raises(OperationError, self.space.next, w_iter)
py.test.raises(OperationError, self.space.next, w_iter)
def test_contains(self):
w = self.space.wrap
w_list = W_ListObject(self.space, [w(5), w(3), w(99)])
assert self.space.eq_w(self.space.contains(w_list, w(5)),
self.space.w_True)
assert self.space.eq_w(self.space.contains(w_list, w(99)),
self.space.w_True)
assert self.space.eq_w(self.space.contains(w_list, w(11)),
self.space.w_False)
assert self.space.eq_w(self.space.contains(w_list, w_list),
self.space.w_False)
def test_getslice(self):
w = self.space.wrap
def test1(testlist, start, stop, step, expected):
w_slice = self.space.newslice(w(start), w(stop), w(step))
w_list = W_ListObject(self.space, [w(i) for i in testlist])
w_result = self.space.getitem(w_list, w_slice)
assert self.space.unwrap(w_result) == expected
for testlist in [[], [5,3,99]]:
for start in [-2, 0, 1, 10]:
for end in [-1, 2, 999]:
test1(testlist, start, end, 1, testlist[start:end])
test1([5,7,1,4], 3, 1, -2, [4,])
test1([5,7,1,4], 3, 0, -2, [4, 7])
test1([5,7,1,4], 3, -1, -2, [])
test1([5,7,1,4], -2, 11, 2, [1,])
test1([5,7,1,4], -3, 11, 2, [7, 4])
test1([5,7,1,4], -5, 11, 2, [5, 1])
def test_setslice(self):
w = self.space.wrap
def test1(lhslist, start, stop, rhslist, expected):
w_slice = self.space.newslice(w(start), w(stop), w(1))
w_lhslist = W_ListObject(self.space, [w(i) for i in lhslist])
w_rhslist = W_ListObject(self.space, [w(i) for i in rhslist])
self.space.setitem(w_lhslist, w_slice, w_rhslist)
assert self.space.unwrap(w_lhslist) == expected
test1([5,7,1,4], 1, 3, [9,8], [5,9,8,4])
test1([5,7,1,4], 1, 3, [9], [5,9,4])
test1([5,7,1,4], 1, 3, [9,8,6],[5,9,8,6,4])
test1([5,7,1,4], 1, 3, [], [5,4])
test1([5,7,1,4], 2, 2, [9], [5,7,9,1,4])
test1([5,7,1,4], 0, 99,[9,8], [9,8])
def test_add(self):
w = self.space.wrap
w_list0 = W_ListObject(self.space, [])
w_list1 = W_ListObject(self.space, [w(5), w(3), w(99)])
w_list2 = W_ListObject(self.space, [w(-7)] * 111)
assert self.space.eq_w(self.space.add(w_list1, w_list1),
W_ListObject(self.space, [w(5), w(3), w(99),
w(5), w(3), w(99)]))
assert self.space.eq_w(self.space.add(w_list1, w_list2),
W_ListObject(self.space, [w(5), w(3), w(99)] +
[w(-7)] * 111))
assert self.space.eq_w(self.space.add(w_list1, w_list0), w_list1)
assert self.space.eq_w(self.space.add(w_list0, w_list2), w_list2)
def test_mul(self):
# only testing right mul at the moment
w = self.space.wrap
arg = w(2)
n = 3
w_lis = W_ListObject(self.space, [arg])
w_lis3 = W_ListObject(self.space, [arg]*n)
w_res = self.space.mul(w_lis, w(n))
assert self.space.eq_w(w_lis3, w_res)
# commute
w_res = self.space.mul(w(n), w_lis)
assert self.space.eq_w(w_lis3, w_res)
def test_mul_does_not_clone(self):
# only testing right mul at the moment
w = self.space.wrap
arg = w(2)
w_lis = W_ListObject(self.space, [arg])
w_lis.clone = None
# does not crash
self.space.mul(w_lis, w(5))
def test_setitem(self):
w = self.space.wrap
w_list = W_ListObject(self.space, [w(5), w(3)])
w_exp1 = W_ListObject(self.space, [w(5), w(7)])
w_exp2 = W_ListObject(self.space, [w(8), w(7)])
self.space.setitem(w_list, w(1), w(7))
assert self.space.eq_w(w_exp1, w_list)
self.space.setitem(w_list, w(-2), w(8))
assert self.space.eq_w(w_exp2, w_list)
self.space.raises_w(self.space.w_IndexError,
self.space.setitem, w_list, w(2), w(5))
self.space.raises_w(self.space.w_IndexError,
self.space.setitem, w_list, w(-3), w(5))
def test_random_setitem_delitem(self):
w = self.space.wrap
s = range(39)
w_list = W_ListObject(self.space, map(w, s))
expected = list(s)
keys = range(-len(s)-5, len(s)+5)
choices = keys + [None]*12
stepchoices = [None, None, None, 1, 1, -1, -1, 2, -2,
len(s)-1, len(s), len(s)+1,
-len(s)-1, -len(s), -len(s)+1]
for i in range(50):
keys.append(slice(random.choice(choices),
random.choice(choices),
random.choice(stepchoices)))
random.shuffle(keys)
n = len(s)
for key in keys:
if random.random() < 0.15:
random.shuffle(s)
w_list = W_ListObject(self.space, map(w, s))
expected = list(s)
try:
value = expected[key]
except IndexError:
self.space.raises_w(self.space.w_IndexError,
self.space.setitem, w_list, w(key), w(42))
else:
if is_valid_int(value): # non-slicing
if random.random() < 0.25: # deleting
self.space.delitem(w_list, w(key))
del expected[key]
else:
self.space.setitem(w_list, w(key), w(n))
expected[key] = n
n += 1
else: # slice assignment
mode = random.choice(['samesize', 'resize', 'delete'])
if mode == 'delete':
self.space.delitem(w_list, w(key))
del expected[key]
elif mode == 'samesize':
newvalue = range(n, n+len(value))
self.space.setitem(w_list, w(key), w(newvalue))
expected[key] = newvalue
n += len(newvalue)
elif mode == 'resize' and key.step is None:
newvalue = range(n, n+random.randrange(0, 20))
self.space.setitem(w_list, w(key), w(newvalue))
expected[key] = newvalue
n += len(newvalue)
assert self.space.unwrap(w_list) == expected
def test_eq(self):
w = self.space.wrap
w_list0 = W_ListObject(self.space, [])
w_list1 = W_ListObject(self.space, [w(5), w(3), w(99)])
w_list2 = W_ListObject(self.space, [w(5), w(3), w(99)])
w_list3 = W_ListObject(self.space, [w(5), w(3), w(99), w(-1)])
assert self.space.eq_w(self.space.eq(w_list0, w_list1),
self.space.w_False)
assert self.space.eq_w(self.space.eq(w_list1, w_list0),
self.space.w_False)
assert self.space.eq_w(self.space.eq(w_list1, w_list1),
self.space.w_True)
assert self.space.eq_w(self.space.eq(w_list1, w_list2),
self.space.w_True)
assert self.space.eq_w(self.space.eq(w_list2, w_list3),
self.space.w_False)
def test_ne(self):
w = self.space.wrap
w_list0 = W_ListObject(self.space, [])
w_list1 = W_ListObject(self.space, [w(5), w(3), w(99)])
w_list2 = W_ListObject(self.space, [w(5), w(3), w(99)])
w_list3 = W_ListObject(self.space, [w(5), w(3), w(99), w(-1)])
assert self.space.eq_w(self.space.ne(w_list0, w_list1),
self.space.w_True)
assert self.space.eq_w(self.space.ne(w_list1, w_list0),
self.space.w_True)
assert self.space.eq_w(self.space.ne(w_list1, w_list1),
self.space.w_False)
assert self.space.eq_w(self.space.ne(w_list1, w_list2),
self.space.w_False)
assert self.space.eq_w(self.space.ne(w_list2, w_list3),
self.space.w_True)
def test_lt(self):
w = self.space.wrap
w_list0 = W_ListObject(self.space, [])
w_list1 = W_ListObject(self.space, [w(5), w(3), w(99)])
w_list2 = W_ListObject(self.space, [w(5), w(3), w(99)])
w_list3 = W_ListObject(self.space, [w(5), w(3), w(99), w(-1)])
w_list4 = W_ListObject(self.space, [w(5), w(3), w(9), w(-1)])
assert self.space.eq_w(self.space.lt(w_list0, w_list1),
self.space.w_True)
assert self.space.eq_w(self.space.lt(w_list1, w_list0),
self.space.w_False)
assert self.space.eq_w(self.space.lt(w_list1, w_list1),
self.space.w_False)
assert self.space.eq_w(self.space.lt(w_list1, w_list2),
self.space.w_False)
assert self.space.eq_w(self.space.lt(w_list2, w_list3),
self.space.w_True)
assert self.space.eq_w(self.space.lt(w_list4, w_list3),
self.space.w_True)
def test_ge(self):
w = self.space.wrap
w_list0 = W_ListObject(self.space, [])
w_list1 = W_ListObject(self.space, [w(5), w(3), w(99)])
w_list2 = W_ListObject(self.space, [w(5), w(3), w(99)])
w_list3 = W_ListObject(self.space, [w(5), w(3), w(99), w(-1)])
w_list4 = W_ListObject(self.space, [w(5), w(3), w(9), w(-1)])
assert self.space.eq_w(self.space.ge(w_list0, w_list1),
self.space.w_False)
assert self.space.eq_w(self.space.ge(w_list1, w_list0),
self.space.w_True)
assert self.space.eq_w(self.space.ge(w_list1, w_list1),
self.space.w_True)
assert self.space.eq_w(self.space.ge(w_list1, w_list2),
self.space.w_True)
assert self.space.eq_w(self.space.ge(w_list2, w_list3),
self.space.w_False)
assert self.space.eq_w(self.space.ge(w_list4, w_list3),
self.space.w_False)
def test_gt(self):
w = self.space.wrap
w_list0 = W_ListObject(self.space, [])
w_list1 = W_ListObject(self.space, [w(5), w(3), w(99)])
w_list2 = W_ListObject(self.space, [w(5), w(3), w(99)])
w_list3 = W_ListObject(self.space, [w(5), w(3), w(99), w(-1)])
w_list4 = W_ListObject(self.space, [w(5), w(3), w(9), w(-1)])
assert self.space.eq_w(self.space.gt(w_list0, w_list1),
self.space.w_False)
assert self.space.eq_w(self.space.gt(w_list1, w_list0),
self.space.w_True)
assert self.space.eq_w(self.space.gt(w_list1, w_list1),
self.space.w_False)
assert self.space.eq_w(self.space.gt(w_list1, w_list2),
self.space.w_False)
assert self.space.eq_w(self.space.gt(w_list2, w_list3),
self.space.w_False)
assert self.space.eq_w(self.space.gt(w_list4, w_list3),
self.space.w_False)
def test_le(self):
w = self.space.wrap
w_list0 = W_ListObject(self.space, [])
w_list1 = W_ListObject(self.space, [w(5), w(3), w(99)])
w_list2 = W_ListObject(self.space, [w(5), w(3), w(99)])
w_list3 = W_ListObject(self.space, [w(5), w(3), w(99), w(-1)])
w_list4 = W_ListObject(self.space, [w(5), w(3), w(9), w(-1)])
assert self.space.eq_w(self.space.le(w_list0, w_list1),
self.space.w_True)
assert self.space.eq_w(self.space.le(w_list1, w_list0),
self.space.w_False)
assert self.space.eq_w(self.space.le(w_list1, w_list1),
self.space.w_True)
assert self.space.eq_w(self.space.le(w_list1, w_list2),
self.space.w_True)
assert self.space.eq_w(self.space.le(w_list2, w_list3),
self.space.w_True)
assert self.space.eq_w(self.space.le(w_list4, w_list3),
self.space.w_True)
def test_sizehint(self):
space = self.space
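        # a list created with a sizehint starts on SizeListStrategy; the first append switches it to a strategy matching the appended value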
w_l = space.newlist([], sizehint=10)
assert isinstance(w_l.strategy, SizeListStrategy)
space.call_method(w_l, 'append', space.wrap(3))
assert isinstance(w_l.strategy, IntegerListStrategy)
w_l = space.newlist([], sizehint=10)
space.call_method(w_l, 'append', space.w_None)
assert isinstance(w_l.strategy, ObjectListStrategy)
def test_newlist_hint(self):
space = self.space
w_lst = space.newlist_hint(13)
assert isinstance(w_lst.strategy, SizeListStrategy)
assert w_lst.strategy.sizehint == 13
def test_find_fast_on_intlist(self, monkeypatch):
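        # eq_w is patched to None, so any fallback to generic object equality would crash; find() must stay on the int fast path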
monkeypatch.setattr(self.space, "eq_w", None)
w = self.space.wrap
intlist = W_ListObject(self.space, [w(1),w(2),w(3),w(4),w(5),w(6),w(7)])
res = intlist.find(w(4), 0, 7)
assert res == 3
res = intlist.find(w(4), 0, 100)
assert res == 3
with py.test.raises(ValueError):
intlist.find(w(4), 4, 7)
with py.test.raises(ValueError):
intlist.find(w(4), 0, 2)
class AppTestListObject(object):
#spaceconfig = {"objspace.std.withliststrategies": True} # it's the default
def setup_class(cls):
import platform
import sys
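        # appdirect runs on CPython are detected by the absence of sys.pypy_translation_info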
on_cpython = (cls.runappdirect and
not hasattr(sys, 'pypy_translation_info'))
cls.w_on_cpython = cls.space.wrap(on_cpython)
cls.w_on_arm = cls.space.wrap(platform.machine().startswith('arm'))
cls.w_runappdirect = cls.space.wrap(cls.runappdirect)
def test_doc(self):
assert list.__doc__ == "list() -> new empty list\nlist(iterable) -> new list initialized from iterable's items"
assert list.__new__.__doc__ == "Create and return a new object. See help(type) for accurate signature."
assert list.__init__.__doc__ == "Initialize self. See help(type(self)) for accurate signature."
def test_getstrategyfromlist_w(self):
l0 = ["a", "2", "a", True]
# this raised TypeError on ListStrategies
l1 = ["a", "2", True, "a"]
l2 = [1, "2", "a", "a"]
assert set(l1) == set(l2)
def test_notequals(self):
assert [1,2,3,4] != [1,2,5,4]
def test_contains(self):
l = []
assert not l.__contains__(2)
l = [1,2,3]
assert l.__contains__(2)
assert not l.__contains__("2")
assert l.__contains__(1.0)
l = ["1","2","3"]
assert l.__contains__("2")
assert not l.__contains__(2)
l = range(4)
assert l.__contains__(2)
assert not l.__contains__("2")
l = [1,2,"3"]
assert l.__contains__(2)
assert not l.__contains__("2")
l = range(2, 20, 3) # = [2, 5, 8, 11, 14, 17]
assert l.__contains__(2)
assert l.__contains__(5)
assert l.__contains__(8)
assert l.__contains__(11)
assert l.__contains__(14)
assert l.__contains__(17)
assert not l.__contains__(3)
assert not l.__contains__(4)
assert not l.__contains__(7)
assert not l.__contains__(13)
assert not l.__contains__(20)
l = range(2, -20, -3) # [2, -1, -4, -7, -10, -13, -16, -19]
assert l.__contains__(2)
assert l.__contains__(-4)
assert l.__contains__(-13)
assert l.__contains__(-16)
assert l.__contains__(-19)
assert not l.__contains__(-17)
assert not l.__contains__(-3)
assert not l.__contains__(-20)
assert not l.__contains__(-21)
logger = []
class Foo(object):
def __init__(self, value, name=None):
self.value = value
self.name = name or value
def __repr__(self):
return '<Foo %s>' % self.name
def __eq__(self, other):
logger.append((self, other))
return self.value == other.value
foo1, foo2, foo3 = Foo(1), Foo(2), Foo(3)
foo42 = Foo(42)
foo_list = [foo1, foo2, foo3]
foo42 in foo_list
logger_copy = logger[:] # prevent re-evaluation during pytest error print
assert logger_copy == [(foo42, foo1), (foo42, foo2), (foo42, foo3)]
del logger[:]
foo2_bis = Foo(2, '2 bis')
foo2_bis in foo_list
logger_copy = logger[:] # prevent re-evaluation during pytest error print
assert logger_copy == [(foo2_bis, foo1), (foo2_bis, foo2)]
def test_call_list(self):
assert list('') == []
assert list('abc') == ['a', 'b', 'c']
assert list((1, 2)) == [1, 2]
l = [1]
assert list(l) is not l
assert list(l) == l
def test_explicit_new_init(self):
l = l0 = list.__new__(list)
l.__init__([1,2])
assert l is l0
assert l == [1,2]
list.__init__(l, [1,2,3])
assert l is l0
assert l == [1,2,3]
list.__init__(l, ['a', 'b', 'c'])
assert l is l0
assert l == ['a', 'b', 'c']
list.__init__(l)
assert l == []
def test_explicit_new_init_more_cases(self):
for assignment in [[], (), [3], ["foo"]]:
l = [1, 2]
l.__init__(assignment)
assert l == list(assignment)
def test_range_init(self):
x = list(range(5,1))
assert x == []
x = list(range(1,10))
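        # the comparison on the next line has no assert, so its result is discarded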
x[22:0:-1] == range(1,10)
r = list(range(10, 10))
assert len(r) == 0
assert list(reversed(r)) == []
assert r[:] == []
def test_extend_list(self):
l = l0 = [1]
l.extend([2])
assert l is l0
assert l == [1,2]
l = ['a']
l.extend('b')
assert l == ['a', 'b']
l = ['a']
l.extend([0])
assert l == ['a', 0]
l = list(range(10))
l.extend([10])
assert l == list(range(11))
l = []
m = [1,2,3]
l.extend(m)
m[0] = 5
assert m == [5,2,3]
assert l == [1,2,3]
def test_extend_tuple(self):
l = l0 = [1]
l.extend((2,))
assert l is l0
assert l == [1,2]
l = ['a']
l.extend(('b',))
assert l == ['a', 'b']
def test_extend_iterable(self):
l = l0 = [1]
l.extend(iter([1, 2, 3, 4]))
assert l is l0
assert l == [1, 1, 2, 3, 4]
l = l0 = ['a']
l.extend(iter(['b', 'c', 'd']))
assert l == ['a', 'b', 'c', 'd']
assert l is l0
l = l0 = [1.2]
l.extend(iter([2.3, 3.4, 4.5]))
assert l == [1.2, 2.3, 3.4, 4.5]
assert l is l0
def test_extend_iterable_length_hint_overflow(self):
import sys
class CustomIterable(object):
def __iter__(self):
if False:
yield
def __length_hint__(self):
return sys.maxsize
a = [1, 2, 3, 4]
a.extend(CustomIterable())
assert a == [1, 2, 3, 4]
def test_sort(self):
l = l0 = [1, 5, 3, 0]
l.sort()
assert l is l0
assert l == [0, 1, 3, 5]
l = l0 = []
l.sort()
assert l is l0
assert l == []
l = l0 = [1]
l.sort()
assert l is l0
assert l == [1]
l = ["c", "a", "d", "b"]
l.sort(reverse=True)
assert l == ["d", "c", "b", "a"]
l = [3.3, 2.2, 4.4, 1.1, 3.1, 5.5]
l.sort()
assert l == [1.1, 2.2, 3.1, 3.3, 4.4, 5.5]
def test_sort_key(self):
def lower(x): return x.lower()
l = ['a', 'C', 'b']
l.sort(key=lower)
assert l == ['a', 'b', 'C']
l = []
l.sort(key=lower)
assert l == []
l = ['a']
l.sort(key=lower)
assert l == ['a']
r = list(range(10))
r.sort(key=lambda x: -x)
assert r == list(range(9, -1, -1))
def test_sort_reversed(self):
l = list(range(10))
l.sort(reverse=True)
assert l == list(range(9, -1, -1))
l = []
l.sort(reverse=True)
assert l == []
l = [1]
l.sort(reverse=True)
assert l == [1]
raises(TypeError, sorted, [], None, lambda x, y: 0)
def test_sort_cmp_key_reverse(self):
def lower(x): return x.lower()
l = ['a', 'C', 'b']
l.sort(reverse = True, key = lower)
assert l == ['C', 'b', 'a']
def test_sort_simple_string(self):
l = ["a", "d", "c", "b"]
l.sort()
assert l == ["a", "b", "c", "d"]
def test_sort_range(self):
l = list(range(3, 10, 3))
l.sort()
assert l == [3, 6, 9]
l.sort(reverse=True)
assert l == [9, 6, 3]
l.sort(reverse=True)
assert l == [9, 6, 3]
l.sort()
assert l == [3, 6, 9]
def test_getitem(self):
l = [1, 2, 3, 4, 5, 6, 9]
assert l[0] == 1
assert l[-1] == 9
assert l[-2] == 6
raises(IndexError, "l[len(l)]")
raises(IndexError, "l[-len(l)-1]")
l = ['a', 'b', 'c']
assert l[0] == 'a'
assert l[-1] == 'c'
assert l[-2] == 'b'
raises(IndexError, "l[len(l)]")
l = [1.1, 2.2, 3.3]
assert l[0] == 1.1
assert l[-1] == 3.3
assert l[-2] == 2.2
raises(IndexError, "l[len(l)]")
l = []
raises(IndexError, "l[1]")
def test_getitem_range(self):
l = range(5)
raises(IndexError, "l[-6]")
raises(IndexError, "l[5]")
assert l[0] == 0
assert l[-1] == 4
assert l[-2] == 3
assert l[-5] == 0
l = range(1, 5)
raises(IndexError, "l[-5]")
raises(IndexError, "l[4]")
assert l[0] == 1
assert l[-1] == 4
assert l[-2] == 3
assert l[-4] == 1
def test_setitem(self):
l = []
raises(IndexError, "l[1] = 2")
l = [5,3]
l[0] = 2
assert l == [2,3]
l = [5,3]
l[0] = "2"
assert l == ["2",3]
l = list(range(3))
l[0] = 1
assert l == [1,1,2]
def test_delitem(self):
l = [1, 2, 3, 4, 5, 6, 9]
del l[0]
assert l == [2, 3, 4, 5, 6, 9]
del l[-1]
assert l == [2, 3, 4, 5, 6]
del l[-2]
assert l == [2, 3, 4, 6]
raises(IndexError, "del l[len(l)]")
raises(IndexError, "del l[-len(l)-1]")
l = l0 = ['a', 'b', 'c']
del l[0]
assert l == ['b', 'c']
del l[-1]
assert l == ['b']
del l[-1]
assert l == []
assert l is l0
raises(IndexError, "del l[0]")
l = l0 = [1.1, 2.2, 3.3]
del l[0]
assert l == [2.2, 3.3]
del l[-1]
assert l == [2.2]
del l[-1]
assert l == []
assert l is l0
raises(IndexError, "del l[0]")
l = list(range(10))
del l[5]
assert l == [0, 1, 2, 3, 4, 6, 7, 8, 9]
def test_getitem_slice(self):
l = list(range(10))
assert l[::] == l
del l[::2]
assert l == [1,3,5,7,9]
l[-2::-1] = l[:-1]
assert l == [7,5,3,1,9]
del l[-1:2:-1]
assert l == [7,5,3]
del l[:2]
assert l == [3]
assert l[1:] == []
assert l[1::2] == []
assert l[::] == l
assert l[0::-2] == l
assert l[-1::-5] == l
l = ['']
assert l[1:] == []
assert l[1::2] == []
assert l[::] == l
assert l[0::-5] == l
assert l[-1::-5] == l
l.extend(['a', 'b'])
assert l[::-1] == ['b', 'a', '']
l = [1,2,3,4,5]
assert l[1:0:None] == []
assert l[1:0] == []
def test_getslice_invalid(self):
x = [1,2,3,4]
assert x[10:0] == []
assert x[10:0:None] == []
x = list(range(1,5))
assert x[10:0] == []
assert x[10:0:None] == []
assert x[0:22] == [1,2,3,4]
assert x[-1:10] == [4]
assert x[0:22:None] == [1,2,3,4]
assert x[-1:10:None] == [4]
def test_getslice_range_backwards(self):
x = list(range(1,10))
assert x[22:-10] == []
assert x[22:-10:-1] == [9,8,7,6,5,4,3,2,1]
assert x[10:3:-1] == [9,8,7,6,5]
assert x[10:3:-2] == [9,7,5]
assert x[1:5:-1] == []
def test_delall(self):
l = l0 = [1,2,3]
del l[:]
assert l is l0
assert l == []
l = ['a', 'b']
del l[:]
assert l == []
l = [1.1, 2.2]
del l[:]
assert l == []
def test_clear(self):
l = l0 = [1,2,3]
l.clear()
assert l is l0
assert l == []
l = ['a', 'b']
l.clear()
assert l == []
l = [1.1, 2.2]
l.clear()
assert l == []
l = []
l.clear()
assert l == []
def test_iadd(self):
l = l0 = [1,2,3]
l += [4,5]
assert l is l0
assert l == [1,2,3,4,5]
l = l0 = [1.1,2.2,3.3]
l += [4.4,5.5]
assert l is l0
assert l == [1.1,2.2,3.3,4.4,5.5]
l = l0 = ['a', 'b', 'c']
l1 = l[:]
l += ['d']
assert l is l0
assert l == ['a', 'b', 'c', 'd']
l1 += [0]
assert l1 == ['a', 'b', 'c', 0]
r1 = r2 = list(range(5))
assert r1 is r2
r1 += [15]
assert r1 is r2
assert r1 == [0, 1, 2, 3, 4, 15]
assert r2 == [0, 1, 2, 3, 4, 15]
def test_iadd_iterable(self):
l = l0 = [1,2,3]
l += iter([4,5])
assert l is l0
assert l == [1,2,3,4,5]
def test_iadd_subclass(self):
class Bar(object):
def __radd__(self, other):
return ('radd', self, other)
bar = Bar()
l1 = [1,2,3]
l1 += bar
assert l1 == ('radd', bar, [1,2,3])
def test_add_lists(self):
l1 = [1,2,3]
l2 = [4,5,6]
l3 = l1 + l2
assert l3 == [1,2,3,4,5,6]
def test_imul(self):
l = l0 = [4,3]
l *= 2
assert l is l0
assert l == [4,3,4,3]
l *= 0
assert l is l0
assert l == []
l = l0 = [4,3]
l *= (-1)
assert l is l0
assert l == []
l = l0 = ['a', 'b']
l *= 2
assert l is l0
assert l == ['a', 'b', 'a', 'b']
l *= 0
assert l is l0
assert l == []
l = ['a']
l *= -5
assert l == []
l = l0 = [1.1, 2.2]
l *= 2
assert l is l0
assert l == [1.1, 2.2, 1.1, 2.2]
l = list(range(2))
l *= 2
assert l == [0, 1, 0, 1]
r1 = r2 = list(range(3))
assert r1 is r2
r1 *= 2
assert r1 is r2
assert r1 == [0, 1, 2, 0, 1, 2]
assert r2 == [0, 1, 2, 0, 1, 2]
def test_mul_errors(self):
try:
[1, 2, 3] * (3,)
except TypeError:
pass
def test_mul___index__(self):
class MyInt(object):
def __init__(self, x):
self.x = x
def __int__(self):
return self.x
class MyIndex(object):
def __init__(self, x):
self.x = x
def __index__(self):
return self.x
assert [0] * MyIndex(3) == [0, 0, 0]
raises(TypeError, "[0]*MyInt(3)")
raises(TypeError, "[0]*MyIndex(MyInt(3))")
def test_index(self):
c = list(range(10))
assert c.index(0) == 0
raises(ValueError, c.index, 10)
c = list('hello world')
assert c.index('l') == 2
raises(ValueError, c.index, '!')
assert c.index('l', 3) == 3
assert c.index('l', 4) == 9
raises(ValueError, c.index, 'l', 10)
assert c.index('l', -5) == 9
assert c.index('l', -25) == 2
assert c.index('o', 1, 5) == 4
raises(ValueError, c.index, 'o', 1, 4)
assert c.index('o', 1, 5-11) == 4
raises(ValueError, c.index, 'o', 1, 4-11)
raises(TypeError, c.index, 'c', 0, 4.3)
raises(TypeError, c.index, 'c', 1.0, 5.6)
c = [0, 2, 4]
assert c.index(0) == 0
raises(ValueError, c.index, 3)
c = [0.0, 2.2, 4.4]
assert c.index(0) == 0.0
e = raises(ValueError, c.index, 3)
import sys
if sys.version_info[:2] == (2, 7): # CPython 2.7, PyPy
assert str(e.value) == '3 is not in list'
def test_index_cpython_bug(self):
if self.on_cpython:
skip("cpython has a bug here")
c = list('hello world')
assert c.index('l', None, None) == 2
assert c.index('l', 3, None) == 3
assert c.index('l', None, 4) == 2
def test_ass_slice(self):
l = list(range(6))
l[1:3] = 'abc'
assert l == [0, 'a', 'b', 'c', 3, 4, 5]
l = []
l[:-3] = []
assert l == []
l = list(range(6))
l[:] = []
assert l == []
l = l0 = ['a', 'b']
l[1:1] = ['ae']
assert l == ['a', 'ae', 'b']
l[1:100] = ['B']
assert l == ['a', 'B']
l[:] = []
assert l == []
assert l is l0
l = []
l2 = range(3)
l.__setitem__(slice(0,3),l2)
assert l == [0,1,2]
def test_assign_extended_slice(self):
l = l0 = ['a', 'b', 'c']
l[::-1] = ['a', 'b', 'c']
assert l == ['c', 'b', 'a']
l[::-2] = [0, 1]
assert l == [1, 'b', 0]
l[-1:5:2] = [2]
assert l == [1, 'b', 2]
l[:-1:2] = [0]
assert l == [0, 'b', 2]
assert l is l0
l = [1,2,3]
raises(ValueError, "l[0:2:2] = [1,2,3,4]")
raises(ValueError, "l[::2] = []")
l = list(range(6))
l[::3] = ('a', 'b')
assert l == ['a', 1, 2, 'b', 4, 5]
l = [0.0, 1.1, 2.2, 3.3, 4.4, 5.5]
l[::3] = ('a', 'b')
assert l == ['a', 1.1, 2.2, 'b', 4.4, 5.5]
l_int = [5]; l_int.pop() # IntListStrategy
l_empty = [] # EmptyListStrategy
raises(ValueError, "l_int[::-1] = [42]")
raises(ValueError, "l_int[::7] = [42]")
raises(ValueError, "l_empty[::-1] = [42]")
raises(ValueError, "l_empty[::7] = [42]")
l_int[::1] = [42]; assert l_int == [42]
l_empty[::1] = [42]; assert l_empty == [42]
def test_setslice_with_self(self):
l = [1,2,3,4]
l[:] = l
assert l == [1,2,3,4]
l = [1,2,3,4]
l[0:2] = l
assert l == [1,2,3,4,3,4]
l = [1,2,3,4]
l[0:2] = l
assert l == [1,2,3,4,3,4]
l = [1,2,3,4,5,6,7,8,9,10]
raises(ValueError, "l[5::-1] = l")
l = [1,2,3,4,5,6,7,8,9,10]
raises(ValueError, "l[::2] = l")
l = [1,2,3,4,5,6,7,8,9,10]
l[5:] = l
assert l == [1,2,3,4,5,1,2,3,4,5,6,7,8,9,10]
l = [1,2,3,4,5,6]
l[::-1] = l
assert l == [6,5,4,3,2,1]
def test_setitem_slice_performance(self):
# because of a complexity bug, this used to take forever on a
# translated pypy. On CPython2.6 -A, it takes around 5 seconds.
if self.on_arm:
skip("consumes too much memory for most ARM machines")
if self.runappdirect:
count = 16*1024*1024
else:
count = 1024
b = [None] * count
for i in range(count):
b[i:i+1] = ['y']
assert b == ['y'] * count
def test_recursive_repr(self):
l = []
assert repr(l) == '[]'
l.append(l)
assert repr(l) == '[[...]]'
def test_copy(self):
# test that empty list copies the empty list
l = []
c = l.copy()
assert c == []
# test that the items of a list are the same
l = list(range(3))
c = l.copy()
assert l == c
# test that it's indeed a copy and not a reference
l = ['a', 'b']
c = l.copy()
c.append('i')
assert l == ['a', 'b']
assert c == l + ['i']
# test that it's a shallow, not a deep copy
l = [1, 2, [3, 4], 5]
c = l.copy()
assert l == c
assert c[3] == l[3]
raises(TypeError, l.copy, None)
def test_append(self):
l = []
l.append('X')
assert l == ['X']
l.append('Y')
l.append('Z')
assert l == ['X', 'Y', 'Z']
l = []
l.append(0)
assert l == [0]
for x in range(1, 5):
l.append(x)
assert l == list(range(5))
l = [1,2,3]
l.append("a")
assert l == [1,2,3,"a"]
l = [1.1, 2.2, 3.3]
l.append(4.4)
assert l == [1.1, 2.2, 3.3, 4.4]
l = list(range(4))
l.append(4)
assert l == list(range(5))
l = list(range(5))
l.append(26)
assert l == [0,1,2,3,4,26]
l = list(range(5))
l.append("a")
assert l == [0,1,2,3,4,"a"]
l = list(range(5))
l.append(5)
assert l == [0,1,2,3,4,5]
def test_count(self):
c = list('hello')
assert c.count('l') == 2
assert c.count('h') == 1
assert c.count('w') == 0
def test_insert(self):
c = list('hello world')
c.insert(0, 'X')
assert c[:4] == ['X', 'h', 'e', 'l']
c.insert(2, 'Y')
c.insert(-2, 'Z')
assert ''.join(c) == 'XhYello worZld'
ls = [1, 2, 3, 4, 5, 6, 7]
for i in range(5):
ls.insert(0, i)
assert len(ls) == 12
l = []
l.insert(4,2)
assert l == [2]
l = [1,2,3]
l.insert(0,"a")
assert l == ["a", 1, 2, 3]
l = list(range(3))
l.insert(1,5)
assert l == [0,5,1,2]
def test_pop(self):
c = list('hello world')
s = ''
for i in range(11):
s += c.pop()
assert s == 'dlrow olleh'
raises(IndexError, c.pop)
assert len(c) == 0
l = list(range(10))
l.pop()
assert l == list(range(9))
assert l.pop(0) == 0
l = [1.1, 2.2, 3.3]
l.pop()
assert l == [1.1, 2.2]
l = []
raises(IndexError, l.pop, 0)
def test_pop_custom_int(self):
class A(object):
def __init__(self, x):
self.x = x
def __int__(self):
return self.x
l = list(range(10))
x = l.pop(A(-1))
assert x == 9
assert l == list(range(9))
raises(TypeError, list(range(10)).pop, 1.0)
def test_pop_negative(self):
l1 = [1,2,3,4]
l2 = ["1", "2", "3", "4"]
l3 = list(range(5))
l4 = [1, 2, 3, "4"]
l5 = [1.1, 2.2, 3.3, 4.4]
raises(IndexError, l1.pop, -5)
raises(IndexError, l2.pop, -5)
raises(IndexError, l3.pop, -6)
raises(IndexError, l4.pop, -5)
raises(IndexError, l5.pop, -5)
assert l1.pop(-2) == 3
assert l2.pop(-2) == "3"
assert l3.pop(-2) == 3
assert l4.pop(-2) == 3
assert l5.pop(-2) == 3.3
def test_remove(self):
c = list('hello world')
c.remove('l')
assert ''.join(c) == 'helo world'
c.remove('l')
assert ''.join(c) == 'heo world'
c.remove('l')
assert ''.join(c) == 'heo word'
raises(ValueError, c.remove, 'l')
assert ''.join(c) == 'heo word'
l = list(range(5))
l.remove(2)
assert l == [0, 1, 3, 4]
l = [0, 3, 5]
raises(ValueError, c.remove, 2)
l = [0.0, 1.1, 2.2, 3.3, 4.4]
l.remove(2.2)
assert l == [0.0, 1.1, 3.3, 4.4]
l = [0.0, 3.3, 5.5]
raises(ValueError, c.remove, 2)
e = raises(ValueError, c.remove, 2.2)
if not self.on_cpython:
assert str(e.value) == 'list.remove(): 2.2 is not in list'
def test_reverse(self):
c = list('hello world')
c.reverse()
assert ''.join(c) == 'dlrow olleh'
l = list(range(3))
l.reverse()
assert l == [2,1,0]
r = list(range(3))
r[0] = 1
assert r == [1, 1, 2]
r.reverse()
assert r == [2, 1, 1]
def test_reversed(self):
assert list(list('hello').__reversed__()) == ['o', 'l', 'l', 'e', 'h']
assert list(reversed(list('hello'))) == ['o', 'l', 'l', 'e', 'h']
def test_mutate_while_remove(self):
class Mean(object):
def __init__(self, i):
self.i = i
def __eq__(self, other):
if self.i == 9:
del l[self.i - 1]
return True
else:
return False
l = [Mean(i) for i in range(10)]
# does not crash
l.remove(None)
class Mean2(object):
def __init__(self, i):
self.i = i
def __eq__(self, other):
l.append(self.i)
return False
l = [Mean2(i) for i in range(10)]
# does not crash
l.remove(5)
assert l[10:] == [0, 1, 2, 3, 4, 6, 7, 8, 9]
def test_mutate_while_contains(self):
class Mean(object):
def __init__(self, i):
self.i = i
def __eq__(self, other):
if self.i == 9 == other:
del l[0]
return True
else:
return False
l = [Mean(i) for i in range(10)]
assert l.__contains__(9)
assert not l.__contains__(2)
def test_mutate_while_extend(self):
# this used to segfault pypy-c (with py.test -A)
import sys
if hasattr(sys, 'pypy_translation_info'):
if sys.pypy_translation_info['translation.gc'] == 'boehm':
skip("not reliable on top of Boehm")
class A(object):
def __del__(self):
print('del')
del lst[:]
for i in range(10):
keepalive = []
lst = list(str(i)) * 100
A()
while lst:
keepalive.append(lst[:])
def test_unicode(self):
s = "\ufffd\ufffd\ufffd"
assert s.encode("ascii", "replace") == b"???"
assert s.encode("ascii", "ignore") == b""
l1 = [s.encode("ascii", "replace")]
assert l1[0] == b"???"
l2 = [s.encode("ascii", "ignore")]
assert l2[0] == b""
l3 = [s]
assert l3[0].encode("ascii", "replace") == b"???"
def test_list_from_set(self):
l = ['a']
l.__init__(set('b'))
assert l == ['b']
def test_list_from_generator(self):
l = ['a']
g = (i*i for i in range(5))
l.__init__(g)
assert l == [0, 1, 4, 9, 16]
l.__init__(g)
assert l == []
assert list(g) == []
def test_list_from_bytes(self):
b = list(b'abc')
assert b == [97, 98, 99]
def test_uses_custom_iterator(self):
# obscure corner case: space.listview*() must not shortcut subclasses
# of dicts, because the OrderedDict in the stdlib relies on this.
# we extend the use case to lists and sets, i.e. all types that have
        # strategies, to avoid surprises depending on the strategy.
class X: pass
for base, arg in [
(list, []), (list, [5]), (list, ['x']), (list, [X]), (list, ['x']),
(set, []), (set, [5]), (set, ['x']), (set, [X]), (set, ['x']),
(dict, []), (dict, [(5,6)]), (dict, [('x',7)]), (dict, [(X,8)]),
(dict, [('x', 7)]),
]:
print(base, arg)
class SubClass(base):
def __iter__(self):
return iter("foobar")
sub = SubClass(arg)
assert list(sub) == ['f', 'o', 'o', 'b', 'a', 'r']
l = []
l.extend(sub)
assert l == ['f', 'o', 'o', 'b', 'a', 'r']
# test another list strategy
l = ['Z']
l.extend(sub)
assert l == ['Z', 'f', 'o', 'o', 'b', 'a', 'r']
class Sub2(base):
pass
assert list(Sub2(arg)) == list(base(arg))
s = set()
s.update(Sub2(arg))
assert s == set(base(arg))
def test_comparison(self):
assert ([] < []) is False
assert ([] <= []) is True
assert ([] == []) is True
assert ([] != []) is False
assert ([] > []) is False
assert ([] >= []) is True
assert ([5] < []) is False
assert ([5] <= []) is False
assert ([5] == []) is False
assert ([5] != []) is True
assert ([5] > []) is True
assert ([5] >= []) is True
assert ([] < [5]) is True
assert ([] <= [5]) is True
assert ([] == [5]) is False
assert ([] != [5]) is True
assert ([] > [5]) is False
assert ([] >= [5]) is False
assert ([4] < [5]) is True
assert ([4] <= [5]) is True
assert ([4] == [5]) is False
assert ([4] != [5]) is True
assert ([4] > [5]) is False
assert ([4] >= [5]) is False
assert ([5] < [5]) is False
assert ([5] <= [5]) is True
assert ([5] == [5]) is True
assert ([5] != [5]) is False
assert ([5] > [5]) is False
assert ([5] >= [5]) is True
assert ([6] < [5]) is False
assert ([6] <= [5]) is False
assert ([6] == [5]) is False
assert ([6] != [5]) is True
assert ([6] > [5]) is True
assert ([6] >= [5]) is True
N = float('nan')
assert ([N] < [5]) is False
assert ([N] <= [5]) is False
assert ([N] == [5]) is False
assert ([N] != [5]) is True
assert ([N] > [5]) is False
assert ([N] >= [5]) is False
assert ([5] < [N]) is False
assert ([5] <= [N]) is False
assert ([5] == [N]) is False
assert ([5] != [N]) is True
assert ([5] > [N]) is False
assert ([5] >= [N]) is False
def test_resizelist_hint(self):
if self.on_cpython:
skip('pypy-only test')
import __pypy__
l2 = []
__pypy__.resizelist_hint(l2, 100)
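        # resizelist_hint only preallocates storage; l2 stays logically empty, so the slice assignment below empties l1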
l1 = [1, 2, 3]
l1[:] = l2
assert len(l1) == 0
def test_use_method_for_wrong_object(self):
if self.on_cpython:
skip('pypy-only test')
raises(TypeError, list.append, 1, 2)
def test_ne_NotImplemented(self):
class NonList(object):
pass
non_list = NonList()
assert [] != non_list
def test_extend_from_empty_list_with_subclasses(self):
# some of these tests used to fail by ignoring the
# custom __iter__() --- but only if the list has so
# far the empty strategy, as opposed to .extend()ing
# a non-empty list.
class T(tuple):
def __iter__(self):
yield "ok"
assert list(T([5, 6])) == ["ok"]
#
class L(list):
def __iter__(self):
yield "ok"
assert list(L([5, 6])) == ["ok"]
assert list(L([5.2, 6.3])) == ["ok"]
#
class S(bytes):
def __iter__(self):
yield "ok"
assert list(S(b"don't see me")) == ["ok"]
#
class U(str):
def __iter__(self):
yield "ok"
assert list(U("don't see me")) == ["ok"]
#
class S(bytes):
def __getitem__(self, index):
never_called
assert list(S(b"abc")) == list(b"abc") # __getitem__ ignored
#
class U(str):
def __getitem__(self, index):
never_called
assert list(U("abc")) == list("abc") # __getitem__ ignored
def test_extend_from_nonempty_list_with_subclasses(self):
l = ["hi!"]
class T(tuple):
def __iter__(self):
yield "okT"
l.extend(T([5, 6]))
#
class L(list):
def __iter__(self):
yield "okL"
l.extend(L([5, 6]))
l.extend(L([5.2, 6.3]))
#
class S(bytes):
def __iter__(self):
yield "okS"
l.extend(S(b"don't see me"))
#
class U(str):
def __iter__(self):
yield "okU"
l.extend(U("don't see me"))
#
assert l == ["hi!", "okT", "okL", "okL", "okS", "okU"]
#
class S(bytes):
def __getitem__(self, index):
never_called
l = []
l.extend(S(b"abc"))
assert l == list(b"abc") # __getitem__ ignored
#
class U(str):
def __getitem__(self, index):
never_called
l = []
l.extend(U("abc"))
assert l == list("abc") # __getitem__ ignored
def test_issue1266(self):
l = list(range(1))
l.pop()
# would previously crash
l.append(1)
assert l == [1]
l = list(range(1))
l.pop()
# would previously crash
l.reverse()
assert l == []
def test_issue1266_ovf(self):
import sys
l = list(range(0, sys.maxsize, sys.maxsize))
l.append(sys.maxsize)
# -2 would be next in the range sequence if overflow were
# allowed
l.append(-2)
assert l == [0, sys.maxsize, -2]
assert -2 in l
l = list(range(-sys.maxsize, sys.maxsize, sys.maxsize // 10))
item11 = l[11]
assert l[::11] == [-sys.maxsize, item11]
assert item11 in l[::11]
def test_bug_list_of_nans(self):
N = float('nan')
L1 = [N, 'foo'] # general object strategy
assert N in L1
assert L1.index(N) == 0
assert L1 == [N, 'foo']
# our float list strategy needs to consider NaNs are equal!
L2 = [N, 0.0] # float strategy
assert N in L2
assert L2.index(N) == 0
assert L2.index(-0.0) == 1
assert L2 == [N, -0.0]
# same with the int-or-float list strategy
L3 = [N, 0.0, -0.0, 0]
assert N in L3
assert L3.index(N) == 0
for i in [1, 2, 3]:
assert L3[i] == 0
assert L3[i] == 0.0
assert L3[i] == -0.0
assert L3.index(0, i) == i
assert L3.index(0.0, i) == i
assert L3.index(-0.0, i) == i
class AppTestWithoutStrategies:
spaceconfig = {"objspace.std.withliststrategies": False}
def test_no_shared_empty_list(self):
l = []
copy = l[:]
copy.append({})
assert copy == [{}]
notshared = l[:]
assert notshared == []
class AppTestListFastSubscr:
spaceconfig = {"objspace.std.optimized_list_getitem": True}
def test_getitem(self):
import operator
l = [0, 1, 2, 3, 4]
for i in range(5):
assert l[i] == i
assert l[3:] == [3, 4]
raises(TypeError, operator.getitem, l, "str")
|
py | 1a33a1038148a82e883b202f9ff7317543efc038 | import torch
from torch.autograd import Variable
import time
import sys
from utils import *
def val_epoch(epoch, data_loader, model, criterion, opt, logger):
print('validation at epoch {}'.format(epoch))
model.eval()
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
end_time = time.time()
for i, (inputs, targets) in enumerate(data_loader):
data_time.update(time.time() - end_time)
if not opt.no_cuda:
targets = targets.cuda()
with torch.no_grad():
inputs = Variable(inputs)
targets = Variable(targets)
outputs = model(inputs)
loss = criterion(outputs, targets)
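            # note: topk=(1,1) makes prec5 just repeat prec1 (assuming calculate_accuracy honors topk); topk=(1,5) would give a real top-5 metric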
prec1, prec5 = calculate_accuracy(outputs.data, targets.data, topk=(1,1))
top1.update(prec1, inputs.size(0))
top5.update(prec5, inputs.size(0))
losses.update(loss.data, inputs.size(0))
batch_time.update(time.time() - end_time)
end_time = time.time()
print('Epoch: [{0}][{1}/{2}]\t'
'Time {batch_time.val:.5f} ({batch_time.avg:.5f})\t'
'Data {data_time.val:.5f} ({data_time.avg:.5f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Prec@1 {top1.val:.5f} ({top1.avg:.5f})\t'
'Prec@5 {top5.val:.5f} ({top5.avg:.5f})'.format(
epoch,
i + 1,
len(data_loader),
batch_time=batch_time,
data_time=data_time,
loss=losses,
top1=top1,
top5=top5))
logger.log({'epoch': epoch,
'loss': losses.avg.item(),
'prec1': top1.avg.item(),
'prec5': top5.avg.item()})
return losses.avg.item(), top1.avg.item() |
py | 1a33a1ec16c552acf20a54f426473265a1407aa2 | import torch
import random
from collections import OrderedDict
import csv
import nltk
#This is the class which encodes the training set json into the following structure
#todo: document the structure
class imsitu_encoder():
def __init__(self, train_set, verb_questions, nouns, roleq):
# json structure -> {<img_id>:{frames:[{<role1>:<label1>, ...},{}...], verb:<verb1>}}
print('imsitu encoder initialization started.')
self.verb_list = []
self.role_list = []
self.max_label_count = 3
self.verb2_role_dict = {}
label_frequency = {}
self.max_role_count = 0
self.question_words = {}
self.max_q_word_count = 0
self.verb_questions = {}
self.vrole_question = {}
self.noun_list = []
for img_id, questions in verb_questions.items():
self.verb_questions[img_id]=[]
for question in questions:
self.verb_questions[img_id].append(question)
words_all = nltk.word_tokenize(question)
words_org = words_all[:-1] #ignore ? mark
if question != "what is the action happening?":
if '#UNK#' in question:
words = words_org[:3]
words.append(''.join(words_org[3:-1]))
words.append(words_org[-1])
else:
words = words_org[:3]
words.append(' '.join(words_org[3:-1]))
words.append(words_org[-1])
else:
words = words_org
if len(words) > self.max_q_word_count:
self.max_q_word_count = len(words)
#print('q words :', words)
for word in words:
if word not in self.question_words:
self.question_words[word] = len(self.question_words)
for verb, values in roleq.items():
roles = values['roles']
for role, info in roles.items():
question = info['question']
self.vrole_question[verb+'_'+role] = question
words = nltk.word_tokenize(question)
words = words[:-1] #ignore ? mark
if len(words) > self.max_q_word_count:
self.max_q_word_count = len(words)
#print('q words :', words)
for word in words:
if word not in self.question_words:
self.question_words[word] = len(self.question_words)
for img_id in train_set:
img = train_set[img_id]
current_verb = img['verb']
if current_verb not in self.verb_list:
self.verb_list.append(current_verb)
self.verb2_role_dict[current_verb] = []
for frame in img['frames']:
for role,label in frame.items():
if role not in self.role_list:
self.role_list.append(role)
if role not in self.verb2_role_dict[current_verb]:
self.verb2_role_dict[current_verb].append(role)
if len(self.verb2_role_dict[current_verb]) > self.max_role_count:
self.max_role_count = len(self.verb2_role_dict[current_verb])
if label not in self.noun_list:
self.noun_list.append(label)
self.agent_roles = ['agent', 'individuals','brancher', 'agenttype', 'gatherers', 'agents', 'teacher', 'traveler', 'mourner',
'seller', 'boaters', 'blocker', 'farmer']
label_frequency = {}
#self.verb_wise_items = {}
self.label_list = ['n10287213', '#UNK#', 'n10787470', 'n10714465', 'n10129825', 'n07942152', '', 'n05268112', 'n00015388', 'n02121620', 'n08249207', 'n09827683', 'n13104059', 'n09820263', 'n00007846', 'n04465501', 'n09918248', 'n09917593', 'n10285313', 'n03663781', 'n04100174', 'n02403454', 'n02402425', 'n02212062', 'n02206856', 'n05564590', 'n10514429', 'n09835506', 'n01503061', 'n02129165', 'n02084071', 'n10622053', 'n02416519', 'n02419796', 'n08208560', 'n10683126', 'n10448983', 'n02456962', 'n02958343', 'n04490091', 'n02374451', 'n10223177', 'n11669921', 'n10155849', 'n02691156', 'n10399491', 'n07891726', 'n10710632', 'n10665698', 'n02412080', 'n02411705', 'n09772029', 'n09986189', 'n04289027', 'n10694258', 'n10683349', 'n03593526', 'n14940386', 'n10679174', 'n09376198', 'n12158443', 'n03384352', 'n03699975', 'n07302836', 'n02159955', 'n10608188', 'n01639765', 'n02118333', 'n07989373', 'n08212347', 'n14845743', 'n09605289', 'n10689564', 'n03701640', 'n02421449', 'n03724870', 'n07271648', 'n09903153', 'n01698434', 'n02437136', 'n03833564', 'n10529965', 'n14857497', 'n10679054', 'n09963574', 'n02355227', 'n09411430', 'n09448361', 'n10530150', 'n03354903', 'n04306080', 'n02403325', 'n01726692', 'n09989502', 'n10366966', 'n02068974', 'n02924116', 'n09880741', 'n03256166', 'n03169390', 'n03636248', 'n09470550', 'n11508092', 'n02062744', 'n07988857', 'n02913152', 'n02389559', 'n10020890', 'n10225219', 'n10618342', 'n10439851', 'n04524313', 'n10334009', 'n09825750', 'n07734017', 'n07707451', 'n13149506', 'n10433164', 'n02437616', 'n10633450', 'n09247410', 'n10019406', 'n02512053', 'n03126707', 'n10161363', 'n02131653', 'n10247358', 'n02391994', 'n10476086', 'n09786338', 'n02876657', 'n07944050', 'n02130308', 'n10077593', 'n09879144', 'n02834778', 'n03944672', 'n03969041', 'n03512147', 'n02157557', 'n02812201', 'n02858304', 'n04194289', 'n09884391', 'n08078020', 'n10134001', 'n08221897', 'n02418465', 'n03251533', 'n10749715', 'n10378412', 'n09282724', 'n01887474', 'n02410702', 'n02508021', 'n10091651', 'n09843956', 'n10319796', 'n03388043', 'n07970406', 'n02076196', 'n11439690', 'n04347754', 'n02117135', 'n14625458', 'n14642417', 'n10034906', 'n10582746', 'n10470779', 'n07452074', 'n03623556', 'n01882714', 'n10426749', 'n10735984', 'n09765278', 'n14974264', 'n08248157', 'n04389033', 'n07739125', 'n10101634', 'n01915811', 'n13152742', 'n04306847', 'n10318892', 'n04401088', 'n10019552', 'n02236355', 'n04376876', 'n03816136', 'n02484322', 'n00017222', 'n07720442', 'n09992837', 'n14881303', 'n14841267', 'n10599806', 'n10004282', 'n02430045', 'n02274259', 'n03539875', 'n13112664', 'n10565667', 'n10464178', 'n03689157', 'n02782093', 'n11454591', 'n09436708', 'n10018021', 'n10542761', 'n10542888', 'n10104209', 'n03665924', 'n13085864', 'n03438257', 'n07886849', 'n07893642', 'n01500091', 'n03594945', 'n13134947', 'n10398176', 'n03976657', 'n02324045', 'n10415638', 'n04468005', 'n10367819', 'n05217168', 'n09984659', 'n15148467', 'n01674464', 'n10749528', 'n10770059', 'n03496892', 'n05399847', 'n11519450', 'n08238463', 'n09861946', 'n06839190', 'n01662784', 'n10502576', 'n08249038', 'n10804406', 'n03063338', 'n10340312', 'n13129165', 'n02190166', 'n10252547', 'n06613686', 'n14814616', 'n02790669', 'n14685296', 'n10546633', 'n10153594', 'n04253437', 'n10317007', 'n02444819', 'n02909870', 'n08494231', 'n09939313', 'n15228787', 'n02390101', 'n02916179', 'n04576002', 'n10661002', 'n10405694', 'n03888257', 'n07742704', 'n07758680', 'n04038727', 'n10521662', 'n10746931', 'n02114100', 'n09229409', 'n15041050', 
'n09983572', 'n11501381', 'n07720875', 'n07649854', 'n05282433', 'n05302499', 'n03938244', 'n04451818', 'n11525955', 'n10480730', 'n13388245', 'n06780678', 'n04105068', 'n00021265', 'n09366762', 'n04208936', 'n07058468', 'n04463983', 'n04048568', 'n03325088', 'n09629752', 'n04183516', 'n09899289', 'n10698970', 'n02408429', 'n02769290', 'n07923748', 'n13740168', 'n10602985', 'n10368009', 'n09913455', 'n02342885', 'n02329401', 'n10298271', 'n03961939', 'n03241093', 'n03544360', 'n03127925', 'n03094503', 'n09397607', 'n09225146', 'n10773665', 'n09913593', 'n10560637', 'n09930876', 'n09931165', 'n10242682', 'n10079399', 'n10667477', 'n04108268', 'n02423362', 'n10380305', 'n03765561', 'n10510818', 'n02942699', 'n10519494', 'n03982060', 'n03543603', 'n10238375', 'n09821831', 'n12937678', 'n02125311', 'n12143676', 'n02404186', 'n10334957', 'n10641413', 'n10305802', 'n09617292', 'n03647520', 'n04096066', 'n10707804', 'n06286395', 'n04099429', 'n03160309', 'n09334396', 'n09673495', 'n15098161', 'n03862676', 'n03309808', 'n09636339', 'n07929351', 'n14930989', 'n12158798', 'n10389398', 'n02128925', 'n04304215', 'n10176111', 'n03484083', 'n02219486', 'n10048218', 'n02361587', 'n03525074', 'n09627263', 'n03990474', 'n11449907', 'n10112129', 'n02503517', 'n05563266', 'n10179291', 'n04456115', 'n02778669', 'n03814906', 'n01792042', 'n10639925', 'n14956325', 'n03346455', 'n03956922', 'n08184600', 'n04065272', 'n03147509', 'n03364340', 'n08079319', 'n10120671', 'n14877585', 'n13085113', 'n04467307', 'n07679356', 'n04284002', 'n10532058', 'n09892831', 'n01861778', 'n07710616', 'n07702796', 'n07802417', 'n01758757', 'n05238282', 'n02395406', 'n09359803', 'n09838895', 'n10391653', 'n02423022', 'n13163803', 'n13913849', 'n13163250', 'n04295881', 'n09919200', 'n03141702', 'n02761392', 'n03876519', 'n04726724', 'n03964744', 'n14942762', 'n08063650', 'n11464143', 'n12144580', 'n02480855', 'n02510455', 'n04411264', 'n04571292', 'n02948072', 'n03640988', 'n03106110', 'n05600637', 'n10749353', 'n04179913', 'n09833651', 'n02881193', 'n02127808', 'n04546855', 'n05538625', 'n07881800', 'n10427764', 'n08428485', 'n10577284', 'n03775199', 'n07609840', 'n10309896', 'n10534586', 'n03294048', 'n10151760', 'n03996416', 'n10376523', 'n03247083', 'n03837422', 'n02330245', 'n03665366', 'n04334599', 'n03239726', 'n00467995', 'n00523513', 'n11473954', 'n07943870', 'n09615807', 'n03769722', 'n10487182', 'n07844042', 'n15100644', 'n08188638', 'n04555897', 'n01888264', 'n13763626', 'n04141975', 'n13125117', 'n01604330', 'n01610955', 'n02933842', 'n09475292', 'n10368920', 'n00883297', 'n10722385', 'n03256788', 'n04594218', 'n04264914', 'n02898711', 'n04373894', 'n04507155', 'n08160276', 'n03348454', 'n10053808', 'n02127482', 'n03790512', 'n00377364', 'n03880531', 'n09805324', 'n03545470', 'n02363005', 'n10196490', 'n10150071', 'n07933274', 'n09273130', 'n07885223', 'n07773238', 'n03733805', 'n12905817', 'n05216365', 'n04210120', 'n04045397', 'n03482252', 'n04127904', 'n05254795', 'n04215402', 'n07003119', 'n07901587', 'n02866578', 'n02127052', 'n02792552', 'n04341686', 'n00470966', 'n07713895', 'n11986306', 'n09587565', 'n04038440', 'n15043763', 'n07583197', 'n14857897', 'n06239361', 'n02964389', 'n02970849', 'n01322685', 'n07266178', 'n10638922', 'n12433081', 'n00937656', 'n09328904', 'n09229709', 'n04223580', 'n03141327', 'n09426788', 'n04379243', 'n10305635', 'n08266235', 'n10223459', 'n09443453', 'n07927512', 'n12102133', 'n04399382', 'n05218119', 'n07858978', 'n03345487', 'n15101361', 'n14966667', 'n02728440', 'n03336459', 
'n00002684', 'n08079852', 'n13001041', 'n09290777', 'n14975351', 'n03124590', 'n08588294', 'n02951843', 'n10914447', 'n14802450', 'n15019030', 'n04161358', 'n03740161', 'n02773037', 'n03277771', 'n03459591', 'n01888045', 'n10759047', 'n07747607', 'n10150940', 'n09450163', 'n08616311', 'n13384557', 'n10639359', 'n08322981', 'n12900462', 'n04526241', 'n01956481', 'n09376526', 'n03459914', 'n09834699', 'n08632096', 'n02747177', 'n04469514', 'n04251791', 'n03383948', 'n01899062', 'n07732636', 'n03378765', 'n00468480', 'n04199027', 'n02946921', 'n03764995', 'n04574999', 'n10471250', 'n04157320', 'n07753592', 'n07884567', 'n07764847', 'n03899328', 'n07620822', 'n08276720', 'n14844693', 'n07802026', 'n04191595', 'n09645091', 'n14915184', 'n07640203', 'n03075634', 'n03906997', 'n07270179', 'n03445924', 'n08613733', 'n03789946', 'n07303839', 'n01976957', 'n10123844', 'n02405302', 'n05261566', 'n09218315', 'n03717921', 'n05311054', 'n01922303', 'n05579944', 'n14818101', 'n07751004', 'n10299250', 'n09901143', 'n04317420', 'n09397391', 'n07697100', 'n03221720', 'n02743547', 'n04337974', 'n04493505', 'n02799175', 'n04578934', 'n15010703', 'n07859284', 'n03642806', 'n09303008', 'n04021798', 'n02797692', 'n13385216', 'n08524735', 'n04466613', 'n15055181', 'n03819994', 'n03249569', 'n03728437', 'n03322099', 'n09416076', 'n03950228', 'n06998748', 'n07711080', 'n03247620', 'n05305806', 'n07144834', 'n07705711', 'n03287733', 'n06410904', 'n02914991', 'n09270894', 'n13901321', 'n07614500', 'n07838073', 'n13100677', 'n04272054', 'n03649909', 'n03001627', 'n02795169', 'n13901211', 'n05578442', 'n10213319', 'n07405817', 'n06793231', 'n14956661', 'n02860415', 'n07805966', 'n02742753', 'n03664675', 'n03533972', 'n03100897', 'n04154565', 'n05834758', 'n13875185', 'n05690916', 'n10560106', 'n01794158', 'n03387815', 'n07860988', 'n04202417', 'n04190052', 'n08615149', 'n09347779', 'n08376250', 'n02999410', 'n03472112', 'n04460130', 'n03343560', 'n09215664', 'n08222293', 'n09308398', 'n03255648', 'n03800563', 'n03933529', 'n02959942', 'n05598147', 'n02916350', 'n03958752', 'n07210225', 'n14939900', 'n07569106', 'n14997012', 'n04143897', 'n09303528', 'n10695050', 'n08647616', 'n04415921', 'n04238128', 'n04217882', 'n03484931', 'n00440039', 'n04332243', 'n06624161', 'n06275634', 'n00478262', 'n02151625', 'n09460312', 'n07961480', 'n03648066', 'n00251013', 'n03316406', 'n03082979', 'n13900422', 'n03365592', 'n03219135', 'n04522168', 'n07303585', 'n03481172', 'n02852523', 'n04051549', 'n04333129', 'n14920844', 'n03768346', 'n03167464', 'n07303335', 'n10565048', 'n13144794', 'n03030663', 'n04188179', 'n07647731', 'n04131690', 'n08437515', 'n04459362', 'n03807537', 'n07601999', 'n03467984', 'n03881893', 'n04589745', 'n04081281', 'n03786901', 'n03404449', 'n03178782', 'n02934168', 'n04296562', 'n02883344', 'n02808440', 'n03875218', 'n03387653', 'n03659809', 'n03281145', 'n02156140', 'n13865904', 'n13111504', 'n13136556', 'n03996145', 'n03532672', 'n08436759', 'n02850732', 'n03359137', 'n07794159', 'n03495039', 'n07436475', 'n02973558', 'n02840245', 'n02754103', 'n06413889', 'n06508816', 'n08307589', 'n04544979', 'n04172342', 'n09405396', 'n04227144', 'n08569998', 'n04152829', 'n03908204', 'n03360300', 'n03461119', 'n13265011', 'n04489008', 'n04488857', 'n09304465', 'n12142085', 'n04197391', 'n03661340', 'n03305522', 'n14703797', 'n07597365', 'n04270147', 'n09227839', 'n03430959', 'n02822865', 'n07675627', 'n05560787', 'n14806598', 'n01460457', 'n02859084', 'n04594489', 'n03610524', 'n08570758', 'n07628870', 'n00023271', 
'n04197235', 'n03603722', 'n03346898', 'n03241335', 'n02908217', 'n03682487', 'n13865298', 'n02153445', 'n04179126', 'n04296261', 'n04388743', 'n00173761', 'n04208210', 'n02815834', 'n02968473', 'n14759722', 'n02954938', 'n07792725', 'n03427296', 'n07673397', 'n09369169', 'n03815615', 'n04317833', 'n02887970', 'n03291819', 'n03714235', 'n03551790', 'n04493381', 'n07929519', 'n12648045', 'n07738353', 'n04037625', 'n08358332', 'n03584829', 'n03183080', 'n02818832', 'n04560113', 'n07829412', 'n04398044', 'n14985383', 'n08227214', 'n04330267', 'n02810471', 'n03895866', 'n08600443', 'n03206908', 'n14686913', 'n03676483', 'n03619890', 'n03589791', 'n04606574', 'n04151940', 'n02930766', 'n04140064', 'n08646902', 'n09604981', 'n04417809', 'n12205694', 'n02990373', 'n03596787', 'n15093938', 'n02687172', 'n07635155', 'n02780916', 'n03064758', 'n08401248', 'n13774404', 'n07804323', 'n07678729', 'n03959936', 'n02809364', 'n03416489', 'n04554684', 'n14592610', 'n14580897', 'n03320046', 'n04027023', 'n03038685', 'n03841666', 'n04519153', 'n03805725', 'n12141385', 'n04287153', 'n03259505', 'n03059528', 'n03345837', 'n07848338', 'n03354613', 'n07695965', 'n03931044', 'n03454707', 'n00136329', 'n00582071', 'n03547054', 'n09773245', 'n03570526', 'n04297476', 'n03405725', 'n03532342', 'n02732072', 'n02773838', 'n04122825', 'n03919289', 'n04105893', 'n03483316', 'n12901264', 'n02788689', 'n07873807']
self.label_2_qword = self.get_q_words_for_labels(self.label_list, self.question_words, nouns)
self.common_q_idx = self.get_commonq_idx(self.question_words)
self.verb2role_encoding = self.get_verb2role_encoding()
print('train set stats: \n\t verb count:', len(self.verb_list), '\n\t role count:',len(self.role_list),
'\n\t label count:', len(self.label_list), len(self.noun_list) ,
'\n\t max role count:', self.max_role_count,
'\n\t max q word count:', self.max_q_word_count)
def get_verb2role_encoding(self):
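        # build, per verb, a binary mask with a 1 for each real role and 0-padding up to max_role_count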
verb2role_embedding_list = []
for verb_id in range(len(self.verb_list)):
current_role_list = self.verb2_role_dict[self.verb_list[verb_id]]
role_embedding_verb = []
for role in current_role_list:
role_embedding_verb.append(1)
padding_count = self.max_role_count - len(role_embedding_verb)
for i in range(padding_count):
role_embedding_verb.append(0)
verb2role_embedding_list.append(torch.tensor(role_embedding_verb))
return verb2role_embedding_list
def encode(self, item):
verb = self.verb_list.index(item['verb'])
labels = self.get_label_ids(item['frames'])
return verb, labels
def get_q_words_for_labels(self, label_list, q_words, noun_dict):
label_r_qword = []
for label in label_list:
if label in noun_dict:
l_name = noun_dict[label]['gloss'][0]
idx = q_words[l_name]
else:
idx = q_words[label]
label_r_qword.append(idx)
return label_r_qword
def get_commonq_idx(self, q_words):
commonq = 'what is the doing'
words = nltk.word_tokenize(commonq)
idx_list = []
for w in words:
idx_list.append(q_words[w])
return torch.tensor(idx_list)
def get_verb_questions_batch(self, img_id_list):
verb_batch_list = []
for img_id in img_id_list:
rquestion_tokens = []
current_q_list = self.verb_questions[img_id]
for question in current_q_list:
q_tokens = []
words_all = nltk.word_tokenize(question)
words_org = words_all[:-1]
if question != "what is the action happening?":
if '#UNK#' in question:
words = words_org[:3]
words.append(''.join(words_org[3:-1]))
words.append(words_org[-1])
else:
words = words_org[:3]
words.append(' '.join(words_org[3:-1]))
words.append(words_org[-1])
else:
words = words_org
for word in words:
if word in self.question_words:
q_tokens.append(self.question_words[word])
else:
q_tokens.append(len(self.question_words))
padding_words = self.max_q_word_count - len(q_tokens)
for w in range(padding_words):
q_tokens.append(len(self.question_words))
rquestion_tokens.append(torch.tensor(q_tokens))
verb_batch_list.append(torch.stack(rquestion_tokens,0))
return torch.stack(verb_batch_list,0)
def get_qword_idx_for_agentq(self, agent_set):
agent_idx = []
for img in range(agent_set.size(0)):
curr_agent_set = []
for item in agent_set[img]:
idx = self.label_2_qword[item]
curr_agent_set.append(idx)
agent_idx.append(torch.tensor(curr_agent_set))
return torch.stack(agent_idx,0)
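    # note: get_verb2role_encoding below duplicates the definition near the top of the class; this later definition silently replaces the earlier one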
def get_verb2role_encoding(self):
verb2role_embedding_list = []
for verb_id in range(len(self.verb_list)):
current_role_list = self.verb2_role_dict[self.verb_list[verb_id]]
role_embedding_verb = []
for role in current_role_list:
role_embedding_verb.append(1)
padding_count = self.max_role_count - len(role_embedding_verb)
for i in range(padding_count):
role_embedding_verb.append(0)
verb2role_embedding_list.append(torch.tensor(role_embedding_verb))
return verb2role_embedding_list
def save_encoder(self):
return None
def load_encoder(self):
return None
def get_max_role_count(self):
return self.max_role_count
def get_num_verbs(self):
return len(self.verb_list)
def get_num_roles(self):
return len(self.role_list)
def get_num_labels(self):
return len(self.label_list)
def get_role_count(self, verb_id):
return len(self.verb2_role_dict[self.verb_list[verb_id]])
def get_role_ids_batch(self, verbs):
role_batch_list = []
q_len = []
for verb_id in verbs:
role_ids = self.get_role_ids(verb_id)
role_batch_list.append(role_ids)
return torch.stack(role_batch_list,0)
def get_role_questions_batch(self, verbs):
role_batch_list = []
q_len_batch = []
for verb_id in verbs:
rquestion_tokens = []
q_len = []
verb = self.verb_list[verb_id]
current_role_list = self.verb2_role_dict[verb]
for role in current_role_list:
question = self.vrole_question[verb+'_'+role]
#print('question :', question)
q_tokens = []
words = nltk.word_tokenize(question)
words = words[:-1]
for word in words:
q_tokens.append(self.question_words[word])
padding_words = self.max_q_word_count - len(q_tokens)
for w in range(padding_words):
q_tokens.append(len(self.question_words))
rquestion_tokens.append(torch.tensor(q_tokens))
q_len.append(len(words))
role_padding_count = self.max_role_count - len(current_role_list)
            # todo: how to properly build the padded question sequences for non-existent roles?
for i in range(role_padding_count):
q_tokens = []
for k in range(0,self.max_q_word_count):
q_tokens.append(len(self.question_words))
rquestion_tokens.append(torch.tensor(q_tokens))
q_len.append(0)
role_batch_list.append(torch.stack(rquestion_tokens,0))
q_len_batch.append(torch.tensor(q_len))
return torch.stack(role_batch_list,0)
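    # note: get_role_ids (and therefore get_role_ids_batch) reads self.verb2role_list, which is never initialized in this class, so calling it raises AttributeError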
def get_role_ids(self, verb_id):
return self.verb2role_list[verb_id]
def get_role_questions(self, verb):
rquestion_tokens = []
q_len = []
current_role_list = self.verb2_role_dict[verb]
for role in current_role_list:
question = self.vrole_question[verb+'_'+role]
#print('question :', question)
q_tokens = []
words = nltk.word_tokenize(question)
words = words[:-1]
for word in words:
q_tokens.append(self.question_words[word])
padding_words = self.max_q_word_count - len(q_tokens)
for w in range(padding_words):
q_tokens.append(len(self.question_words))
rquestion_tokens.append(torch.tensor(q_tokens))
q_len.append(len(words))
role_padding_count = self.max_role_count - len(current_role_list)
        # todo: how to properly build the padded question sequences for non-existent roles?
for i in range(role_padding_count):
q_tokens = []
for k in range(0,self.max_q_word_count):
q_tokens.append(len(self.question_words))
rquestion_tokens.append(torch.tensor(q_tokens))
q_len.append(0)
return torch.stack(rquestion_tokens,0), torch.tensor(q_len)
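    # note: get_verbq reads self.verb_question, which is never defined here; only self.verb_questions (keyed by image id) is built in __init__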
def get_verbq(self, id):
vquestion_tokens = []
question = self.verb_question[id]
words = nltk.word_tokenize(question)
words = words[:-1]
for word in words:
vquestion_tokens.append(self.question_words[word])
padding_words = self.max_q_word_count - len(vquestion_tokens)
for w in range(padding_words):
vquestion_tokens.append(len(self.question_words))
return torch.tensor(vquestion_tokens)
def get_label_ids(self, frames):
all_frame_id_list = []
for frame in frames:
label_id_list = []
for role,label in frame.items():
                # fall back to #UNK# for labels not seen in training
if label in self.noun_list:
label_id = self.noun_list.index(label)
else:
label_id = self.noun_list.index('#UNK#')
label_id_list.append(label_id)
role_padding_count = self.max_role_count - len(label_id_list)
for i in range(role_padding_count):
label_id_list.append(len(self.noun_list))
all_frame_id_list.append(torch.tensor(label_id_list))
labels = torch.stack(all_frame_id_list,0)
return labels
def get_adj_matrix(self, verb_ids):
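        # per-verb adjacency over role slots: real roles are fully connected to each other, padded slots get only a self-loop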
adj_matrix_list = []
for id in verb_ids:
#print('ids :', id)
encoding = self.verb2role_encoding[id]
encoding_tensor = torch.unsqueeze(torch.tensor(encoding),0)
role_count = self.get_role_count(id)
#print('role count :', role_count)
pad_count = self.max_role_count - role_count
expanded = encoding_tensor.expand(self.max_role_count, encoding_tensor.size(1))
transpose = torch.t(expanded)
adj = expanded*transpose
for idx in range(0,pad_count):
cur_idx = role_count + idx
adj[cur_idx][cur_idx] = 1
adj_matrix_list.append(adj)
return torch.stack(adj_matrix_list).type(torch.FloatTensor)
def get_adj_matrix_noself(self, verb_ids):
adj_matrix_list = []
for id in verb_ids:
#print('ids :', id)
encoding = self.verb2role_encoding[id]
encoding_tensor = torch.unsqueeze(torch.tensor(encoding),0)
role_count = self.get_role_count(id)
#print('role count :', role_count)
pad_count = self.max_role_count - role_count
expanded = encoding_tensor.expand(self.max_role_count, encoding_tensor.size(1))
transpose = torch.t(expanded)
adj = expanded*transpose
for idx1 in range(0,role_count):
adj[idx1][idx1] = 0
for idx in range(0,pad_count):
cur_idx = role_count + idx
adj[cur_idx][cur_idx] = 1
adj_matrix_list.append(adj)
return torch.stack(adj_matrix_list).type(torch.FloatTensor)
def getadj(self, verb_ids):
adj_matrix_list = []
for id in verb_ids:
#print('ids :', id)
'''encoding = self.verb2role_encoding[id]
encoding_tensor = torch.unsqueeze(torch.tensor(encoding),0)
role_count = self.get_role_count(id)
#print('role count :', role_count)
pad_count = self.max_role_count - role_count
expanded = encoding_tensor.expand(self.max_role_count, encoding_tensor.size(1))
transpose = torch.t(expanded)
adj = expanded*transpose'''
adj = torch.zeros(6, 6)
for idx in range(0,6):
adj[idx][idx] = 1
adj_matrix_list.append(adj)
return torch.stack(adj_matrix_list).type(torch.FloatTensor)
def get_mask(self, verb_ids, org_tensor):
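        # mask out padded role slots: for each sample, rows from the first missing role onward are zeroed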
org = org_tensor.clone()
org_reshaped = org.view(len(verb_ids), self.max_role_count, -1, org.size(2))
for i in range(0, len(verb_ids)):
role_encoding = self.verb2role_encoding[verb_ids[i]]
for j in range(0, len(role_encoding)):
#print('i:j', i,j)
if role_encoding[j] == 0:
org_reshaped[i][j:] = 0
break
return org_reshaped.view(org_tensor.size())
def get_verb_role_mask(self, verb_ids):
encoding_list = []
for id in verb_ids:
encoding = self.verb2role_encoding[id]
encoding_list.append(encoding)
return torch.stack(encoding_list).type(torch.FloatTensor)
def get_extended_encoding(self, verb_ids, dim):
encoding_list = []
for id in verb_ids:
encoding = self.verb2role_encoding[id]
encoding = torch.unsqueeze(torch.tensor(encoding),1)
#print('encoding unsqe :', encoding.size())
encoding = encoding.repeat(1,dim)
#encoding = torch.squeeze(encoding)
#print('extended :', encoding.size(), encoding)
encoding_list.append(encoding)
return torch.stack(encoding_list).type(torch.FloatTensor)
|
py | 1a33a23ef3376baf8a1e540e45e2e3cbbca55a5d | # Copyright 2017, Google LLC All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import sys
from google.api import http_pb2
from google.longrunning import operations_pb2
from google.protobuf import any_pb2
from google.protobuf import descriptor_pb2
from google.protobuf import empty_pb2
from google.protobuf import timestamp_pb2
from google.rpc import status_pb2
from google.api_core.protobuf_helpers import get_messages
from google.cloud.language_v1beta2.proto import language_service_pb2
_shared_modules = [
http_pb2,
operations_pb2,
any_pb2,
descriptor_pb2,
empty_pb2,
timestamp_pb2,
status_pb2,
]
_local_modules = [language_service_pb2]
names = []
for module in _shared_modules:
for name, message in get_messages(module).items():
setattr(sys.modules[__name__], name, message)
names.append(name)
for module in _local_modules:
for name, message in get_messages(module).items():
message.__module__ = "google.cloud.language_v1beta2.types"
setattr(sys.modules[__name__], name, message)
names.append(name)
__all__ = tuple(sorted(names))
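# Usage note (illustrative): after this module is imported, every message class
# collected above is exposed as an attribute of the module and listed in
# __all__, so messages defined in language_service_pb2 (for example a Document
# message, assuming the proto defines one) become reachable as
# google.cloud.language_v1beta2.types.<MessageName>.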
|
py | 1a33a2b0c5d5ca8cb593b05b73f0f4621a36817a | # -*- coding: utf-8 -*-
"""
注意,本案例需要PyQt5.6以及以下版本才能运行。
否则会提示 cannot import name 'QtWebKitWidgets'
"""
"""
请注意是PyQt5.6以及以下!!!
请注意是PyQt5.6以及以下!!!
请注意是PyQt5.6以及以下!!!
"""
from PyQt5.QtCore import pyqtSlot
from PyQt5.QtWidgets import QMainWindow
from Ui_plotly_matplotlib_pyqt import Ui_MainWindow
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
import sys
from Plotly_PyQt5 import Plotly_PyQt5
class MainWindow(QMainWindow, Ui_MainWindow):
"""
Class documentation goes here.
"""
def __init__(self, parent=None):
"""
Constructor
@param parent reference to the parent widget
@type QWidget
"""
super(MainWindow, self).__init__(parent)
self.setupUi(self)
self.plotly_pyqt5 = Plotly_PyQt5()
self.webView.setGeometry(QRect(50, 20, 1200, 600))
self.webView.load(QUrl.fromLocalFile(self.plotly_pyqt5.get_plot_path_matplotlib_plotly()))
app = QApplication(sys.argv)
win = MainWindow()
win.showMaximized()
app.exec_()
|
py | 1a33a473bd84b78ecb1d2d326635caae456853d7 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TensorFlow testing subclass to automate numerical testing.
Reference tests determine when behavior deviates from some "gold standard," and
are useful for determining when layer definitions have changed without
performing full regression testing, which is generally prohibitive. This class
handles the symbolic graph comparison as well as loading weights to avoid
relying on random number generation, which can change.
The tests performed by this class are:
1) Compare a generated graph against a reference graph. Differences are not
necessarily fatal.
2) Attempt to load known weights for the graph. If this step succeeds but
changes are present in the graph, a warning is issued but does not raise
an exception.
3) Perform a calculation and compare the result to a reference value.
This class also provides a method to generate reference data.
Note:
The test class is responsible for fixing the random seed during graph
definition. A convenience method name_to_seed() is provided to make this
process easier.
The test class should also define a .regenerate() class method which (usually)
just calls the op definition function with test=False for all relevant tests.
A concise example of this class in action is provided in:
official/utils/testing/reference_data_test.py
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import hashlib
import json
import os
import shutil
import sys
import numpy as np
import tensorflow as tf
from tensorflow.python import pywrap_tensorflow
class BaseTest(tf.test.TestCase):
"""TestCase subclass for performing reference data tests."""
def regenerate(self):
"""Subclasses should override this function to generate a new reference."""
raise NotImplementedError
@property
def test_name(self):
"""Subclass should define its own name."""
raise NotImplementedError
@property
def data_root(self):
"""Use the subclass directory rather than the parent directory.
Returns:
The path prefix for reference data.
"""
return os.path.join(os.path.split(
os.path.abspath(__file__))[0], "reference_data", self.test_name)
ckpt_prefix = "model.ckpt"
@staticmethod
def name_to_seed(name):
"""Convert a string into a 32 bit integer.
This function allows test cases to easily generate random fixed seeds by
hashing the name of the test. The hash string is in hex rather than base 10
which is why there is a 16 in the int call, and the modulo projects the
seed from a 128 bit int to 32 bits for readability.
Args:
name: A string containing the name of a test.
Returns:
A pseudo-random 32 bit integer derived from name.
"""
seed = hashlib.md5(name.encode("utf-8")).hexdigest()
return int(seed, 16) % (2**32 - 1)
@staticmethod
def common_tensor_properties(input_array):
"""Convenience function for matrix testing.
In tests we wish to determine whether a result has changed. However storing
an entire n-dimensional array is impractical. A better approach is to
calculate several values from that array and test that those derived values
are unchanged. The properties themselves are arbitrary and should be chosen
to be good proxies for a full equality test.
Args:
input_array: A numpy array from which key values are extracted.
Returns:
A list of values derived from the input_array for equality tests.
"""
output = list(input_array.shape)
flat_array = input_array.flatten()
output.extend([float(i) for i in
[flat_array[0], flat_array[-1], np.sum(flat_array)]])
return output
def default_correctness_function(self, *args):
"""Returns a vector with the concatenation of common properties.
This function simply calls common_tensor_properties() for every element.
It is useful as it allows one to easily construct tests of layers without
having to worry about the details of result checking.
Args:
*args: A list of numpy arrays corresponding to tensors which have been
evaluated.
Returns:
A list of values containing properties for every element in args.
"""
output = []
for arg in args:
output.extend(self.common_tensor_properties(arg))
return output
def _construct_and_save_reference_files(
self, name, graph, ops_to_eval, correctness_function):
"""Save reference data files.
Constructs a serialized graph_def, layer weights, and computation results.
It then saves them to files which are read at test time.
Args:
name: String defining the run. This will be used to define folder names
and will be used for random seed construction.
graph: The graph in which the test is conducted.
ops_to_eval: Ops which the user wishes to be evaluated under a controlled
session.
correctness_function: This function accepts the evaluated results of
ops_to_eval, and returns a list of values. This list must be JSON
serializable; in particular it is up to the user to convert numpy
dtypes into builtin dtypes.
"""
data_dir = os.path.join(self.data_root, name)
# Make sure there is a clean space for results.
if os.path.exists(data_dir):
shutil.rmtree(data_dir)
os.makedirs(data_dir)
# Serialize graph for comparison.
graph_bytes = graph.as_graph_def().SerializeToString()
expected_file = os.path.join(data_dir, "expected_graph")
with tf.gfile.Open(expected_file, "wb") as f:
f.write(graph_bytes)
with graph.as_default():
init = tf.global_variables_initializer()
saver = tf.train.Saver()
with self.test_session(graph=graph) as sess:
sess.run(init)
saver.save(sess=sess, save_path=os.path.join(
data_dir, self.ckpt_prefix))
# These files are not needed for this test.
os.remove(os.path.join(data_dir, "checkpoint"))
os.remove(os.path.join(data_dir, self.ckpt_prefix + ".meta"))
# ops are evaluated even if there is no correctness function to ensure
# that they can be evaluated.
eval_results = [op.eval() for op in ops_to_eval]
if correctness_function is not None:
results = correctness_function(*eval_results)
with tf.gfile.Open(os.path.join(data_dir, "results.json"), "w") as f:
json.dump(results, f)
with tf.gfile.Open(os.path.join(data_dir, "tf_version.json"), "w") as f:
json.dump([tf.VERSION, tf.GIT_VERSION], f)
def _evaluate_test_case(self, name, graph, ops_to_eval, correctness_function):
"""Determine if a graph agrees with the reference data.
Args:
name: String defining the run. This will be used to define folder names
and will be used for random seed construction.
graph: The graph in which the test is conducted.
ops_to_eval: Ops which the user wishes to be evaluated under a controlled
session.
correctness_function: This function accepts the evaluated results of
ops_to_eval, and returns a list of values. This list must be JSON
serializable; in particular it is up to the user to convert numpy
dtypes into builtin dtypes.
"""
data_dir = os.path.join(self.data_root, name)
# Serialize graph for comparison.
graph_bytes = graph.as_graph_def().SerializeToString()
expected_file = os.path.join(data_dir, "expected_graph")
with tf.gfile.Open(expected_file, "rb") as f:
expected_graph_bytes = f.read()
# The serialization is non-deterministic byte-for-byte. Instead there is
# a utility which evaluates the semantics of the two graphs to test for
# equality. This has the added benefit of providing some information on
# what changed.
    # Note: The summary only shows the first difference detected. It is not
# an exhaustive summary of differences.
differences = pywrap_tensorflow.EqualGraphDefWrapper(
graph_bytes, expected_graph_bytes).decode("utf-8")
with graph.as_default():
init = tf.global_variables_initializer()
saver = tf.train.Saver()
with tf.gfile.Open(os.path.join(data_dir, "tf_version.json"), "r") as f:
tf_version_reference, tf_git_version_reference = json.load(
f) # pylint: disable=unpacking-non-sequence
tf_version_comparison = ""
if tf.GIT_VERSION != tf_git_version_reference:
tf_version_comparison = (
"Test was built using: {} (git = {})\n"
"Local TensorFlow version: {} (git = {})"
.format(tf_version_reference, tf_git_version_reference,
tf.VERSION, tf.GIT_VERSION)
)
with self.test_session(graph=graph) as sess:
sess.run(init)
try:
saver.restore(sess=sess, save_path=os.path.join(
data_dir, self.ckpt_prefix))
if differences:
tf.logging.warn(
"The provided graph is different than expected:\n {}\n"
"However the weights were still able to be loaded.\n{}".format(
differences, tf_version_comparison)
)
except: # pylint: disable=bare-except
raise self.failureException(
"Weight load failed. Graph comparison:\n {}{}"
.format(differences, tf_version_comparison))
eval_results = [op.eval() for op in ops_to_eval]
if correctness_function is not None:
results = correctness_function(*eval_results)
with tf.gfile.Open(os.path.join(data_dir, "results.json"), "r") as f:
expected_results = json.load(f)
self.assertAllClose(results, expected_results)
def _save_or_test_ops(self, name, graph, ops_to_eval=None, test=True,
correctness_function=None):
"""Utility function to automate repeated work of graph checking and saving.
The philosophy of this function is that the user need only define ops on
a graph and specify which results should be validated. The actual work of
managing snapshots and calculating results should be automated away.
Args:
name: String defining the run. This will be used to define folder names
and will be used for random seed construction.
graph: The graph in which the test is conducted.
ops_to_eval: Ops which the user wishes to be evaluated under a controlled
session.
test: Boolean. If True this function will test graph correctness, load
weights, and compute numerical values. If False the necessary test data
will be generated and saved.
correctness_function: This function accepts the evaluated results of
ops_to_eval, and returns a list of values. This list must be JSON
serializable; in particular it is up to the user to convert numpy
dtypes into builtin dtypes.
"""
ops_to_eval = ops_to_eval or []
if test:
try:
self._evaluate_test_case(
name=name, graph=graph, ops_to_eval=ops_to_eval,
correctness_function=correctness_function
)
except:
tf.logging.error("Failed unittest {}".format(name))
raise
else:
self._construct_and_save_reference_files(
name=name, graph=graph, ops_to_eval=ops_to_eval,
correctness_function=correctness_function
)
class ReferenceDataActionParser(argparse.ArgumentParser):
"""Minimal arg parser so that test regeneration can be called from the CLI."""
def __init__(self):
super(ReferenceDataActionParser, self).__init__()
self.add_argument(
"--regenerate", "-regen",
action="store_true",
help="Enable this flag to regenerate test data. If not set unit tests"
"will be run."
)
def main(argv, test_class):
"""Simple switch function to allow test regeneration from the CLI."""
flags = ReferenceDataActionParser().parse_args(argv[1:])
if flags.regenerate:
if sys.version_info[0] == 2:
raise NameError("\nPython2 unittest does not support being run as a "
"standalone class.\nAs a result tests must be "
"regenerated using Python3.\n"
"Tests can be run under 2 or 3.")
test_class().regenerate()
else:
tf.test.main()
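

# --- Illustrative usage sketch (commented out; an assumption, not part of this module) ---
# A minimal subclass following the pattern described in the module docstring
# might look like the example below. The layer choice, the "dense_layer" name
# and the values used are placeholders; see
# official/utils/testing/reference_data_test.py for the real examples.
#
#   class ExampleBenchmark(BaseTest):
#     """Reference tests for a toy dense layer."""
#
#     @property
#     def test_name(self):
#       return "example"
#
#     @classmethod
#     def regenerate(cls):
#       # Regenerate the reference files instead of testing against them.
#       cls()._dense_layer_ops(test=False)
#
#     def _dense_layer_ops(self, test=True):
#       name = "dense_layer"
#       g = tf.Graph()
#       with g.as_default():
#         tf.set_random_seed(self.name_to_seed(name))
#         x = tf.random_uniform((4, 8))
#         y = tf.layers.dense(x, units=2)
#       self._save_or_test_ops(
#           name=name, graph=g, ops_to_eval=[y], test=test,
#           correctness_function=self.default_correctness_function)
#
#     def test_dense_layer(self):
#       self._dense_layer_ops(test=True)
#
#   if __name__ == "__main__":
#     main(sys.argv, ExampleBenchmark)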
|
py | 1a33a599b1c8d6adbd2862e7989f435e4500168d | from django.test import TestCase
from django.contrib.auth import get_user_model
from django.urls import reverse
from rest_framework.test import APIClient
from rest_framework import status
CREATE_USER_URL = reverse('user:create')
TOKEN_URL = reverse('user:token')
ME_URL = reverse('user:me')
def create_user(**params):
return get_user_model().objects.create_user(**params)
class PublicUserApiTests(TestCase):
"""Test the users API (public)"""
def setUp(self):
self.client = APIClient()
def test_create_valid_user_success(self):
"""Test creating user with valid payload is successful"""
payload = {
'email': '[email protected]',
'password': 'testpass',
'name': 'Test Name'
}
res = self.client.post(CREATE_USER_URL, payload)
self.assertEqual(res.status_code, status.HTTP_201_CREATED)
user = get_user_model().objects.get(**res.data)
self.assertTrue(user.check_password(payload['password']))
self.assertNotIn('password', res.data)
def test_user_exists(self):
"""Test creating a user that already exists"""
payload = {
'email': '[email protected]',
'password': 'testpass',
'name': 'Test Name'
}
create_user(**payload)
res = self.client.post(CREATE_USER_URL, payload)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
def test_password_too_short(self):
"""Test that the password must be more than five characters"""
payload = {
'email': '[email protected]',
'password': 'pw',
'name': 'Test Name'}
res = self.client.post(CREATE_USER_URL, payload)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
user_exists = get_user_model().objects.filter(
email=payload['email']
).exists()
self.assertFalse(user_exists)
def test_create_token_for_user(self):
"""Test that a token is created for the user"""
payload = {
'email': '[email protected]',
'password': 'testpass',
'name': 'Test Name'
}
create_user(**payload)
res = self.client.post(TOKEN_URL, payload)
self.assertIn('token', res.data)
self.assertEqual(res.status_code, status.HTTP_200_OK)
def test_create_token_invalid_credentials(self):
"""Test that token is not created if invalid credentials are given"""
create_user(email='[email protected]', password='testpass', name='Test Name')
payload = {
'email': '[email protected]',
'password': 'wrongpass',
'name': 'Test Name'
}
res = self.client.post(TOKEN_URL, payload)
self.assertNotIn('token', res.data)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
def test_create_token_no_user(self):
"""Test that token is not created if user doesn't exist"""
payload = {
'email': '[email protected]',
'password': 'testpass'
}
res = self.client.post(TOKEN_URL, payload)
self.assertNotIn('token', res.data)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
def test_create_token_missing_field(self):
"""Test that email and password are required"""
res = self.client.post(TOKEN_URL, {'email': 'one', 'password': ''})
self.assertNotIn('token', res.data)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
def test_retrieve_user_unauthorized(self):
"""Test that authentication is required for users"""
res = self.client.get(ME_URL)
self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)
class PrivateUserApiTests(TestCase):
"""Test API requests that require authentication"""
def setUp(self):
self.user = create_user(
email='[email protected]',
password='testpass',
name='Test Name'
)
self.client = APIClient()
self.client.force_authenticate(user=self.user)
def test_retrieve_profile_success(self):
"""Test retrieving profile for logged in user"""
res = self.client.get(ME_URL)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(res.data, {
'name': self.user.name,
'email': self.user.email
})
def test_post_not_allowed(self):
"""Test that post is not allowed on the me url"""
res = self.client.post(ME_URL, {})
self.assertEqual(res.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
def test_update_user_profile_(self):
"""Test updating the user profile for authenticated user"""
payload = {'name': 'new name', 'password': 'newpassword'}
res = self.client.patch(ME_URL, payload)
self.user.refresh_from_db()
self.assertEqual(self.user.name, payload['name'])
self.assertTrue(self.user.check_password(payload['password']))
self.assertEqual(res.status_code, status.HTTP_200_OK)
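

# Illustrative note (an assumption about the surrounding project, not part of
# this test module): the tests above rely on the 'user' app registering URL
# names 'user:create', 'user:token' and 'user:me'; with a standard Django
# project layout they would typically be run with `python manage.py test`.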
|
py | 1a33a621dbb6c46d09b80c36b103bdca4f674e8a | import logging
import math
import time
from asyncio import Lock
from random import choice, randrange
from secrets import randbits
from typing import Dict, List, Optional, Set, Tuple
from hddcoin.types.peer_info import PeerInfo, TimestampedPeerInfo
from hddcoin.util.hash import std_hash
from hddcoin.util.ints import uint16, uint64
TRIED_BUCKETS_PER_GROUP = 8
NEW_BUCKETS_PER_SOURCE_GROUP = 64
TRIED_BUCKET_COUNT = 256
NEW_BUCKET_COUNT = 1024
BUCKET_SIZE = 64
TRIED_COLLISION_SIZE = 10
NEW_BUCKETS_PER_ADDRESS = 8
LOG_TRIED_BUCKET_COUNT = 3
LOG_NEW_BUCKET_COUNT = 10
LOG_BUCKET_SIZE = 6
HORIZON_DAYS = 30
MAX_RETRIES = 3
MIN_FAIL_DAYS = 7
MAX_FAILURES = 10
log = logging.getLogger(__name__)
# This is a Python port of the 'CAddrInfo' class from Bitcoin core.
class ExtendedPeerInfo:
def __init__(
self,
addr: TimestampedPeerInfo,
src_peer: Optional[PeerInfo],
):
self.peer_info: PeerInfo = PeerInfo(
addr.host,
addr.port,
)
self.timestamp: int = addr.timestamp
self.src: Optional[PeerInfo] = src_peer
if src_peer is None:
self.src = self.peer_info
self.random_pos: Optional[int] = None
self.is_tried: bool = False
self.ref_count: int = 0
self.last_success: int = 0
self.last_try: int = 0
self.num_attempts: int = 0
self.last_count_attempt: int = 0
def to_string(self) -> str:
assert self.src is not None
out = (
self.peer_info.host
+ " "
+ str(int(self.peer_info.port))
+ " "
+ str(int(self.timestamp))
+ " "
+ self.src.host
+ " "
+ str(int(self.src.port))
)
return out
@classmethod
def from_string(cls, peer_str: str):
blobs = peer_str.split(" ")
assert len(blobs) == 5
peer_info = TimestampedPeerInfo(blobs[0], uint16(int(blobs[1])), uint64(int(blobs[2])))
src_peer = PeerInfo(blobs[3], uint16(int(blobs[4])))
return cls(peer_info, src_peer)
def get_tried_bucket(self, key: int) -> int:
hash1 = int.from_bytes(
bytes(std_hash(key.to_bytes(32, byteorder="big") + self.peer_info.get_key())[:8]),
byteorder="big",
)
hash1 = hash1 % TRIED_BUCKETS_PER_GROUP
hash2 = int.from_bytes(
bytes(std_hash(key.to_bytes(32, byteorder="big") + self.peer_info.get_group() + bytes([hash1]))[:8]),
byteorder="big",
)
return hash2 % TRIED_BUCKET_COUNT
def get_new_bucket(self, key: int, src_peer: Optional[PeerInfo] = None) -> int:
if src_peer is None:
src_peer = self.src
assert src_peer is not None
hash1 = int.from_bytes(
bytes(std_hash(key.to_bytes(32, byteorder="big") + self.peer_info.get_group() + src_peer.get_group())[:8]),
byteorder="big",
)
hash1 = hash1 % NEW_BUCKETS_PER_SOURCE_GROUP
hash2 = int.from_bytes(
bytes(std_hash(key.to_bytes(32, byteorder="big") + src_peer.get_group() + bytes([hash1]))[:8]),
byteorder="big",
)
return hash2 % NEW_BUCKET_COUNT
def get_bucket_position(self, key: int, is_new: bool, nBucket: int) -> int:
ch = "N" if is_new else "K"
hash1 = int.from_bytes(
bytes(
std_hash(
key.to_bytes(32, byteorder="big")
+ ch.encode()
+ nBucket.to_bytes(3, byteorder="big")
+ self.peer_info.get_key()
)[:8]
),
byteorder="big",
)
return hash1 % BUCKET_SIZE
def is_terrible(self, now: Optional[int] = None) -> bool:
if now is None:
now = int(math.floor(time.time()))
# never remove things tried in the last minute
if self.last_try > 0 and self.last_try >= now - 60:
return False
# came in a flying DeLorean
if self.timestamp > now + 10 * 60:
return True
# not seen in recent history
if self.timestamp == 0 or now - self.timestamp > HORIZON_DAYS * 24 * 60 * 60:
return True
# tried N times and never a success
if self.last_success == 0 and self.num_attempts >= MAX_RETRIES:
return True
# N successive failures in the last week
if now - self.last_success > MIN_FAIL_DAYS * 24 * 60 * 60 and self.num_attempts >= MAX_FAILURES:
return True
return False
def get_selection_chance(self, now: Optional[int] = None):
if now is None:
now = int(math.floor(time.time()))
chance = 1.0
since_last_try = max(now - self.last_try, 0)
# deprioritize very recent attempts away
if since_last_try < 60 * 10:
chance *= 0.01
# deprioritize 66% after each failed attempt,
# but at most 1/28th to avoid the search taking forever or overly penalizing outages.
chance *= pow(0.66, min(self.num_attempts, 8))
return chance
# This is a Python port of the 'CAddrMan' class from Bitcoin core.
class AddressManager:
id_count: int
key: int
random_pos: List[int]
tried_matrix: List[List[int]]
new_matrix: List[List[int]]
tried_count: int
new_count: int
map_addr: Dict[str, int]
map_info: Dict[int, ExtendedPeerInfo]
last_good: int
tried_collisions: List[int]
used_new_matrix_positions: Set[Tuple[int, int]]
used_tried_matrix_positions: Set[Tuple[int, int]]
allow_private_subnets: bool
def __init__(self) -> None:
self.clear()
self.lock: Lock = Lock()
def clear(self) -> None:
self.id_count = 0
self.key = randbits(256)
self.random_pos = []
self.tried_matrix = [[-1 for x in range(BUCKET_SIZE)] for y in range(TRIED_BUCKET_COUNT)]
self.new_matrix = [[-1 for x in range(BUCKET_SIZE)] for y in range(NEW_BUCKET_COUNT)]
self.tried_count = 0
self.new_count = 0
self.map_addr = {}
self.map_info = {}
self.last_good = 1
self.tried_collisions = []
self.used_new_matrix_positions = set()
self.used_tried_matrix_positions = set()
self.allow_private_subnets = False
def make_private_subnets_valid(self) -> None:
self.allow_private_subnets = True
# Use only this method for modifying new matrix.
def _set_new_matrix(self, row: int, col: int, value: int) -> None:
self.new_matrix[row][col] = value
if value == -1:
if (row, col) in self.used_new_matrix_positions:
self.used_new_matrix_positions.remove((row, col))
else:
if (row, col) not in self.used_new_matrix_positions:
self.used_new_matrix_positions.add((row, col))
# Use only this method for modifying tried matrix.
def _set_tried_matrix(self, row: int, col: int, value: int) -> None:
self.tried_matrix[row][col] = value
if value == -1:
if (row, col) in self.used_tried_matrix_positions:
self.used_tried_matrix_positions.remove((row, col))
else:
if (row, col) not in self.used_tried_matrix_positions:
self.used_tried_matrix_positions.add((row, col))
def load_used_table_positions(self) -> None:
self.used_new_matrix_positions = set()
self.used_tried_matrix_positions = set()
for bucket in range(NEW_BUCKET_COUNT):
for pos in range(BUCKET_SIZE):
if self.new_matrix[bucket][pos] != -1:
self.used_new_matrix_positions.add((bucket, pos))
for bucket in range(TRIED_BUCKET_COUNT):
for pos in range(BUCKET_SIZE):
if self.tried_matrix[bucket][pos] != -1:
self.used_tried_matrix_positions.add((bucket, pos))
def create_(self, addr: TimestampedPeerInfo, addr_src: Optional[PeerInfo]) -> Tuple[ExtendedPeerInfo, int]:
self.id_count += 1
node_id = self.id_count
self.map_info[node_id] = ExtendedPeerInfo(addr, addr_src)
self.map_addr[addr.host] = node_id
self.map_info[node_id].random_pos = len(self.random_pos)
self.random_pos.append(node_id)
return (self.map_info[node_id], node_id)
def find_(self, addr: PeerInfo) -> Tuple[Optional[ExtendedPeerInfo], Optional[int]]:
if addr.host not in self.map_addr:
return (None, None)
node_id = self.map_addr[addr.host]
if node_id not in self.map_info:
return (None, node_id)
return (self.map_info[node_id], node_id)
def swap_random_(self, rand_pos_1: int, rand_pos_2: int) -> None:
if rand_pos_1 == rand_pos_2:
return None
assert rand_pos_1 < len(self.random_pos) and rand_pos_2 < len(self.random_pos)
node_id_1 = self.random_pos[rand_pos_1]
node_id_2 = self.random_pos[rand_pos_2]
self.map_info[node_id_1].random_pos = rand_pos_2
self.map_info[node_id_2].random_pos = rand_pos_1
self.random_pos[rand_pos_1] = node_id_2
self.random_pos[rand_pos_2] = node_id_1
def make_tried_(self, info: ExtendedPeerInfo, node_id: int) -> None:
for bucket in range(NEW_BUCKET_COUNT):
pos = info.get_bucket_position(self.key, True, bucket)
if self.new_matrix[bucket][pos] == node_id:
self._set_new_matrix(bucket, pos, -1)
info.ref_count -= 1
assert info.ref_count == 0
self.new_count -= 1
cur_bucket = info.get_tried_bucket(self.key)
cur_bucket_pos = info.get_bucket_position(self.key, False, cur_bucket)
if self.tried_matrix[cur_bucket][cur_bucket_pos] != -1:
# Evict the old node from the tried table.
node_id_evict = self.tried_matrix[cur_bucket][cur_bucket_pos]
assert node_id_evict in self.map_info
old_info = self.map_info[node_id_evict]
old_info.is_tried = False
self._set_tried_matrix(cur_bucket, cur_bucket_pos, -1)
self.tried_count -= 1
# Find its position into new table.
new_bucket = old_info.get_new_bucket(self.key)
new_bucket_pos = old_info.get_bucket_position(self.key, True, new_bucket)
self.clear_new_(new_bucket, new_bucket_pos)
old_info.ref_count = 1
self._set_new_matrix(new_bucket, new_bucket_pos, node_id_evict)
self.new_count += 1
self._set_tried_matrix(cur_bucket, cur_bucket_pos, node_id)
self.tried_count += 1
info.is_tried = True
def clear_new_(self, bucket: int, pos: int) -> None:
if self.new_matrix[bucket][pos] != -1:
delete_id = self.new_matrix[bucket][pos]
delete_info = self.map_info[delete_id]
assert delete_info.ref_count > 0
delete_info.ref_count -= 1
self._set_new_matrix(bucket, pos, -1)
if delete_info.ref_count == 0:
self.delete_new_entry_(delete_id)
def mark_good_(self, addr: PeerInfo, test_before_evict: bool, timestamp: int) -> None:
self.last_good = timestamp
(info, node_id) = self.find_(addr)
if not addr.is_valid(self.allow_private_subnets):
return None
if info is None:
return None
if node_id is None:
return None
if not (info.peer_info.host == addr.host and info.peer_info.port == addr.port):
return None
# update info
info.last_success = timestamp
info.last_try = timestamp
info.num_attempts = 0
# timestamp is not updated here, to avoid leaking information about
# currently-connected peers.
# if it is already in the tried set, don't do anything else
if info.is_tried:
return None
# find a bucket it is in now
bucket_rand = randrange(NEW_BUCKET_COUNT)
new_bucket = -1
for n in range(NEW_BUCKET_COUNT):
cur_new_bucket = (n + bucket_rand) % NEW_BUCKET_COUNT
cur_new_bucket_pos = info.get_bucket_position(self.key, True, cur_new_bucket)
if self.new_matrix[cur_new_bucket][cur_new_bucket_pos] == node_id:
new_bucket = cur_new_bucket
break
# if no bucket is found, something bad happened;
if new_bucket == -1:
return None
# NOTE(Florin): Double check this. It's not used anywhere else.
# which tried bucket to move the entry to
tried_bucket = info.get_tried_bucket(self.key)
tried_bucket_pos = info.get_bucket_position(self.key, False, tried_bucket)
# Will moving this address into tried evict another entry?
if test_before_evict and self.tried_matrix[tried_bucket][tried_bucket_pos] != -1:
if len(self.tried_collisions) < TRIED_COLLISION_SIZE:
if node_id not in self.tried_collisions:
self.tried_collisions.append(node_id)
else:
self.make_tried_(info, node_id)
def delete_new_entry_(self, node_id: int) -> None:
info = self.map_info[node_id]
if info is None or info.random_pos is None:
return None
self.swap_random_(info.random_pos, len(self.random_pos) - 1)
self.random_pos = self.random_pos[:-1]
del self.map_addr[info.peer_info.host]
del self.map_info[node_id]
self.new_count -= 1
def add_to_new_table_(self, addr: TimestampedPeerInfo, source: Optional[PeerInfo], penalty: int) -> bool:
is_unique = False
peer_info = PeerInfo(
addr.host,
addr.port,
)
if not peer_info.is_valid(self.allow_private_subnets):
return False
(info, node_id) = self.find_(peer_info)
if info is not None and info.peer_info.host == addr.host and info.peer_info.port == addr.port:
penalty = 0
if info is not None:
# periodically update timestamp
currently_online = time.time() - addr.timestamp < 24 * 60 * 60
update_interval = 60 * 60 if currently_online else 24 * 60 * 60
if addr.timestamp > 0 and (
info.timestamp > 0 or info.timestamp < addr.timestamp - update_interval - penalty
):
info.timestamp = max(0, addr.timestamp - penalty)
# do not update if no new information is present
if addr.timestamp == 0 or (info.timestamp > 0 and addr.timestamp <= info.timestamp):
return False
# do not update if the entry was already in the "tried" table
if info.is_tried:
return False
# do not update if the max reference count is reached
if info.ref_count == NEW_BUCKETS_PER_ADDRESS:
return False
# stochastic test: previous ref_count == N: 2^N times harder to increase it
factor = 1 << info.ref_count
if factor > 1 and randrange(factor) != 0:
return False
else:
(info, node_id) = self.create_(addr, source)
info.timestamp = max(0, info.timestamp - penalty)
self.new_count += 1
is_unique = True
new_bucket = info.get_new_bucket(self.key, source)
new_bucket_pos = info.get_bucket_position(self.key, True, new_bucket)
if self.new_matrix[new_bucket][new_bucket_pos] != node_id:
add_to_new = self.new_matrix[new_bucket][new_bucket_pos] == -1
if not add_to_new:
info_existing = self.map_info[self.new_matrix[new_bucket][new_bucket_pos]]
if info_existing.is_terrible() or (info_existing.ref_count > 1 and info.ref_count == 0):
add_to_new = True
if add_to_new:
self.clear_new_(new_bucket, new_bucket_pos)
info.ref_count += 1
if node_id is not None:
self._set_new_matrix(new_bucket, new_bucket_pos, node_id)
else:
if info.ref_count == 0:
if node_id is not None:
self.delete_new_entry_(node_id)
return is_unique
def attempt_(self, addr: PeerInfo, count_failures: bool, timestamp: int) -> None:
info, _ = self.find_(addr)
if info is None:
return None
if not (info.peer_info.host == addr.host and info.peer_info.port == addr.port):
return None
info.last_try = timestamp
if count_failures and info.last_count_attempt < self.last_good:
info.last_count_attempt = timestamp
info.num_attempts += 1
def select_peer_(self, new_only: bool) -> Optional[ExtendedPeerInfo]:
if len(self.random_pos) == 0:
return None
if new_only and self.new_count == 0:
return None
# Use a 50% chance for choosing between tried and new table entries.
if not new_only and self.tried_count > 0 and (self.new_count == 0 or randrange(2) == 0):
chance = 1.0
start = time.time()
cached_tried_matrix_positions: List[Tuple[int, int]] = []
if len(self.used_tried_matrix_positions) < math.sqrt(TRIED_BUCKET_COUNT * BUCKET_SIZE):
cached_tried_matrix_positions = list(self.used_tried_matrix_positions)
while True:
if len(self.used_tried_matrix_positions) < math.sqrt(TRIED_BUCKET_COUNT * BUCKET_SIZE):
if len(self.used_tried_matrix_positions) == 0:
log.error(f"Empty tried table, but tried_count shows {self.tried_count}.")
return None
# The table is sparse, randomly pick from positions list.
index = randrange(len(cached_tried_matrix_positions))
tried_bucket, tried_bucket_pos = cached_tried_matrix_positions[index]
else:
# The table is dense, randomly trying positions is faster than loading positions list.
tried_bucket = randrange(TRIED_BUCKET_COUNT)
tried_bucket_pos = randrange(BUCKET_SIZE)
while self.tried_matrix[tried_bucket][tried_bucket_pos] == -1:
tried_bucket = (tried_bucket + randbits(LOG_TRIED_BUCKET_COUNT)) % TRIED_BUCKET_COUNT
tried_bucket_pos = (tried_bucket_pos + randbits(LOG_BUCKET_SIZE)) % BUCKET_SIZE
node_id = self.tried_matrix[tried_bucket][tried_bucket_pos]
assert node_id != -1
info = self.map_info[node_id]
if randbits(30) < (chance * info.get_selection_chance() * (1 << 30)):
end = time.time()
log.debug(f"address_manager.select_peer took {(end - start):.2e} seconds in tried table.")
return info
chance *= 1.2
else:
chance = 1.0
start = time.time()
cached_new_matrix_positions: List[Tuple[int, int]] = []
if len(self.used_new_matrix_positions) < math.sqrt(NEW_BUCKET_COUNT * BUCKET_SIZE):
cached_new_matrix_positions = list(self.used_new_matrix_positions)
while True:
if len(self.used_new_matrix_positions) < math.sqrt(NEW_BUCKET_COUNT * BUCKET_SIZE):
if len(self.used_new_matrix_positions) == 0:
log.error(f"Empty new table, but new_count shows {self.new_count}.")
return None
index = randrange(len(cached_new_matrix_positions))
new_bucket, new_bucket_pos = cached_new_matrix_positions[index]
else:
new_bucket = randrange(NEW_BUCKET_COUNT)
new_bucket_pos = randrange(BUCKET_SIZE)
while self.new_matrix[new_bucket][new_bucket_pos] == -1:
new_bucket = (new_bucket + randbits(LOG_NEW_BUCKET_COUNT)) % NEW_BUCKET_COUNT
new_bucket_pos = (new_bucket_pos + randbits(LOG_BUCKET_SIZE)) % BUCKET_SIZE
node_id = self.new_matrix[new_bucket][new_bucket_pos]
assert node_id != -1
info = self.map_info[node_id]
if randbits(30) < chance * info.get_selection_chance() * (1 << 30):
end = time.time()
log.debug(f"address_manager.select_peer took {(end - start):.2e} seconds in new table.")
return info
chance *= 1.2
def resolve_tried_collisions_(self) -> None:
for node_id in self.tried_collisions[:]:
resolved = False
if node_id not in self.map_info:
resolved = True
else:
info = self.map_info[node_id]
peer = info.peer_info
tried_bucket = info.get_tried_bucket(self.key)
tried_bucket_pos = info.get_bucket_position(self.key, False, tried_bucket)
if self.tried_matrix[tried_bucket][tried_bucket_pos] != -1:
old_id = self.tried_matrix[tried_bucket][tried_bucket_pos]
old_info = self.map_info[old_id]
if time.time() - old_info.last_success < 4 * 60 * 60:
resolved = True
elif time.time() - old_info.last_try < 4 * 60 * 60:
if time.time() - old_info.last_try > 60:
self.mark_good_(peer, False, math.floor(time.time()))
resolved = True
elif time.time() - info.last_success > 40 * 60:
self.mark_good_(peer, False, math.floor(time.time()))
resolved = True
else:
self.mark_good_(peer, False, math.floor(time.time()))
resolved = True
if resolved:
self.tried_collisions.remove(node_id)
def select_tried_collision_(self) -> Optional[ExtendedPeerInfo]:
if len(self.tried_collisions) == 0:
return None
new_id = choice(self.tried_collisions)
if new_id not in self.map_info:
self.tried_collisions.remove(new_id)
return None
new_info = self.map_info[new_id]
tried_bucket = new_info.get_tried_bucket(self.key)
tried_bucket_pos = new_info.get_bucket_position(self.key, False, tried_bucket)
old_id = self.tried_matrix[tried_bucket][tried_bucket_pos]
return self.map_info[old_id]
def get_peers_(self) -> List[TimestampedPeerInfo]:
addr: List[TimestampedPeerInfo] = []
num_nodes = math.ceil(23 * len(self.random_pos) / 100)
if num_nodes > 1000:
num_nodes = 1000
for n in range(len(self.random_pos)):
if len(addr) >= num_nodes:
return addr
rand_pos = randrange(len(self.random_pos) - n) + n
self.swap_random_(n, rand_pos)
info = self.map_info[self.random_pos[n]]
if not info.peer_info.is_valid(self.allow_private_subnets):
continue
if not info.is_terrible():
cur_peer_info = TimestampedPeerInfo(
info.peer_info.host,
uint16(info.peer_info.port),
uint64(info.timestamp),
)
addr.append(cur_peer_info)
return addr
def cleanup(self, max_timestamp_difference: int, max_consecutive_failures: int):
now = int(math.floor(time.time()))
for bucket in range(NEW_BUCKET_COUNT):
for pos in range(BUCKET_SIZE):
if self.new_matrix[bucket][pos] != -1:
node_id = self.new_matrix[bucket][pos]
cur_info = self.map_info[node_id]
if (
cur_info.timestamp < now - max_timestamp_difference
and cur_info.num_attempts >= max_consecutive_failures
):
self.clear_new_(bucket, pos)
def connect_(self, addr: PeerInfo, timestamp: int):
info, _ = self.find_(addr)
if info is None:
return None
# check whether we are talking about the exact same peer
if not (info.peer_info.host == addr.host and info.peer_info.port == addr.port):
return None
update_interval = 20 * 60
if timestamp - info.timestamp > update_interval:
info.timestamp = timestamp
async def size(self) -> int:
async with self.lock:
return len(self.random_pos)
async def add_to_new_table(
self,
addresses: List[TimestampedPeerInfo],
source: Optional[PeerInfo] = None,
penalty: int = 0,
) -> bool:
is_added = False
async with self.lock:
for addr in addresses:
cur_peer_added = self.add_to_new_table_(addr, source, penalty)
is_added = is_added or cur_peer_added
return is_added
    # Mark an entry as accessible.
async def mark_good(
self,
addr: PeerInfo,
test_before_evict: bool = True,
timestamp: int = -1,
):
if timestamp == -1:
timestamp = math.floor(time.time())
async with self.lock:
self.mark_good_(addr, test_before_evict, timestamp)
# Mark an entry as connection attempted to.
async def attempt(
self,
addr: PeerInfo,
count_failures: bool,
timestamp: int = -1,
):
if timestamp == -1:
timestamp = math.floor(time.time())
async with self.lock:
self.attempt_(addr, count_failures, timestamp)
# See if any to-be-evicted tried table entries have been tested and if so resolve the collisions.
async def resolve_tried_collisions(self):
async with self.lock:
self.resolve_tried_collisions_()
# Randomly select an address in tried that another address is attempting to evict.
async def select_tried_collision(self) -> Optional[ExtendedPeerInfo]:
async with self.lock:
return self.select_tried_collision_()
# Choose an address to connect to.
async def select_peer(self, new_only: bool = False) -> Optional[ExtendedPeerInfo]:
async with self.lock:
return self.select_peer_(new_only)
# Return a bunch of addresses, selected at random.
async def get_peers(self) -> List[TimestampedPeerInfo]:
async with self.lock:
return self.get_peers_()
async def connect(self, addr: PeerInfo, timestamp: int = -1):
if timestamp == -1:
timestamp = math.floor(time.time())
async with self.lock:
return self.connect_(addr, timestamp)
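

# --- Illustrative usage sketch (an assumption, not part of the original module) ---
# The lock-protected coroutines above (add_to_new_table, attempt, mark_good,
# select_peer, get_peers) are the intended entry points. The host/port values
# below are placeholders; make_private_subnets_valid() is only called so that
# the loopback address passes the validity check in this local demo.
if __name__ == "__main__":
    import asyncio

    async def _demo() -> None:
        manager = AddressManager()
        manager.make_private_subnets_valid()
        now = uint64(int(time.time()))
        peers = [TimestampedPeerInfo("127.0.0.1", uint16(8444), now)]
        await manager.add_to_new_table(peers, source=None, penalty=0)
        await manager.attempt(PeerInfo("127.0.0.1", uint16(8444)), count_failures=False)
        await manager.mark_good(PeerInfo("127.0.0.1", uint16(8444)))
        selected = await manager.select_peer()
        print("selected:", None if selected is None else selected.to_string())

    asyncio.run(_demo())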
|
py | 1a33a72dcba739a9b2e7ca64dc926e003192b905 | # -*- coding: utf-8 -*-
"""
All spiders should yield data shaped according to the Open Civic Data
specification (http://docs.opencivicdata.org/en/latest/data/event.html).
"""
from datetime import datetime
from pytz import timezone
from legistar.events import LegistarEventsScraper
from documenters_aggregator.spider import Spider
class Cook_boardSpider(Spider):
name = 'cook_board'
long_name = 'Cook County Board of Commissioners'
allowed_domains = ['cook-county.legistar.com']
event_timezone = 'America/Chicago'
start_urls = ['https://www.cook-county.legistar.com'] # use LegistarEventsScraper instead
def parse(self, response):
"""
`parse` should always `yield` a dict that follows the `Open Civic Data
event standard <http://docs.opencivicdata.org/en/latest/data/event.html>`.
Change the `_parse_id`, `_parse_name`, etc methods to fit your scraping
needs.
"""
events = self._make_legistar_call()
return self._parse_events(events)
def _make_legistar_call(self, since=None):
les = LegistarEventsScraper(jurisdiction=None, datadir=None)
les.EVENTSPAGE = 'https://cook-county.legistar.com/Calendar.aspx'
les.BASE_URL = 'https://cook-county.legistar.com'
if not since:
since = datetime.today().year
return les.events(since=since)
def _parse_events(self, events):
for item, _ in events:
start_time = self._parse_start(item)
data = {
'_type': 'event',
'name': self._parse_name(item),
'description': self._parse_description(item),
'classification': self._parse_classification(item),
'start_time': start_time,
'end_time': self._parse_end(item),
'all_day': self._parse_all_day(item),
'timezone': self.event_timezone,
'location': self._parse_location(item),
'sources': self._parse_sources(item)
}
data['status'] = self._parse_status(item, data['start_time'])
data['id'] = self._generate_id(data, start_time)
yield data
def _parse_classification(self, item):
"""
Parse or generate classification (e.g. town hall).
"""
return 'Not classified'
def _parse_status(self, item, start_time):
"""
passed = meeting already started
tentative = no agenda posted
confirmed = agenda posted
"""
if datetime.now().replace(tzinfo=timezone(self.event_timezone)) > start_time:
return 'passed'
if 'url' in item['Agenda']:
return 'confirmed'
return 'tentative'
def _parse_location(self, item):
"""
        Parse or generate location. URL, latitude and longitude are all
optional and may be more trouble than they're worth to collect.
"""
return {
'url': None,
'address': item.get('Meeting Location', None),
'name': None,
'coordinates': {
'latitude': None,
'longitude': None,
},
}
def _parse_all_day(self, item):
"""
Parse or generate all-day status. Defaults to false.
"""
return False
def _parse_name(self, item):
"""
Parse or generate event name.
"""
return item['Name']['label']
def _parse_description(self, item):
"""
        Parse or generate event description.
"""
agenda = item['Agenda']
try:
return agenda['url']
except:
return agenda
def _parse_start(self, item):
"""
Parse start date and time.
"""
time = item.get('Meeting Time', None)
date = item.get('Meeting Date', None)
if date and time:
time_string = '{0} {1}'.format(date, time)
naive = datetime.strptime(time_string, '%m/%d/%Y %I:%M %p')
return self._naive_datetime_to_tz(naive, self.event_timezone)
return None
def _parse_end(self, item):
"""
Parse end date and time.
"""
return None
def _parse_sources(self, item):
"""
Parse sources.
"""
try:
url = item['Name']['url']
except:
url = 'https://cook-county.legistar.com/Calendar.aspx'
return [{'url': url, 'note': ''}]
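

# Illustrative note (not part of the spider): each item yielded by
# parse()/_parse_events() above is a plain dict following the Open Civic Data
# event layout, e.g. (placeholder values):
#   {'_type': 'event', 'name': 'Finance Committee', 'description': 'https://...',
#    'classification': 'Not classified', 'start_time': <tz-aware datetime>,
#    'end_time': None, 'all_day': False, 'timezone': 'America/Chicago',
#    'location': {'url': None, 'address': '...', 'name': None,
#                 'coordinates': {'latitude': None, 'longitude': None}},
#    'sources': [{'url': 'https://cook-county.legistar.com/Calendar.aspx', 'note': ''}],
#    'status': 'tentative', 'id': <generated id>}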
|
py | 1a33a7abbbea753754a192fc86a51c1737631e5e | from django.apps import AppConfig
class GramConfig(AppConfig):
name = 'gram'
|
py | 1a33a8746294870356c36d8b92bbcb53af8e4730 | from django.urls import path, include
urlpatterns = [
path('search/', include('search.urls')),
path('delivery/', include('email_delivery.urls'))
]
|
py | 1a33a8787b6015437dc572fe03963be3e941af8a | import json
import numpy as np
import os.path, datetime, subprocess
#from astropy.io import fits as pyfits
#from time import sleep
#from scipy.ndimage import gaussian_filter#, rotate
#from scipy.interpolate import interp1d
#from scipy.optimize import curve_fit
from .tools import *
from .phi_fits import *
from .phi_gen import *
from .phi_reg import *
from .phi_rte import *
#from .phi_utils import newton,azimutal_average,limb_darkening,genera_2d,find_string
from .phifdt_pipe_modules import phi_correct_dark,phi_correct_prefilter,phi_apply_demodulation,\
crosstalk_ItoQUV,cross_talk_QUV,crosstalk_ItoQUV2d,phi_correct_ghost,phi_correct_fringes,\
generate_level2
import SPGPylibs.GENtools.plot_lib as plib
import SPGPylibs.GENtools.cog as cog
#global variables
PLT_RNG = 5
def phifdt_pipe(json_input = None,
data_f: str = None, dark_f: str = None, flat_f: str = None,
input_data_dir: str = './', output_dir:str = './',
instrument: str = 'FDT40',
flat_c:bool = True, dark_c:bool = True, ItoQUV:bool = False, VtoQU:bool = False, ind_wave:bool = False, #correction options
hough_params:list = [250, 800, 100],
norm_f:bool = False, flat_scaling:float = 1., flat_index:list = None, #flatfield options
prefilter:bool = True, prefilter_fits:str = '0000990710_noMeta.fits',
realign:bool = False, verbose:bool = True, shrink_mask:int = 2, correct_fringes:str = False,
correct_ghost:bool = False, putmediantozero:bool = True,
rte = False, debug:bool = False, nlevel:float = 0.3, center_method:str = 'circlefit',
loopthis = 0, #developing purpose
do2d = 0, outfile = None #not in use
) -> int:
'''
Parameters
----------
:param str data_f:
Input parameters
----------
json_input = json input (for convenience). All parameters are then described there).
data_f = data_f : string
Fits file of the raw FDT data (for path use input_data_dir keyword)
dark_f = dark_f : string
Fits file of a Valid dark file (processed dark) (including path, if necessary)
flat_f = flat_f : string
Fits file of a Valid FDT flatfield (including path, if necessary)
input_data_dir: directory where input data is located. Default is local directory
output_dir: output directory. If default, takes local './'
IMPORTANT: dark_f, flat_f, and prefilter file must be provided with the FULL PATH.
the data has to be provided as a list of files (fits) and the directory via "input_data_dir = "
The output directories (Depending on RTE on or off) are
A) directory + Level 2: reduced raw data L2+ilam plus RTE output (so far, BLOS, VLOS and SO1: continuum)
B) directory + Level 2 + png: png figures of the RTE output (so far, BLOS and VLOS)
B) directory + Level 2 + npz: NPZ (python) reduced raw data L2
** OPTIONAL ARGUMENTS **
instrument = 'FDT40' : select the instrument and PMP temperature (for demod)
-> implemented cases: -- 'FDT40','FDT45' --
flat_c = True : default is to apply flat field correction to the data
dark_c = True : default is to apply dark field correction to the data
norm_f = False : To normalize flats internally to the mean value of 5% of the disk (central) intensity
flat_scaling = 1.0 : flat scaling (flat = flat / flat_scaling)
flat_index = None : in case you want a particular flat to be applied at another wave, e.g.,
flat_index = [5,1,2,3,4,0] exchange the first and last wave flats
This is for testing stuff, mainly.
prefilter = 1 : To correct for the prefilter
prefilter_fits = '../RSW1/0000990710_noMeta.fits' : User should provide prefilter data fits file location
realign = False : bool
Realign all images before demodulating using FFT
ind_wave = False : bool
Correct crosstalk from I to QUV for individual wavelengths
    verbose: True prints a lot of stuff (and plots)
shrink_mask = 2: 'Number of pixels to contract the sun mask for output of RTE'
correct_fringes = False: Fringe correction
'manual': first FM version. Freq, mask applied to all images with fixed frequencies
'auto' : calculate fringes freq. automatically (in development).
    correct_ghost = False; Correct ghost images
putmediantozero=True; puts median value to zero before RTE
rte = False: Run RTE if rte == 'RTE' or rte == 'CE' or rte == 'CE+RTE':
'RTE': RTE only
           'CE+RTE': RTE with classical estimates
'CE': Only classical estimates
'cog': Only Center of gravity (To be implemented)
    ItoQUV= False: apply crosstalk correction from Stokes I to Stokes Q, U, and V.
    VtoQU= False: apply crosstalk correction from Stokes V to Stokes Q and U.
nlevel = 0.3: Noise level above which to evaluate cross_talk_VQU (To be implemented)
    center_method = ['circlefit','Hough']
       Default is 'circlefit'. If set to 'Hough' uses the given find_center parameters
If find_center is set to None then uses header information, in any case
    hough_params = [250, 800, 100]; inner_radius = 250, outer_radius = 800, steps = 100 : initial values for finding sun center
verbose: increase the verbosity (many plots here) - default False
Returns
-------
0 if fail, 1 any other case
Raises
------
References
----------
Examples
--------
>>> import SPGPylibs as spg
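    A minimal illustrative call (the file names below are placeholders, not real data):

    >>> spg.phifdt_pipe(data_f = 'data.fits', dark_f = '/path/to/dark.fits',
    ...                 flat_f = '/path/to/flat.fits', input_data_dir = './',
    ...                 output_dir = './', rte = 'RTE')

    The same parameters can instead be collected in a json file and passed via
    json_input; its keys match the keyword names above (e.g. "data_f", "dark_f",
    "flat_f", "input_data_dir", "output_dir", "verbose", "rte", ...):

    >>> spg.phifdt_pipe(json_input = 'config.json')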
Notes
-----
This program is not optimized for speed. It assumes that input data is 6 wavelength.
C-MILOS must be compiled in each specific machine (C)
    The software updates some of the information in the fits keywords:
TODO:
# data_f -> input data (single file for FDT - ADD MULTIPLE FILES!!!! )
keyword to provide fixed cross-talk coefficients
keyword to provide fixed data normalization (based on a first obs)
    # pending to add class style (for module development)
FDT pipeline steps:
1- Read data
2- Check dimensions (open to bugs since only few data were tested)
3- Read flats
4- Read and correct dark field (taking into account the scaling)
5- Find center of the Sun in the data for masking and ghost correction
6- get wavelength sampling from header
7- move the continuum to the blue (if needed) in flat and data
8- interpolate flats (Not implemented yet - in case of deviations in voltage)
9- Correct flat-field
9- Correct prefilter - needs prefilter data file!
9- Correct ghost (complex step) [NEEDED in FM - high priority]
10- realign data before demodulation [NEEDED in FM - low priority]
11- Demodulate data using appropriate dem matrix
12- Normalize to average continuum [NEEDED in FM - low priority - determine the Icont automatically]
13- correct cross-talk from I to QUV [NEEDED in FM - evaluation of these automatically - mid priority]
14- correct cross-talk from V to QU (interactive) [NEEDED in FM - evaluation of these automatically - mid priority]
15- correct cross-talk from I to QUV in 2D (do not use this, your PC will have a hangover)
16- Fringes correction [NEEDED in FM -TBD]
17- median to zero [NEEDED in FM]
18- save
19- RTE (RTE or CE or CE+RTE)
20- plots
Keywords in the header (modified or added) within this program:
CAL_DARK = 26181001 / Onboard calibrated for dark field ! Dark correction ( DID/file of dark if True)
CAL_FLAT = 26181101 / Onboard calibrated for gain table ! Dark correction ( DID/file of flat if True)
CAL_PRE = Prefilter / Prefilter correction ( DID/file of flat if True)
CAL_GHST= Prefilter / Ghost correction ( name+version of py module if True )
CAL_REAL= Prefilter / Prealigment of images before demodulation ( name+version of py module if True )
CAL_IPOL= 990510 / Onboard calibrated for instrumental polarizatio ! demodulation ( DID of demod matrix if True ) - demod matrix may be 4x4 or 2048x2048x4x4
CAL_CRT0= float / cross-talk from I to Q (slope value, wrt normalized data in python)
CAL_CRT1= float / cross-talk from I to Q (off-set value, wrt normalized data in python)
CAL_CRT2= float / cross-talk from I to U (slope value, wrt normalized data in python)
CAL_CRT3= float / cross-talk from I to U (off-set value, wrt normalized data in python)
CAL_CRT4= float / cross-talk from I to V (slope value, wrt normalized data in python)
CAL_CRT5= float / cross-talk from I to V (off-set value, wrt normalized data in python)
CAL_CRT6= float / cross-talk from V to Q (slope value, wrt normalized data in python)
CAL_CRT7= float / cross-talk from V to Q (off-set value, wrt normalized data in python)
CAL_CRT8= float / cross-talk from V to U (slope value, wrt normalized data in python)
CAL_CRT9= float / cross-talk from V to U (off-set value, wrt normalized data in python)
CAL_NORM= 990510 / Normalization constant PROC_Ic)
CAL_FRIN= 990510 / Fringe correction ( name+version of py module if True ) TBD (posibly we need the freqs.)
* CAL_PSF= 990510 / Onboard calibrated for instrumental PSF ! TBD
CAL_RTE= 990510 / ok
* CAL_SCIP= 'None' / Onboard scientific data analysis
* RTE_ITER= 4294967295 / Number RTE inversion iterations
(*) are not touched in this software.
Keywords CRPIX1 and CRPIX2 are updated following the new center calculation within the pipeline. Old values are stored in the history.
Keywords CRVAL1 and CRVAL2 are NOT updated but should be SET to zero!!!!
'''
version = 'V1.0 July 2021'
version = 'V1.0 13th September 2021'
version = 'V1.0 3th November 2021'
#added json configuration and modify all keyword names to be consistent with HRT pipe
version_cmilos = 'CMILOS v0.91 (July - 2021)'
printc('--------------------------------------------------------------',bcolors.OKGREEN)
    printc('PHI FDT data reduction software (for developing purposes only) ',bcolors.OKGREEN)
printc(' version: '+ version,bcolors.OKGREEN)
printc(' version_cmilos: '+ version_cmilos,bcolors.OKGREEN)
printc('--------------------------------------------------------------',bcolors.OKGREEN)
if json_input:
# =========================================================================== #
# READING CONFIG FILE AND PRINTING
printc('--------------------------------------------------------------',bcolors.OKGREEN)
printc(' Reading config json file '+json_input,bcolors.OKGREEN)
with open(json_input) as j:
CONFIG = json.load(j)
verbose = CONFIG['verbose']
input_data_dir = CONFIG['input_data_dir']
data_f = CONFIG['data_f']
shrink_mask = CONFIG['shrink_mask']
center_method = CONFIG['center_method']
hough_params = CONFIG['hough_params']
instrument = CONFIG['instrument']
flat_f = CONFIG['flat_f']
dark_f = CONFIG['dark_f']
dark_c = CONFIG['dark_c']
flat_c = CONFIG['flat_c']
flat_index = CONFIG['flat_index']
norm_f = CONFIG['norm_f']
flat_scaling = CONFIG['flat_scaling']
prefilter_fits = CONFIG['prefilter_fits']
prefilter = CONFIG['prefilter']
output_dir = CONFIG['output_dir']
rte = CONFIG['rte']
correct_fringes = CONFIG['correct_fringes']
correct_ghost = CONFIG['correct_ghost']
putmediantozero = CONFIG['putmediantozero']
debug = CONFIG['debug']
loopthis = CONFIG['loopthis']
ItoQUV = CONFIG['ItoQUV']
VtoQU = CONFIG['VtoQU']
realign = CONFIG['realign']
ind_wave = CONFIG['ind_wave']
nlevel = CONFIG['nlevel']
import pprint
# Prints the nicely formatted dictionary
pprint.pprint(CONFIG)#, sort_dicts=False)
#-----------------
# READ DATA
#-----------------
data_filename = input_data_dir + data_f
if os.path.isfile(data_filename):
print("File exist")
else:
print("File not exist")
try:
data, header = fits_get(data_filename)
printc('-->>>>>>> Reading Data file: '+data_filename,color=bcolors.OKGREEN)
#
# PXBEG1 = 385 ; First read-out pixel in dimension 1
# PXEND1 = 1664 ; Last read-out pixel in dimension 1
# PXBEG2 = 385 ; First read-out pixel in dimension 2
# PXEND2 = 1664 ; Last read-out pixel in dimension 2
DID = header['PHIDATID']
ACC = header['ACCACCUM']
printc('-->>>>>>> data DID '+DID,color=bcolors.OKGREEN)
printc(' DATA IS DIVIDED by 256. ',color=bcolors.OKGREEN)
printc('-->>>>>>> Reshaping data to [wave,Stokes,y-dim,x-dim] ',color=bcolors.OKGREEN)
zd,yd,xd = data.shape
data = np.reshape(data,(zd//4,4,yd, xd))
data = data / 256. #from fix to 32
data = np.ascontiguousarray(data)
#/ PHI_FITS_FPA_settings
# FPIMGCMD= 8 / FPA image command
# FPA_SROW= 0 / FPA start row setting FPA_EROW= 1022 / FPA end row setting
# FPA_NIMG= 20 / FPA number of images set FPEXPSTC= 1592452786 / [s] FPA exposure start time coarse
# FPEXPSTF= 699245 / [us] FPA exposure start time fine
# INTTIME = 0.01 / [s] Exposure time of single readout
# TELAPSE = 58.1974400877953 / [s]
# Elapsed time between start and end of obser
# NSUMEXP = 480 / Number of detector readouts
# XPOSURE = 4.8 / [s] Total effective exposure time
# ACCLENGT= 4194304 / ACCU number of pixel set
# ACCNROWS= 6 / ACCU number of rows set
# ACCROWIT= 1 / ACCU number of row iterations set
# ACCNCOLS= 4 / ACCU number of columns set
# ACCCOLIT= 1 / ACCU number of column iterations set
# ACCACCUM= 20 / ACCU number of accumulations set
# ACCADDR = 0 / ACCU readout address (start)
except Exception:
printc("ERROR, Unable to open fits file: {}",data_filename,color=bcolors.FAIL)
return 0
header['history'] = ' Data processed with phifdt_pipe.py '+ version
header['history'] = ' and time '+ str(datetime.datetime.now())
header['history'] = ' Parameters normalize_flat: '+ str(norm_f)
header['history'] = ' Parameters flat_scaling: '+ str(flat_scaling)
header['history'] = ' Parameters shrink_mask: '+ str(shrink_mask)
header['history'] = ' Parameters center_method: '+ str(center_method)
header['history'] = ' Parameters Hough: '+ str(hough_params)
if verbose:
plib.show_one(data[0,0,:,:],vmin=0,xlabel='pixel',ylabel='pixel',title='Data first image raw (1 of 24)',cbarlabel='DN',save=None,cmap='gray')
# * CAL_RTE= 990510 / ok
# * CAL_SCIP= 'None' / Onboard scientific data analysis
# * RTE_ITER= 4294967295 / Number RTE inversion iterations
# * PHIDATID= '142010402' / PHI dataset Id
#-----------------
# TAKE DATA DIMENSIONS AND SCALING
#-----------------
PXBEG1 = int(header['PXBEG1']) - 1
PXEND1 = int(header['PXEND1']) - 1
PXBEG2 = int(header['PXBEG2']) - 1
PXEND2 = int(header['PXEND2']) - 1
printc('Dimensions: ',PXBEG1, PXEND1, PXBEG2, PXEND2,color=bcolors.OKGREEN)
if xd != (PXEND1 - PXBEG1 + 1) or yd != (PXEND2 - PXBEG2 + 1):
printc('ERROR, Keyword dimensions and data array dimensions do not match ',color=bcolors.FAIL)
return 0
if xd < 2047:
printc(' data cropped to: [',PXBEG1,',',PXEND1,'],[',PXBEG2,',',PXEND2,']',color=bcolors.WARNING)
data_scale = fits_get(data_filename,scaling = True)
#-----------------
# READ FLAT FIELDS
#-----------------
if flat_c:
printc('-->>>>>>> Reading flat file '+flat_f,color=bcolors.OKGREEN)
printc(' Assumes they are already normalized to ONE ',color=bcolors.OKGREEN)
printc(' input should be [wave X Stokes,y-dim,x-dim].',color=bcolors.OKGREEN)
try:
dummy,flat_header = fits_get(flat_f)
fz_d,fy_d,fx_d = dummy.shape
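# Embed the (possibly cropped) flat into a full 2048x2048 frame using its own PXBEG/PXEND keywords.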
flat = np.zeros([24,2048,2048]).astype(np.float32)
PXBEG1_f = int(flat_header['PXBEG1']) - 1
PXEND1_f = int(flat_header['PXEND1']) - 1
PXBEG2_f = int(flat_header['PXBEG2']) - 1
PXEND2_f = int(flat_header['PXEND2']) - 1
if fx_d < 2047:
printc(' input flat was cropped to: [',PXBEG1_f,',',PXEND1_f,'],[',PXBEG2_f,',',PXEND2_f,']',color=bcolors.WARNING)
flat[:,PXBEG1_f:PXEND1_f+1,PXBEG2_f:PXEND2_f+1] = dummy
del dummy
printc('-->>>>>>> Reshaping Flat to [wave,Stokes,y-dim,x-dim] ',color=bcolors.OKGREEN)
fz,fy,fx = flat.shape
flat = np.reshape(flat,(fz//4,4,fy,fx))
except Exception:
printc("ERROR, Unable to open flats file: {}",flat_f,color=bcolors.FAIL)
return 0
if verbose:
plib.show_one(flat[0,0,:,:],xlabel='pixel',ylabel='pixel',title='Flat first image raw (1 of 24)',cbarlabel='Any (as input)',save=None,cmap='gray')
else:
printc('-->>>>>>> No flats mode ',color=bcolors.WARNING)
#-----------------
# READ AND CORRECT DARK FIELD
#-----------------
if dark_c:
data,header = phi_correct_dark(dark_f,data,header,data_scale,verbose = verbose)
else:
printc('-->>>>>>> No darks mode ',color=bcolors.WARNING)
#-----------------
# FIND DATA CENTER
#-----------------
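# The disk centre can be found with a Hough transform, with a circle fit, or taken from the header pointing keywords (CRPIX1/CRPIX2 and RSUN_ARC/CDELT1).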
printc('-->>>>>>> finding the center of the solar disk (needed for masking) ',color=bcolors.OKGREEN)
try:
if center_method == 'Hough':
inner_radius,outer_radius,steps = hough_params
c, radius,threshold = find_circle_hough(data[0,0,:,:],inner_radius,outer_radius,steps,threshold = 0.01,normalize=False,verbose=False)
#c = np.roll(c,1)
cx = c[0]
cy = c[1]
#TO BE PUT IN CORRECT UNITS
elif center_method == 'circlefit':
cy,cx,radius=find_center(data[0,0,:,:]) #NOTE: returns Cy first, then Cx
c = np.array([int(cx),int(cy)]) #The vector is [0,1,2,...] == [x,y,z,...] == [cx,cy,cz,...], but the latter is stored the other way round
radius = int(radius)
elif center_method is None:
#get from header
cx = header['CRPIX1']
cy = header['CRPIX2']
c = np.array([int(cx),int(cy)]) #The vector is [0,1,2,...] == [x,y,z,...] == [cx,cy,cz,...], but the latter is stored the other way round
radius = header['RSUN_ARC']/header['CDELT1']
else:
raise ValueError("ERROR in center determination method - check input 'circlefit','Hough',null/None")
except ValueError as err:
print(err.args)
return 0
#Update header with new centers
if center_method == 'Hough' or center_method == 'circlefit':
printc(' Update header with new center:',color=bcolors.OKBLUE)
printc(' OLD center:',color=bcolors.OKBLUE)
printc(' at: CRPIX1[x]=',header['CRPIX1'],' CRPIX2[y]=',header['CRPIX2'],' radius=',radius,color=bcolors.OKBLUE)
header['history'] = ' CRPIX 1 and CRPIX2 updated from ' + str(header['CRPIX1'])+ ' and ' + str(header['CRPIX2'])
header['CRPIX1'] = (round(cx, 2))
header['CRPIX2'] = (round(cy, 2))
printc(' NEW center:',color=bcolors.OKBLUE)
printc(' at: CRPIX1[x]=',header['CRPIX1'],' CRPIX2[y]=',header['CRPIX2'],' radius=',radius,color=bcolors.OKBLUE)
printc('ATTENTION: Keywords CRVAL1 and CRVAL2 are NOT updated but should be SET to zero!!!!',color=bcolors.FAIL)
else:
printc(' Using header image center:',color=bcolors.OKBLUE)
printc(' at: CRPIX1[x]=',header['CRPIX1'],' CRPIX2[y]=',header['CRPIX2'],' radius=',radius,color=bcolors.OKBLUE)
#NOTE:
# find_circle_hough returns c with c[0] = x and c[1] = y !!!!!!!!!!!!!!
# This comes from the KLL code, where it is defined that way (reversed) in the votes() routine
#-----------------
# TAKE ONLY DISK WITH MARGIN
#-----------------
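# Build a circular mask of radius (disk radius - shrink_mask) centred on the measured disk centre; rx/ry give the bounding box used later for the RTE inversion.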
printc('-->>>>>>> Creating a mask for RTE with ',shrink_mask,' px margin')
size_of_mask = radius - shrink_mask
rx = [int(c[0]-size_of_mask),int(c[0]+size_of_mask)]
ry = [int(c[1]-size_of_mask),int(c[1]+size_of_mask)]
mask,coords = generate_circular_mask([xd-1,yd-1],size_of_mask,size_of_mask)
mask = shift(mask, shift=(c[0]-xd//2,c[1]-yd//2), fill_value=0)
printc(' RX = ', rx, 'RY = ', ry, color=bcolors.WARNING)
#-----------------
# GET INFO ABOUT VOLTAGES/WAVELENGTHS, determine continuum and new flat
#-----------------
printc('-->>>>>>> Obtaining voltages from data ',color=bcolors.OKGREEN)
wave_axis,voltagesData,tunning_constant,cpos,ref_wavelength = fits_get_sampling(data_filename)
printc(' Data FG voltages: ',voltagesData,color=bcolors.OKBLUE)
printc(' Continuum position at wave: ', cpos,color=bcolors.OKBLUE)
printc(' Data ref_wavelength [mA]: ',ref_wavelength,color=bcolors.OKBLUE)
printc(' Data wave axis [mA]: ',wave_axis,color=bcolors.OKBLUE)
printc(' Data wave axis - axis[0] [mA]: ',wave_axis - wave_axis[0],color=bcolors.OKBLUE)
dummy_1 = (voltagesData-np.roll(voltagesData,-1))*(tunning_constant*1000)
dummy_2 = np.sort(np.abs(dummy_1))
sampling = np.mean(dummy_2[0:-2])
printc(' Data average sampling [mA]: ',sampling,' using tuning constant: ',(tunning_constant*1000),color=bcolors.OKBLUE)
#-----------------
# ROLL DATA IF CONTINUUM IS IN DIFFERENT POSITION
# Probably before demodulation and flat is better!!!!
#-----------------
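# Roll the wavelength dimension by one position so that the continuum sample ends up at index 0 (assumes the continuum was measured in the last position).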
if cpos != 0:
datar = np.copy(data)
voltagesDatar = np.copy(voltagesData)
wave_axisr = np.copy(wave_axis)
if voltagesData[cpos] < voltagesData[0]:
#continuum is on the right but it is in the blue!!!!
printc(' Rolling data to move continuum from right (red) to left (blue)',color=bcolors.WARNING)
printc(' * the continuum was TAKEN in the BLUE, stored in the RED, but we want it in the BLUE *',color=bcolors.WARNING)
for i in range(zd//4):
#print((i+1)%(zd//4),i%(zd//4),i,(zd//4))
datar[(i+1)%(zd//4),:,:,:] = data[i%(zd//4),:,:,:] # np.roll(data, 4, axis=0)
voltagesDatar[(i+1)%(zd//4)] = voltagesData[i%(zd//4)] # np.roll(data, 4, axis=0)
wave_axisr[(i+1)%(zd//4)] = wave_axis[i%(zd//4)] # np.roll(data, 4, axis=0)
if voltagesData[cpos] > voltagesData[0]:
#continuum is on the right and it is in the red!!!!
printc(' Rolling data to move continuum from right (red) to left (blue)',color=bcolors.WARNING)
printc(' * the continuum was TAKEN in the RED but we want it in the BLUE',color=bcolors.WARNING)
printc(' * Notice that this is necessary for the FLAT correction',color=bcolors.FAIL)
printc(' * but the wavelength axis has changed the continuum point *',color=bcolors.FAIL)
for i in range(zd//4):
#print((i+1)%(zd//4),i%(zd//4),i,(zd//4))
datar[(i+1)%(zd//4),:,:,:] = data[i%(zd//4),:,:,:] # np.roll(data, 4, axis=0)
voltagesDatar[(i+1)%(zd//4)] = voltagesData[i%(zd//4)] # np.roll(data, 4, axis=0)
wave_axisr[(i+1)%(zd//4)] = wave_axis[i%(zd//4)] # np.roll(data, 4, axis=0)
data = np.copy(datar)
voltagesData = np.copy(voltagesDatar)
wave_axis = np.copy(wave_axisr)
del datar
del voltagesDatar
del wave_axisr
cpos = 0
printc(' New FG voltages: ',voltagesData,color=bcolors.OKBLUE)
printc(' NEW continuum position at wave: ', cpos,color=bcolors.OKBLUE)
printc(' NEW data wave axis [mA]: ',wave_axis,color=bcolors.OKBLUE)
if flat_c:
printc('-->>>>>>> Obtaining voltages from flats ',color=bcolors.OKGREEN)
#ff = '../Nov-2020-STP122/solo_L0_phi-fdt-flat_0645767986_V202012091123I_0066181100.fits'
wave_axis_f,voltagesFlat,tunning_constant_f,cpos_f,ref_wavelength_f = fits_get_sampling(flat_f)
printc(' FLAT FG voltages: ',voltagesFlat,color=bcolors.OKBLUE)
printc(' FLAT Continuum position at wave: ', cpos_f,color=bcolors.OKBLUE)
printc(' FLAT wave axis [mA]: ',wave_axis_f,color=bcolors.OKBLUE)
printc(' FLAT ref_wavelength [mA]: ',ref_wavelength_f,color=bcolors.OKBLUE)
printc(' FLAT wave axis - ref_wavelength [mA]: ',wave_axis_f - ref_wavelength_f,color=bcolors.OKBLUE)
dummy_1 = (voltagesFlat-np.roll(voltagesFlat,-1))*(tunning_constant_f*1000)
dummy_2 = np.sort(np.abs(dummy_1))
sampling_f = np.mean(dummy_2[0:-2])
printc(' FLAT average sampling [mA]: ',sampling_f,color=bcolors.OKBLUE)
# printc('-->>>>>>> Reshaping flat to [wave,Stokes,y-dim,x-dim]',color=bcolors.OKGREEN)
# flat = np.reshape(flat,(fz//4,4,fy, fx))
#-----------------
# ROLL FLAT IF CONTINUUM IS IN DIFFERENT POSITION
# Probably before demodulation and flat is better!!!!
#-----------------
if flat_c:
if cpos_f != 0:
flatr = np.copy(flat)
voltagesFlatr = np.copy(voltagesFlat)
wave_axis_fr = np.copy(wave_axis_f)
if voltagesFlat[cpos_f] < voltagesFlat[0]:
#continuum is on the right but it is in the blue!!!!
printc(' Rolling flat to move continuum from right (red) to left (blue)',color=bcolors.WARNING)
printc(' * the continuum was TAKEN in the BLUE, stored in the RED, but we want it in the BLUE *',color=bcolors.WARNING)
for i in range(fz//4):
#print((i+1)%(fz//4),i%(fz//4),i)
flatr[(i+1)%(fz//4),:,:,:] = flat[i%(fz//4),:,:,:] # np.roll(data, 4, axis=0)
voltagesFlatr[(i+1)%(fz//4)] = voltagesFlat[i%(fz//4)] # np.roll(data, 4, axis=0)
wave_axis_fr[(i+1)%(fz//4)] = wave_axis_f[i%(fz//4)] # np.roll(data, 4, axis=0)
if voltagesFlat[cpos_f] > voltagesFlat[0]:
#continuum is on the right and it is in the red!!!!
printc(' Rolling flat to move continuum from right (red) to left (blue)',color=bcolors.WARNING)
printc(' * the continuum was TAKEN in the RED but we want it in the BLUE',color=bcolors.WARNING)
printc(' * Notice that this is necessary for the FLAT correction',color=bcolors.FAIL)
printc(' * but the wavelength axis has changed the continuum point *',color=bcolors.FAIL)
for i in range(fz//4):
#print((i+1)%(fz//4),i%(fz//4),i)
flatr[(i+1)%(fz//4),:,:,:] = flat[i%(fz//4),:,:,:] # np.roll(data, 4, axis=0)
voltagesFlatr[(i+1)%(fz//4)] = voltagesFlat[i%(fz//4)] # np.roll(data, 4, axis=0)
wave_axis_fr[(i+1)%(fz//4)] = wave_axis_f[i%(fz//4)] # np.roll(data, 4, axis=0)
flat = np.copy(flatr)
voltagesFlat = np.copy(voltagesFlatr)
wave_axis_f = np.copy(wave_axis_fr)
del flatr
del voltagesFlatr
del wave_axis_fr
cpos_f = 0
printc(' New Flat FG voltages: ',voltagesFlat,color=bcolors.OKBLUE)
printc(' NEW Flat continuum position at wave: ', cpos_f,color=bcolors.OKBLUE)
printc(' NEW Flat data wave axis [mA]: ',wave_axis_f,color=bcolors.OKBLUE)
# TODO: INTERPOLATE THE FLAT TO MATCH THE WAVELENGTH (CAVITY)
# from scipy.interpolate import RegularGridInterpolator
# x = np.linspace(0,2047,2048).astype(int)
# y = np.linspace(0,2047,2048).astype(int)
# z = np.array([-300.,-140.,-70.,0.,70.,140.,300.]) #ojo con el -300
# zn = np.array([-175.,-140.,-105.,-70.,-35.,0.,35.,70.,105.,140.,175.,300.])
# flat_rsw1 = np.concatenate(((flat_rsw1[5,:,:,:])[np.newaxis,:,:,:],flat_rsw1))
# fn = RegularGridInterpolator((z,y,x), flat_rsw1[:,0,:,:])
# pts = np.array([-40,10,10])
# print(fn(pts))
# pts = np.meshgrid(-40.,y,x)
# pts = np.array([m.flatten() for m in pts])
# flat_n = fn(pts.T)
# result = flat_n.reshape((2048,2048))
# plt.imshow(result,vmin=0.9,vmax=1.1)
# flat_n = np.zeros((12,4,2048,2048))
# for i in range(4):
# fn = RegularGridInterpolator((z,y,x), flat_rsw1[:,i,:,:],bounds_error=False)
# for j in range(12):
# print(i,zn[j])
# pts_list = np.meshgrid(zn[j],y,x)
# pts = np.array([m.flatten() for m in pts_list])
# flat_n[j,i,:,:] = fn(pts.T).reshape((2048,2048))
#-----------------
# APPLY FLAT CORRECTION
# TODO: TAKE THE REAL FLAT
#-----------------
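# rrx/rry define a small box around disk centre (within 5% of the disk radius) used to normalize the flat and, later, the data.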
factor = 0.05
rrx = [int(c[0]-radius*factor),int(c[0]+radius*factor)]
rry = [int(c[1]-radius*factor),int(c[1]+radius*factor)]
if flat_c:
printc('-->>>>>>> Correcting Flatfield',color=bcolors.OKGREEN)
try:
if (len(flat_index)) == 6:
print(' Changing flat index to ',flat_index)
except:
flat_index = [0,1,2,3,4,5]
for p in range(4):
for l in range(int(zd//4)):
print(' ... pol: ',p,' wave: ',l,' index: ',flat_index[l])
dummy_flat = (flat[flat_index[l],p,PXBEG2:PXEND2+1,PXBEG1:PXEND1+1]/float(flat_scaling))
if norm_f:
print(' normalizing flats using region x = [',rrx[0],':',rrx[1],'] y = [',rry[0],':',rry[1],']')
mm = np.mean(dummy_flat[rry[0]:rry[1],rrx[0]:rrx[1]])
dummy_flat = dummy_flat / mm
data[l,p,:,:] = data[l,p,:,:]/dummy_flat
del dummy_flat
# locations = find_string(flat_f,'_')
# try:
# DID_flat = flat_f[locations[-1]+1:locations[-1]+10]
# print('DID: ',np.float(DID_flat))
# except:
# DID_flat = flat_f[:-4]
# printc("Unable to get DID from: {}",flat_f,color=bcolors.WARNING)
# locations = find_string(dark_f,'/')
# DID_flat = flat_f[locations[-1]+1:]
# printc('DID: ',DID_flat,' -->> WILL NOT BE A NUMBER',color=bcolors.WARNING)
DID_flat = flat_header['PHIDATID']
if 'CAL_FLAT' in header: # Check for existence
header['CAL_FLAT'] = DID_flat
else:
header.set('CAL_FLAT', DID_flat, 'Onboard calibrated for gain table',after='CAL_DARK')
if verbose:
plib.show_one(data[cpos,0,:,:],vmax=None,vmin=0,xlabel='pixel',ylabel='pixel',title='Data / flat at continuum',cbarlabel='DN',save=None,cmap='gray')
#-----------------
# CORRECT PREFILTER
#-----------------
if prefilter:
data,header = phi_correct_prefilter(prefilter_fits,header,data,voltagesData,verbose = verbose)
#-----------------
# GHOST CORRECTION
#-----------------
if correct_ghost:
data,header = phi_correct_ghost(data,header,radius,verbose = True)
#-----------------
# REALIGN DATA BEFORE DEMODULATION
#-----------------
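# Measure the residual shifts between the four modulation states at each wavelength via FFT cross-correlation and compensate them before demodulation.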
if realign:
printc('-->>>>>>> Realigning data... ',color=bcolors.OKGREEN)
for i in range(zd//4):
s_x,s_y,_ = PHI_shifts_FFT(data[i,:,:,:],prec=500,verbose=verbose,norma=False)
for j in range(4):
data[i,j,:,:] = shift_subp(data[i,j,:,:], shift=[s_x[j],s_y[j]]) #extra y,z so the order is reversed with respect to the FFT
if 'CAL_REAL' in header: # Check for existence
header['CAL_REAL'] = 'FFT'
else:
header.set('CAL_REAL', 'FFT', 'Realignment of data (phifdt_pipe_modules.py)',after='CAL_DARK')
#-----------------
# APPLY DEMODULATION
#-----------------
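# Demodulation turns the four modulated intensities at each wavelength into the Stokes vector (I,Q,U,V).
# In debug mode a fixed demodulation matrix is also applied pixel by pixel for comparison.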
printc('-->>>>>>> Demodulating data... ',color=bcolors.OKGREEN)
if debug:
datan = np.copy(data)
ds = np.copy(data)
demodM = np.array([[0.168258, 0.357277, 0.202212, 0.273266],\
[-0.660351, 0.314981, 0.650029, -0.299685],\
[ 0.421242, 0.336994, -0.183068, -0.576202],\
[-0.351933, 0.459820, -0.582167, 0.455458]])
for i in range(zd//4):
for l in range(xd):
for m in range(yd):
datan[i,:,m,l] = np.matmul(demodM, ds[i,:,m,l] )
plib.show_four_row(datan[3,0,:,:],datan[3,1,:,:],datan[3,2,:,:],datan[3,3,:,:],svmin=[0,-0.2,-0.2,-1.],svmax=[100,0.1,0.1,0.1])
data, header = phi_apply_demodulation(data,instrument,header=header)
if verbose == 1:
plib.show_four_row(data[3,0,:,:],data[3,1,:,:],data[3,2,:,:],data[3,3,:,:],title=['I','Q','U','V'],zoom = 3,svmin=[0,-0.004,-0.004,-0.004],svmax=[1.2,0.004,0.004,0.004])
# with pyfits.open(data_filename) as hdu_list:
# hdu_list[0].data = data
# hdu_list[0].header = header
# hdu_list.writeto('dummy.fits', clobber=True)
#-----------------
# APPLY NORMALIZATION
#-----------------
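# Normalize all Stokes images by the mean continuum intensity evaluated in the central rrx/rry box.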
printc('-->>>>>>> Applying normalization --',color=bcolors.OKGREEN)
nrm = np.mean(data[cpos,0,rry[0]:rry[1],rrx[0]:rrx[1]])
print(' Norma is: ',nrm,' evaluated in x = [',rrx[0],':',rrx[1],'] y = [',rry[0],':',rry[1],']')
data = data/nrm
if 'CAL_NORM' in header: # Check for existence
header['CAL_NORM'] = np.round(nrm,6)
else:
header.set('CAL_NORM', np.round(nrm,6), 'Normalization constant PROC_Ic',after='CAL_DARK')
if debug:
datan = datan/nrm
if debug:
plib.show_four_row(data[3,0,:,:],data[3,1,:,:],data[3,2,:,:],data[3,3,:,:])
plib.show_four_row(datan[3,0,:,:],datan[3,1,:,:],datan[3,2,:,:],datan[3,3,:,:])
if verbose == 1:
plib.show_four_row(data[3,0,:,:],data[3,1,:,:],data[3,2,:,:],data[3,3,:,:],title=['I','Q','U','V'],zoom = 3,svmin=[0,-0.004,-0.004,-0.004],svmax=[1.2,0.004,0.004,0.004])
#-----------------
# GHOST CORRECTION AFTER DEMODULATION
#-----------------
# if correct_ghost:
# data,header = phi_correct_ghost_dm(data,header,radius,verbose = verbose)
#-----------------
# CROSS-TALK CALCULATION
#-----------------
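# Cross-talk coefficients (slope and offset of Q, U and V versus I) are estimated over the inner 80% of the disk, either per wavelength (ind_wave) or globally.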
if ItoQUV or putmediantozero:
factor_media = 0.8 # 80% of the disk
rrx_m = [int(c[0]-radius*factor_media),int(c[0]+radius*factor_media)]
rry_m = [int(c[1]-radius*factor_media),int(c[1]+radius*factor_media)]
maski,coords = generate_circular_mask([xd-1,yd-1],radius*factor_media,radius*factor_media)
maski = shift(maski, shift=(c[0]-xd//2,c[1]-yd//2), fill_value=0).astype(int)
if ItoQUV:
printc('-->>>>>>> Cross-talk correction from Stokes I to Stokes Q,U,V --',color=bcolors.OKGREEN)
printc(' Using ',factor_media*100,'% of the disk ',color=bcolors.OKGREEN)
printc(' Crosstalk evaluated in x = [',rrx_m[0],':',rrx_m[1],'] y = [',rry_m[0],':',rry_m[1],']',' using ',factor_media*100,"% of the disk",color=bcolors.OKBLUE)
if ind_wave:
for i in range(zd//4):
printc(' Individual wavelengths....',color=bcolors.OKBLUE)
broadcastd = data[i,:,rry_m[0]:rry_m[1],rrx_m[0]:rrx_m[1]]
data_dummy = data[:,:,rry_m[0]:rry_m[1],rrx_m[0]:rrx_m[1]]*0. + broadcastd[np.newaxis,:,:,:]
cQ,cU,cV = crosstalk_ItoQUV(data_dummy[:,:,rry_m[0]:rry_m[1],rrx_m[0]:rrx_m[1]],npoints=10000,verbose=verbose)
#-----------------
# CROSS-TALK CORRECTION
#-----------------
printc(' Applying cross-talk correction...',color=bcolors.OKGREEN)
data[i,1,:,:] = data[i,1,:,:] - cQ[0]*data[i,0,:,:] - cQ[1]
data[i,2,:,:] = data[i,2,:,:] - cU[0]*data[i,0,:,:] - cU[1]
data[i,3,:,:] = data[i,3,:,:] - cV[0]*data[i,0,:,:] - cV[1]
if verbose:
plt.hist(data[0,1,maski > 0].flatten(), bins='auto')
plt.title('Stokes Q')
plt.hist(data[0,2,maski > 0].flatten(), bins='auto')
plt.title('Stokes U')
plt.hist(data[0,3,maski > 0].flatten(), bins='auto')
plt.title('Stokes V')
plt.show()
else:
cQ,cU,cV = crosstalk_ItoQUV(data[:,:,rry_m[0]:rry_m[1],rrx_m[0]:rrx_m[1]],verbose=verbose,npoints=10000)
# rrx = [int(c[1]),int(c[1]+r*factor)]
# rry = [int(c[0]-r*factor),int(c[0])]
# cQ,cU,cV = crosstalk_ItoQUV(data[:,:,rry[0]:rry[1],rrx[0]:rrx[1]],verbose=verbose,npoints=10000)
# rrx = [int(c[1]-r*factor),int(c[1])]
# rry = [int(c[0]),int(c[0]+r*factor)]
# cQ,cU,cV = crosstalk_ItoQUV(data[:,:,rry[0]:rry[1],rrx[0]:rrx[1]],verbose=verbose,npoints=10000)
# return
#-----------------
# CROSS-TALK CORRECTION
#-----------------
printc(' Applying cross-talk correction...',color=bcolors.OKGREEN)
data[:,1,:,:] = data[:,1,:,:] - cQ[0]*data[:,0,:,:] - cQ[1]
data[:,2,:,:] = data[:,2,:,:] - cU[0]*data[:,0,:,:] - cU[1]
data[:,3,:,:] = data[:,3,:,:] - cV[0]*data[:,0,:,:] - cV[1]
if verbose:
plib.show_hist(data[0,1, maski > 0].flatten(), bins='auto',title=' ',leave = 'open',color='green')
plib.show_hist(data[0,2, maski > 0].flatten(), bins='auto',title=' ',leave = 'open',color='red')
plib.show_hist(data[0,3, maski > 0].flatten(), bins='auto',title='Stokes Q/U/V - no zero',color='blue')
plib.show_four_row(data[2,0,:,:],data[2,1,:,:],data[2,2,:,:],data[2,3,:,:],title=['I - corr','Q - corr','U - corr','V - corr'],zoom=3,svmin=[0,-0.004,-0.004,-0.004],svmax=[1.2,0.004,0.004,0.004])
# PLT_RNG = 2
# plib.show_four_row(data[1,0,:,:],data[1,1,:,:],data[1,2,:,:],data[1,3,:,:],title=['I','Q','U','V'],svmin=[0.1,-0.002,-0.002,-0.003],svmax=[1.1,0.002,0.002,0.003])#,save='t1_'+str(loopthis)+'.png')
# plib.show_four_row(data[3,0,:,:],data[3,1,:,:],data[3,2,:,:],data[3,3,:,:],title=['I','Q','U','V'],svmin=[0.1,-0.002,-0.002,-0.003],svmax=[1.1,0.002,0.002,0.003])#,save='t3_'+str(loopthis)+'.png')
# plib.show_four_row(data[5,0,:,:],data[5,1,:,:],data[5,2,:,:],data[5,3,:,:],title=['I','Q','U','V'],svmin=[0.1,-0.002,-0.002,-0.003],svmax=[1.1,0.002,0.002,0.003])#,save='t5_'+str(loopthis)+'.png')
# np.save('data_dummy',data)
if 'CAL_CRT0' in header: # Check for existence
header['CAL_CRT0'] = np.round(cQ[0]*100,3)
else:
header.set('CAL_CRT0', np.round(cQ[0]*100,3), 'cross-talk I to Q (slope in %), wrt CAL_NORM ',after='CAL_DARK')
if 'CAL_CRT1' in header: # Check for existence
header['CAL_CRT1'] = np.round(cQ[1]*100,3)
else:
header.set('CAL_CRT1', np.round(cQ[1]*100,3), 'cross-talk I to Q (off-set in %), wrt CAL_NORM ',after='CAL_CRT0')
if 'CAL_CRT2' in header: # Check for existence
header['CAL_CRT2'] = np.round(cU[0]*100,3)
else:
header.set('CAL_CRT2', np.round(cU[0]*100,3), 'cross-talk I to U (slope in %), wrt CAL_NORM ',after='CAL_CRT1')
if 'CAL_CRT3' in header: # Check for existence
header['CAL_CRT3'] = np.round(cU[1]*100,3)
else:
header.set('CAL_CRT3', np.round(cU[1]*100,3), 'cross-talk I to U (off-set in %), wrt CAL_NORM ',after='CAL_CRT2')
if 'CAL_CRT4' in header: # Check for existence
header['CAL_CRT4'] = np.round(cV[0]*100,3)
else:
header.set('CAL_CRT4', np.round(cV[0]*100,3), 'cross-talk I to V (slope in %), wrt CAL_NORM',after='CAL_CRT3')
if 'CAL_CRT5' in header: # Check for existence
header['CAL_CRT5'] = np.round(cV[1]*100,3)
else:
header.set('CAL_CRT5', np.round(cV[1]*100,3), 'cross-talk I to V (off-set in %), wrt CAL_NORM ',after='CAL_CRT4')
#-----------------
# CROSS-TALK CALCULATION FROM V TO QU (Interactive)
#-----------------
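# Interactive step: the V->Q and V->U cross-talk coefficients are estimated in a central box (30% of the disk radius) and only applied if the user confirms.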
if VtoQU:
printc('-->>>>>>> Cross-talk correction from Stokes V to Stokes Q,U ',color=bcolors.OKGREEN)
factor = 0.3 # 30% of the disk
rrx = [int(c[0]-radius*factor),int(c[0]+radius*factor)]
rry = [int(c[1]-radius*factor),int(c[1]+radius*factor)]
print(' Cross-talk evaluated in x = [',rrx[0],':',rrx[1],'] y = [',rry[0],':',rry[1],']',' using ',factor*100,"% of the disk")
cVQ,cVU = cross_talk_QUV(data[:,:,rry[0]:rry[1],rrx[0]:rrx[1]],nran = 2000,nlevel=nlevel,block=False)
option = input('Do you want to apply the correction (y/n) [n]: ')
if option == 'y':
datao = np.copy(data)
print('Applying V to QU cross-talk correction...')
datao[:,1,:,:] = data[:,1,:,:] - cVQ[0]*data[:,3,:,:] - cVQ[1]
datao[:,2,:,:] = data[:,2,:,:] - cVU[0]*data[:,3,:,:] - cVU[1]
plib.show_two(data[3,1,ry[0]:ry[1],rx[0]:rx[1]],datao[3,1,ry[0]:ry[1],rx[0]:rx[1]],block=False,title=['Stokes Q','Stokes Q corrected'],zoom=3)
plib.show_two(data[3,2,ry[0]:ry[1],rx[0]:rx[1]],datao[3,2,ry[0]:ry[1],rx[0]:rx[1]],block=False,title=['Stokes U','Stokes U corrected'],zoom=3)
option2 = input('Do you want to continue (y/n) [n]: ')
if option2 == 'y':
data = np.copy(datao)
del datao
plt.close()
if 'CAL_CRT6' in header: # Check for existence
header['CAL_CRT6'] = np.round(cVQ[0]*100,3)
else:
header.set('CAL_CRT6', np.round(cVQ[0]*100,3), 'cross-talk V to Q (slope in %), wrt CAL_NORM ',after='CAL_CRT5')
if 'CAL_CRT7' in header: # Check for existence
header['CAL_CRT7'] = np.round(cVQ[1]*100,3)
else:
header.set('CAL_CRT7', np.round(cVQ[1]*100,3), 'cross-talk V to Q (off-set in %), wrt CAL_NORM ',after='CAL_CRT6')
if 'CAL_CRT8' in header: # Check for existence
header['CAL_CRT8'] = np.round(cVU[0]*100,3)
else:
header.set('CAL_CRT8', np.round(cVU[0]*100,3), 'cross-talk V to U (slope in %), wrt CAL_NORM ',after='CAL_CRT7')
if 'CAL_CRT9' in header: # Check for existence
header['CAL_CRT9'] = np.round(cVU[1]*100,3)
else:
header.set('CAL_CRT9', np.round(cVU[1]*100,3), 'cross-talk V to U (off-set in %), wrt CAL_NORM ',after='CAL_CRT8')
#-----------------
# CROSS-TALK CALCULATION FROM I TO QUV (2D)
#-----------------
if do2d >= 2:
printc('---------------------------------------------------------',color=bcolors.OKGREEN)
printc('-- IN 2-Dimensions --')
printc('-- Cross-talk correction from Stokes I to Stokes Q,U,V --')
printc('---------------------------------------------------------',color=bcolors.OKGREEN)
size = do2d
cV0,cV1 = crosstalk_ItoQUV2d(data[:,:,ry[0]:ry[1],rx[0]:rx[1]],size=size)
nsize = size-1
dim = ry[1]-ry[0]-nsize
cV0 = cV0.reshape(dim,dim)
cV1 = cV1.reshape(dim,dim)
plib.show_one(cV0,vmin=-0.005,vmax=0.005)
data[:,3,ry[0]+nsize//2:ry[1]-nsize//2-1,rx[0]+nsize//2:rx[1]-nsize//2-1] = \
data[:,3,ry[0]+nsize//2:ry[1]-nsize//2-1,rx[0]+nsize//2:rx[1]-nsize//2-1] -\
cV0*data[:,0,ry[0]+nsize//2:ry[1]-nsize//2-1,rx[0]+nsize//2:rx[1]-nsize//2-1] #- 0.95*cV1
#-----------------
# FRINGING -
#-----------------
if correct_fringes == 'auto' or correct_fringes == 'manual':
if verbose:
plib.show_four_row(data[2,0,:,:],data[2,1,:,:],data[2,2,:,:],data[2,3,:,:],title=['I - before fringe','Q','U','V'],zoom=3,svmin=[0,-0.004,-0.004,-0.004],svmax=[1.2,0.004,0.004,0.004])
data, header = phi_correct_fringes(data,header,option=correct_fringes,verbose=verbose)
if verbose:
plib.show_four_row(data[2,0,:,:],data[2,1,:,:],data[2,2,:,:],data[2,3,:,:],title=['I - after fringe','Q','U','V'],zoom=3,svmin=[0,-0.004,-0.004,-0.004],svmax=[1.2,0.004,0.004,0.004])
elif correct_fringes == False:
pass
else:
printc('Error in fringe correction option. Options are "manual", "auto" or False. Given: ',color=bcolors.WARNING)
print(correct_fringes)
#-----------------
# MEDIAN TO ZERO
#-----------------
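# Subtract the median of Q, U and V over the disk mask at each wavelength so the polarization signals are centred on zero.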
if putmediantozero:
printc('-->>>>>>> Putting median to zero ',color=bcolors.OKGREEN)
printc(' Median evaluated in x = [',rrx_m[0],':',rrx_m[1],'] y = [',rry_m[0],':',rry_m[1],']',' using ',factor_media*100,"% of the disk",color=bcolors.OKBLUE)
for i in range(zd//4):
PQ = np.median( data[i,1, maski > 0])#,axis=(1,2))
PU = np.median( data[i,2, maski > 0])#,axis=(1,2))
PV = np.median( data[i,3, maski > 0])#,axis=(1,2))
# PQ = np.median(data[:,1,rry[0]:rry[1],rrx[0]:rrx[1]],axis=(1,2))
# PU = np.median(data[:,2,rry[0]:rry[1],rrx[0]:rrx[1]],axis=(1,2))
# PV = np.median(data[:,3,rry[0]:rry[1],rrx[0]:rrx[1]],axis=(1,2))
data[i,1,:,:] = data[i,1,:,:] - PQ#[:,np.newaxis,np.newaxis]
data[i,2,:,:] = data[i,2,:,:] - PU#[:,np.newaxis,np.newaxis]
data[i,3,:,:] = data[i,3,:,:] - PV#[:,np.newaxis,np.newaxis]
printc(PQ,PU,PV)
if verbose == 1:
plib.show_hist(data[0,1, maski > 0].flatten(), bins='auto',title=' ',leave='open',color='green')
plib.show_hist(data[0,2, maski > 0].flatten(), bins='auto',title=' ',leave='open',color='red')
plib.show_hist(data[0,3, maski > 0].flatten(), bins='auto',title='Stokes Q/U/V - zero',color='blue')
plib.show_four_row(data[3,0,:,:],data[3,1,:,:],data[3,2,:,:],data[3,3,:,:],title=['I','Q','U','V'],zoom=3,svmin=[0,-0.004,-0.004,-0.004],svmax=[1.2,0.004,0.004,0.004])
header['history'] = ' Parameters putmediantozero [%]: '+ str(np.round(PQ*100,6))+ ' '+ str(np.round(PU*100,6))+ ' '+ str(np.round(PV*100,6))
#-----------------
#CHECK FOR INFs
#-----------------
data[np.isinf(data)] = 0
data[np.isnan(data)] = 0
#-----------------
# SAVE DATA TODO: CMILOS FORMAT AND FITS
#-----------------
#check if npz,pngs and level2 exist
dirs = ['npz','pngs','level2']
for checkit in dirs:
check_dir = os.path.isdir(output_dir+checkit)
if not check_dir:
os.makedirs(output_dir+checkit)
print("created folder : ", output_dir+checkit)
else:
print(output_dir+checkit, "folder already exists.")
printc('---------------------------------------------------------',color=bcolors.OKGREEN)
if outfile is None:
#basically replace L1 by L2 in the file name
try:
outfile_L2 = set_level(data_f,'L1','L2')
outfile_L2 = set_level(outfile_L2,'ilam','stokes')
except:
outfile_L2 = set_level(data_f,'L0','L2')
outfile_L2 = set_level(outfile_L2,'ilam','stokes')
printc(' Saving data to: ',output_dir+'level2/'+outfile_L2)
# hdu = pyfits.PrimaryHDU(data)
# hdul = pyfits.HDUList([hdu])
# hdul.writeto(outfile, overwrite=True)
with pyfits.open(data_filename) as hdu_list:
hdu_list[0].data = data
# header = hdu_list[0].header
hdu_list[0].header = header
# Add a new key to the header
# header.insert(20, ('NEWKEY', 'OMIT', 'test'))
#header.set('NEWKEY','50.5')
hdu_list.writeto(output_dir+'level2/'+outfile_L2, clobber=True)
# hdu_list.writeto(directory+outfile+'_L1.fits', clobber=True)
# with pyfits.open(data_f) as hdu_list:
# hdu_list[0].data = mask
# hdu_list.writeto(directory+outfile+'_red-mask.fits', clobber=True)
#-----------------
# INVERSION OF DATA WITH CMILOS
#-----------------
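# generate_level2 runs the CMILOS inversion on the masked disk; pixels whose maximum |V| over wavelength is below umbral (Spanish for threshold) times the continuum V level are blanked in the field maps.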
if rte == 'RTE' or rte == 'CE' or rte == 'CE+RTE':
printc('---------------------RUNNING CMILOS --------------------------',color=bcolors.OKGREEN)
rte_invs = np.zeros((12,yd,xd)).astype(float)
rte_invs[:,ry[0]:ry[1],rx[0]:rx[1]] = generate_level2(data[:,:,ry[0]:ry[1],rx[0]:rx[1]],wave_axis,rte)
rte_invs_noth = np.copy(rte_invs)
umbral = 3.
noise_in_V = np.mean(data[0,3,rry[0]:rry[1],rrx[0]:rrx[1]])
low_values_flags = np.max(np.abs(data[:,3,:,:]),axis=0) < noise_in_V*umbral # Where values are low
rte_invs[2,low_values_flags] = 0
rte_invs[3,low_values_flags] = 0
rte_invs[4,low_values_flags] = 0
for i in range(12):
rte_invs[i,:,:] = rte_invs[i,:,:] * mask
#save plots!!!!
if verbose:
plib.show_four_row(rte_invs_noth[2,:,:],rte_invs_noth[3,:,:],rte_invs_noth[4,:,:],rte_invs_noth[8,:,:],svmin=[0,0,0,-6.],svmax=[1200,180,180,+6.],title=['Field strength [Gauss]','Field inclination [degree]','Field azimuth [degree]','LoS velocity [km/s]'],xlabel='Pixel',ylabel='Pixel')#,save=outfile+'_VLoS.png')
plib.show_four_row(rte_invs[2,:,:],rte_invs[3,:,:],rte_invs[4,:,:],rte_invs[8,:,:],svmin=[0,0,0,-6.],svmax=[1200,180,180,+6.],title=['Field strength [Gauss]','Field inclination [degree]','Field azimuth [degree]','LoS velocity [km/s]'],xlabel='Pixel',ylabel='Pixel')#,save=outfile+'BLoS.png')
rte_invs_noth[8,:,:] = rte_invs_noth[8,:,:] - np.mean(rte_invs_noth[8,rry[0]:rry[1],rrx[0]:rrx[1]])
rte_invs[8,:,:] = rte_invs[8,:,:] - np.mean(rte_invs[8,rry[0]:rry[1],rrx[0]:rrx[1]])
#np.savez_compressed(output_dir+'npz/'+outfile_L2+'_RTE', rte_invs=rte_invs, rte_invs_noth=rte_invs_noth,mask=mask)
b_los = rte_invs_noth[2,:,:]*np.cos(rte_invs_noth[3,:,:]*np.pi/180.)*mask
# b_los = np.zeros((2048,2048))
# b_los[PXBEG2:PXEND2+1,PXBEG1:PXEND1+1] = b_los_cropped
v_los = rte_invs_noth[8,:,:] * mask
# v_los = np.zeros((2048,2048))
# v_los[PXBEG2:PXEND2+1,PXBEG1:PXEND1+1] = v_los_cropped
if verbose:
plib.show_one(v_los,vmin=-2.5,vmax=2.5,title='LoS velocity')
plib.show_one(b_los,vmin=-30,vmax=30,title='LoS magnetic field')
printc(' ---- >>>>> Updating L2 header.... ',color=bcolors.OKGREEN)
header['history'] = ' RTE CMILOS INVERTER: '+ rte
header['history'] = ' CMILOS VER: '+ version_cmilos
if 'RTE_ITER' in header: # Check for existence
header['RTE_ITER'] = str(15)
else:
header.set('RTE_ITER', str(15), 'Number RTE inversion iterations',after='CAL_SCIP')
printc(' ---- >>>>> Saving L2 data.... ',color=bcolors.OKGREEN)
with pyfits.open(data_filename) as hdu_list:
hdu_list[0].data = rte_invs_noth[2,:,:] * mask
# header = hdu_list[0].header
hdu_list[0].header = header
writeto = set_level(outfile_L2,'stokes','bmag')
hdu_list.writeto(output_dir+'level2/'+writeto, clobber=True)
# with pyfits.open(data_filename) as hdu_list:
hdu_list[0].data = rte_invs_noth[3,:,:] * mask
# hdu_list[0].header = header
writeto = set_level(outfile_L2,'stokes','binc')
hdu_list.writeto(output_dir+'level2/'+writeto, clobber=True)
# with pyfits.open(data_filename) as hdu_list:
hdu_list[0].data = rte_invs[4,:,:] * mask
# hdu_list[0].header = header
writeto = set_level(outfile_L2,'stokes','bazi')
hdu_list.writeto(output_dir+'level2/'+writeto, clobber=True)
# with pyfits.open(data_filename) as hdu_list:
hdu_list[0].data = b_los
# hdu_list[0].header = header
writeto = set_level(outfile_L2,'stokes','blos')
hdu_list.writeto(output_dir+'level2/'+writeto, clobber=True)
# with pyfits.open(data_filename) as hdu_list:
hdu_list[0].data = v_los
# hdu_list[0].header = header
writeto = set_level(outfile_L2,'stokes','vlos')
hdu_list.writeto(output_dir+'level2/'+writeto, clobber=True)
# with pyfits.open(data_filename) as hdu_list:
hdu_list[0].data = rte_invs[9,:,:]+rte_invs[10,:,:]
# hdu_list[0].header = header
writeto = set_level(outfile_L2,'stokes','icnt')
hdu_list.writeto(output_dir+'level2/'+writeto, clobber=True)
printc(' ---- >>>>> Saving plots.... ',color=bcolors.OKGREEN)
#-----------------
# PLOTS VLOS
#-----------------
Zm = np.ma.masked_where(mask == 1, mask)
plt.figure(figsize=(10, 10))
ax = plt.gca()
plt.title('PHI-FDT LoS velocity',size=20)
# Hide grid lines
ax.grid(False)
# Hide axes ticks
ax.set_xticks([])
ax.set_yticks([])
# im = ax.imshow(np.fliplr(rotate(v_los[PXBEG2:PXEND2+1,PXBEG1:PXEND1+1], 52, reshape=False)), cmap='bwr',vmin=-3.,vmax=3.)
im = ax.imshow(v_los, cmap='bwr',vmin=-3.,vmax=3.)
divider = plib.make_axes_locatable(ax)
cax = divider.append_axes("right", size="5%", pad=0.05)
cbar = plib.plt.colorbar(im, cax=cax)
cbar.set_label('[km/s]')
cbar.ax.tick_params(labelsize=16)
#ax.imshow(Zm, cmap='gray')
writeto = set_level(outfile_L2,'stokes','vlos')
writeto = set_level(writeto,'.fits','.png')
plt.savefig(output_dir+'pngs/'+writeto,dpi=300)
plt.close()
#-----------------
# PLOTS BLOS
#-----------------
plt.figure(figsize=(10, 10))
ax = plt.gca()
plt.title('PHI-FDT Magnetogram',size=20)
# Hide grid lines
ax.grid(False)
# Hide axes ticks
ax.set_xticks([])
ax.set_yticks([])
#im = ax.imshow(np.fliplr(rotate(b_los[PXBEG2:PXEND2+1,PXBEG1:PXEND1+1], 52, reshape=False)), cmap='gray',vmin=-100,vmax=100)
im = ax.imshow(b_los, cmap='gray',vmin=-100,vmax=100)
divider = plib.make_axes_locatable(ax)
cax = divider.append_axes("right", size="5%", pad=0.05)
cbar = plib.plt.colorbar(im, cax=cax)
# cbar.set_label('Stokes V amplitude [%]')
cbar.set_label('LoS magnetic field [Mx/cm$^2$]')
cbar.ax.tick_params(labelsize=16)
#ax.imshow(Zm, cmap='gray')
writeto = set_level(outfile_L2,'stokes','blos')
writeto = set_level(writeto,'.fits','.png')
plt.savefig(output_dir+'pngs/'+writeto,dpi=300)
plt.close()
printc('--------------------- END ----------------------------',color=bcolors.FAIL)
return wave_axis
# if rte == 'cog':
# printc('---------------------RUNNING COG --------------------------',color=bcolors.OKGREEN)
# wavelength = 6173.3356
# v_los,b_los = cog(data,wavelength,wave_axis,lande_factor=3,cpos = cpos)
# #-----------------
# # MASK DATA AND SAVE
# #-----------------
# v_los = v_los * mask
# b_los = b_los * mask
# plib.show_one(v_los,vmin=-1.5,vmax=1.5)
# plib.show_one(b_los,vmin=-150,vmax=150)
# if verbose == 1:
# plib.show_one(v_los,vmin=-2.5,vmax=2.5)
# plib.show_one(b_los,vmin=-150,vmax=150)
# with pyfits.open(data_f) as hdu_list:
# hdu_list[0].data = v_los
# hdu_list[0].header = header
# writeto = set_level(outfile_L2,'ilam','vlos-cog')
# writeto = set_level(writeto,'.fits','.png')
# plt.savefig(directory+writeto,dpi=300)
# with pyfits.open(data_f) as hdu_list:
# hdu_list[0].data = b_los
# hdu_list[0].header = header
# writeto = set_level(outfile_L2,'ilam','blos-cog')
# writeto = set_level(writeto,'.fits','.png')
# plt.savefig(directory+'pngs/'+writeto,dpi=300)
# return
#-----------------
# CORRECT CAVITY
#-----------------
# try:
# if cavity == 0:
# cavity=np.zeros_like(vl)
# print("zerooooooooooo")
# except:
# # cavity = cavity[ry[0]:ry[1],rx[0]:rx[1]]
# pass
# factor = 0.5
# rrx = [int(c[0]-r*factor),int(c[0]+r*factor)]
# rry = [int(c[1]-r*factor),int(c[1]+r*factor)]
# print(rrx,rry,' check these for los vel calib')
# off = np.mean(vl[rry[0]:rry[1],rrx[0]:rrx[1]])
# vl = vl - off #- cavity
# print('velocity offset ',off)
# # cavity,h = phi.fits_read('HRT_cavity_map_IP5.fits')
# # cavity = cavity * 0.3513e-3/6173.*300000. #A/V
# # cavity = cavity - np.median(cavity[800:1200,800:1200])
# return vl
|
py | 1a33a91cc5815ada5d4de6944da935c91c0c43bb | from daemon.initialisation import *
#################################
#
# Project DAEMON
#
######################################
print(lobjet["LED"].getallfunction())
lobjet["PIR"].saveJSON()
lobjet["PIR"].autoloadJSON()
lobjet["LED"].setparamvalue("Mode",lobjet["LED"].getallmode()[0])
#TEST OF THE FUNCTIONS WITH PARAMETERS
print(lobjet["LED"].getattributfunction("Allumer"))
lobjet["LED"].setfunction("Allumer",{"Time":10})
lobjet["LED"].setfunction("Allumer",{"Time":200})
#ROUTE MANAGEMENT
lobjet["LED"].addobjet(lobjet["PIR"])
#lobjet["LED"].removeobjet('PIR')
for objet in lobjet.values():
objetname = objet.getname()
print("********" + objetname + "**********")
print(objet.getallobjet())
print(objet.getallrules())
print("-------------------")
for param in lobjet[objetname].getallparam():
print(param)
print(lobjet[objetname].getparamvalue(param))
print(lobjet[objetname].getparamJSONfilename(param))
print("--------------------")
for rule in lobjet[objetname].getallrules():
print(rule)
print(lobjet[objetname].isSetRule(rule))
#test of the objects
#for objet in lobjet.values():
# objet.loadJSON()
# objet.run()
# print(objet.getmode())
# objet.setmode(2)
# print(objet.getmode())
# objet.saveJSON()
|