content | avg_line_length | max_line_length | alphanum_fraction | licenses | repository_name | path | size | lang
---|---|---|---|---|---|---|---|---|
from setuptools import setup, find_packages
setup(
name = "pierre",
version = "1.0.0",
py_modules = ["pierre"],
install_requires = [
"Click",
"mistune",
],
package_dir = {"": "pierre"},
entry_points = {
"console_scripts": [
"pierre = pierre:main"
]
},
)
| 18.210526 | 43 | 0.482659 | ["Unlicense"] | CoryMcCartan/pierre | setup.py | 346 | Python |
# Copyright (c) 2019 Bernd Wiesner. [email protected]
# All rights reserved.
#
""" Display the command line options in a window
"""
from argparse import Namespace
from typing import Tuple, Union
import PySimpleGUI as sg
import constants as C
from gui_utility import popup_window
ArgumentsResults = Tuple[Union[str, None], Namespace]
def arguments_window(args: Namespace) -> ArgumentsResults:
"""Window interface
:param args: the arguments passed from the command line
:return: Tuple[Union[str, None], Namespace] - The new arguments
"""
filename: str = C.SAVE_FILE_DIR + args.lottery_type + C.SAVE_FILE_TYPE
layout = [
[
sg.Text(text="Lottery type:"),
sg.InputCombo(
values=tuple(C.LOTTERY_TYPES),
default_value=args.lottery_type,
readonly=True,
enable_events=True,
size=(10, 1),
tooltip="Choose a lottery type",
key=C.ELEMENT_NAMES["LOTTO"],
),
sg.Frame(
layout=[
[
sg.Text(text="Number of lines"),
sg.InputText(
default_text=args.number_of_lines,
enable_events=True,
size=(3, 1),
justification="right",
key=C.ELEMENT_NAMES["COUNT"],
),
]
],
title="",
tooltip="Choose the number of lines to generate",
relief=sg.RELIEF_FLAT,
key=C.ELEMENT_NAMES["LINES"],
),
],
[
sg.Frame(
layout=[
[
sg.Radio(
text="Save",
group_id="R",
default=not args.no_save,
tooltip="Save the generated numbers",
enable_events=True,
key=C.ELEMENT_NAMES["SAVE"],
),
sg.Radio(
text="Do NOT save",
group_id="R",
default=args.no_save,
tooltip="Do not save the generated numbers",
enable_events=True,
key=C.ELEMENT_NAMES["NOSAVE"],
),
sg.Radio(
text="Delete",
group_id="R",
default=args.delete,
enable_events=True,
tooltip="Delete a saved file",
key=C.ELEMENT_NAMES["DELETE"],
),
sg.Radio(
text="Show",
group_id="R",
default=args.print,
tooltip="Display a previously saved file",
enable_events=True,
key=C.ELEMENT_NAMES["SHOW"],
),
]
],
title="Saved file options",
relief=sg.RELIEF_SOLID,
size=(0, 40),
)
],
[
sg.Text(
text="File name: " + filename,
key=C.ELEMENT_NAMES["FILENAME"],
size=(50, 2),
tooltip="The name of the file to save or to display",
justification="left",
)
],
[
sg.OK(key="OK", focus=True),
sg.Quit(key="Cancel", tooltip="Do nothing and quit"),
],
]
window = sg.Window(
title="Lottery number Generator Arguments",
layout=layout,
text_justification=C.GUI_JUSTIFY,
font=(C.GUI_FONT_NAME, C.GUI_FONT_SIZE),
)
while True:
event, values = window.Read()
if event == C.ELEMENT_NAMES["DELETE"]:
window.Element(key="OK").Update("Delete Saved File")
window.Element(key=C.ELEMENT_NAMES["LINES"]).Update(visible=False)
window.Element(key=C.ELEMENT_NAMES["FILENAME"]).Update(
"File to delete: " + filename
)
elif event == C.ELEMENT_NAMES["SHOW"]:
window.Element(key="OK").Update("Show Saved File")
window.Element(key=C.ELEMENT_NAMES["LINES"]).Update(visible=False)
window.Element(key=C.ELEMENT_NAMES["FILENAME"]).Update(
"File to display: " + filename
)
elif event in (C.ELEMENT_NAMES["NOSAVE"], C.ELEMENT_NAMES["SAVE"]):
window.Element(key="OK").Update("Generate Numbers")
window.Element(key=C.ELEMENT_NAMES["LINES"]).Update(visible=True)
if event == C.ELEMENT_NAMES["NOSAVE"]:
window.Element(key=C.ELEMENT_NAMES["FILENAME"]).Update(
"File will not be saved"
)
elif event == C.ELEMENT_NAMES["SAVE"]:
window.Element(key=C.ELEMENT_NAMES["FILENAME"]).Update(
"Will be saved as: " + filename
)
if event == C.ELEMENT_NAMES["LOTTO"]:
filename = (
C.SAVE_FILE_DIR + values[C.ELEMENT_NAMES["LOTTO"]] + C.SAVE_FILE_TYPE
)
window.Element(key=C.ELEMENT_NAMES["FILENAME"]).Update(
"File name: " + filename
)
elif event == C.ELEMENT_NAMES["COUNT"]:
if values[C.ELEMENT_NAMES["COUNT"]].isnumeric():
temp = int(values[C.ELEMENT_NAMES["COUNT"]])
            else:
                temp = 0  # treat non-numeric input as out of range
            if temp < C.MIN_LINES or temp > C.MAX_LINES:
                elem = window.Element(key=C.ELEMENT_NAMES["COUNT"])
                elem.Update(C.DEFAULT_LINES)
                msg = "number of lines must be in the range {0}-{1}".format(
                    C.MIN_LINES, C.MAX_LINES)
popup_window(text=msg)
elif event == "OK" or event == "Cancel" or event is None:
break
if event != "Cancel" and event is not None:
args.lottery_type = values[C.ELEMENT_NAMES["LOTTO"]] # str
args.number_of_lines = int(values[C.ELEMENT_NAMES["COUNT"]]) # int
args.delete = values[C.ELEMENT_NAMES["DELETE"]] # bool
args.print = values[C.ELEMENT_NAMES["SHOW"]] # bool
args.no_save = values[C.ELEMENT_NAMES["NOSAVE"]] # bool
window.Close()
return event, args
| 38.068182 | 85 | 0.468507 | ["MIT"] | bernduwiesner/GenLottery | gui_arguments.py | 6,700 | Python |
import sys, java, unittest
import xpath
from xml.dom import minidom as dom
from geoscript import geom, proj, feature
try:
import json
except ImportError:
import simplejson as json
class Feature_Test(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.xpathctx = xpath.XPathContext()
cls.xpathctx.namespaces['gml'] = 'http://www.opengis.net/gml'
cls.xpathctx.namespaces['gsf'] = 'http://geoscript.org/feature'
def testSchemaBasic(self):
atts = [('att1',str),('att2',int),('geom',geom.Point)]
s = feature.Schema('test',atts)
assert 'test' == s.name
n = 0
for att in s.fields:
assert atts[n][0] == att.name
assert atts[n][1] == att.typ
n = n+1
assert len(atts) == n
assert atts[2][0] == s.geom.name
assert atts[2][1] == s.geom.typ
def testSchemaGeomProj(self):
prj = proj.Projection('epsg:3005')
s = feature.Schema('test',[('att1',str),('att2',int),('geom', geom.Point,'epsg:3005')])
assert s.geom.proj
assert prj == s.geom.proj
def testSchemaReproject(self):
prj1 = proj.Projection('epsg:4326')
prj2 = proj.Projection('epsg:3005')
s = feature.Schema('test',[('geom', geom.Point,'epsg:4326')])
assert s.fields[0].proj == prj1
s = s.reproject(prj2, 'reprojected')
assert 'reprojected' == s.name
assert s.fields[0].proj == prj2
def testSchemaAsContainer(self):
s = feature.Schema('test',[('att1',str),('att2',int),('geom', geom.Point,'epsg:3005')])
assert s['att1']
assert s['att1'].typ == str
expected = [f.name for f in s.fields]
assert [name for name in s] == expected
expected = [f for f in s.fields]
assert [f for name, f in s.iteritems()] == expected
try:
s['foo']
assert False
except KeyError:
pass
assert s.values() == s.fields
assert s.keys() == [f.name for f in s.fields]
def testSchemaEquals(self):
s1 = feature.Schema('test',[('att1',str),('att2',int),('geom', geom.Point,'epsg:3005')])
s2 = feature.Schema('test',[('att1',str),('att2',int),('geom', geom.Point,'epsg:3005')])
assert s1 == s2
def testBasic(self):
id = 'fid'
g = geom.Point(-125, 50)
a = {'x': 1, 'y': 1.1, 'z': 'one', 'geom': g}
f = feature.Feature(a,'fid')
assert id == f.id
assert g == f.geom
def testAsContainer(self):
id = 'fid'
g = geom.Point(-125, 50)
a = {'x': 1, 'y': 1.1, 'z': 'one', 'geom': g}
f = feature.Feature(a,'fid')
assert 1 == f['x']
try:
f['foo']
assert False
except KeyError:
pass
assert [x for x in f.schema] == [y for y in f]
expected = [(x,y) for x,y in f.attributes.iteritems()]
assert [(x,y) for x,y in f.iteritems()] == expected
assert sorted(f.attributes.keys()) == sorted(f.keys())
assert sorted(f.attributes.values()) == sorted(f.values())
def testEquals(self):
id = 'fid'
g = geom.Point(-125, 50)
a = {'x': 1, 'y': 1.1, 'z': 'one', 'geom': g}
f1 = feature.Feature(a,'fid')
f2 = feature.Feature(a,'fid')
assert f1 == f2
def testBounds(self):
g = geom.LineString((1,1), (10,10))
f = feature.Feature({'geom': g}, 'fid')
assert 1 == f.bounds.west
assert 1 == f.bounds.south
assert 10 == f.bounds.east
assert 10 == f.bounds.north
def testBoundsNoGeom(self):
f = feature.Feature({'x': 1}, 'fid')
assert None == f.bounds
def testWriteJSON(self):
g = geom.Point(-125, 50)
a = {'x': 1, 'y': 1.1, 'z': 'one', 'geom': g}
f1 = feature.Feature(a,'fid')
st = feature.writeJSON(f1)
assert st
obj = json.loads(st)
assert obj['type'] == 'Feature'
assert obj['properties']['x'] == 1
assert obj['properties']['y'] == 1.1
assert obj['geometry']['type'] == 'Point'
assert obj['geometry']['coordinates'] == [-125, 50]
def testReadJSON(self):
st = '{"type": "Feature", "properties": {"x": 1, "y": 1.1 }, "id": "fid"}'
f = feature.readJSON(st)
assert f
assert 1 == f['x']
assert 1.1 == f['y']
assert 'fid' == f.id
def testWriteGML(self):
g = geom.Point(-125, 50)
a = {'x': 1, 'y': 1.1, 'z': 'one', 'geom': g}
xml = feature.writeGML(feature.Feature(a,'fid'))
doc = dom.parseString(xml)
assert "gsf:feature" == doc.documentElement.nodeName
xp = Feature_Test.xpathctx
assert u'1' in xp.findvalues('//gsf:x', doc)
assert u'1.1' in xp.findvalues('//gsf:y', doc)
assert u'one' in xp.findvalues('//gsf:z', doc)
assert -125.0 == float(xp.findvalues('//gsf:geom/gml:Point/gml:coord/gml:X', doc)[0] )
assert 50.0 == float(xp.findvalues('//gsf:geom/gml:Point/gml:coord/gml:Y', doc)[0] )
def testReadGML(self):
xml = u'<gsf:feature fid="fid" xmlns:gml="http://www.opengis.net/gml" xmlns:gsf="http://geoscript.org/feature"><gsf:geom><gml:Point><gml:coord><gml:X>-125.0</gml:X><gml:Y>50.0</gml:Y></gml:coord></gml:Point></gsf:geom><gsf:x>1</gsf:x><gsf:z>one</gsf:z><gsf:y>1.1</gsf:y></gsf:feature>'
f = feature.readGML(xml)
assert f
assert u'1' == f['x']
assert u'1.1' == f['y']
assert u'fid' == f.id
assert -125.0 == f.geom.x and 50.0 == f.geom.y
xml = '<gsf:feature gml:id="fid" xmlns:gml="http://www.opengis.net/gml" xmlns:gsf="http://geoscript.org/feature"><gsf:geom><gml:Point><gml:pos>-125.0 50.0</gml:pos></gml:Point></gsf:geom><gsf:x>1</gsf:x><gsf:z>one</gsf:z><gsf:y>1.1</gsf:y></gsf:feature>'
f = feature.readGML(xml,ver=3)
assert f
assert u'1' == f['x']
assert u'1.1' == f['y']
assert u'fid' == f.id
assert -125.0 == f.geom.x and 50.0 == f.geom.y
xml = '<gsf:feature gml:id="fid" xmlns:gml="http://www.opengis.net/gml/3.2" xmlns:gsf="http://geoscript.org/feature"><gsf:geom><gml:Point><gml:pos>-125.0 50.0</gml:pos></gml:Point></gsf:geom><gsf:x>1</gsf:x><gsf:z>one</gsf:z><gsf:y>1.1</gsf:y></gsf:feature>'
f = feature.readGML(xml,ver=3.2)
assert f
assert u'1' == f['x']
assert u'1.1' == f['y']
assert u'fid' == f.id
assert -125.0 == f.geom.x and 50.0 == f.geom.y
def testWriteGML3(self):
g = geom.Point(-125, 50)
a = {'x': 1, 'y': 1.1, 'z': 'one', 'geom': g}
xml = feature.writeGML(feature.Feature(a,'fid'), ver=3)
doc = dom.parseString(xml)
assert "gsf:feature" == doc.documentElement.nodeName
xp = Feature_Test.xpathctx
assert u'1' in xp.findvalues('//gsf:x', doc)
assert u'1.1' in xp.findvalues('//gsf:y', doc)
assert u'one' in xp.findvalues('//gsf:z', doc)
self.assertIn(u'-125 50', xp.findvalues('//gsf:geom/gml:Point/gml:pos', doc))
def testWriteGML32(self):
g = geom.Point(-125, 50)
a = {'x': 1, 'y': 1.1, 'z': 'one', 'geom': g}
xml = feature.writeGML(feature.Feature(a,'fid'), ver=3.2)
doc = dom.parseString(xml)
assert "gsf:feature" == doc.documentElement.nodeName
xp = Feature_Test.xpathctx
xp.namespaces['gml'] = 'http://www.opengis.net/gml/3.2'
assert u'1' in xp.findvalues('//gsf:x', doc)
assert u'1.1' in xp.findvalues('//gsf:y', doc)
assert u'one' in xp.findvalues('//gsf:z', doc)
self.assertIn(u'-125 50', xp.findvalues('//gsf:geom/gml:Point/gml:pos', doc))
| 33.259259 | 289 | 0.58686 | ["MIT"] | geoscript/geoscript-py | tests/test_feature.py | 7,184 | Python |
import itertools as it
import re
import string
import warnings
from xml.dom import minidom
from manimlib.constants import *
from manimlib.mobject.geometry import Circle
from manimlib.mobject.geometry import Rectangle
from manimlib.mobject.geometry import RoundedRectangle
from manimlib.mobject.types.vectorized_mobject import VGroup
from manimlib.mobject.types.vectorized_mobject import VMobject
from manimlib.utils.color import *
from manimlib.utils.config_ops import digest_config
from manimlib.utils.config_ops import digest_locals
def string_to_numbers(num_string):
num_string = num_string.replace("-", ",-")
num_string = num_string.replace("e,-", "e-")
return [float(s) for s in re.split("[ ,]", num_string) if s != ""]
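# Sketch of the behaviour above (hypothetical input): the first replace lets a
# minus sign act as a separator, the second undoes the damage done to
# exponents, e.g. "10-20 3e-2" -> "10,-20 3e-2" -> [10.0, -20.0, 0.03].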
class SVGMobject(VMobject):
CONFIG = {
"should_center": True,
"height": 2,
"width": None,
# Must be filled in in a subclass, or when called
"file_name": None,
"unpack_groups": True, # if False, creates a hierarchy of VGroups
"stroke_width": DEFAULT_STROKE_WIDTH,
"fill_opacity": 1.0,
# "fill_color" : LIGHT_GREY,
}
def __init__(self, file_name=None, **kwargs):
digest_config(self, kwargs)
self.file_name = file_name or self.file_name
self.ensure_valid_file()
VMobject.__init__(self, **kwargs)
self.move_into_position()
def ensure_valid_file(self):
if self.file_name is None:
raise Exception("Must specify file for SVGMobject")
possible_paths = [
os.path.join(os.path.join("assets", "svg_images"), self.file_name),
os.path.join(os.path.join("assets", "svg_images"),
self.file_name + ".svg"),
os.path.join(os.path.join("assets", "svg_images"),
self.file_name + ".xdv"),
self.file_name,
]
for path in possible_paths:
if os.path.exists(path):
self.file_path = path
return
raise IOError("No file matching %s in image directory" %
self.file_name)
def generate_points(self):
doc = minidom.parse(self.file_path)
self.ref_to_element = {}
for svg in doc.getElementsByTagName("svg"):
mobjects = self.get_mobjects_from(svg)
if self.unpack_groups:
self.add(*mobjects)
else:
self.add(*mobjects[0].submobjects)
doc.unlink()
def get_mobjects_from(self, element):
result = []
if not isinstance(element, minidom.Element):
return result
if element.tagName == 'defs':
self.update_ref_to_element(element)
elif element.tagName == 'style':
pass # TODO, handle style
elif element.tagName in ['g', 'svg', 'symbol']:
result += it.chain(*[
self.get_mobjects_from(child) for child in element.childNodes
])
elif element.tagName == 'path':
result.append(
self.path_string_to_mobject(element.getAttribute('d')))
elif element.tagName == 'use':
result += self.use_to_mobjects(element)
elif element.tagName == 'rect':
result.append(self.rect_to_mobject(element))
elif element.tagName == 'circle':
result.append(self.circle_to_mobject(element))
elif element.tagName == 'ellipse':
result.append(self.ellipse_to_mobject(element))
elif element.tagName in ['polygon', 'polyline']:
result.append(self.polygon_to_mobject(element))
else:
pass # TODO
# warnings.warn("Unknown element type: " + element.tagName)
result = [m for m in result if m is not None]
self.handle_transforms(element, VGroup(*result))
if len(result) > 1 and not self.unpack_groups:
result = [VGroup(*result)]
return result
def g_to_mobjects(self, g_element):
mob = VGroup(*self.get_mobjects_from(g_element))
self.handle_transforms(g_element, mob)
return mob.submobjects
def path_string_to_mobject(self, path_string):
return VMobjectFromSVGPathstring(path_string)
def use_to_mobjects(self, use_element):
# Remove initial "#" character
ref = use_element.getAttribute("xlink:href")[1:]
if ref not in self.ref_to_element:
warnings.warn("%s not recognized" % ref)
return VGroup()
return self.get_mobjects_from(self.ref_to_element[ref])
def attribute_to_float(self, attr):
stripped_attr = "".join(
[char for char in attr if char in string.digits + "." + "-"])
return float(stripped_attr)
def polygon_to_mobject(self, polygon_element):
# TODO, This seems hacky...
path_string = polygon_element.getAttribute("points")
for digit in string.digits:
path_string = path_string.replace(" " + digit, " L" + digit)
path_string = "M" + path_string
return self.path_string_to_mobject(path_string)
# <circle class="st1" cx="143.8" cy="268" r="22.6"/>
def circle_to_mobject(self, circle_element):
x, y, r = [
self.attribute_to_float(circle_element.getAttribute(key))
if circle_element.hasAttribute(key) else 0.0
for key in ("cx", "cy", "r")
]
return Circle(radius=r).shift(x * RIGHT + y * DOWN)
def ellipse_to_mobject(self, circle_element):
x, y, rx, ry = [
self.attribute_to_float(circle_element.getAttribute(key))
if circle_element.hasAttribute(key) else 0.0
for key in ("cx", "cy", "rx", "ry")
]
return Circle().scale(rx * RIGHT + ry * UP).shift(x * RIGHT + y * DOWN)
def rect_to_mobject(self, rect_element):
fill_color = rect_element.getAttribute("fill")
stroke_color = rect_element.getAttribute("stroke")
stroke_width = rect_element.getAttribute("stroke-width")
corner_radius = rect_element.getAttribute("rx")
# input preprocessing
if fill_color in ["", "none", "#FFF", "#FFFFFF"
] or Color(fill_color) == Color(WHITE):
opacity = 0
fill_color = BLACK # shdn't be necessary but avoids error msgs
if fill_color in ["#000", "#000000"]:
fill_color = WHITE
if stroke_color in ["", "none", "#FFF", "#FFFFFF"
] or Color(stroke_color) == Color(WHITE):
stroke_width = 0
stroke_color = BLACK
if stroke_color in ["#000", "#000000"]:
stroke_color = WHITE
if stroke_width in ["", "none", "0"]:
stroke_width = 0
if corner_radius in ["", "0", "none"]:
corner_radius = 0
corner_radius = float(corner_radius)
if corner_radius == 0:
mob = Rectangle(width=self.attribute_to_float(
rect_element.getAttribute("width")),
height=self.attribute_to_float(
rect_element.getAttribute("height")),
stroke_width=stroke_width,
stroke_color=stroke_color,
fill_color=fill_color,
fill_opacity=opacity)
else:
mob = RoundedRectangle(width=self.attribute_to_float(
rect_element.getAttribute("width")),
height=self.attribute_to_float(
rect_element.getAttribute("height")),
stroke_width=stroke_width,
stroke_color=stroke_color,
fill_color=fill_color,
fill_opacity=opacity,
corner_radius=corner_radius)
mob.shift(mob.get_center() - mob.get_corner(UP + LEFT))
return mob
def handle_transforms(self, element, mobject):
x, y = 0, 0
try:
x = self.attribute_to_float(element.getAttribute('x'))
# Flip y
y = -self.attribute_to_float(element.getAttribute('y'))
mobject.shift(x * RIGHT + y * UP)
except:
pass
transform = element.getAttribute('transform')
try: # transform matrix
prefix = "matrix("
suffix = ")"
if not transform.startswith(prefix) or not transform.endswith(
suffix):
raise Exception()
transform = transform[len(prefix):-len(suffix)]
transform = string_to_numbers(transform)
transform = np.array(transform).reshape([3, 2])
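            # An SVG "matrix(a, b, c, d, e, f)" transform therefore arrives
            # here as [[a, b], [c, d], [e, f]]: rows 0-1 are treated as the
            # linear part and row 2 as the translation (e, f).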
x = transform[2][0]
y = -transform[2][1]
matrix = np.identity(self.dim)
matrix[:2, :2] = transform[:2, :]
matrix[1] *= -1
matrix[:, 1] *= -1
for mob in mobject.family_members_with_points():
mob.points = np.dot(mob.points, matrix)
mobject.shift(x * RIGHT + y * UP)
except:
pass
try: # transform scale
prefix = "scale("
suffix = ")"
if not transform.startswith(prefix) or not transform.endswith(
suffix):
raise Exception()
transform = transform[len(prefix):-len(suffix)]
scale_values = string_to_numbers(transform)
if len(scale_values) == 2:
scale_x, scale_y = scale_values
mobject.scale(np.array([scale_x, scale_y, 1]),
about_point=ORIGIN)
elif len(scale_values) == 1:
scale = scale_values[0]
mobject.scale(np.array([scale, scale, 1]), about_point=ORIGIN)
except:
pass
try: # transform translate
prefix = "translate("
suffix = ")"
if not transform.startswith(prefix) or not transform.endswith(
suffix):
raise Exception()
transform = transform[len(prefix):-len(suffix)]
x, y = string_to_numbers(transform)
mobject.shift(x * RIGHT + y * DOWN)
except:
pass
# TODO, ...
def flatten(self, input_list):
output_list = []
for i in input_list:
if isinstance(i, list):
output_list.extend(self.flatten(i))
else:
output_list.append(i)
return output_list
def get_all_childNodes_have_id(self, element):
all_childNodes_have_id = []
if not isinstance(element, minidom.Element):
return
if element.hasAttribute('id'):
return [element]
for e in element.childNodes:
all_childNodes_have_id.append(self.get_all_childNodes_have_id(e))
return self.flatten([e for e in all_childNodes_have_id if e])
def update_ref_to_element(self, defs):
new_refs = dict([(e.getAttribute('id'), e)
for e in self.get_all_childNodes_have_id(defs)])
self.ref_to_element.update(new_refs)
def move_into_position(self):
if self.should_center:
self.center()
if self.height is not None:
self.set_height(self.height)
if self.width is not None:
self.set_width(self.width)
class VMobjectFromSVGPathstring(VMobject):
def __init__(self, path_string, **kwargs):
digest_locals(self)
VMobject.__init__(self, **kwargs)
def get_path_commands(self):
result = [
"M", # moveto
"L", # lineto
"H", # horizontal lineto
"V", # vertical lineto
"C", # curveto
"S", # smooth curveto
"Q", # quadratic Bezier curve
"T", # smooth quadratic Bezier curveto
"A", # elliptical Arc
"Z", # closepath
]
result += [s.lower() for s in result]
return result
def generate_points(self):
pattern = "[%s]" % ("".join(self.get_path_commands()))
pairs = list(
zip(re.findall(pattern, self.path_string),
re.split(pattern, self.path_string)[1:]))
        # New points are added directly to this mobject
for command, coord_string in pairs:
self.handle_command(command, coord_string)
# people treat y-coordinate differently
self.rotate(np.pi, RIGHT, about_point=ORIGIN)
def handle_command(self, command, coord_string):
isLower = command.islower()
command = command.upper()
# new_points are the points that will be added to the curr_points
# list. This variable may get modified in the conditionals below.
points = self.points
new_points = self.string_to_points(coord_string)
if isLower and len(points) > 0:
new_points += points[-1]
if command == "M": # moveto
self.start_new_path(new_points[0])
if len(new_points) <= 1:
return
# Draw relative line-to values.
points = self.points
new_points = new_points[1:]
command = "L"
for p in new_points:
if isLower:
# Treat everything as relative line-to until empty
p[0] += self.points[-1, 0]
p[1] += self.points[-1, 1]
self.add_line_to(p)
return
elif command in ["L", "H", "V"]: # lineto
if command == "H":
new_points[0, 1] = points[-1, 1]
elif command == "V":
if isLower:
new_points[0, 0] -= points[-1, 0]
new_points[0, 0] += points[-1, 1]
new_points[0, 1] = new_points[0, 0]
new_points[0, 0] = points[-1, 0]
self.add_line_to(new_points[0])
return
if command == "C": # curveto
pass # Yay! No action required
elif command in ["S", "T"]: # smooth curveto
self.add_smooth_curve_to(*new_points)
# handle1 = points[-1] + (points[-1] - points[-2])
# new_points = np.append([handle1], new_points, axis=0)
return
elif command == "Q": # quadratic Bezier curve
# TODO, this is a suboptimal approximation
new_points = np.append([new_points[0]], new_points, axis=0)
elif command == "A": # elliptical Arc
raise Exception("Not implemented")
elif command == "Z": # closepath
return
# Add first three points
self.add_cubic_bezier_curve_to(*new_points[0:3])
# Handle situations where there's multiple relative control points
if len(new_points) > 3:
# Add subsequent offset points relatively.
for i in range(3, len(new_points), 3):
if isLower:
new_points[i:i + 3] -= points[-1]
new_points[i:i + 3] += new_points[i - 1]
self.add_cubic_bezier_curve_to(*new_points[i:i + 3])
def string_to_points(self, coord_string):
numbers = string_to_numbers(coord_string)
if len(numbers) % 2 == 1:
numbers.append(0)
num_points = len(numbers) // 2
result = np.zeros((num_points, self.dim))
result[:, :2] = np.array(numbers).reshape((num_points, 2))
return result
def get_original_path_string(self):
return self.path_string
| 37.949519 | 79 | 0.559574 | ["MIT"] | Tarang74/manim | manimlib/mobject/svg/svg_mobject.py | 15,787 | Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2019 Zimeng Qiu <[email protected]>
"""
F19 11-411/611 NLP Assignment 3 Task 1
N-gram Language Model Implementation Script
Zimeng Qiu Sep 2019
This is a simple implementation of N-gram language model
Write your own implementation in this file!
"""
import argparse
from utils import *
import numpy as np
class LanguageModel(object):
"""
Base class for all language models
"""
def __init__(self, corpus, ngram, min_freq, uniform=False):
"""
Initialize language model
:param corpus: input text corpus to build LM on
:param ngram: number of n-gram, e.g. 1, 2, 3, ...
:param min_freq: minimum frequency threshold to set a word to UNK placeholder
set to 1 to not use this threshold
:param uniform: boolean flag, set to True to indicate this model is a simple uniform LM
otherwise will be an N-gram model
"""
# write your initialize code below
self.corpus = corpus
self.ngram = ngram
self.min_freq = min_freq
self.uniform = uniform
self.uniform_table = None
self.unigram_table = None
self.bigram_table = None
self.trigram_table = None
self.infrequent_words = find_infrequent_words(self.corpus,self.min_freq)
replace_infrequent_words(self.corpus,self.infrequent_words)
self.corpus_1gram,self.vocabulary,self.V,self.N = get_vocabulary(self.corpus)
self.word_to_idx,self.idx_to_word = get_word_mappings(self.vocabulary)
self.counter_1gram = get_counter(self.corpus_1gram)
self.build()
def build(self):
"""
Build LM from text corpus
"""
# Write your own implementation here
# uniform
if self.uniform:
self.uniform_table = get_uniform_tables(self.V)
else:
# unigram
if self.ngram == 1:
self.unigram_table = get_unigram_tables(self.V,self.N,self.counter_1gram,self.word_to_idx)
# bigram
elif self.ngram == 2:
self.corpus_2gram = [(self.corpus_1gram[i],self.corpus_1gram[i+1]) for i in range(len(self.corpus_1gram)-1)]
self.counter_2gram = get_counter(self.corpus_2gram)
self.bigram_table = get_bigram_tables(self.V,self.counter_1gram,self.counter_2gram,self.word_to_idx,self.idx_to_word)
# trigram
elif self.ngram == 3:
self.corpus_2gram = [(self.corpus_1gram[i],self.corpus_1gram[i+1]) for i in range(len(self.corpus_1gram)-1)]
self.counter_2gram = get_counter(self.corpus_2gram)
self.corpus_3gram = [(self.corpus_1gram[i],self.corpus_1gram[i+1],self.corpus_1gram[i+2]) for i in range(len(self.corpus_1gram)-2)]
self.counter_3gram = get_counter(self.corpus_3gram)
self.trigram_table = get_trigram_tables(self.V,self.counter_2gram,self.counter_3gram,self.word_to_idx)
def most_common_words(self, k):
"""
Return the top-k most frequent n-grams and their frequencies in sorted order.
For uniform models, the frequency should be "1" for each token.
Your return should be sorted in descending order of frequency.
Sort according to ascending alphabet order when multiple words have same frequency.
:return: list[tuple(token, freq)] of top k most common tokens
"""
# Write your own implementation here
if self.uniform:
return [(word,1) for word in sorted(self.vocabulary)[0:k]]
else:
if self.ngram == 1:
return sorted(self.counter_1gram.most_common(),key=lambda x:(-x[1],x[0]))[0:k]
elif self.ngram == 2:
return [(token[0]+' '+token[1],num) for token, num in sorted(self.counter_2gram.most_common(),key=lambda x:(-x[1],x[0]))[0:k]]
elif self.ngram == 3:
return [(token[0]+' '+token[1]+' '+token[2],num) for token,num in sorted(self.counter_3gram.most_common(),key=lambda x:(-x[1],x[0]))[0:k]]
return
def calculate_perplexity(models, coefs, data):
"""
Calculate perplexity with given model
:param models: language models
:param coefs: coefficients
:param data: test data
:return: perplexity
"""
# Write your own implementation here
pp = 0
uniform_prob = []
unigram_prob = []
bigram_prob = []
trigram_prob = []
    prob_table_uniform = None
prob_table_1gram = None
prob_table_2gram = None
prob_table_3gram = None
min_freq = models[0].min_freq
train_vocabulary = models[0].vocabulary
word_to_idx,idx_to_word = models[0].word_to_idx,models[0].idx_to_word
test_infrequent_words = find_infrequent_words(data,min_freq)
replace_infrequent_words(data,test_infrequent_words)
for i in range(len(data)):
for j in range(len(data[i])):
if data[i][j] not in train_vocabulary:
data[i][j] = 'UNK'
corpus_1gram,vocabulary,V,N = get_vocabulary(data)
corpus_2gram = [(corpus_1gram[i],corpus_1gram[i+1]) for i in range(len(corpus_1gram)-1)]
corpus_3gram = [(corpus_1gram[i],corpus_1gram[i+1],corpus_1gram[i+2]) for i in range(len(corpus_1gram)-2)]
for i in range(len(models)):
model = models[i]
if model.uniform:
            prob_table_uniform = model.uniform_table
            for word in corpus_1gram:
                uniform_prob.append(prob_table_uniform[0][word_to_idx[word]]*coefs[0])
else:
if model.ngram == 1:
prob_table_1gram = model.unigram_table
for word in corpus_1gram:
unigram_prob.append(prob_table_1gram[0][word_to_idx[word]]*coefs[1])
elif model.ngram == 2:
prob_table_2gram = model.bigram_table
bigram_prob.append(prob_table_1gram[0][word_to_idx[corpus_2gram[0][0]]])
for words in corpus_2gram:
word1 = words[0]
word2 = words[1]
prob_1gram = prob_table_1gram[0][word_to_idx[word2]]
prob_2gram = prob_table_2gram[word_to_idx[word1]][word_to_idx[word2]]
if prob_2gram != 0:
bigram_prob.append(prob_2gram*coefs[2])
else:
bigram_prob.append(prob_1gram*coefs[2])
elif model.ngram == 3:
prob_table_3gram = model.trigram_table
train_corpus_3gram = set(model.corpus_3gram)
trigram_prob.append(prob_table_1gram[0][word_to_idx[corpus_3gram[0][0]]])
trigram_prob.append(prob_table_1gram[0][word_to_idx[corpus_3gram[0][1]]])
for words in corpus_3gram:
word1 = words[0]
word2 = words[1]
word3 = words[2]
if words in train_corpus_3gram:
prob_3gram = prob_table_3gram[(word1,word2,word3)]
trigram_prob.append(prob_3gram*coefs[3])
else:
prob_1gram = prob_table_1gram[0][word_to_idx[word3]]
prob_2gram = prob_table_2gram[word_to_idx[word2]][word_to_idx[word3]]
if prob_2gram != 0:
trigram_prob.append(prob_2gram*coefs[3])
else:
trigram_prob.append(prob_1gram*coefs[3])
prob = np.zeros((N,),dtype=np.float64)
for i in range(len(prob)):
prob[i] += uniform_prob[i]
prob[i] += unigram_prob[i]
prob[i] += bigram_prob[i]
prob[i] += trigram_prob[i]
for p in prob:
pp += np.log2(p)
pp /= -N
pp = np.power(2,pp)
return pp
# Do not modify this function!
def parse_args():
"""
Parse input positional arguments from command line
:return: args - parsed arguments
"""
parser = argparse.ArgumentParser('N-gram Language Model')
parser.add_argument('coef_unif', help='coefficient for the uniform model.', type=float)
parser.add_argument('coef_uni', help='coefficient for the unigram model.', type=float)
parser.add_argument('coef_bi', help='coefficient for the bigram model.', type=float)
parser.add_argument('coef_tri', help='coefficient for the trigram model.', type=float)
parser.add_argument('min_freq', type=int,
help='minimum frequency threshold for substitute '
'with UNK token, set to 1 for not use this threshold')
parser.add_argument('testfile', help='test text file.')
parser.add_argument('trainfile', help='training text file.', nargs='+')
return parser.parse_args()
# Main executable script provided for your convenience
# Not executed on autograder, so do what you want
if __name__ == '__main__':
# parse arguments
args = parse_args()
# load and preprocess train and test data
train = preprocess(load_dataset(args.trainfile))
test = preprocess(read_file(args.testfile))
# build language models
uniform = LanguageModel(train, ngram=1, min_freq=args.min_freq, uniform=True)
unigram = LanguageModel(train, ngram=1, min_freq=args.min_freq)
# print('Unique 1-gram types:',len(unigram.counter_1gram.most_common()))
# print('top 15 unigram:',unigram.counter_1gram.most_common()[:15])
bigram = LanguageModel(train, ngram=2, min_freq=args.min_freq)
# print('Unique 2-gram types:',len(bigram.counter_2gram.most_common()))
# print('top 15 bigram:',bigram.counter_2gram.most_common()[:15])
trigram = LanguageModel(train, ngram=3, min_freq=args.min_freq)
# print('Unique 3-gram types:',len(trigram.counter_3gram.most_common()))
# print('top 15 trigram:',trigram.counter_3gram.most_common()[:50])
# calculate perplexity on test file
ppl = calculate_perplexity(
models=[uniform, unigram, bigram, trigram],
coefs=[args.coef_unif, args.coef_uni, args.coef_bi, args.coef_tri],
data=test)
print("Perplexity: {}".format(ppl))
| 40.117647 | 154 | 0.623851 | ["MIT"] | alvisdeng/NLP-Language-Model | lm.py | 10,230 | Python |
"""
---
title: Compressive Transformer Experiment
summary: This experiment trains a compressive transformer model on the tiny Shakespeare dataset.
---
# Compressive Transformer Experiment
This is an annotated PyTorch experiment to train a compressive transformer model.
"""
from typing import List, Tuple, NamedTuple
import torch
import torch.nn as nn
from labml import experiment, tracker, monit, logger
from labml.configs import option
from labml.logger import Text
from labml_helpers.metrics.simple_state import SimpleStateModule
from labml_helpers.module import Module
from labml_helpers.train_valid import BatchIndex, hook_model_outputs
from labml_nn.experiments.nlp_autoregression import NLPAutoRegressionConfigs
from labml_nn.transformers.compressive import CompressiveTransformer, AttentionReconstructionLoss, \
CompressiveTransformerLayer, Conv1dCompression
class CompressedMemory(NamedTuple):
mem: List[torch.Tensor]
c_mem: List[torch.Tensor]
class AutoregressiveModel(Module):
"""
## Auto regressive model
"""
def __init__(self, n_vocab: int, d_model: int, transformer: CompressiveTransformer):
super().__init__()
# Token embedding module
self.src_embed = nn.Embedding(n_vocab, d_model)
# Transformer
self.transformer = transformer
# Final layer
self.generator = nn.Linear(d_model, n_vocab)
# Masks
self.mask_x = None
self.mask_mem = None
def forward(self, x: torch.Tensor, mem: CompressedMemory):
# Get memory and compressed memory
if mem is not None:
mem, c_mem = mem.mem, mem.c_mem
else:
mem = []
c_mem = []
# Total length of the memory and compressed memory (for masks)
m_len = len(mem[0]) if mem else 0
if c_mem:
m_len += len(c_mem[0])
# Create a subsequent mask for tokens
if self.mask_x is None or self.mask_x.shape[0] < len(x):
from labml_nn.transformers.utils import subsequent_mask
self.mask_x = subsequent_mask(len(x)).to(x.device)
# Create an all ones (full visibility) mask for memory
if self.mask_mem is None or self.mask_mem.shape[1] < m_len or self.mask_mem.shape[0] < len(x):
self.mask_mem = self.mask_x.new_ones(len(x), m_len, 1)
# Concatenate the masks if there is memory
if m_len:
mask = torch.cat((self.mask_mem[:len(x), :m_len], self.mask_x[:len(x), :len(x)]), dim=1)
# Use only the subsequent mask otherwise
else:
mask = self.mask_x[:len(x), :len(x)]
# Token embeddings
x = self.src_embed(x)
# Run it through the transformer
res, mem = self.transformer(x, mem, c_mem, mask)
# Generate logits of the next token
res = self.generator(res)
#
return res, mem
class Configs(NLPAutoRegressionConfigs):
"""
## Configurations
The default configurations can and will be overridden when we start the experiment.
"""
model: AutoregressiveModel
# Token embedding size
d_model: int = 128
# Number of attention heads
heads: int = 4
# Dropout probability
dropout: float = 0.0
# Number of features in FFN hidden layer
d_ff: int = 256
# Number of transformer layers
n_layers: int = 6
# Number of memories to keep
mem_len: int = 8
# State module to maintain memories when switching between training and validation
memory = SimpleStateModule()
# Attention Reconstruction Loss
attention_reconstruction_loss: AttentionReconstructionLoss
# Compression rate
compression_rate: int = 4
# Compressed memory length
c_mem_len: int = 128
def init(self):
# Set tracker configurations
tracker.set_scalar("accuracy.*", True)
tracker.set_scalar("loss.*", True)
# Do not print the attention reconstruction loss in the terminal
tracker.set_scalar("ar_loss.*", False)
# Add a hook to log module outputs
hook_model_outputs(self.mode, self.model, 'model')
# This will keep the accuracy metric stats and memories separate for training and validation.
self.state_modules = [self.accuracy, self.memory]
@torch.no_grad()
def merge_compress_memory(self, mem: CompressedMemory, new_mem: List[torch.Tensor]) \
-> Tuple[CompressedMemory, List[torch.Tensor]]:
"""
Concatenate new memories and compress the oldest memories.
"""
# If the configurations specify not to use memory
if self.mem_len == 0 and self.c_mem_len == 0:
return CompressedMemory([], []), []
# Get memory and compressed memory
if mem is not None:
mem, c_mem = mem.mem, mem.c_mem
else:
mem, c_mem = [], []
# Concatenate new memories with old memory
if mem:
mem = [torch.cat((m, x), dim=0) for m, x in zip(mem, new_mem)]
else:
mem = new_mem
# Compress the oldest memories if there are more memories than `mem_len`
if len(mem[0]) > self.mem_len:
# Calculate the number of compressed memories to make $n_{cm} = \bigg\lceil\frac{n'_m - N_m}{c}\bigg\rceil$,
# where $n'_m$ is the number of memories we have
# and $N_m$ is the maximum number of memories we maintain (`mem_len`).
n_c_mem = (len(mem[0]) - self.mem_len + self.compression_rate - 1) // self.compression_rate
# Number of memories to compress $c n_{cm}$
n_old = n_c_mem * self.compression_rate
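            # Worked example (hypothetical sizes): with mem_len=8,
            # compression_rate=4 and 13 memories, n_c_mem = ceil(5/4) = 2 and
            # n_old = 8, so the 8 oldest memories are squashed into 2.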
# A list to keep memories that need to be compressed for each layer.
mem_to_compress = []
# A list to keep the memories that do not get compressed for each layer.
uncompressed_mem = []
# Iterate through memories of each layer.
for m in mem:
# Split the memories at $c n_{cm}$
cm, m = torch.split(m, [n_old, len(m) - n_old])
# Collect memories to compress
mem_to_compress.append(cm)
# Collect remaining memories
uncompressed_mem.append(m)
# Update the memories
mem = uncompressed_mem
# Compress the memories
new_c_mem = []
for i, layer in enumerate(self.model.transformer.layers):
new_c_mem.append(layer.compress(mem_to_compress[i]))
# Concatenate newly compressed memories with old compressed memories
if c_mem:
c_mem = [torch.cat((m, nm), dim=0) for m, nm in zip(c_mem, new_c_mem)]
# If there are no old compressed memories
else:
c_mem = new_c_mem
# Truncate old memories
if len(c_mem[0]) > self.c_mem_len:
c_mem = [m[-self.c_mem_len:] for m in c_mem]
# No memories are compressed if the number of memories is less than `mem_len`
else:
mem_to_compress = []
# Return memories and the memories that were compressed.
# Memories that were compressed are needed for the reconstruction loss computation.
return CompressedMemory(mem, c_mem), mem_to_compress
def step(self, batch: any, batch_idx: BatchIndex):
"""
### Training/validation step
"""
# Move data to the device
data, target = batch[0].to(self.device), batch[1].to(self.device)
# Update global step (number of tokens processed) when in training mode
if self.mode.is_train:
tracker.add_global_step(data.shape[0] * data.shape[1])
# Whether to capture model outputs
with self.mode.update(is_log_activations=batch_idx.is_last):
# Get memories
mem = self.memory.get()
# Run the model
output, new_mem = self.model(data, mem)
# Merge and compress memory
mem, mem_to_compress = self.merge_compress_memory(mem, new_mem)
# Update memories
self.memory.set(mem)
# Calculate and log cross entropy loss
loss = self.loss_func(output, target)
tracker.add("loss.", loss)
# Calculate attention reconstruction loss if memories were compressed in this step
if mem_to_compress:
# Get attention reconstruction loss
ar_loss = self.attention_reconstruction_loss(new_mem, mem_to_compress)
# Track attention reconstruction loss
tracker.add("ar_loss.", ar_loss)
# Add attention reconstruction loss to loss
loss = loss + ar_loss
# Calculate and log accuracy
self.accuracy(output, target)
self.accuracy.track()
# Train the model
if self.mode.is_train:
# Calculate gradients
loss.backward()
# Clip gradients
torch.nn.utils.clip_grad_norm_(self.model.parameters(), max_norm=self.grad_norm_clip)
# Take optimizer step
self.optimizer.step()
# Log the model parameters and gradients on last batch of every epoch
if batch_idx.is_last:
tracker.add('model', self.model)
# Clear the gradients
self.optimizer.zero_grad()
# Save the tracked metrics
tracker.save()
def sample(self):
"""
### Sampling function to generate samples periodically while training
"""
# Starting prompt
prompt = self.prompt
# Collect output for printing
log = [(prompt, Text.subtle)]
# memory
mem = CompressedMemory([], [])
# Sample 25 tokens
for i in monit.iterate('Sample', 25):
# Tokenize the prompt
data = self.text.text_to_i(prompt).unsqueeze(-1)
# Move to device
data = data.to(self.device)
# Get the model output
output, new_mem = self.model(data, mem)
# Get the model prediction (greedy)
output = output.argmax(dim=-1).squeeze(1)
# Add the prediction to prompt
prompt += self.prompt_separator + self.text.itos[output[-1]]
# Only feed the last character to model in next iteration, rest will go in as memories
prompt = prompt[-1:]
# Add the prediction for logging
log += [(self.prompt_separator + self.text.itos[output[-1]], Text.value)]
# Update and compress memory
mem, _ = self.merge_compress_memory(mem, new_mem)
# Print the sampled output
logger.log(log)
@option(Configs.model)
def autoregressive_model(c: Configs):
"""
### Initialize the auto-regressive model
"""
from labml_nn.transformers.xl import RelativeMultiHeadAttention
from labml_nn.transformers.feed_forward import FeedForward
m = AutoregressiveModel(c.n_tokens, c.d_model, CompressiveTransformer(
CompressiveTransformerLayer(d_model=c.d_model,
self_attn=RelativeMultiHeadAttention(c.heads, c.d_model, c.dropout),
feed_forward=FeedForward(c.d_model, c.d_ff, c.dropout),
dropout_prob=c.dropout,
compress=Conv1dCompression(c.compression_rate, c.d_model)), c.n_layers))
return m.to(c.device)
@option(Configs.attention_reconstruction_loss)
def attention_reconstruction_loss(c: Configs):
"""
### Initialize the attention reconstruction loss
"""
return AttentionReconstructionLoss(c.model.transformer.layers)
def main():
"""
### Run the experiment
"""
# Create experiment
experiment.create(name="compressive_transformer", comment='')
# Create configs
conf = Configs()
# Load configurations
experiment.configs(conf,
# A dictionary of configurations to override
{'tokenizer': 'character',
'text': 'tiny_shakespeare',
'optimizer.learning_rate': 2.5e-4,
'optimizer.optimizer': 'AdamW',
'prompt': 'It is',
'prompt_separator': '',
'train_loader': 'sequential_train_loader',
'valid_loader': 'sequential_valid_loader',
'seq_len': 8,
'mem_len': 8,
'epochs': 128,
'batch_size': 32,
'inner_iterations': 25,
'compression_rate': 2,
})
# Set models for saving and loading
experiment.add_pytorch_models({'model': conf.model})
# Start the experiment
with experiment.start():
# `TrainValidConfigs.run`
conf.run()
#
if __name__ == '__main__':
main()
| 36.883853 | 120 | 0.606836 | ["MIT"] | Aarsh2001/annotated_deep_learning_paper_implementations | labml_nn/transformers/compressive/experiment.py | 13,020 | Python |
# coding:utf8
from flask import request
routes = dict()
class ApiServiceBase(type):
def __new__(cls, name, base, attrs):
        # super(type, obj) requires isinstance(obj, type)
return super(ApiServiceBase, cls).__new__(cls, name, base, attrs)
def __init__(self, name, base, attrs):
if name == 'ApiService':
pass
else:
route = '/' + self.app + '/' + self.resource
if self.resource:
route += '/'
routes[route] = {
'cls': self
}
class ApiService(object):
__metaclass__ = ApiServiceBase
def handle(self):
self.request = request
req_method = getattr(self, self.request.method.lower(), None)
return req_method()
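# A minimal usage sketch (hypothetical subclass and attribute names): defining
# a service registers its URL in `routes` through the metaclass, e.g.
#   class UserService(ApiService):
#       app = 'account'
#       resource = 'users'
#       def get(self):
#           return 'list of users'
# adds the key '/account/users/' to `routes`; handle() then dispatches GET
# requests to UserService.get.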
| 19.117647 | 67 | 0.666154 | ["BSD-3-Clause"] | qzlzwhx/flask | flask/api_service.py | 650 | Python |
"""
# EXCEL SHEET COLUMN TITLE
Given a positive integer, return its corresponding column title as appear in an Excel sheet.
For example:
1 -> A
2 -> B
3 -> C
...
26 -> Z
27 -> AA
28 -> AB
...
Example 1:
Input: 1
Output: "A"
Example 2:
Input: 28
Output: "AB"
Example 3:
Input: 701
Output: "ZY"
"""
class Solution:
def convertToTitle(self, n: int) -> str:
if n == 0:
return ""
res = ""
while n > 0:
rem = n % 26
if rem == 0:
rem = 26
res = chr(64+rem) + res
n = n // 26
if rem == 26:
n -= 1
        return res
| 15 | 92 | 0.434043 | ["MIT"] | das-jishu/data-structures-basics-leetcode | Leetcode/easy/excel-sheet-column-title.py | 705 | Python |
from .dbnet import DBNet
from .drrg import DRRG
from .fcenet import FCENet
from .ocr_mask_rcnn import OCRMaskRCNN
from .panet import PANet
from .psenet import PSENet
from .single_stage_text_detector import SingleStageTextDetector
from .text_detector_mixin import TextDetectorMixin
from .textsnake import TextSnake
__all__ = [
'TextDetectorMixin', 'SingleStageTextDetector', 'OCRMaskRCNN', 'DBNet',
'PANet', 'PSENet', 'TextSnake', 'FCENet', 'DRRG'
]
| 30.533333 | 75 | 0.790393 | ["Apache-2.0"] | A465539338/mmocr | mmocr/models/textdet/detectors/__init__.py | 458 | Python |
from __future__ import print_function
import random
import struct
import sys
import time
import os
import zmq
from msgpack import ExtType, packb, unpackb
class AlignClient(object):
# A synchronous Python2 alignment client
REQ_PREFIX = struct.Struct('=HH')
REQ_SUFFIX = struct.Struct('=Ld')
RESP = struct.Struct('=HHLd?')
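    # Wire format implied by the structs above (a reading aid, not a protocol
    # spec): the request header is
    #   pid:uint16 | rand:uint16 | req_id:uint32 | timestamp:double   (16 bytes)
    # and each request goes out as the multipart message
    #   [header, b'align', msgpack(args), msgpack(kwargs)].
    # The 17-byte response header adds a trailing is_error:bool.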
def __init__(self, port):
self.port = port
self.ctx = zmq.Context()
self.sock = self.ctx.socket(zmq.DEALER)
self.sock.connect('tcp://127.0.0.1:{}'.format(self.port))
self.counter = 0
self.prefix = self.REQ_PREFIX.pack(os.getpid() % 0x10000,
random.randrange(0x10000))
def packb(self, data):
return packb(data, encoding='utf-8', use_bin_type=True)
def unpackb(self, packed):
return unpackb(packed, use_list=False, encoding='utf-8')
def _new_id(self):
self.counter += 1
if self.counter > 0xffffffff:
self.counter = 0
return (self.prefix + self.REQ_SUFFIX.pack(self.counter, time.time()),
self.counter)
def align(self, sequence):
header, req_id = self._new_id()
bname = 'align'.encode('utf-8')
bargs = self.packb([sequence.decode()])
bkwargs = self.packb(dict())
msg = [header, bname, bargs, bkwargs]
self.sock.send_multipart(msg)
while True:
try:
data = self.sock.recv_multipart(zmq.NOBLOCK)
except zmq.ZMQError as e:
time.sleep(0.1)
else:
header, banswer = data
pid, rnd, res_req_id, timestamp, is_error = self.RESP.unpack(header)
if res_req_id != req_id:
                    raise ValueError('Received response for request {}, but sent {}.'.format(res_req_id, req_id))
answer = self.unpackb(banswer)
return answer
if __name__ == '__main__':
port, seq = sys.argv[1:3]
client = AlignClient(port)
alignments = client.align(seq)
print(alignments)
| 30.085714 | 113 | 0.583096 | ["MPL-2.0"] | SamStudio8/pomoxis | pomoxis/align/py2client.py | 2,106 | Python |
##########################################################################
#
# Copyright (c) 2017, John Haddon. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
import GafferUITest
import GafferScene
import GafferSceneUI
import GafferDelight
import GafferDelightUI
class DocumentationTest( GafferUITest.TestCase ) :
def test( self ) :
self.maxDiff = None
self.assertNodesAreDocumented(
GafferDelight,
additionalTerminalPlugTypes = ( GafferScene.ScenePlug, )
)
if __name__ == "__main__":
unittest.main()
| 39.109091 | 77 | 0.69642 | ["BSD-3-Clause"] | ACFX/gaffer | python/GafferDelightUITest/DocumentationTest.py | 2,151 | Python |
from .config import Config
from .tools import Tools
| 13.25 | 26 | 0.792453 | ["MIT"] | prise6/smart-iss-posts | iss/tools/__init__.py | 53 | Python |
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
import numpy as np
from ._base import Ordination, OrdinationResults
from ._utils import svd_rank
class CA(Ordination):
r"""Compute correspondence analysis, a multivariate statistical
technique for ordination.
In general, rows in the data table will correspond to sites and
columns to species, but the method is symmetric. In order to
measure the correspondence between rows and columns, the
:math:`\chi^2` distance is used, and those distances are preserved
in the transformed space. The :math:`\chi^2` distance doesn't take
double zeros into account, and so it is expected to produce better
ordination that PCA when the data has lots of zero values.
It is related to Principal Component Analysis (PCA) but it should
be preferred in the case of steep or long gradients, that is, when
there are many zeros in the input data matrix.
Parameters
----------
X : array_like
Contingency table. It can be applied to different kinds of
data tables but data must be non-negative and dimensionally
homogeneous (quantitative or binary).
Notes
-----
The algorithm is based on [1]_, \S 9.4.1., and is expected to give
the same results as ``cca(X)`` in R's package vegan.
See Also
--------
CCA
References
----------
.. [1] Legendre P. and Legendre L. 1998. Numerical
Ecology. Elsevier, Amsterdam.
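    Examples
    --------
    A minimal, illustrative sketch with a made-up contingency table
    (rows are sites, columns are species):
    >>> import numpy as np
    >>> X = np.array([[10, 10, 20],
    ...               [10, 15, 10],
    ...               [15,  5,  5]])
    >>> ca = CA(X, row_ids=['S1', 'S2', 'S3'],
    ...         column_ids=['sp1', 'sp2', 'sp3'])
    >>> ordination = ca.scores(scaling=1)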
"""
short_method_name = 'CA'
    long_method_name = 'Correspondence Analysis'
def __init__(self, X, row_ids, column_ids):
self.X = np.asarray(X, dtype=np.float64)
self._ca()
self.row_ids = row_ids
self.column_ids = column_ids
def _ca(self):
X = self.X
r, c = X.shape
if X.min() < 0:
raise ValueError("Input matrix elements must be non-negative.")
# Step 1 (similar to Pearson chi-square statistic)
grand_total = X.sum()
Q = X / grand_total
column_marginals = Q.sum(axis=0)
row_marginals = Q.sum(axis=1)
# Let's store them since they're needed to compute scores
self.column_marginals = column_marginals
self.row_marginals = row_marginals
# Formula 9.32 in Lagrange & Lagrange (1998). Notice that it's
# an scaled version of the contribution of each cell towards
# Pearson chi-square statistic.
expected = np.outer(row_marginals, column_marginals)
Q_bar = (Q - expected) / np.sqrt(expected) # Eq. 9.32
# Step 2 (Singular Value Decomposition)
U_hat, W, Ut = np.linalg.svd(Q_bar, full_matrices=False)
# Due to the centering, there are at most min(r, c) - 1 non-zero
# eigenvalues (which are all positive)
rank = svd_rank(Q_bar.shape, W)
assert rank <= min(r, c) - 1
self.U_hat = U_hat[:, :rank]
self.W = W[:rank]
self.U = Ut[:rank].T
def scores(self, scaling):
r"""Compute site and species scores for different scalings.
Parameters
----------
scaling : int
For a more detailed explanation of the interpretation, check
Legendre & Legendre 1998, section 9.4.3. The notes that
follow are quick recommendations.
Scaling type 1 maintains :math:`\chi^2` distances between
rows (sites): in the transformed space, the euclidean
distances between rows are equal to the :math:`\chi^2`
distances between rows in the original space. It should be
used when studying the ordination of sites. Rows (sites)
that are near a column (species) have high contributions
from it.
Scaling type 2 preserves :math:`\chi^2` distances between
columns (species), so euclidean distance between columns
after transformation is equal to :math:`\chi^2` distance
between columns in the original space. It is best used
when we are interested in the ordination of species. A
column (species) that is next to a row (site) means that
it is more abundant there.
Other types of scalings are currently not implemented, as
they're less used by ecologists (Legendre & Legendre 1998,
p. 456).
In general, species appearing far from the center of the
biplot and far from its edges will probably exhibit better
relationships than species either in the center (may be
multimodal species, not related to the shown ordination
axes...) or the edges (sparse species...).
Returns
-------
OrdinationResults
Object that stores the computed eigenvalues, the
proportion explained by each of them (per unit),
transformed coordinates, etc.
See Also
--------
OrdinationResults
"""
if scaling not in {1, 2}:
raise NotImplementedError(
"Scaling {0} not implemented.".format(scaling))
# Both scalings are a bit intertwined, so we'll compute both and
# then choose
V = self.column_marginals[:, None]**-0.5 * self.U
V_hat = self.row_marginals[:, None]**-0.5 * self.U_hat
F = V_hat * self.W
# According to Formula 9.43, this should hold
# assert np.allclose(F, (row_marginals**-1)[:, None] * Q.dot(V))
# but it doesn't (notice that W**2==Lambda):
# (9.43a) F = V_hat W = D(p_i+)^{-1/2} U_hat W
# = D(p_i+)^{-1/2} Q_bar U W^{-1} W (substituting 9.38)
# = D(p_i+)^{-1/2} Q_bar U
# (9.43b) F = D(p_i+)^{-1} Q V
# = D(p_i+)^{-1} Q D(p_+j)^{-1/2} U (substituting 9.41)
# = D(p_i+)^{-1/2} D(p_i+)^{-1/2} Q D(p_+j)^{-1/2} U
# = D(p_i+)^{-1/2} Q_tilde U (using 9.40)
# It holds if we replace Q in 9.43b with Q after centering, ie
# assert np.allclose(
# F,
# (row_marginals**-1)[:, None] * (Q - expected).dot(V))
# Comparing results with vegan and the examples in the book, 9.43a
# is the right one. The same issue happens in 9.44, where also
# 9.44a is the one that matches vegan's output.
# (9.44a) F_hat = V W = D(p_+j)^{-1/2} U W
# = D(p_+j)^{-1/2} Q_bar' U_hat W^{-1} W (using 9.39)
# = D(p_+j)^{-1/2} Q_bar' U_hat
# (9.44b) F_hat = D(p_+j)^{-1} Q' V_hat
# = D(p_+j)^{-1/2} Q_tilde' U_hat (using 9.40 and 9.42)
F_hat = V * self.W
# Eigenvalues
eigvals = self.W**2
# Species scores
species_scores = [V, F_hat][scaling - 1]
# Site scores (weighted averages of species scores)
site_scores = [F, V_hat][scaling - 1]
return OrdinationResults(eigvals=eigvals, species=species_scores,
site=site_scores, site_ids=self.row_ids,
species_ids=self.column_ids)
| 39.957447 | 78 | 0.582801 | ["BSD-3-Clause"] | JWDebelius/scikit-bio | skbio/stats/ordination/_correspondence_analysis.py | 7,512 | Python |
"""
Projection class for the Sunyaev-Zeldovich effect. Requires SZpack (version 1.1.1),
which is included in SZpack.v1.1.1 and will be automatically installed.
Website for the SZpack library: http://www.chluba.de/SZpack/
For details on the computations involved please refer to the following references:
Chluba, Nagai, Sazonov, Nelson, MNRAS, 2012, arXiv:1205.5778
Chluba, Switzer, Nagai, Nelson, MNRAS, 2012, arXiv:1211.3206
Many thanks to John ZuHone, who wrote the yt part of this model.
"""
import numpy as np
from pymsz.SZpacklib import SZpack
# I0 = (2 * (kboltz * Tcmb)**3 / ((hcgs * clight)**2) / units.sr).in_units("MJy/steradian")
class SZpack_model(object):
r""" Theoretical calculation of y and T_sz -map for the thermal SZ effect.
model = TH_model(model_file, npixel, axis)
Parameters
----------
simudata : the simulation data from load_data
freqs : The frequencies (in GHz) at which to compute the SZ spectral distortion. array_like
npixel : number of pixels for your image, int.
Assume that x-y have the same number of pixels
axis : can be 'x', 'y', 'z', or a list of degrees [alpha, beta, gamma],
which will rotate the data points by $\alpha$ around the x-axis,
$\beta$ around the y-axis, and $\gamma$ around the z-axis
neighbours: this parameter only works with simulation data (not yt data).
If this is set, it will force the SPH particles smoothed into nearby N
neighbours, HSML from the simulation will be ignored.
If no HSML provided in the simulation, neighbours = 27
AR : angular resolution in arcsec.
Default : None, which gives npixel = 2 * cluster radius
and ignores the cluster's redshift.
Otherwise, cluster's redshift with AR decides how large the cluster looks.
redshift : The redshift where the cluster is at.
Default : None, we will look it from simulation data.
If redshift = 0, it will be automatically put into 0.02,
unless AR is set to None.
high_order : boolean, optional
Should we calculate high-order moments of velocity and temperature?
Returns
-------
Theoretical projected y-map in a given direction. 2D mesh data right now.
See also
--------
SZ_models for the mock SZ signal at different frequencies.
Notes
-----
Examples
--------
>>> freqs = [90., 180., 240.]
    >>> szprj = SZpack_model(simudata, freqs, npixel=500, axis='z')
"""
def __init__(self, simudata, freqs, npixel=500, neighbours=None, axis='z', AR=None,
redshift=None):
self.npl = npixel
self.ngb = neighbours
self.ax = axis
self.ar = AR
self.red = redshift
self.pxs = 0
self.ydata = np.array([])
self.freqs = np.asarray(freqs)
if simudata.data_type == "snapshot":
self._cal_ss(simudata)
elif simudata.data_type == "yt_data":
self._cal_yt(simudata)
else:
            raise ValueError("Unsupported data type %s. "
                             "Please use load_data to prepare the data" % simudata.data_type)
# def _cal_ss(self, simd):
# Kpc = 3.0856775809623245e+21 # cm
# simd.prep_ss_SZ()
#
# def _cal_yt(self, simd):
# from yt.config import ytcfg
# from yt.utilities.physical_constants import sigma_thompson, clight, mh
# # kboltz, Tcmb, hcgs,
# from yt.funcs import fix_axis, get_pbar
# from yt.visualization.volume_rendering.off_axis_projection import \
# off_axis_projection
# from yt.utilities.parallel_tools.parallel_analysis_interface import \
# communication_system, parallel_root_only
# # from yt import units
# from yt.utilities.on_demand_imports import _astropy
#
# def generate_beta_par(L):
# def _beta_par(field, data):
# vpar = data["density"] * (data["velocity_x"] * L[0] +
# data["velocity_y"] * L[1] +
# data["velocity_z"] * L[2])
# return vpar / clight
# return _beta_par
# Ptype = simd.prep_yt_SZ()
#
# # self.ds = ds
# # self.num_freqs = len(freqs)
# # self.high_order = high_order
# # self.freqs = ds.arr(freqs, "GHz")
# # self.mueinv = 1. / mue
# # self.xinit = hcgs * self.freqs.in_units("Hz") / (kboltz * Tcmb)
# # self.freq_fields = ["%d_GHz" % (int(freq)) for freq in freqs]
# # self.data = {}
# #
# # self.display_names = {}
# # self.display_names["TeSZ"] = r"$\mathrm{T_e}$"
# # self.display_names["Tau"] = r"$\mathrm{\tau}$"
# #
# # for f, field in zip(self.freqs, self.freq_fields):
# # self.display_names[field] = r"$\mathrm{\Delta{I}_{%d\ GHz}}$" % int(f)
# #
# # def on_axis(self, axis, center="c", width=(1, "unitary"), nx=800, source=None):
# # r""" Make an on-axis projection of the SZ signal.
# #
# # Parameters
# # ----------
# # axis : integer or string
# # The axis of the simulation domain along which to make the SZprojection.
# # center : A sequence of floats, a string, or a tuple.
# # The coordinate of the center of the image. If set to 'c', 'center' or
# # left blank, the plot is centered on the middle of the domain. If set to
# # 'max' or 'm', the center will be located at the maximum of the
# # ('gas', 'density') field. Centering on the max or min of a specific
# # field is supported by providing a tuple such as ("min","temperature") or
# # ("max","dark_matter_density"). Units can be specified by passing in *center*
# # as a tuple containing a coordinate and string unit name or by passing
# # in a YTArray. If a list or unitless array is supplied, code units are
# # assumed.
# # width : tuple or a float.
# # Width can have four different formats to support windows with variable
# # x and y widths. They are:
# #
# # ================================== =======================
# # format example
# # ================================== =======================
# # (float, string) (10,'kpc')
# # ((float, string), (float, string)) ((10,'kpc'),(15,'kpc'))
# # float 0.2
# # (float, float) (0.2, 0.3)
# # ================================== =======================
# #
# # For example, (10, 'kpc') requests a plot window that is 10 kiloparsecs
# # wide in the x and y directions, ((10,'kpc'),(15,'kpc')) requests a
# # window that is 10 kiloparsecs wide along the x axis and 15
# # kiloparsecs wide along the y axis. In the other two examples, code
# # units are assumed, for example (0.2, 0.3) requests a plot that has an
# # x width of 0.2 and a y width of 0.3 in code units. If units are
# # provided the resulting plot axis labels will use the supplied units.
# # nx : integer, optional
# # The dimensions on a side of the projection image.
# # source : yt.data_objects.data_containers.YTSelectionContainer, optional
# # If specified, this will be the data source used for selecting regions to project.
# #
# # Examples
# # --------
# # >>> szprj.on_axis("y", center="max", width=(1.0, "Mpc"), source=my_sphere)
# # """
#
# axis = fix_axis(axis, self.ds)
# ctr, dctr = self.ds.coordinates.sanitize_center(center, axis)
# width = self.ds.coordinates.sanitize_width(axis, width, None)
#
# L = np.zeros(3)
# L[axis] = 1.0
#
# beta_par = generate_beta_par(L)
# self.ds.add_field(("gas", "beta_par"), function=beta_par, units="g/cm**3")
# setup_sunyaev_zeldovich_fields(self.ds)
# proj = self.ds.proj("density", axis, center=ctr, data_source=source)
# frb = proj.to_frb(width[0], nx, height=width[1])
# dens = frb["density"]
# Te = frb["t_sz"] / dens
# bpar = frb["beta_par"] / dens
# omega1 = frb["t_squared"] / dens / (Te * Te) - 1.
# bperp2 = np.zeros((nx, nx))
# sigma1 = np.zeros((nx, nx))
# kappa1 = np.zeros((nx, nx))
# if self.high_order:
# bperp2 = frb["beta_perp_squared"] / dens
# sigma1 = frb["t_beta_par"] / dens / Te - bpar
# kappa1 = frb["beta_par_squared"] / dens - bpar * bpar
# tau = sigma_thompson * dens * self.mueinv / mh
#
# nx, ny = frb.buff_size
# self.bounds = frb.bounds
# self.dx = (frb.bounds[1] - frb.bounds[0]) / nx
# self.dy = (frb.bounds[3] - frb.bounds[2]) / ny
# self.nx = nx
#
# self._compute_intensity(np.array(tau), np.array(Te), np.array(bpar),
# np.array(omega1), np.array(sigma1),
# np.array(kappa1), np.array(bperp2))
#
# self.ds.field_info.pop(("gas", "beta_par"))
#
# def off_axis(self, L, center="c", width=(1.0, "unitary"), depth=(1.0, "unitary"),
# nx=800, nz=800, north_vector=None, no_ghost=False, source=None):
# r""" Make an off-axis projection of the SZ signal.
#
# Parameters
# ----------
# L : array_like
# The normal vector of the projection.
# center : A sequence of floats, a string, or a tuple.
# The coordinate of the center of the image. If set to 'c', 'center' or
# left blank, the plot is centered on the middle of the domain. If set to
# 'max' or 'm', the center will be located at the maximum of the
# ('gas', 'density') field. Centering on the max or min of a specific
# field is supported by providing a tuple such as ("min","temperature") or
# ("max","dark_matter_density"). Units can be specified by passing in *center*
# as a tuple containing a coordinate and string unit name or by passing
# in a YTArray. If a list or unitless array is supplied, code units are
# assumed.
# width : tuple or a float.
# Width can have four different formats to support windows with variable
# x and y widths. They are:
#
# ================================== =======================
# format example
# ================================== =======================
# (float, string) (10,'kpc')
# ((float, string), (float, string)) ((10,'kpc'),(15,'kpc'))
# float 0.2
# (float, float) (0.2, 0.3)
# ================================== =======================
#
# For example, (10, 'kpc') requests a plot window that is 10 kiloparsecs
# wide in the x and y directions, ((10,'kpc'),(15,'kpc')) requests a
# window that is 10 kiloparsecs wide along the x axis and 15
# kiloparsecs wide along the y axis. In the other two examples, code
# units are assumed, for example (0.2, 0.3) requests a plot that has an
# x width of 0.2 and a y width of 0.3 in code units. If units are
# provided the resulting plot axis labels will use the supplied units.
# depth : A tuple or a float
# A tuple containing the depth to project through and the string
# key of the unit: (width, 'unit'). If set to a float, code units
# are assumed
# nx : integer, optional
# The dimensions on a side of the projection image.
# nz : integer, optional
# Deprecated, this is still in the function signature for API
# compatibility
# north_vector : a sequence of floats
# A vector defining the 'up' direction in the plot. This
# option sets the orientation of the slicing plane. If not
# set, an arbitrary grid-aligned north-vector is chosen.
# no_ghost: bool, optional
# Optimization option for off-axis cases. If True, homogenized bricks will
# extrapolate out from grid instead of interpolating from
# ghost zones that have to first be calculated. This can
# lead to large speed improvements, but at a loss of
# accuracy/smoothness in resulting image. The effects are
# less notable when the transfer function is smooth and
# broad. Default: True
# source : yt.data_objects.data_containers.YTSelectionContainer, optional
# If specified, this will be the data source used for selecting regions
# to project.
#
# Examples
# --------
# >>> L = np.array([0.5, 1.0, 0.75])
# >>> szprj.off_axis(L, center="c", width=(2.0, "Mpc"))
# """
# wd = self.ds.coordinates.sanitize_width(L, width, depth)
# w = tuple(el.in_units('code_length').v for el in wd)
# ctr, dctr = self.ds.coordinates.sanitize_center(center, L)
# res = (nx, nx)
#
# if source is None:
# source = self.ds
#
# beta_par = generate_beta_par(L)
# self.ds.add_field(("gas", "beta_par"), function=beta_par, units="g/cm**3")
# setup_sunyaev_zeldovich_fields(self.ds)
#
# dens = off_axis_projection(source, ctr, L, w, res, "density",
# north_vector=north_vector, no_ghost=no_ghost)
# Te = off_axis_projection(source, ctr, L, w, res, "t_sz",
# north_vector=north_vector, no_ghost=no_ghost) / dens
# bpar = off_axis_projection(source, ctr, L, w, res, "beta_par",
# north_vector=north_vector, no_ghost=no_ghost) / dens
# omega1 = off_axis_projection(source, ctr, L, w, res, "t_squared",
# north_vector=north_vector, no_ghost=no_ghost) / dens
# omega1 = omega1 / (Te * Te) - 1.
# if self.high_order:
# bperp2 = off_axis_projection(source, ctr, L, w, res, "beta_perp_squared",
# north_vector=north_vector, no_ghost=no_ghost) / dens
# sigma1 = off_axis_projection(source, ctr, L, w, res, "t_beta_par",
# north_vector=north_vector, no_ghost=no_ghost) / dens
# sigma1 = sigma1 / Te - bpar
# kappa1 = off_axis_projection(source, ctr, L, w, res, "beta_par_squared",
# north_vector=north_vector, no_ghost=no_ghost) / dens
# kappa1 -= bpar
# else:
# bperp2 = np.zeros((nx, nx))
# sigma1 = np.zeros((nx, nx))
# kappa1 = np.zeros((nx, nx))
# tau = sigma_thompson * dens * self.mueinv / mh
#
# self.bounds = (-0.5 * wd[0], 0.5 * wd[0], -0.5 * wd[1], 0.5 * wd[1])
# self.dx = wd[0] / nx
# self.dy = wd[1] / nx
# self.nx = nx
#
# self._compute_intensity(np.array(tau), np.array(Te), np.array(bpar),
# np.array(omega1), np.array(sigma1),
# np.array(kappa1), np.array(bperp2))
#
# self.ds.field_info.pop(("gas", "beta_par"))
#
# def _compute_intensity(self, tau, Te, bpar, omega1, sigma1, kappa1, bperp2):
#
# # Bad hack, but we get NaNs if we don't do something like this
# small_beta = np.abs(bpar) < 1.0e-20
# bpar[small_beta] = 1.0e-20
#
# comm = communication_system.communicators[-1]
#
# nx, ny = self.nx, self.nx
# signal = np.zeros((self.num_freqs, nx, ny))
# xo = np.zeros(self.num_freqs)
#
# k = int(0)
#
# start_i = comm.rank * nx // comm.size
# end_i = (comm.rank + 1) * nx // comm.size
#
# pbar = get_pbar("Computing SZ signal.", nx * nx)
#
# for i in range(start_i, end_i):
# for j in range(ny):
# xo[:] = self.xinit[:]
# SZpack.compute_combo_means(xo, tau[i, j], Te[i, j],
# bpar[i, j], omega1[i, j],
# sigma1[i, j], kappa1[i, j], bperp2[i, j])
# signal[:, i, j] = xo[:]
# pbar.update(k)
# k += 1
#
# signal = comm.mpi_allreduce(signal)
#
# pbar.finish()
#
# for i, field in enumerate(self.freq_fields):
# self.data[field] = I0 * self.xinit[i]**3 * signal[i, :, :]
# self.data["Tau"] = self.ds.arr(tau, "dimensionless")
# self.data["TeSZ"] = self.ds.arr(Te, "keV")
#
# def write_fits(self, filename, sky_scale=None, sky_center=None, clobber=True):
# r""" Export images to a FITS file. Writes the SZ distortion in all
# specified frequencies as well as the mass-weighted temperature and the
# optical depth. Distance units are in kpc, unless *sky_center*
# and *scale* are specified.
#
# Parameters
# ----------
# filename : string
# The name of the FITS file to be written.
# sky_scale : tuple
# Conversion between an angle unit and a length unit, if sky
# coordinates are desired, e.g. (1.0, "arcsec/kpc")
# sky_center : tuple, optional
# The (RA, Dec) coordinate in degrees of the central pixel. Must
# be specified with *sky_scale*.
# clobber : boolean, optional
# If the file already exists, do we overwrite?
#
# Examples
# --------
# >>> # This example just writes out a FITS file with kpc coords
# >>> szprj.write_fits("SZbullet.fits", clobber=False)
# >>> # This example uses sky coords
# >>> sky_scale = (1., "arcsec/kpc") # One arcsec per kpc
# >>> sky_center = (30., 45., "deg")
# >>> szprj.write_fits("SZbullet.fits", sky_center=sky_center, sky_scale=sky_scale)
# """
# from yt.visualization.fits_image import FITSImageData
#
# dx = self.dx.in_units("kpc")
# dy = dx
#
# w = _astropy.pywcs.WCS(naxis=2)
# w.wcs.crpix = [0.5 * (self.nx + 1)] * 2
# w.wcs.cdelt = [dx.v, dy.v]
# w.wcs.crval = [0.0, 0.0]
# w.wcs.cunit = ["kpc"] * 2
# w.wcs.ctype = ["LINEAR"] * 2
#
# fib = FITSImageData(self.data, fields=self.data.keys(), wcs=w)
# if sky_scale is not None and sky_center is not None:
# fib.create_sky_wcs(sky_center, sky_scale)
# fib.writeto(filename, clobber=clobber)
#
# @parallel_root_only
# def write_png(self, filename_prefix, cmap_name=None,
# axes_units="kpc", log_fields=None):
# r""" Export images to PNG files. Writes the SZ distortion in all
# specified frequencies as well as the mass-weighted temperature and the
# optical depth. Distance units are in kpc.
#
# Parameters
# ----------
# filename_prefix : string
# The prefix of the image filenames.
#
# Examples
# --------
# >>> szprj.write_png("SZsloshing")
# """
# if cmap_name is None:
# cmap_name = ytcfg.get("yt", "default_colormap")
#
# import matplotlib
# matplotlib.use('Agg')
# import matplotlib.pyplot as plt
# if log_fields is None:
# log_fields = {}
# ticks_font = matplotlib.font_manager.FontProperties(family='serif', size=16)
# extent = tuple([bound.in_units(axes_units).value for bound in self.bounds])
# for field, image in self.items():
# data = image.copy()
# vmin, vmax = image.min(), image.max()
# negative = False
# crossover = False
# if vmin < 0 and vmax < 0:
# data *= -1
# negative = True
# if field in log_fields:
# log_field = log_fields[field]
# else:
# log_field = True
# if log_field:
# formatter = matplotlib.ticker.LogFormatterMathtext()
# norm = matplotlib.colors.LogNorm()
# if vmin < 0 and vmax > 0:
# crossover = True
# linthresh = min(vmax, -vmin) / 100.
# norm = matplotlib.colors.SymLogNorm(linthresh,
# vmin=vmin, vmax=vmax)
# else:
# norm = None
# formatter = None
# filename = filename_prefix + "_" + field + ".png"
# cbar_label = self.display_names[field]
# units = self.data[field].units.latex_representation()
# if units is not None and units != "":
# cbar_label += r'$\ \ (' + units + r')$'
# fig = plt.figure(figsize=(10.0, 8.0))
# ax = fig.add_subplot(111)
# cax = ax.imshow(data.d, norm=norm, extent=extent, cmap=cmap_name, origin="lower")
# for label in ax.get_xticklabels():
# label.set_fontproperties(ticks_font)
# for label in ax.get_yticklabels():
# label.set_fontproperties(ticks_font)
# ax.set_xlabel(r"$\mathrm{x\ (%s)}$" % axes_units, fontsize=16)
# ax.set_ylabel(r"$\mathrm{y\ (%s)}$" % axes_units, fontsize=16)
# cbar = fig.colorbar(cax, format=formatter)
# cbar.ax.set_ylabel(cbar_label, fontsize=16)
# if negative:
# cbar.ax.set_yticklabels(["-" + label.get_text()
# for label in cbar.ax.get_yticklabels()])
# if crossover:
# yticks = list(-10**np.arange(np.floor(np.log10(-vmin)),
# np.rint(np.log10(linthresh)) - 1, -1)) + [0] + \
# list(10**np.arange(np.rint(np.log10(linthresh)),
# np.ceil(np.log10(vmax)) + 1))
# cbar.set_ticks(yticks)
# for label in cbar.ax.get_yticklabels():
# label.set_fontproperties(ticks_font)
# fig.tight_layout()
# plt.savefig(filename)
#
# @parallel_root_only
# def write_hdf5(self, filename):
# r"""Export the set of S-Z fields to a set of HDF5 datasets.
#
# Parameters
# ----------
# filename : string
# This file will be opened in "write" mode.
#
# Examples
# --------
# >>> szprj.write_hdf5("SZsloshing.h5")
# """
# for field, data in self.items():
# data.write_hdf5(filename, dataset_name=field)
#
# def keys(self):
# return self.data.keys()
#
# def items(self):
# return self.data.items()
#
# def values(self):
# return self.data.values()
#
# def has_key(self, key):
# return key in self.data.keys()
#
# def __getitem__(self, key):
# return self.data[key]
#
# @property
# def shape(self):
# return (self.nx, self.nx)
| 46.893617 | 101 | 0.520995 | [
"MIT"
] | weiguangcui/pymsz | pymsz/SZpack_models.py | 24,244 | Python |
"""
Example file on how to display a networkx graph on a browser
"""
import json
import networkx as nx
from networkx.readwrite import json_graph
import http_server
import random
# https://www.alanzucconi.com/2015/11/03/recreational-maths-python/
# Converts a number in the list of its digits
def int_to_list(n):
# The number is firstly converted into a string using str(n)
# map -> converts each character of the string into an integer
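    # e.g. list(int_to_list(1984)) -> [1, 9, 8, 4]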
return map(int, str(n))
# https://www.alanzucconi.com/2015/11/01/interactive-graphs-in-the-browser/
def toy_graph():
G = nx.DiGraph()
for i in range(1, 1000):
tree = list(set(list(int_to_list(random.randint(1, i)))))
# Add the entire sequence to the tree
for j in range(0, len(tree) - 1):
G.add_edge(tree[j], tree[j + 1])
for n in G:
        G.nodes[n]['name'] = n  # note: G.node was removed in networkx 2.4; use G.nodes
d = json_graph.node_link_data(G)
json.dump(d, open('graph/graph.json', 'w'))
# The http_server is just a short piece of code that used to be in the
# examples directory of the networkx library.
http_server.load_url('graph/graph.html')
if __name__ == '__main__':
toy_graph()
| 27.186047 | 75 | 0.673225 | [
"MIT"
] | dborbor/InteractiveGraph | program.py | 1,169 | Python |
# TODO: not very happy with this state of affairs of having unencrypted passwords (use keyring ?)
ALYX_PWD = 'alyxpassword'
HTTP_DATA_SERVER_PWD = 'httpserverpass'  # password for the flat iron data server for IBL
| 52 | 97 | 0.788462 | [
"MIT"
] | GaelleChapuis/ibllib | python/oneibl/params_secret.py | 208 | Python |
from typing import Optional, List
from pydantic import BaseModel
class LogSetting(BaseModel):
LOG_LEVEL: Optional[str] = 'DEBUG'
LOG_PATH: str
class ServiceSetting(BaseModel):
# openapi swagger
INCLUDE_IN_SCHEMA: Optional[bool] = True
# socket.io on
SOCKET_IO_ON: Optional[bool] = False
class SocketIOSetting(BaseModel):
SOCKET_IO_NAMESPACES: Optional[List[str]] = ['/']
SOCKET_IO_MOUNT: Optional[str] = '/'
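# A minimal usage sketch (the values below are made up, not project defaults):
#   log_conf = LogSetting(LOG_PATH='/var/log/app.log', LOG_LEVEL='INFO')
#   sio_conf = SocketIOSetting(SOCKET_IO_NAMESPACES=['/chat'])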
| 20.363636 | 53 | 0.712054 | [
"MIT"
] | panla/fastapi_sockets | conf/settings.py | 448 | Python |
# --------------
# Importing header files
import numpy as np
# print(path)
# Path of the file has been stored in variable called 'path'
#New record
new_record=[[50, 9, 4, 1, 0, 0, 40, 0]]
#Code starts here
census = np.array([])
data=np.genfromtxt(path, delimiter=",", skip_header=1)
census = np.concatenate((data , new_record))
# print(census)
# --------------
#Code starts here
age = census[:,0]
max_age = np.max(age)
min_age = np.min(age)
age_mean = np.mean(age)
age_std = np.std(age)
# --------------
# Code starts here
# race_0 = np.array([])
# race_1 = np.array([])
# race_2 = np.array([])
# race_3 = np.array([])
# race_4 = np.array([])
# for i in range(0,census.shape[0]):
# if int(census[i,2]) == 0:
# race_0 = np.concatenate(race_0 , np.array([census[i , :]]))
# elif int(census[i,2]) == 1:
# race_1 = np.concatenate(race_1 , np.array([census[i , :]]))
# elif int(census[i,2]) == 2:
# race_2 = np.concatenate(race_2 , np.array([census[i , :]]))
# elif int(census[i,2]) == 3:
# race_3 = np.concatenate(race_3 , np.array([census[i , :]]))
# else:
# race_4 = np.concatenate(race_4 , np.array([census[i , :]]))
# print('r0 \n' , race_0)
# print(census[0 , :])
# len_0 , len_1 , len_2 , len_3 , len_4 = len(race_0) , len(race_1) , len(race_2) , len(race_3) , len(race_4)
# minority_race = np.min(np.array([len_0 , len_1 , len_2 , len_3 , len_4]))
# race_0 = np.array([])
# for i in range(0,census.shape[0]):
# if int(census[i,2]) == 0:
# race_0 = np.append(race_0 , np.array([census[i , :]]))
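# boolean-mask filtering: keep the rows whose race column (index 2) equals each race code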
race_0=census[census[:,2]==0]
race_1=census[census[:,2]==1]
race_2=census[census[:,2]==2]
race_3=census[census[:,2]==3]
race_4=census[census[:,2]==4]
len_0=len(race_0)
len_1=len(race_1)
len_2=len(race_2)
len_3=len(race_3)
len_4=len(race_4)
Race_list=[len_0,len_1,len_2,len_3,len_4]
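# the race code (0-4) with the fewest records is taken as the minority race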
minority_race=Race_list.index(min(Race_list))
print(minority_race)
# --------------
#Code starts here
senior_citizens = census[census[:,0]>60]
working_hours_sum = senior_citizens.sum(axis=0)[6]
senior_citizens_len = len(senior_citizens)
avg_working_hours = (working_hours_sum)/(senior_citizens_len)
print(avg_working_hours)
# --------------
#Code starts here
high = census[census[:,1]>10]
low = census[census[:,1]<=10]
avg_pay_high = high.mean(axis=0)[7]
avg_pay_low = low.mean(axis=0)[7]
print(avg_pay_high, avg_pay_low)
| 26.319149 | 110 | 0.605497 | [
"MIT"
] | amitgupta98/ga-learner-dsmp-repo | Make-Sense-of-census/code.py | 2,474 | Python |
"""
Generate and operate on geometric elements and meshes.
"""
from __future__ import absolute_import
from .geometry import Polygon, Square, Prism, Tesseroid, Sphere
from .geometry import PolygonalPrism
from .mesh import SquareMesh, PointGrid, PrismRelief, PrismMesh, TesseroidMesh
| 31.444444 | 78 | 0.812721 | [
"BSD-3-Clause"
] | Claudiadtx/Pelotas | fatiando/mesher/__init__.py | 283 | Python |
import subprocess
import gdspy
import shutil
from utils import *
from codegen.caravel_codegen import generate_openlane_files
from urllib.parse import urlparse
import os, json
REQUIRED_KEYS_SINGLE = ["project", "caravel_test", "module_test", "wrapper_proof", "openlane", "gds"]
class Project(object):
def __init__(self, args, repo, commit, required_interfaces, system_config):
self.args = args
self.system_config = system_config
self.repo = repo # the repo on github
self.commit = commit # not strictly a commit, could be a branch
project_dir = self.system_config['configuration']['project_directory']
# the project's directory is made by joining project dir to last part of the repo url
parsed = urlparse(repo)
self.directory = os.path.join(project_dir, parsed.path.rpartition('/')[-1])
if args.clone_repos:
self.clone_repo()
self.gitsha = get_git_sha(self.directory)
yaml_file = os.path.join(self.directory, 'info.yaml')
self.config = parse_config(yaml_file, REQUIRED_KEYS_SINGLE)
self.id = int(self.config['caravel_test']['id'])
self.module_name = self.config['caravel_test']['module_name']
self.interfaces = required_interfaces + self.config['interfaces']
self.gds_filename = os.path.join(self.config['gds']['directory'], self.config['gds']['gds_filename'])
self.lef_filename = os.path.join(self.config['gds']['directory'], self.config['gds']['lef_filename'])
self.lvs_filename = os.path.join(self.config['gds']['directory'], self.config['gds']['lvs_filename'])
self.title = self.config['project']['title']
self.author = self.config['project']['author']
def __str__(self):
return "%2d %-30s : %s" % (self.id, self.title, self.directory)
def run_tests(self):
# print out info about the project
if self.args.dump_hash:
logging.info("%-30s %-20s %s %s" % (self.author, self.title, self.gitsha, self.repo))
else:
logging.info(self)
if self.args.test_all or self.args.test_module:
self.test_module()
if self.args.test_all or self.args.prove_wrapper:
self.prove_wrapper()
if self.args.test_all or self.args.test_caravel:
self.test_caravel()
# don't run this as part of test-all
if self.args.test_caravel_gl:
self.test_caravel(gl=True)
if self.args.test_all or self.args.test_gds:
self.test_gds()
# currently broken, waiting on testing a new netgen
if self.args.test_all or self.args.test_lvs:
self.test_lvs()
if self.args.test_all or self.args.test_ports:
self.validate_ports()
if self.args.test_all or self.args.test_tristate_z:
self.test_tristate_z()
if self.args.test_all or self.args.test_git:
self.test_git_match()
def clone_repo(self):
clone_repo(self.repo, self.commit, self.directory, self.args.force_delete)
    # Hack: better to add this to the info.yaml, but for now we do it by
    # searching all the source files (not all are called wrapper.v)
def get_top_module(self):
paths = self.get_module_source_paths(absolute=False)
top_instance = 'module %s' % self.config['caravel_test']['module_name']
# now check each source for the top_name
for path in paths:
abs_path = os.path.abspath(os.path.join(self.directory, path))
with open(abs_path) as fh:
if top_instance in fh.read():
return path
else:
logging.error("couldn't find top module for %s" % self)
exit(1)
def get_module_source_paths(self, absolute=True):
paths = []
for path in self.config['source']:
if absolute:
paths.append(os.path.abspath(os.path.join(self.directory, path)))
else:
paths.append(path)
return paths
def test_module(self):
conf = self.config["module_test"]
cwd = os.path.join(self.directory, conf["directory"])
cmd = ["make", "-f", conf["makefile"], conf["recipe"]]
logging.info("attempting to run %s in %s" % (cmd, cwd))
try:
subprocess.run(cmd, cwd=cwd, check=True)
except subprocess.CalledProcessError as e:
logging.error(e)
exit(1)
logging.info("test pass")
def test_git_match(self):
self.gitsha = get_git_sha(self.directory)
if self.gitsha != self.commit:
logging.error("gitsha on disk doesn't match config")
exit(1)
else:
logging.info("git pass")
def prove_wrapper(self):
# TODO need to also check properties.sby - could have a few things to cksum and make wrapper_cksum able to check a few files
conf = self.config["wrapper_proof"]
cwd = os.path.join(self.directory, conf["directory"])
cmd = ["sby", "-f", conf["sby"]]
logging.info("attempting to run %s in %s" % (cmd, cwd))
try:
subprocess.run(cmd, cwd=cwd, check=True)
except subprocess.CalledProcessError as e:
logging.error(e)
exit(1)
logging.info("proof pass")
def copy_project_to_caravel_rtl(self):
src = self.directory
dst = os.path.join(self.system_config['caravel']['rtl_dir'], os.path.basename(self.directory))
try_copy_tree(src, dst, self.args.force_delete)
def copy_gl(self):
dst = os.path.join(self.system_config['caravel']['gl_dir'], self.config['gds']['lvs_filename'])
src = os.path.join(self.directory, self.config['gds']['directory'], self.config['gds']['lvs_filename'])
shutil.copyfile(src, dst)
def test_caravel(self, gl=False):
conf = self.config["caravel_test"]
# copy src into caravel verilog dir
self.copy_project_to_caravel_rtl()
# generate includes & instantiate inside user project wrapper
# could this be removed and just do it in collect.py ?
user_project_wrapper_path = os.path.join(self.system_config['caravel']['rtl_dir'], "user_project_wrapper.v")
caravel_includes_path = os.path.join(self.system_config['caravel']['rtl_dir'], "uprj_netlists.v")
user_project_includes_path = os.path.join(self.system_config['caravel']['rtl_dir'], "user_project_includes.v")
interface_definitions = {
**self.system_config['interfaces']['required'],
**self.system_config['interfaces']['optional']
}
generate_openlane_files(
[self],
interface_definitions,
user_project_wrapper_path,
user_project_includes_path,
caravel_includes_path,
self.args.openram
)
# copy test inside caravel
src = os.path.join(self.directory, conf["directory"])
dst = os.path.join(self.system_config['caravel']['test_dir'], conf["directory"])
try_copy_tree(src, dst, self.args.force_delete)
# set up env
test_env = os.environ.copy()
test_env["GCC_PATH"] = self.system_config['env']['GCC_PATH']
test_env["GCC_PREFIX"] = self.system_config['env']['GCC_PREFIX']
test_env["PDK_PATH"] = self.system_config['env']['PDK_PATH']
test_env["CARAVEL_ROOT"] = os.path.join(self.system_config['caravel']['root'], 'caravel')
cwd = os.path.join(self.system_config['caravel']['test_dir'], conf["directory"])
cmd = ["make", conf["recipe"]]
# if gl, use the gl_recipe
if gl:
cmd = ["make", conf["gl_recipe"]]
logging.info("attempting to run %s in %s" % (cmd, cwd))
# run makefile
try:
subprocess.run(cmd, cwd=cwd, env=test_env, check=True)
except subprocess.CalledProcessError as e:
logging.error(e)
exit(1)
logging.info("caravel test pass")
def get_gds_size(self):
conf = self.config["gds"]
gds_file = os.path.abspath(os.path.join(self.directory, conf["directory"], conf["gds_filename"]))
gdsii = gdspy.GdsLibrary(infile=gds_file)
toplevel = gdsii.top_level()[0]
return toplevel.get_bounding_box()[1]
def test_gds(self):
if 'waive_gds' in self.config['project']:
logging.info("skipping GDS in this test due to %s" % self.config['project']['waive_gds'])
return
conf = self.config["gds"]
gds_file = os.path.abspath(os.path.join(self.directory, conf["directory"], conf["gds_filename"]))
gdsii = gdspy.GdsLibrary(infile=gds_file)
toplevel = gdsii.top_level()[0]
# nothing on metal 5
if self.system_config["tests"]["gds"]["metal5_id"] in toplevel.get_layers():
logging.error("%s has layers on metal5" % gds_file)
exit(1)
logging.info("GDS pass")
def test_lvs(self):
if 'waive_lvs' in self.config['project']:
logging.info("skipping LVS in this test due to %s" % self.config['project']['waive_lvs'])
return
module_name = self.config['caravel_test']['module_name']
conf = self.config["gds"]
# given
lvs_test_dir = 'lvstest'
try_mkdir(lvs_test_dir, self.args.force_delete)
# copy the gds and verilog to local directory
gds_file = os.path.abspath(os.path.join(self.directory, conf["directory"], conf["gds_filename"]))
powered_verilog = os.path.abspath(os.path.join(self.directory, conf["directory"], conf["lvs_filename"]))
shutil.copyfile(gds_file, os.path.join(lvs_test_dir, conf["gds_filename"]))
shutil.copyfile(powered_verilog, os.path.join(lvs_test_dir, conf["lvs_filename"]))
gds_file = conf["gds_filename"]
powered_verilog = conf["lvs_filename"]
# generated files
ext_file = module_name + ".ext"
log_file = module_name + ".log"
spice_file = module_name + '.spice'
netgen_log_file = module_name + '.netgen_log'
netgen_json = module_name + '.json'
extract_tcl = 'extract.tcl'
# config files
pdk_path = self.system_config['lvs']['PDK_PATH']
openlane_root = self.system_config['lvs']['OPENLANE']
logging.info("using PDK %s and OpenLANE %s" % (pdk_path, openlane_root))
# env
test_env = os.environ.copy()
test_env["MAGIC_EXT_USE_GDS"] = "1"
test_env["PDKPATH"] = pdk_path
netgen_setup_file = os.path.join(pdk_path, 'libs.tech', 'netgen', 'sky130A_setup.tcl')
cwd = lvs_test_dir
# create tcl script for magic
tcl_contents = """
gds read %s;
load %s -dereference
extract do local;
extract no capacitance;
extract no coupling;
extract no resistance;
extract no adjust;
extract unique;
extract;
ext2spice lvs;
ext2spice %s;
feedback save %s;
exit;
""" % (gds_file, module_name, ext_file, log_file)
with open(os.path.join(lvs_test_dir, extract_tcl), 'w') as tcl:
tcl.write(tcl_contents)
magic_rcfile = os.path.join(pdk_path, 'libs.tech', 'magic', 'sky130A.magicrc')
cmd = ['magic', '-rcfile', magic_rcfile, '-noc', '-dnull', extract_tcl]
logging.info(' '.join(cmd))
subprocess.run(cmd, cwd=cwd, env=test_env, check=True)
left_side = '%s %s' % (spice_file, module_name)
right_side = '%s %s' % (powered_verilog, module_name)
# only way to get this quoted stuff to work was to use shell=True in the subprocess call
cmd = 'netgen -batch lvs "%s" "%s" %s %s -json' % (left_side, right_side, netgen_setup_file, netgen_log_file)
logging.info(cmd)
subprocess.run(cmd, env=test_env, cwd=cwd, check=True, shell=True)
lvs_count_cmd = os.path.join(openlane_root, 'scripts', 'count_lvs.py')
cmd = [lvs_count_cmd, '--file', netgen_json]
logging.info(cmd)
# lvs count command doesn't return valid exit codes
try:
result = subprocess.run(cmd, cwd=cwd, capture_output=True)
except subprocess.CalledProcessError as e:
logging.error(e)
exit(1)
# so search for string in output
if 'Total errors = 0' in str(result.stdout):
logging.info("LVS passed")
elif 'Total errors = 6' in str(result.stdout) and 'unmatched pins = 6' in str(result.stdout):
logging.info("LVS passed (waived 6 unconnected power pins)")
else:
logging.error(result.stdout)
exit(1)
def test_tristate_z(self):
# env
test_env = os.environ.copy()
test_env["POWERED_VERILOG"] = powered_verilog = os.path.abspath(os.path.join(self.directory, self.config["gds"]["directory"], self.config["gds"]["lvs_filename"]))
test_env["TOPLEVEL"] = self.config["caravel_test"]["module_name"]
test_env["PDK_ROOT"] = self.system_config["lvs"]["PDK_ROOT"]
cmd = ["make", "clean", "test"]
cwd = "buffertest"
logging.info("attempting to run %s in %s" % (cmd, cwd))
# run makefile
try:
subprocess.run(cmd, cwd=cwd, env=test_env, check=True)
except subprocess.CalledProcessError as e:
logging.error(e)
exit(1)
logging.info("tristate z test pass")
def validate_ports(self):
# assume first source is top, bad idea
sources = ""
for source_file in self.config['source']:
sources += os.path.join(self.directory, source_file)
sources += " "
top = self.config['caravel_test']['module_name']
# use yosys to parse the verilog and dump a list of ports
json_file = '/tmp/ports.json'
os.system("yosys -qp 'read_verilog -sv %s; hierarchy -top %s ; proc; json -o %s x:*' -DUSE_POWER_PINS=1 -DMPRJ_IO_PADS=38" % (sources, top, json_file))
with open(json_file) as fh:
ports = json.load(fh)
module_ports = ports['modules'][self.config['caravel_test']['module_name']]['ports']
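        # yosys' json output is roughly:
        #   {"modules": {"<top>": {"ports": {"<name>": {"direction": ..., "bits": [...]}}}}}
        # each port below is checked by name and by the width of its "bits" list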
# check required ports
for port_type, port_def in self.system_config['interfaces']['required'].items():
for port_name, bits in port_def.items():
# assert port is there
if port_name not in module_ports:
logging.error("required port %s not in interface" % port_name)
exit(1)
# and it's the correct length
if len(module_ports[port_name]['bits']) != bits:
logging.error("required port %s is wrong size" % port_name)
exit(1)
# delete it
del module_ports[port_name]
# for all the optional ports defined in the projects yaml
for optional_port in self.config['interfaces']:
# look up its definition
for port_name, bits in self.system_config['interfaces']['optional'][optional_port].items():
# assert port is there
if port_name not in module_ports:
logging.error("optional port %s was set but %s is not in interface" % (optional_port, port_name))
exit(1)
# and it's the correct length
if len(module_ports[port_name]['bits']) != bits:
logging.error("optional port %s is wrong size" % (port_name))
exit(1)
# delete it
del module_ports[port_name]
# module def should now be empty
if len(module_ports) != 0:
logging.error("additional interfaces found in module")
logging.error(module_ports)
exit(1)
logging.info("test ports pass")
| 39.708333 | 173 | 0.600333 | [
"Apache-2.0"
] | mattvenn/multi_project_tools | project.py | 16,201 | Python |
"""
MX Platform API
The MX Platform API is a powerful, fully-featured API designed to make aggregating and enhancing financial data easy and reliable. It can seamlessly connect your app or website to tens of thousands of financial institutions. # noqa: E501
The version of the OpenAPI document: 0.1.0
Generated by: https://openapi-generator.tech
"""
import sys
import unittest
import mx_platform_python
from mx_platform_python.model.statement_response import StatementResponse
class TestStatementResponse(unittest.TestCase):
"""StatementResponse unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testStatementResponse(self):
"""Test StatementResponse"""
# FIXME: construct object with mandatory attributes with example values
# model = StatementResponse() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| 25.972222 | 242 | 0.717647 | [
"MIT"
] | mxenabled/mx-platform-python | test/test_statement_response.py | 935 | Python |
"""Criteria for parametric methods.
.. topic:: This module provides criteria to automatically select order in
parametric PSD estimate or pseudo spectrum estimates (e.g, music).
Some criteria such as the AIC criterion helps to chose the order of PSD
models such as the ARMA model. Nevertheless, it is difficult to estimate
correctly the order of an ARMA model even by using these criteria. The
reason being that even the Akaike criteria (AIC) does not provide the
proper order with a probability of 1 with infinite samples.
The order choice is related to an expertise of the signal. There is no
exact criteria. However, they may provide useful information.
AIC, AICc, KIC and AKICc are based on information theory. They attempt
to balance the complexity (or length) of the model against how well the
model fits the data. AIC and KIC are biased estimates of the asymmetric
and the symmetric Kullback-Leibler divergence respectively. AICc and
AKICc attempt to correct the bias.
There are also criteria related to eigen analysis, which takes as input
the eigen values of any PSD estimate method.
.. rubric:: Example
.. plot::
:width: 80%
:include-source:
from spectrum import aryule, AIC, marple_data
from pylab import plot, arange
order = arange(1, 25)
rho = [aryule(marple_data, i, norm='biased')[1] for i in order]
plot(order, AIC(len(marple_data), rho, order), label='AIC')
:References: Abd-Krim Seghouane and Maiza Bekara
"A small sample model selection criterion based on Kullback's symmetric
divergence", IEEE Transactions on Signal Processing,
Vol. 52(12), pp 3314-3323, Dec. 2004
"""
class Criteria(object):
"""Criteria class for an automatic selection of ARMA order.
Available criteria are
    ======= =====================
    name    description
    ======= =====================
    AIC     see :func:`AIC`
    AICc    see :func:`AICc`
    KIC     see :func:`KIC`
    AKICc   see :func:`AKICc`
    FPE     see :func:`FPE`
    MDL     see :func:`MDL`
    CAT     see :func:`CAT`
    ======= =====================
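    .. rubric:: Example
    A minimal sketch (the rho values below are made up)::
        crit = Criteria(name='AIC', N=64)
        for k, rho_k in enumerate([1.0, 0.7, 0.5, 0.45], start=1):
            keep_going = crit(rho=rho_k, k=k)
            # crit.data holds the criterion value for the current order k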
"""
valid_criteria_names = ['AIC', 'AICc', 'KIC', 'FPE', 'AKICc', 'MDL']
error_incorrect_name = 'Invalid name provided. Correct names are %s ' \
% valid_criteria_names
error_no_criteria_found = 'No names match the valid criteria names (%s)' \
% valid_criteria_names
def __init__(self, name, N):
"""Create a criteria object
:param name: a string or list of strings containing valid criteria
method's name
:param int N: size of the data sample.
"""
#valid attributes
self.__name = None
self.name = name
self.__N = N
self.__rho = 0
self.__k = None
self.__old_data = None
self.__data = None
self.__norm = True
def _getName(self):
return self.__name
def _setName(self, name):
assert isinstance(name, str), 'name must be a string'
if name in self.valid_criteria_names:
self.__name = name
else:
raise ValueError(self.error_no_criteria_found)
name = property(fget=_getName, fset=_setName, doc="Getter/Setter for the criteria name")
def _getData(self):
return self.__data
def _setData(self, data):
        # save the current data value in old_data if there is something to save
if self.data is None:
self.__data = data
self.__old_data = 2.*data
else:
self.__old_data = self.data
self.__data = data
data = property(fget=_getData, fset=_setData, doc="Getter/Setter for the criteria output")
def _getOldData(self):
return self.__old_data
old_data = property(fget=_getOldData, doc="Getter/Setter for the previous value")
def _getK(self):
return self.__k
k = property(fget=_getK, doc="Getter for k the order of evaluation")
def _getN(self):
return self.__N
def _setN(self, N):
assert N > 0, 'N must be positive'
self.__N = N
N = property(fget=_getN, fset=_setN, doc="Getter/Setter for N")
def _getRho(self):
return self.__rho
def _setRho(self, rho):
self.__rho = rho
rho = property(fget=_getRho, fset=_setRho, doc="Getter/Setter for rho")
def __call__(self, rho=None, k=None, N=None, norm=True):
"""Call the criteria function corresponding to :attr:`name`."""
self.__norm = norm
if N is not None:
self.N = N
# we update rho only if it is needed (input different from self.rho)
# if such case, we also update k
if rho is not None:
self.rho = rho
if k is not None:
self.__k = k
self.__norm = norm
#used to check if the criteria is reached or not
f = eval(self.name)
self.data = f(self.N, self.rho, self.k)
# compare the new data with the previous one and return
# False if the new value is larger so as to stop the iteration
if self.old_data is not None and self.data is not None:
if self.data > self.old_data:
return False
else:
return True
return True
def AIC(N, rho, k):
r"""Akaike Information Criterion
:param rho: rho at order k
:param N: sample size
:param k: AR order.
If k is the AR order and N the size of the sample, then Akaike criterion is
.. math:: AIC(k) = \log(\rho_k) + 2\frac{k+1}{N}
::
AIC(64, [0.5,0.3,0.2], [1,2,3])
:validation: double checked versus octave.
"""
from numpy import log, array
#k+1 #todo check convention. agrees with octave
res = N * log(array(rho)) + 2.* (array(k)+1)
return res
def AICc(N, rho, k, norm=True):
r"""corrected Akaike information criterion
    .. math:: AICc(k) = \log(\rho_k) + 2 \frac{k+1}{N-k-2}
:validation: double checked versus octave.
"""
from numpy import log, array
p = k #todo check convention. agrees with octave
res = log(rho) + 2. * (p+1) / (N-p-2)
return res
def KIC(N, rho, k):
r"""Kullback information criterion
    .. math:: KIC(k) = \log(\rho_k) + 3 \frac{k+1}{N}
:validation: double checked versus octave.
"""
from numpy import log, array
res = log(rho) + 3. * (k+1.) /float(N)
return res
def AKICc(N, rho, k):
r"""approximate corrected Kullback information
    .. math:: AKICc(k) = \log(\rho_k) + \frac{k}{N(N-k)} + \left(3-\frac{k+2}{N}\right)\frac{k+1}{N-k-2}
"""
from numpy import log, array
p = k
res = log(rho) + p/N/(N-p) + (3.-(p+2.)/N) * (p+1.) / (N-p-2.)
return res
def FPE(N, rho, k=None):
r"""Final prediction error criterion
.. math:: FPE(k) = \frac{N + k + 1}{N - k - 1} \rho_k
:validation: double checked versus octave.
"""
#k #todo check convention. agrees with octave
fpe = rho * (N + k + 1.) / (N- k -1)
return fpe
def MDL(N, rho, k):
r"""Minimum Description Length
    .. math:: MDL(k) = N \log \rho_k + k \log N
:validation: results
"""
from numpy import log
#p = arange(1, len(rho)+1)
mdl = N* log(rho) + k * log(N)
return mdl
def CAT(N, rho, k):
r"""Criterion Autoregressive Transfer Function :
    .. math:: CAT(k) = \frac{1}{N} \sum_{j=1}^k \frac{1}{\bar{\rho}_j} - \frac{1}{\bar{\rho}_k}, \quad \bar{\rho}_j = \frac{N}{N-j}\rho_j
.. todo:: validation
"""
from numpy import zeros, arange
cat = zeros(len(rho))
for p in arange(1, len(rho)+1):
rho_p = float(N)/(N-p)*rho[p-1]
s = 0
for j in range(1, p+1):
rho_j = float(N)/(N-j)*rho[j-1]
s = s + 1./rho_j
#print(s, s/float(N), 1./rho_p)
cat[p-1] = s/float(N) - 1./rho_p
return cat
def aic_eigen(s, N):
r"""AIC order-selection using eigen values
:param s: a list of `p` sorted eigen values
:param N: the size of the input data. To be defined precisely.
:return:
* an array containing the AIC values
Given :math:`n` sorted eigen values :math:`\lambda_i` with
:math:`0 <= i < n`, the proposed criterion from Wax and Kailath (1985)
is:
.. math:: AIC(k) = -2(n-k)N \ln \frac{g(k)}{a(k)} + 2k(2n-k)
    where the arithmetic mean :math:`a(k)` is:
    .. math:: a(k) = \frac{1}{n-k}\sum_{i=k+1}^{n}\lambda_i
    and the geometric mean :math:`g(k)` is:
    .. math:: g(k) = \prod_{i=k+1}^{n} \lambda_i^{1/(n-k)}
The number of relevant sinusoids in the signal subspace is determined by
selecting the minimum of `AIC`.
.. seealso:: :func:`~spectrum.eigenfreq.eigen`
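    .. rubric:: Example
    A minimal sketch (the eigenvalues below are made up; pass a sorted
    numpy array)::
        import numpy as np
        s = np.array([12.4, 7.1, 0.4, 0.3, 0.2])
        crit = aic_eigen(s, N=64)
        order = int(np.argmin(crit))  # estimated signal subspace dimension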
.. todo:: define precisely the input parameter N. Should be the input
data length but when using correlation matrix (SVD), I suspect it
should be the length of the correlation matrix rather than the
original data.
:References:
* [Marple]_ Chap 13,
* [Wax]_
"""
import numpy as np
kaic = []
n = len(s)
for k in range(0, n-1):
ak = 1./(n-k) * np.sum(s[k+1:])
gk = np.prod(s[k+1:]**(1./(n-k)))
kaic.append( -2.*(n-k)*N * np.log(gk/ak) + 2.*k*(2.*n-k))
return kaic
def mdl_eigen(s, N):
r"""MDL order-selection using eigen values
:param s: a list of `p` sorted eigen values
:param N: the size of the input data. To be defined precisely.
:return:
* an array containing the AIC values
    .. math:: MDL(k) = -(n-k)N \ln \frac{g(k)}{a(k)} + \frac{1}{2}k(2n-k) \log N
.. seealso:: :func:`aic_eigen` for details
:References:
* [Marple]_ Chap 13,
* [Wax]_
"""
import numpy as np
kmdl = []
n = len(s)
for k in range(0, n-1):
ak = 1./(n-k) * np.sum(s[k+1:])
gk = np.prod(s[k+1:]**(1./(n-k)))
kmdl.append( -(n-k)*N * np.log(gk/ak) + 0.5*k*(2.*n-k)*np.log(N))
return kmdl
| 29.138643 | 94 | 0.591921 | [
"BSD-3-Clause"
] | butala/spectrum | src/spectrum/criteria.py | 9,878 | Python |
from tkinter import *
class MainFrame(Frame):
def __init__(self, parent):
super().__init__()
self['bd'] = 1
self['relief'] = SOLID
self['padx'] = 5
self['pady'] = 5
self.label_text1 = StringVar()
        self.label_text1.set('Enter your name')
self.text_text1 = StringVar()
# Widgets
self.label1 = Label(self, textvariable=self.label_text1).grid()
text1 = Entry(self, textvariable=self.text_text1).grid(pady=2)
        btn1 = Button(self, text='Click', command=self.executar).grid()
def executar(self):
if not self.text_text1.get():
            self.label_text1.set('You did not type anything')
        else:
            self.label_text1.set(f'Hello, {self.text_text1.get().capitalize()}!')
self.text_text1.set('')
root = Tk()
root.title('Pass a value')
icone = PhotoImage(file='images/icon.png')
root.iconphoto(False, icone)
root.geometry('200x110')
MainFrame(root).pack(pady=10)
root.mainloop() | 28.857143 | 79 | 0.612871 | [
"MIT"
] | JonasJF360/Curso_Tkinter | Aulas/app001/ex18-praticando_frame.py | 1,013 | Python |
__author__ = "Laurence Elliott - 16600748"
import os, math
import numpy as np
# sampleLens = []
# count = 0
# for file in os.listdir("corpus"):
# sample = np.load("corpus/" + file)
# zeroArr = [0]
# try:
# zerosInSample = np.isin(sample, zeroArr)
# zerosIndexes = np.where(zerosInSample)
# zerosStart = zerosIndexes[0][0]
# sample = sample[:zerosStart]
# sampleLen = len(sample)
# print(count, sampleLen)
# sampleLens.append(len(sample))
# except:
# sampleLen = len(sample)
# print(count, sampleLen)
# sampleLens.append(len(sample))
# count += 1
# # sample = np.concatenate((sample[0:200], sample[::-1][0:200]))
#
# minSampleLen = np.min(sampleLens)
# print(minSampleLen)
# Min sample length is 18 bytes D:
maxSequenceLen = 10000
lenSqrt = int(math.sqrt(maxSequenceLen))
print(lenSqrt)
count = 0
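# each sample is truncated to maxSequenceLen bytes, min-max scaled to 0-255 and
# reshaped into a lenSqrt x lenSqrt x 1 array (an image-like representation)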
for file in os.listdir("corpus"):
sample = np.load("corpus/" + file)[:maxSequenceLen]
sample = np.rint(((sample - np.min(sample)) /
(np.max(sample) - np.min(sample))) * 255)\
.astype('int').reshape(lenSqrt, lenSqrt, 1)
np.save("corpusTrunc/" + file, sample)
print(count)
count += 1 | 29.190476 | 69 | 0.607667 | [
"MIT"
] | laurencejbelliott/Ensemble_DL_Ransomware_Detector | bin-utf8-vec/truncateCorpus.py | 1,226 | Python |
# This file is part of the MapProxy project.
# Copyright (C) 2010, 2011 Omniscale <http://omniscale.de>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function, division
import sys
from collections import deque
from contextlib import contextmanager
import time
try:
import Queue
except ImportError:
import queue as Queue
from mapproxy.config import base_config
from mapproxy.grid import MetaGrid
from mapproxy.source import SourceError
from mapproxy.config import local_base_config
from mapproxy.compat.itertools import izip_longest
from mapproxy.util.lock import LockTimeout
from mapproxy.seed.util import format_seed_task, timestamp
from mapproxy.seed.cachelock import DummyCacheLocker, CacheLockedError
from mapproxy.seed.util import (exp_backoff, limit_sub_bbox,
status_symbol, BackoffError)
import logging
log = logging.getLogger(__name__)
NONE = 0
CONTAINS = -1
INTERSECTS = 1
# do not use multiprocessing on windows, it blows
# no lambdas, no anonymous functions/classes, no base_config(), etc.
if sys.platform == 'win32':
import threading
proc_class = threading.Thread
queue_class = Queue.Queue
else:
import multiprocessing
proc_class = multiprocessing.Process
queue_class = multiprocessing.Queue
class TileWorkerPool(object):
"""
Manages multiple TileWorker.
"""
def __init__(self, task, worker_class, size=2, dry_run=False, progress_logger=None):
self.tiles_queue = queue_class(size)
self.task = task
self.dry_run = dry_run
self.procs = []
self.progress_logger = progress_logger
conf = base_config()
for _ in range(size):
worker = worker_class(self.task, self.tiles_queue, conf)
worker.start()
self.procs.append(worker)
def process(self, tiles, progress):
if not self.dry_run:
while True:
try:
self.tiles_queue.put(tiles, timeout=5)
except Queue.Full:
alive = False
for proc in self.procs:
if proc.is_alive():
alive = True
break
if not alive:
                        log.warning('no workers left, stopping')
raise SeedInterrupted
continue
else:
break
if self.progress_logger:
self.progress_logger.log_step(progress)
def stop(self, force=False):
"""
Stop seed workers by sending None-sentinel and joining the workers.
:param force: Skip sending None-sentinel and join with a timeout.
For use when workers might be shutdown already by KeyboardInterrupt.
"""
if not force:
alives = 0
for proc in self.procs:
if proc.is_alive():
alives += 1
while alives:
# put None-sentinels to queue as long as we have workers alive
try:
self.tiles_queue.put(None, timeout=1)
alives -= 1
except Queue.Full:
alives = 0
for proc in self.procs:
if proc.is_alive():
alives += 1
if force:
timeout = 1.0
else:
timeout = None
for proc in self.procs:
proc.join(timeout)
class TileWorker(proc_class):
def __init__(self, task, tiles_queue, conf):
proc_class.__init__(self)
proc_class.daemon = True
self.task = task
self.tile_mgr = task.tile_manager
self.tiles_queue = tiles_queue
self.conf = conf
def run(self):
with local_base_config(self.conf):
try:
self.work_loop()
except KeyboardInterrupt:
return
except BackoffError:
return
class TileSeedWorker(TileWorker):
def work_loop(self):
while True:
tiles = self.tiles_queue.get()
if tiles is None:
return
with self.tile_mgr.session():
exp_backoff(self.tile_mgr.load_tile_coords, args=(tiles,),
max_repeat=100, max_backoff=600,
exceptions=(SourceError, IOError), ignore_exceptions=(LockTimeout, ))
class TileCleanupWorker(TileWorker):
def work_loop(self):
while True:
tiles = self.tiles_queue.get()
if tiles is None:
return
with self.tile_mgr.session():
self.tile_mgr.remove_tile_coords(tiles)
class SeedProgress(object):
def __init__(self, old_progress_identifier=None):
self.progress = 0.0
self.level_progress_percentages = [1.0]
self.level_progresses = None
self.level_progresses_level = 0
self.progress_str_parts = []
self.old_level_progresses = old_progress_identifier
def step_forward(self, subtiles=1):
self.progress += self.level_progress_percentages[-1] / subtiles
@property
def progress_str(self):
return ''.join(self.progress_str_parts)
@contextmanager
def step_down(self, i, subtiles):
if self.level_progresses is None:
self.level_progresses = []
self.level_progresses = self.level_progresses[:self.level_progresses_level]
self.level_progresses.append((i, subtiles))
self.level_progresses_level += 1
self.progress_str_parts.append(status_symbol(i, subtiles))
self.level_progress_percentages.append(self.level_progress_percentages[-1] / subtiles)
yield
self.level_progress_percentages.pop()
self.progress_str_parts.pop()
self.level_progresses_level -= 1
if self.level_progresses_level == 0:
self.level_progresses = []
def already_processed(self):
return self.can_skip(self.old_level_progresses, self.level_progresses)
def current_progress_identifier(self):
if self.already_processed() or self.level_progresses is None:
return self.old_level_progresses
return self.level_progresses[:]
@staticmethod
def can_skip(old_progress, current_progress):
"""
Return True if the `current_progress` is behind the `old_progress` -
when it isn't as far as the old progress.
>>> SeedProgress.can_skip(None, [(0, 4)])
False
>>> SeedProgress.can_skip([], [(0, 4)])
True
>>> SeedProgress.can_skip([(0, 4)], None)
False
>>> SeedProgress.can_skip([(0, 4)], [(0, 4)])
False
>>> SeedProgress.can_skip([(1, 4)], [(0, 4)])
True
>>> SeedProgress.can_skip([(0, 4)], [(0, 4), (0, 4)])
False
>>> SeedProgress.can_skip([(0, 4), (0, 4), (2, 4)], [(0, 4), (0, 4)])
False
>>> SeedProgress.can_skip([(0, 4), (0, 4), (2, 4)], [(0, 4), (0, 4), (1, 4)])
True
>>> SeedProgress.can_skip([(0, 4), (0, 4), (2, 4)], [(0, 4), (0, 4), (2, 4)])
False
>>> SeedProgress.can_skip([(0, 4), (0, 4), (2, 4)], [(0, 4), (0, 4), (3, 4)])
False
>>> SeedProgress.can_skip([(0, 4), (0, 4), (2, 4)], [(0, 4), (1, 4)])
False
>>> SeedProgress.can_skip([(0, 4), (0, 4), (2, 4)], [(0, 4), (1, 4), (0, 4)])
False
"""
if current_progress is None:
return False
if old_progress is None:
return False
if old_progress == []:
return True
for old, current in izip_longest(old_progress, current_progress, fillvalue=None):
if old is None:
return False
if current is None:
return False
if old < current:
return False
if old > current:
return True
return False
def running(self):
return True
class StopProcess(Exception):
pass
class SeedInterrupted(Exception):
pass
class TileWalker(object):
"""
TileWalker traverses through all tiles in a tile grid and calls worker_pool.process
for each (meta) tile. It traverses the tile grid (pyramid) depth-first.
    Intersections with coverages are checked before handling subtiles in the next level,
    which makes it possible to decide whether all subtiles should be seeded or skipped.
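    A rough usage sketch (illustrative only; see the seed functions below for
    the real wiring)::
        pool = TileWorkerPool(task, TileSeedWorker, size=concurrency)
        walker = TileWalker(task, pool, handle_uncached=True)
        walker.walk()
        pool.stop()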
"""
def __init__(self, task, worker_pool, handle_stale=False, handle_uncached=False,
work_on_metatiles=True, skip_geoms_for_last_levels=0, progress_logger=None,
seed_progress=None):
self.tile_mgr = task.tile_manager
self.task = task
self.worker_pool = worker_pool
self.handle_stale = handle_stale
self.handle_uncached = handle_uncached
self.work_on_metatiles = work_on_metatiles
self.skip_geoms_for_last_levels = skip_geoms_for_last_levels
self.progress_logger = progress_logger
num_seed_levels = len(task.levels)
if num_seed_levels >= 4:
self.report_till_level = task.levels[num_seed_levels-2]
else:
self.report_till_level = task.levels[num_seed_levels-1]
meta_size = self.tile_mgr.meta_grid.meta_size if self.tile_mgr.meta_grid else (1, 1)
self.tiles_per_metatile = meta_size[0] * meta_size[1]
self.grid = MetaGrid(self.tile_mgr.grid, meta_size=meta_size, meta_buffer=0)
self.count = 0
self.seed_progress = seed_progress or SeedProgress()
# It is possible that we 'walk' through the same tile multiple times
# when seeding irregular tile grids[0]. limit_sub_bbox prevents that we
# recurse into the same area multiple times, but it is still possible
# that a tile is processed multiple times. Locking prevents that a tile
# is seeded multiple times, but it is possible that we count the same tile
# multiple times (in dry-mode, or while the tile is in the process queue).
# Tile counts can be off by 280% with sqrt2 grids.
# We keep a small cache of already processed tiles to skip most duplicates.
# A simple cache of 64 tile coordinates for each level already brings the
# difference down to ~8%, which is good enough and faster than a more
# sophisticated FIFO cache with O(1) lookup, or even caching all tiles.
# [0] irregular tile grids: where one tile does not have exactly 4 subtiles
# Typically when you use res_factor, or a custom res list.
self.seeded_tiles = {l: deque(maxlen=64) for l in task.levels}
def walk(self):
assert self.handle_stale or self.handle_uncached
bbox = self.task.coverage.extent.bbox_for(self.tile_mgr.grid.srs)
if self.seed_progress.already_processed():
# nothing to seed
self.seed_progress.step_forward()
else:
try:
self._walk(bbox, self.task.levels)
except StopProcess:
pass
self.report_progress(self.task.levels[0], self.task.coverage.bbox)
def _walk(self, cur_bbox, levels, current_level=0, all_subtiles=False):
"""
:param cur_bbox: the bbox to seed in this call
:param levels: list of levels to seed
:param all_subtiles: seed all subtiles and do not check for
intersections with bbox/geom
"""
bbox_, tiles, subtiles = self.grid.get_affected_level_tiles(cur_bbox, current_level)
total_subtiles = tiles[0] * tiles[1]
if len(levels) < self.skip_geoms_for_last_levels:
# do not filter in last levels
all_subtiles = True
subtiles = self._filter_subtiles(subtiles, all_subtiles)
if current_level in levels and current_level <= self.report_till_level:
self.report_progress(current_level, cur_bbox)
if not self.seed_progress.running():
if current_level in levels:
self.report_progress(current_level, cur_bbox)
self.tile_mgr.cleanup()
raise StopProcess()
        process = False
if current_level in levels:
levels = levels[1:]
process = True
for i, (subtile, sub_bbox, intersection) in enumerate(subtiles):
if subtile is None: # no intersection
self.seed_progress.step_forward(total_subtiles)
continue
if levels: # recurse to next level
sub_bbox = limit_sub_bbox(cur_bbox, sub_bbox)
if intersection == CONTAINS:
all_subtiles = True
else:
all_subtiles = False
with self.seed_progress.step_down(i, total_subtiles):
if self.seed_progress.already_processed():
self.seed_progress.step_forward()
else:
self._walk(sub_bbox, levels, current_level=current_level+1,
all_subtiles=all_subtiles)
if not process:
continue
# check if subtile was already processed. see comment in __init__
if subtile in self.seeded_tiles[current_level]:
if not levels:
self.seed_progress.step_forward(total_subtiles)
continue
self.seeded_tiles[current_level].appendleft(subtile)
if not self.work_on_metatiles:
# collect actual tiles
handle_tiles = self.grid.tile_list(subtile)
else:
handle_tiles = [subtile]
if self.handle_uncached:
handle_tiles = [t for t in handle_tiles if
t is not None and
not self.tile_mgr.is_cached(t)]
elif self.handle_stale:
handle_tiles = [t for t in handle_tiles if
t is not None and
self.tile_mgr.is_stale(t)]
if handle_tiles:
self.count += 1
self.worker_pool.process(handle_tiles, self.seed_progress)
if not levels:
self.seed_progress.step_forward(total_subtiles)
if len(levels) >= 4:
# call cleanup to close open caches
# for connection based caches
self.tile_mgr.cleanup()
def report_progress(self, level, bbox):
if self.progress_logger:
self.progress_logger.log_progress(self.seed_progress, level, bbox,
self.count * self.tiles_per_metatile)
def _filter_subtiles(self, subtiles, all_subtiles):
"""
Return an iterator with all sub tiles.
Yields (None, None, None) for non-intersecting tiles,
otherwise (subtile, subtile_bbox, intersection).
"""
for subtile in subtiles:
if subtile is None:
yield None, None, None
else:
sub_bbox = self.grid.meta_tile(subtile).bbox
if all_subtiles:
intersection = CONTAINS
else:
intersection = self.task.intersects(sub_bbox)
if intersection:
yield subtile, sub_bbox, intersection
else:
yield None, None, None
class SeedTask(object):
def __init__(self, md, tile_manager, levels, refresh_timestamp, coverage):
self.md = md
self.tile_manager = tile_manager
self.grid = tile_manager.grid
self.levels = levels
self.refresh_timestamp = refresh_timestamp
self.coverage = coverage
@property
def id(self):
return self.md['name'], self.md['cache_name'], self.md['grid_name'], tuple(self.levels)
def intersects(self, bbox):
if self.coverage.contains(bbox, self.grid.srs): return CONTAINS
if self.coverage.intersects(bbox, self.grid.srs): return INTERSECTS
return NONE
class CleanupTask(object):
"""
:param coverage: area for the cleanup
:param complete_extent: ``True`` if `coverage` equals the extent of the grid
"""
def __init__(self, md, tile_manager, levels, remove_timestamp, coverage, complete_extent=False):
self.md = md
self.tile_manager = tile_manager
self.grid = tile_manager.grid
self.levels = levels
self.remove_timestamp = remove_timestamp
self.coverage = coverage
self.complete_extent = complete_extent
@property
def id(self):
return 'cleanup', self.md['name'], self.md['cache_name'], self.md['grid_name']
def intersects(self, bbox):
if self.coverage.contains(bbox, self.grid.srs): return CONTAINS
if self.coverage.intersects(bbox, self.grid.srs): return INTERSECTS
return NONE
def seed(tasks, concurrency=2, dry_run=False, skip_geoms_for_last_levels=0,
progress_logger=None, cache_locker=None):
if cache_locker is None:
cache_locker = DummyCacheLocker()
active_tasks = tasks[::-1]
while active_tasks:
task = active_tasks[-1]
print(format_seed_task(task))
wait = len(active_tasks) == 1
try:
with cache_locker.lock(task.md['cache_name'], no_block=not wait):
if progress_logger and progress_logger.progress_store:
progress_logger.current_task_id = task.id
start_progress = progress_logger.progress_store.get(task.id)
else:
start_progress = None
seed_progress = SeedProgress(old_progress_identifier=start_progress)
seed_task(task, concurrency, dry_run, skip_geoms_for_last_levels, progress_logger,
seed_progress=seed_progress)
except CacheLockedError:
print(' ...cache is locked, skipping')
active_tasks = [task] + active_tasks[:-1]
else:
active_tasks.pop()
def seed_task(task, concurrency=2, dry_run=False, skip_geoms_for_last_levels=0,
progress_logger=None, seed_progress=None):
if task.coverage is False:
return
if task.refresh_timestamp is not None:
task.tile_manager._expire_timestamp = task.refresh_timestamp
task.tile_manager.minimize_meta_requests = False
work_on_metatiles = True
if task.tile_manager.rescale_tiles:
work_on_metatiles = False
tile_worker_pool = TileWorkerPool(task, TileSeedWorker, dry_run=dry_run,
size=concurrency, progress_logger=progress_logger)
tile_walker = TileWalker(task, tile_worker_pool, handle_uncached=True,
skip_geoms_for_last_levels=skip_geoms_for_last_levels, progress_logger=progress_logger,
seed_progress=seed_progress,
work_on_metatiles=work_on_metatiles,
)
try:
tile_walker.walk()
except KeyboardInterrupt:
tile_worker_pool.stop(force=True)
raise
finally:
tile_worker_pool.stop()
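
# --- Illustrative sketch, not part of MapProxy: the bounded-deque duplicate
# filter described in TileWalker.__init__. The tile coordinates below are
# made up; the point is that a small `deque(maxlen=64)` skips most repeats
# cheaply without caching every tile ever seen.
def _example_duplicate_filter(tiles):
    from collections import deque
    seen = deque(maxlen=64)      # forgets the oldest entry once full
    unique = []
    for tile in tiles:
        if tile in seen:         # linear scan over at most 64 entries
            continue
        seen.appendleft(tile)
        unique.append(tile)
    return unique                # e.g. [(0, 0), (0, 1), (1, 1)] for repeated input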
| 37.135593 | 100 | 0.611491 | [
"ECL-2.0",
"Apache-2.0"
] | GeoplexGIS/mapproxy | mapproxy/seed/seeder.py | 19,719 | Python |
import minimalmodbus
import serial.tools.list_ports
import argparse
import time
#Creates a new instance of a minimal modbus connection
#Change portname to whatever you're using (/dev/USB0, COM4, etc)
#Or just change it when you create the new serial object
#247 is the default address for Renogy devices
class RenogySmartBattery(minimalmodbus.Instrument):
def __init__(self, portname="/dev/USB0", slaveaddress=247, baudrate=9600, timeout=0.5):
minimalmodbus.Instrument.__init__(self, portname, slaveaddress)
self.serial.baudrate = baudrate
self.serial.timeout = timeout
self.address = slaveaddress
self.amps = 0
self.unitVolts = 0
self.cellVolts = []
self.numCells = 4
self.capacity = 0
self.maxCapacity = 0
self.percentage = 0
self.state = "Error"
self.heater = False
self.cellTemp = []
self.cycles = 0
self.batSerial = ""
#Reads number of Cells
try:
self.numCells = self.read_register(5000)
except Exception as e:
print("Error getting number of cells")
#Reads the Serial Number
try:
self.batSerial = self.read_registers(5110,6)
except Exception as e:
print("Error reading the serial number")
def update(self):
#Gets unit current flow in A (0), unit voltage (1), capacity in AH (2,3), max capacity (4,5), cycle nums (6)
try:
battInfo = self.read_registers(5042,7)
            self.amps = battInfo[0] / 100 if battInfo[0] < 61440 else (battInfo[0] - 65536) / 100
            self.unitVolts = battInfo[1] / 10
            self.capacity = ( battInfo[2] << 15 | (battInfo[3] >> 1) ) * 0.002
            self.maxCapacity = ( battInfo[4] << 15 | (battInfo[5] >> 1) ) * 0.002
self.cycles = battInfo[6]
except Exception as e:
            print("Error getting Unit info: " + str(e))
#Gets heater status
try:
heaterInfo = self.read_register(5013)
self.heater = (heaterInfo / 255) * 100
except Exception as e:
            print("Error getting heater info: " + str(e))
#Get individual cell info
try:
self.cellTemp = self.read_registers(5018, self.numCells)
self.cellVolts = self.read_registers(5001, self.numCells)
except Exception as e:
print("Error getting individual cell info")
def getNumCells(self):
return self.numCells
#sets the address of the battery
def setAddress(self, address):
self.address = address
#Gets the amperage flow of the battery
def getAmps(self):
return self.amps
#Returns a list of the cell voltages
def getCellVolts(self):
return [x / 19 for x in self.cellVolts]
#Returns number of cycles on the battery
def getCycles(self):
return self.cycles
#Returns the serial number
def getSerial(self):
        return ''.join(str(r) for r in self.batSerial)
#Gets the voltage of the battery
def getUnitVolts(self):
return self.unitVolts
#Gets the current AH of the battery
def getCapacity(self):
return self.capacity
#Gets the max capacity of the battery
def getMax_capacity(self):
return self.maxCapacity
#Gets the percentage full of the battery
def getPercentage(self):
return self.capacity / self.maxCapacity
#Gets the state of the battery (Charging, Discharging, or Error)
def getState(self):
if self.amps < 0: return "DISCHARGING"
elif self.amps > 0: return "CHARGING"
return "IDLE"
#For the self-heating batteries, gets if the battery is on and how much (0-100)
def getHeater(self):
return self.heater
#Gets the overall temperature of the battery by getting the average temperature of the cells
def getBatteryTemp(self):
return sum(self.cellTemp) / len(self.cellTemp)
#Reads a specific register
def readRegister(self, register):
try:
return self.read_register(register)
except Exception as e:
print(e)
def readRegisters(self, startRegister, numRegisters):
try:
return self.read_registers(startRegister, numRegisters)
except Exception as e:
print(e)
#Writes a specific register
def writeRegister(self, register, value):
try:
return self.write_register(register, value)
except Exception as e:
print(e)
#Utilizes the write register to change the slave address of the battery
def changeAddress(self, value, address):
try:
            return self.writeRegister(5223, value)
except Exception as e:
print(e)
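
#--- Illustrative sketch only, not part of the driver: a minimal polling loop
#built on the class above. Port name, slave address, interval and sample count
#are assumptions; adjust them for your setup.
def examplePollLoop(port="/dev/ttyUSB0", address=247, interval=5, samples=3):
    battery = RenogySmartBattery(port, address)
    for _ in range(samples):
        battery.update()  #refresh cached register values
        print(battery.getUnitVolts(), "V", battery.getAmps(), "A", battery.getState())
        time.sleep(interval)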
#Main Method for demonstration
def main():
#main two arguments are the identifier of the USB connection and the address to connect to.
    renogy = RenogySmartBattery("/dev/USB0", 50)
    renogy.update()
    print(renogy.getUnitVolts())
    print(renogy.getAmps())
if __name__ == "__main__":
main() | 32.063694 | 114 | 0.635081 | [
"MIT"
] | epfenninger/HA-Renogy | Renogy.py | 5,034 | Python |
import math
import os
from pathlib import Path
from typing import Iterable, Union
import mmcv
import torch
from pytorch3d.structures.meshes import Meshes
from tqdm import trange
import mmhuman3d
from mmhuman3d.core.cameras import compute_orbit_cameras
from mmhuman3d.core.conventions.cameras import convert_cameras
from .builder import build_renderer
osj = os.path.join
def render(
output_path: str,
device: Union[str, torch.device] = 'cpu',
meshes: Meshes = None,
render_choice: str = 'base',
batch_size: int = 5,
K: torch.Tensor = None,
R: torch.Tensor = None,
T: torch.Tensor = None,
projection: str = 'perspective',
orbit_speed: Union[Iterable[float], float] = 0.0,
dist: float = 2.7,
dist_speed=1.0,
in_ndc: bool = True,
resolution=[1024, 1024],
convention: str = 'pytorch3d',
no_grad: bool = False,
return_tensor: bool = True,
):
RENDER_CONFIGS = mmcv.Config.fromfile(
os.path.join(
Path(mmhuman3d.__file__).parents[1],
'configs/render/smpl.py'))['RENDER_CONFIGS'][render_choice]
renderer = build_renderer(
dict(
type=RENDER_CONFIGS['renderer_type'],
device=device,
resolution=resolution,
projection=projection,
output_path=output_path,
return_type=['tensor'] if return_tensor else None,
in_ndc=in_ndc,
**RENDER_CONFIGS))
num_frames = len(meshes)
if K is None or R is None or T is None:
K, R, T = compute_orbit_cameras(
orbit_speed=orbit_speed,
dist=dist,
batch_size=num_frames,
dist_speed=dist_speed)
if projection in ['perspective', 'fovperspective']:
is_perspective = True
else:
is_perspective = False
K, R, T = convert_cameras(
resolution_dst=resolution,
resolution_src=resolution,
in_ndc_dst=in_ndc,
in_ndc_src=in_ndc,
K=K,
R=R,
T=T,
is_perspective=is_perspective,
convention_src=convention,
convention_dst='pytorch3d')
tensors = []
    for i in trange(math.ceil(num_frames / batch_size)):
indexes = list(
range(i * batch_size, min((i + 1) * batch_size, len(meshes))))
if no_grad:
with torch.no_grad():
images_batch = renderer(
meshes=meshes[indexes],
K=K[indexes],
R=R[indexes],
T=T[indexes],
indexes=indexes)
else:
images_batch = renderer(
meshes=meshes[indexes],
K=K[indexes],
R=R[indexes],
T=T[indexes],
indexes=indexes)
tensors.append(images_batch['tensor'])
tensors = torch.cat(tensors)
renderer.export()
return tensors
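
# --- Illustrative sketch, not part of mmhuman3d: how a caller might invoke
# render(). `my_meshes` is assumed to be a pytorch3d Meshes instance prepared
# by the caller; 'base' must exist in configs/render/smpl.py, and the output
# path is arbitrary.
def _example_render(my_meshes):
    return render(
        output_path='demo.mp4',
        device='cpu',
        meshes=my_meshes,
        render_choice='base',
        batch_size=2,
        orbit_speed=0.5,
        resolution=[256, 256],
        no_grad=True,
    )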
| 28.54902 | 74 | 0.590316 | [
"Apache-2.0"
] | mingyuan-zhang/mmhuman3d | mmhuman3d/core/visualization/renderer/torch3d_renderer/render_runner.py | 2,912 | Python |
import pygame
import random
import helpers
from ItemManager import ItemManager
import scorer
class BlockManager:
def __init__(self,main):
self.main = main
self.blockSize = 75
self.gridWidth = 12
self.gridHeight = 12
self.grid = []
for x in range(0,self.gridWidth):
newColumn = []
for y in range(0,self.gridHeight):
newColumn.append(None)
self.grid.append(newColumn)
self.numTypes = 8
self.images = []
for x in range(0,self.numTypes):
self.images.append(helpers.loadTGA(str(x))[0])
self.maxTimeTillNew = 40
self.timeTillNew = self.maxTimeTillNew
self.moveTime = 0
self.moveFrec = 10
def compute(self):
self.calculateSpeed()
self.moveTime += 1
if self.moveTime % self.moveFrec == 0:
self.moveBlocksDown()
#Check for game over.
doneChecking = 0
y = 0
while y < self.gridHeight and not doneChecking:
x = 0
while x < self.gridWidth and not doneChecking:
if self.grid[x][y] is None:
doneChecking = 1
x += 1
y += 1
        if not doneChecking: #If no empty cell was found anywhere in the grid:
self.main.lose()
self.timeTillNew -= 1
if self.timeTillNew == 0:
self.getNewBlock()
self.timeTillNew = self.maxTimeTillNew
self.checkAdj()
def checkAdj(self):
#Check grid for triple adjacency.
for x in range(0,self.gridWidth):
for y in range(0,self.gridHeight):
if self.grid[x][y] is not None:
adjacents = helpers.getAdjacents(x,y,self.grid)
if len(adjacents) >= 3:
for point in adjacents:
self.grid[point[0]][point[1]] = None
self.main.explosionGraphics.getPoint(point[0]*self.blockSize+self.blockSize/2,point[1]*self.blockSize+self.blockSize/2)
#+self.blockSize/2 so it's in the center.
for anObject in self.main.objects:
if isinstance(anObject,scorer.Scorer):
anObject.getDestroyedBlocks(len(adjacents))
if isinstance(anObject, ItemManager):
anObject.getDestroyedBlocks(adjacents)
def getNewBlock(self):
pos = random.randint(0,self.gridWidth - 1)
while self.grid[pos][0] is not None:
pos = random.randint(0,self.gridWidth - 1)
col = random.randint(0,self.numTypes - 1)
self.grid[pos][0] = col
def moveBlocksDown(self):
#Move all blocks down.
for x in range(0,self.gridWidth):
for y in range(self.gridHeight-2,-1,-1): #From gridHeight-2 to 0. Blocks on the bottom (y=gridHeight - 1) won't move down no matter what.
if self.grid[x][y] is not None and self.grid[x][y + 1] is None:
self.grid[x][y + 1] = self.grid[x][y]
self.grid[x][y] = None
def draw(self,surface):
for y in range(0,self.gridHeight):
for x in range(0,self.gridWidth):
if self.grid[x][y] is not None:
surface.blit(self.images[self.grid[x][y]],(x*self.blockSize,y*self.blockSize))
def getDown(self):
self.moveBlocksDown()
self.moveTime = 0
if self.timeTillNew <= self.moveFrec:
self.getNewBlock()
self.timeTillNew = self.maxTimeTillNew
else:
self.timeTillNew -= self.moveFrec
def getRight(self):
#Remember: Blocks will not move right if there is a block directly below them.
for y in range(self.gridHeight-2,-1,-1): #From gridHeight-2 to 0. Blocks on the bottom (y=gridHeight - 1) won't move right no matter what.
for x in range(self.gridWidth-2,-1,-1): #From gridWidth-2 to 0. Blocks on the right (x=gridWidth - 1) won't move right no matter what.
if self.grid[x][y] is not None and self.grid[x + 1][y] is None and self.grid[x][y + 1] is None:
self.grid[x + 1][y] = self.grid[x][y]
self.grid[x][y] = None
def getLeft(self):
#Remember: Blocks will not move right if there is a block directly below them.
for y in range(self.gridHeight-2,-1,-1): #From gridHeight-2 to 0. Blocks on the bottom (y=gridHeight - 1) won't move left no matter what.
for x in range(1,self.gridWidth): #From 1 to gridWidth-1. Blocks on the left (x=0) won't move left no matter what.
if self.grid[x][y] is not None and self.grid[x - 1][y] is None and self.grid[x][y + 1] is None:
self.grid[x - 1][y] = self.grid[x][y]
self.grid[x][y] = None
def calculateSpeed(self):
for anObject in self.main.objects:
if isinstance(anObject,scorer.Scorer):
score = anObject.score
if isinstance(anObject, ItemManager):
itemManager = anObject
k = 0
if score > 10: k = 1
if score > 20: k = 2
if score > 50: k = 3
if score > 100: k = 4
if score > 200: k = 5
if score > 400: k = 6
if score > 600: k = 7
if score > 800: k = 8
if score > 1000: k = 9
if score > 2000: k = 10
if score > 3000: k = 11
if score > 4000: k = 12
if score > 5000: k = 13
if score == 9999: k = 14
self.maxTimeTillNew = {
0: 100,
1: 80,
2: 60,
3: 50,
4: 40,
5: 36,
6: 34,
7: 30,
8: 28,
9: 26,
10: 24,
11: 22,
12: 20,
13: 19,
14: 18
}[k]
if k <= 2:
self.moveFrec = 10
else:
self.moveFrec = self.maxTimeTillNew / 3
scorer.comboLastTime = self.maxTimeTillNew * 3
if k > 0:
itemManager.itemFrec = max(int(self.maxTimeTillNew * 2.5), 30 * 2.5) #128
itemManager.itemLastTime = itemManager.itemFrec * 8
itemManager.itemsAvailable = min(k, 8)
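
#--- Illustrative sketch, not the real helpers.getAdjacents: one way the
#adjacency helper used by checkAdj() could work, as a flood fill over
#orthogonal neighbours that hold the same colour value.
def exampleGetAdjacents(x, y, grid):
    color = grid[x][y]
    stack, found = [(x, y)], set()
    while stack:
        cx, cy = stack.pop()
        if (cx, cy) in found:
            continue
        found.add((cx, cy))
        for nx, ny in ((cx + 1, cy), (cx - 1, cy), (cx, cy + 1), (cx, cy - 1)):
            if 0 <= nx < len(grid) and 0 <= ny < len(grid[0]) and \
                    grid[nx][ny] == color and (nx, ny) not in found:
                stack.append((nx, ny))
    return list(found)  #checkAdj() only needs point[0] and point[1]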
| 30.045198 | 141 | 0.632193 | [
"Unlicense"
] | andy-hanson/monis | src/BlockManager.py | 5,318 | Python |
# coding: utf-8
"""
Cisco Intersight
Cisco Intersight is a management platform delivered as a service with embedded analytics for your Cisco and 3rd party IT infrastructure. This platform offers an intelligent level of management that enables IT organizations to analyze, simplify, and automate their environments in more advanced ways than the prior generations of tools. Cisco Intersight provides an integrated and intuitive management experience for resources in the traditional data center as well as at the edge. With flexible deployment options to address complex security needs, getting started with Intersight is quick and easy. Cisco Intersight has deep integration with Cisco UCS and HyperFlex systems allowing for remote deployment, configuration, and ongoing maintenance. The model-based deployment works for a single system in a remote location or hundreds of systems in a data center and enables rapid, standardized configuration and deployment. It also streamlines maintaining those systems whether you are working with small or very large configurations. # noqa: E501
The version of the OpenAPI document: 1.0.9-1295
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import intersight
from intersight.models.vnic_eth_adapter_policy_list import VnicEthAdapterPolicyList # noqa: E501
from intersight.rest import ApiException
class TestVnicEthAdapterPolicyList(unittest.TestCase):
"""VnicEthAdapterPolicyList unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testVnicEthAdapterPolicyList(self):
"""Test VnicEthAdapterPolicyList"""
# FIXME: construct object with mandatory attributes with example values
# model = intersight.models.vnic_eth_adapter_policy_list.VnicEthAdapterPolicyList() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| 51.763158 | 1,052 | 0.785968 | [
"Apache-2.0"
] | CiscoUcs/intersight-python | test/test_vnic_eth_adapter_policy_list.py | 1,967 | Python |
import logging
from pyrogram.errors import InputUserDeactivated, UserNotParticipant, FloodWait, UserIsBlocked, PeerIdInvalid
from info import AUTH_CHANNEL, LONG_IMDB_DESCRIPTION, MAX_LIST_ELM
from imdb import IMDb
import asyncio
from pyrogram.types import Message
from typing import Union
import re
import os
from datetime import datetime
from typing import List
from pyrogram.types import InlineKeyboardButton
from database.users_chats_db import db
from bs4 import BeautifulSoup
import requests
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
BTN_URL_REGEX = re.compile(
r"(\[([^\[]+?)\]\((buttonurl|buttonalert):(?:/{0,2})(.+?)(:same)?\))"
)
imdb = IMDb()
BANNED = {}
SMART_OPEN = '“'
SMART_CLOSE = '”'
START_CHAR = ('\'', '"', SMART_OPEN)
# temp db for banned
class temp(object):
BANNED_USERS = []
BANNED_CHATS = []
ME = None
CURRENT=int(os.environ.get("SKIP", 2))
CANCEL = False
MELCOW = {}
U_NAME = None
B_NAME = None
async def is_subscribed(bot, query):
try:
user = await bot.get_chat_member(AUTH_CHANNEL, query.from_user.id)
except UserNotParticipant:
pass
except Exception as e:
logger.exception(e)
else:
if user.status != 'kicked':
return True
return False
async def get_poster(query, bulk=False, id=False, file=None):
if not id:
# https://t.me/GetTGLink/4183
query = (query.strip()).lower()
title = query
year = re.findall(r'[1-2]\d{3}$', query, re.IGNORECASE)
if year:
year = list_to_str(year[:1])
title = (query.replace(year, "")).strip()
elif file is not None:
year = re.findall(r'[1-2]\d{3}', file, re.IGNORECASE)
if year:
year = list_to_str(year[:1])
else:
year = None
movieid = imdb.search_movie(title.lower(), results=10)
if not movieid:
return None
if year:
filtered=list(filter(lambda k: str(k.get('year')) == str(year), movieid))
if not filtered:
filtered = movieid
else:
filtered = movieid
movieid=list(filter(lambda k: k.get('kind') in ['movie', 'tv series'], filtered))
if not movieid:
movieid = filtered
if bulk:
return movieid
movieid = movieid[0].movieID
else:
movieid = int(query)
movie = imdb.get_movie(movieid)
if movie.get("original air date"):
date = movie["original air date"]
elif movie.get("year"):
date = movie.get("year")
else:
date = "N/A"
plot = ""
if not LONG_IMDB_DESCRIPTION:
plot = movie.get('plot')
if plot and len(plot) > 0:
plot = plot[0]
else:
plot = movie.get('plot outline')
if plot and len(plot) > 800:
plot = plot[0:800] + "..."
return {
'title': movie.get('title'),
'votes': movie.get('votes'),
"aka": list_to_str(movie.get("akas")),
"seasons": movie.get("number of seasons"),
"box_office": movie.get('box office'),
'localized_title': movie.get('localized title'),
'kind': movie.get("kind"),
"imdb_id": f"tt{movie.get('imdbID')}",
"cast": list_to_str(movie.get("cast")),
"runtime": list_to_str(movie.get("runtimes")),
"countries": list_to_str(movie.get("countries")),
"certificates": list_to_str(movie.get("certificates")),
"languages": list_to_str(movie.get("languages")),
"director": list_to_str(movie.get("director")),
"writer":list_to_str(movie.get("writer")),
"producer":list_to_str(movie.get("producer")),
"composer":list_to_str(movie.get("composer")) ,
"cinematographer":list_to_str(movie.get("cinematographer")),
"music_team": list_to_str(movie.get("music department")),
"distributors": list_to_str(movie.get("distributors")),
'release_date': date,
'year': movie.get('year'),
'genres': list_to_str(movie.get("genres")),
'poster': movie.get('full-size cover url'),
'plot': plot,
'rating': str(movie.get("rating")),
'url':f'https://www.imdb.com/title/tt{movieid}'
}
# https://github.com/odysseusmax/animated-lamp/blob/2ef4730eb2b5f0596ed6d03e7b05243d93e3415b/bot/utils/broadcast.py#L37
async def broadcast_messages(user_id, message):
try:
await message.copy(chat_id=user_id)
return True, "Succes"
except FloodWait as e:
await asyncio.sleep(e.x)
return await broadcast_messages(user_id, message)
except InputUserDeactivated:
await db.delete_user(int(user_id))
logging.info(f"{user_id}-Removed from Database, since deleted account.")
return False, "Deleted"
except UserIsBlocked:
logging.info(f"{user_id} -Blocked the bot.")
return False, "Blocked"
except PeerIdInvalid:
await db.delete_user(int(user_id))
logging.info(f"{user_id} - PeerIdInvalid")
return False, "Error"
except Exception as e:
return False, "Error"
async def search_gagala(text):
usr_agent = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) '
'Chrome/61.0.3163.100 Safari/537.36'
}
text = text.replace(" ", '+')
url = f'https://www.google.com/search?q={text}'
response = requests.get(url, headers=usr_agent)
response.raise_for_status()
soup = BeautifulSoup(response.text, 'html.parser')
titles = soup.find_all( 'h3' )
return [title.getText() for title in titles]
def get_size(size):
"""Get size in readable format"""
units = ["Bytes", "KB", "MB", "GB", "TB", "PB", "EB"]
size = float(size)
i = 0
while size >= 1024.0 and i < len(units):
i += 1
size /= 1024.0
return "%.2f %s" % (size, units[i])
def split_list(l, n):
    for i in range(0, len(l), n):
        yield l[i:i + n]
def get_file_id(msg: Message):
if msg.media:
for message_type in (
"photo",
"animation",
"audio",
"document",
"video",
"video_note",
"voice",
"sticker"
):
obj = getattr(msg, message_type)
if obj:
setattr(obj, "message_type", message_type)
return obj
def extract_user(message: Message) -> Union[int, str]:
"""extracts the user from a message"""
# https://github.com/SpEcHiDe/PyroGramBot/blob/f30e2cca12002121bad1982f68cd0ff9814ce027/pyrobot/helper_functions/extract_user.py#L7
user_id = None
user_first_name = None
if message.reply_to_message:
user_id = message.reply_to_message.from_user.id
user_first_name = message.reply_to_message.from_user.first_name
elif len(message.command) > 1:
if (
len(message.entities) > 1 and
message.entities[1].type == "text_mention"
):
required_entity = message.entities[1]
user_id = required_entity.user.id
user_first_name = required_entity.user.first_name
else:
user_id = message.command[1]
# don't want to make a request -_-
user_first_name = user_id
try:
user_id = int(user_id)
except ValueError:
pass
else:
user_id = message.from_user.id
user_first_name = message.from_user.first_name
return (user_id, user_first_name)
def list_to_str(k):
if not k:
return "N/A"
elif len(k) == 1:
return str(k[0])
elif MAX_LIST_ELM:
k = k[:int(MAX_LIST_ELM)]
return ' '.join(f'{elem}, ' for elem in k)
else:
return ' '.join(f'{elem}, ' for elem in k)
def last_online(from_user):
time = ""
if from_user.is_bot:
time += "🤖 Bot :("
elif from_user.status == 'recently':
time += "Recently"
elif from_user.status == 'within_week':
time += "Within the last week"
elif from_user.status == 'within_month':
time += "Within the last month"
elif from_user.status == 'long_time_ago':
time += "A long time ago :("
elif from_user.status == 'online':
time += "Currently Online"
elif from_user.status == 'offline':
time += datetime.fromtimestamp(from_user.last_online_date).strftime("%a, %d %b %Y, %H:%M:%S")
return time
def split_quotes(text: str) -> List:
if not any(text.startswith(char) for char in START_CHAR):
return text.split(None, 1)
counter = 1 # ignore first char -> is some kind of quote
while counter < len(text):
if text[counter] == "\\":
counter += 1
elif text[counter] == text[0] or (text[0] == SMART_OPEN and text[counter] == SMART_CLOSE):
break
counter += 1
else:
return text.split(None, 1)
# 1 to avoid starting quote, and counter is exclusive so avoids ending
key = remove_escapes(text[1:counter].strip())
# index will be in range, or `else` would have been executed and returned
rest = text[counter + 1:].strip()
if not key:
key = text[0] + text[0]
return list(filter(None, [key, rest]))
def parser(text, keyword):
if "buttonalert" in text:
text = (text.replace("\n", "\\n").replace("\t", "\\t"))
buttons = []
note_data = ""
prev = 0
i = 0
alerts = []
for match in BTN_URL_REGEX.finditer(text):
# Check if btnurl is escaped
n_escapes = 0
to_check = match.start(1) - 1
while to_check > 0 and text[to_check] == "\\":
n_escapes += 1
to_check -= 1
# if even, not escaped -> create button
if n_escapes % 2 == 0:
note_data += text[prev:match.start(1)]
prev = match.end(1)
if match.group(3) == "buttonalert":
# create a thruple with button label, url, and newline status
if bool(match.group(5)) and buttons:
buttons[-1].append(InlineKeyboardButton(
text=match.group(2),
callback_data=f"alertmessage:{i}:{keyword}"
))
else:
buttons.append([InlineKeyboardButton(
text=match.group(2),
callback_data=f"alertmessage:{i}:{keyword}"
)])
i += 1
alerts.append(match.group(4))
elif bool(match.group(5)) and buttons:
buttons[-1].append(InlineKeyboardButton(
text=match.group(2),
url=match.group(4).replace(" ", "")
))
else:
buttons.append([InlineKeyboardButton(
text=match.group(2),
url=match.group(4).replace(" ", "")
)])
else:
note_data += text[prev:to_check]
prev = match.start(1) - 1
else:
note_data += text[prev:]
try:
return note_data, buttons, alerts
except:
return note_data, buttons, None
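
# --- Illustrative sketch, not used by the bot: what parser() returns for a
# caption containing the button mark-up it understands. The keyword 'demo',
# the URL and the alert text are placeholders.
def _example_parser_usage():
    caption = "Movie files\n[Watch](buttonurl://example.com)\n[Info](buttonalert:Some note)"
    text, buttons, alerts = parser(caption, "demo")
    # text    -> caption with the button mark-up stripped out
    # buttons -> rows of InlineKeyboardButton objects (one list per row)
    # alerts  -> ['Some note'], shown later via the alertmessage callback
    return text, buttons, alerts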
def remove_escapes(text: str) -> str:
res = ""
is_escaped = False
for counter in range(len(text)):
if is_escaped:
res += text[counter]
is_escaped = False
elif text[counter] == "\\":
is_escaped = True
else:
res += text[counter]
return res
def humanbytes(size):
if not size:
return ""
power = 2**10
n = 0
Dic_powerN = {0: ' ', 1: 'Ki', 2: 'Mi', 3: 'Gi', 4: 'Ti'}
while size > power:
size /= power
        n += 1
return str(round(size, 2)) + " " + Dic_powerN[n] + 'B'
| 32.124661 | 135 | 0.56985 | [
"Apache-2.0"
] | godismyloard/Alan-Walker | untils.py | 11,861 | Python |
"""
#Trains a ResNet on the CIFAR10 dataset.
"""
from __future__ import print_function
import keras
from keras.layers import Dense, Conv2D, BatchNormalization, Activation
from keras.layers import AveragePooling2D, Input, Flatten
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint, LearningRateScheduler
from keras.callbacks import ReduceLROnPlateau, TensorBoard
from keras.preprocessing.image import ImageDataGenerator
from keras.regularizers import l2
from keras import backend as K
from keras.models import Model
from keras.datasets import cifar10
from keras.applications.vgg16 import VGG16
from keras.applications.vgg19 import VGG19
from keras import models, layers, optimizers
from datetime import datetime
import tensorflow as tf
import numpy as np
import os
import pdb
import sys
import argparse
import time
import signal
import glob
import json
parser = argparse.ArgumentParser(description='Tensorflow Cifar10 Training')
parser.add_argument('--tc', metavar='TESTCASE', type=str, help='specific testcase name')
parser.add_argument('--resume', dest='resume', action='store_true', help='if True, resume training from a checkpoint')
parser.add_argument('--gpu_num', metavar='GPU_NUMBER', type=str, help='select which gpu to use')
parser.set_defaults(resume=False)
args = parser.parse_args()
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]=args.gpu_num
# Training parameters
batch_size = 128
args_lr = 0.001
args_model = 'vgg16'
job_name = sys.argv[0].split('.')[0]
save_files = '/scratch/li.baol/checkpoint_max_param/' + job_name + '*'
total_epochs = 5
starting_epoch = 0
# first step is to update the PID
pid_dict = {}
with open('pid_lock.json', 'r') as fp:
pid_dict = json.load(fp)
pid_dict[job_name] = os.getpid()
json_file = json.dumps(pid_dict)
with open('pid_lock.json', 'w') as fp:
fp.write(json_file)
os.rename('pid_lock.json', 'pid.json')
if args.resume:
save_file = glob.glob(save_files)[0]
# epochs = int(save_file.split('/')[4].split('_')[1].split('.')[0])
starting_epoch = int(save_file.split('/')[4].split('.')[0].split('_')[-1])
data_augmentation = True
num_classes = 10
# Subtracting pixel mean improves accuracy
subtract_pixel_mean = True
n = 3
# Model name, depth and version
model_type = args.tc #'P100_resnet50_he_256_1'
# Load the CIFAR10 data.
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
# Normalize data.
x_train = x_train.astype('float32') / 255
x_test = x_test.astype('float32') / 255
# If subtract pixel mean is enabled
if subtract_pixel_mean:
x_train_mean = np.mean(x_train, axis=0)
x_train -= x_train_mean
x_test -= x_train_mean
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
print('y_train shape:', y_train.shape)
# Convert class vectors to binary class matrices.
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
if args.resume:
print('resume from checkpoint')
model = keras.models.load_model(save_file)
else:
print('train from start')
model = models.Sequential()
if '16' in args_model:
base_model = VGG16(weights=None, include_top=False, input_shape=(32, 32, 3), pooling=None)
elif '19' in args_model:
base_model = VGG19(weights=None, include_top=False, input_shape=(32, 32, 3), pooling=None)
#base_model.summary()
#pdb.set_trace()
model.add(base_model)
model.add(layers.Flatten())
model.add(layers.BatchNormalization())
model.add(layers.Dense(128, activation='relu'))#, kernel_initializer='he_uniform'))
#model.add(layers.Dropout(0.2))
model.add(layers.BatchNormalization())
model.add(layers.Dense(64, activation='relu'))#, kernel_initializer='he_uniform'))
#model.add(layers.Dropout(0.2))
model.add(layers.BatchNormalization())
model.add(layers.Dense(10, activation='softmax'))#, kernel_initializer='he_uniform'))
model.compile(loss='categorical_crossentropy',
optimizer=Adam(lr=args_lr),
metrics=['accuracy'])
#model.summary()
print(model_type)
#pdb.set_trace()
current_epoch = 0
################### connects interrupt signal to the process #####################
def terminateProcess(signalNumber, frame):
print('checkpointing the model triggered by kill -15 signal')
# delete whatever checkpoint that already exists
for f in glob.glob(save_files):
os.remove(f)
model.save('/scratch/li.baol/checkpoint_max_param/' + job_name + '_' + str(current_epoch) + '.h5')
print ('(SIGTERM) terminating the process')
checkpoint_dict = {}
with open('checkpoint.json', 'r') as fp:
checkpoint_dict = json.load(fp)
checkpoint_dict[job_name] = 1
json_file3 = json.dumps(checkpoint_dict)
with open('checkpoint.json', 'w') as fp:
fp.write(json_file3)
sys.exit()
signal.signal(signal.SIGTERM, terminateProcess)
#################################################################################
logdir = '/scratch/li.baol/tsrbrd_log/job_runs/' + model_type + '/' + job_name
tensorboard_callback = TensorBoard(log_dir=logdir)#, update_freq='batch')
class PrintEpoch(keras.callbacks.Callback):
def on_epoch_begin(self, epoch, logs=None):
global current_epoch
#remaining_epochs = epochs - epoch
current_epoch = epoch
print('current epoch ' + str(current_epoch))
def on_epoch_end(self, epoch, logs=None):
open('epoch/' + job_name + '.txt', 'a').close()
my_callback = PrintEpoch()
callbacks = [tensorboard_callback, my_callback]
#[checkpoint, lr_reducer, lr_scheduler, tensorboard_callback]
# Run training
if not args.resume:
trainable_count = int(np.sum([K.count_params(p) for p in set(model.trainable_weights)]))
param_dict = {}
modify = False
with open('param_lock.json', 'r') as fp:
param_dict = json.load(fp)
if job_name not in param_dict:
param_dict[job_name] = trainable_count
modify = True
elif param_dict[job_name] != trainable_count:
param_dict[job_name] = trainable_count
modify = True
if modify:
json_file = json.dumps(param_dict)
with open('param_lock.json', 'w') as fp:
fp.write(json_file)
os.rename('param_lock.json', 'param.json')
ckpt_qual_dict = {}
while True:
if os.path.exists('ckpt_qual.json'):
os.rename('ckpt_qual.json', 'ckpt_qual_lock.json')
break
else:
time.sleep(1)
with open('ckpt_qual_lock.json', 'r') as fp:
ckpt_qual_dict = json.load(fp)
ckpt_qual_dict[job_name] = 1
json_file2 = json.dumps(ckpt_qual_dict)
with open('ckpt_qual_lock.json', 'w') as fp:
fp.write(json_file2)
os.rename('ckpt_qual_lock.json', 'ckpt_qual.json')
model.fit(x_train, y_train,
batch_size=batch_size,
epochs=round(total_epochs/2),
validation_data=(x_test, y_test),
shuffle=True,
callbacks=callbacks,
initial_epoch=starting_epoch,
verbose=1
)
# Score trained model.
scores = model.evaluate(x_test, y_test, verbose=1)
print('Test loss:', scores[0])
print('Test accuracy:', scores[1])
finish_dict = {}
while True:
if os.path.exists('finish.json'):
os.rename('finish.json', 'finish_lock.json')
break
else:
time.sleep(1)
with open('finish_lock.json', 'r') as fp:
finish_dict = json.load(fp)
finish_dict[job_name] = 1
json_file2 = json.dumps(finish_dict)
with open('finish_lock.json', 'w') as fp:
fp.write(json_file2)
os.rename('finish_lock.json', 'finish.json')
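
# --- Illustrative sketch, not called by this script: the "rename-to-lock"
# JSON update pattern used above, factored into one helper. The file prefix,
# key and value are placeholders for whatever the scheduler tracks.
def example_update_shared_json(prefix, key, value):
    while True:
        if os.path.exists(prefix + '.json'):
            os.rename(prefix + '.json', prefix + '_lock.json')  # claim the file
            break
        time.sleep(1)  # another job currently holds the lock
    with open(prefix + '_lock.json', 'r') as fp:
        state = json.load(fp)
    state[key] = value
    with open(prefix + '_lock.json', 'w') as fp:
        fp.write(json.dumps(state))
    os.rename(prefix + '_lock.json', prefix + '.json')  # release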
| 31.416327 | 118 | 0.692737 | [
"MIT"
] | boringlee24/keras_old | examples/pwr_run/checkpointing/timed/max_par/job2.py | 7,697 | Python |
# vmimages.py - azurerm functions for Microsoft.Compute RP publishers and images
from .restfns import do_get
from .settings import azure_rm_endpoint, COMP_API
# list_offers(access_token, subscription_id, location, publisher)
# list available VM image offers from a publisher
def list_offers(access_token, subscription_id, location, publisher):
endpoint = ''.join([azure_rm_endpoint,
'/subscriptions/', subscription_id,
'/providers/Microsoft.Compute/',
'locations/', location,
'/publishers/', publisher,
'/artifacttypes/vmimage/offers?api-version=', COMP_API])
return do_get(endpoint, access_token)
# list_publishers(access_token, subscription_id, location)
# list available image publishers for a location
def list_publishers(access_token, subscription_id, location):
endpoint = ''.join([azure_rm_endpoint,
'/subscriptions/', subscription_id,
'/providers/Microsoft.Compute/',
'locations/', location,
'/publishers?api-version=', COMP_API])
return do_get(endpoint, access_token)
# list_skus(access_token, subscription_id, location, publisher, offer)
# list available VM image skus for a publisher offer
def list_skus(access_token, subscription_id, location, publisher, offer):
endpoint = ''.join([azure_rm_endpoint,
'/subscriptions/', subscription_id,
'/providers/Microsoft.Compute/',
'locations/', location,
'/publishers/', publisher,
'/artifacttypes/vmimage/offers/', offer,
'/skus?api-version=', COMP_API])
return do_get(endpoint, access_token)
# list_sku_versions(access_token, subscription_id, location, publisher, offer, sku)
# list available versions for a given publisher's sku
def list_sku_versions(access_token, subscription_id, location, publisher, offer, sku):
endpoint = ''.join([azure_rm_endpoint,
'/subscriptions/', subscription_id,
'/providers/Microsoft.Compute/',
'locations/', location,
'/publishers/', publisher,
'/artifacttypes/vmimage/offers/', offer,
'/skus/', sku,
'/versions?api-version=', COMP_API])
return do_get(endpoint, access_token)
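
# example_list_ubuntu_versions(access_token, subscription_id)
# Illustrative sketch, not part of this module: drilling down from publisher
# to image versions. The location, publisher, offer and sku below are
# placeholders; substitute whatever list_publishers()/list_offers() report.
def example_list_ubuntu_versions(access_token, subscription_id):
    location = 'eastus'
    publisher = 'Canonical'
    offer = 'UbuntuServer'
    sku = '18.04-LTS'
    return list_sku_versions(access_token, subscription_id, location,
                             publisher, offer, sku)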
| 46 | 86 | 0.609881 | [
"MIT"
] | pjshi23/mcazurerm | mcazurerm/vmimages.py | 2,530 | Python |
# GUI Application automation and testing library
# Copyright (C) 2006-2018 Mark Mc Mahon and Contributors
# https://github.com/pywinauto/pywinauto/graphs/contributors
# http://pywinauto.readthedocs.io/en/latest/credits.html
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of pywinauto nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Timing settings for all of pywinauto
This module has one object that should be used for all timing adjustments
timings.Timings
There are a couple of predefined settings
timings.Timings.Fast()
timings.Timings.Defaults()
timings.Timings.Slow()
The Following are the individual timing settings that can be adjusted:
* window_find_timeout (default 5)
* window_find_retry (default .09)
* app_start_timeout (default 10)
* app_start_retry (default .90)
* app_connect_timeout (default 5.)
* app_connect_retry (default .1)
* cpu_usage_interval (default .5)
* cpu_usage_wait_timeout (default 20)
* exists_timeout (default .5)
* exists_retry (default .3)
* after_click_wait (default .09)
* after_clickinput_wait (default .09)
* after_menu_wait (default .1)
* after_sendkeys_key_wait (default .01)
* after_button_click_wait (default 0)
* before_closeclick_wait (default .1)
* closeclick_retry (default .05)
* closeclick_dialog_close_wait (default 2)
* after_closeclick_wait (default .2)
* after_windowclose_timeout (default 2)
* after_windowclose_retry (default .5)
* after_setfocus_wait (default .06)
* setfocus_timeout (default 2)
* setfocus_retry (default .1)
* after_setcursorpos_wait (default .01)
* sendmessagetimeout_timeout (default .01)
* after_tabselect_wait (default .05)
* after_listviewselect_wait (default .01)
* after_listviewcheck_wait default(.001)
* listviewitemcontrol_timeout default(1.5)
* after_treeviewselect_wait default(.1)
* after_toobarpressbutton_wait default(.01)
* after_updownchange_wait default(.1)
* after_movewindow_wait default(0)
* after_buttoncheck_wait default(0)
* after_comboboxselect_wait default(.001)
* after_listboxselect_wait default(0)
* after_listboxfocuschange_wait default(0)
* after_editsetedittext_wait default(0)
* after_editselect_wait default(.02)
* drag_n_drop_move_mouse_wait default(.1)
* before_drag_wait default(.2)
* before_drop_wait default(.1)
* after_drag_n_drop_wait default(.1)
* scroll_step_wait default(.1)
"""
import six
import time
import operator
from functools import wraps
from . import deprecated
#=========================================================================
class TimeConfig(object):
"""Central storage and manipulation of timing values"""
__default_timing = {
'window_find_timeout': 5.,
'window_find_retry': .09,
'app_start_timeout': 10.,
'app_start_retry': .90,
'app_connect_timeout': 5.,
'app_connect_retry': .1,
'cpu_usage_interval': .5,
'cpu_usage_wait_timeout': 20.,
'exists_timeout': .5,
'exists_retry': .3,
'after_click_wait': .09,
'after_clickinput_wait': .09,
'after_menu_wait': .1,
'after_sendkeys_key_wait': .01,
'after_button_click_wait': 0,
'before_closeclick_wait': .1,
'closeclick_retry': .05,
'closeclick_dialog_close_wait': 2.,
'after_closeclick_wait': .2,
'after_windowclose_timeout': 2,
'after_windowclose_retry': .5,
'after_setfocus_wait': .06,
'setfocus_timeout': 2,
'setfocus_retry': .1,
'after_setcursorpos_wait': .01,
'sendmessagetimeout_timeout': .01,
'after_tabselect_wait': .05,
'after_listviewselect_wait': .01,
'after_listviewcheck_wait': .001,
'listviewitemcontrol_timeout': 1.5,
'after_treeviewselect_wait': .1,
'after_toobarpressbutton_wait': .01,
'after_updownchange_wait': .1,
'after_movewindow_wait': 0,
'after_buttoncheck_wait': 0,
'after_comboboxselect_wait': 0.001,
'after_listboxselect_wait': 0,
'after_listboxfocuschange_wait': 0,
'after_editsetedittext_wait': 0,
'after_editselect_wait': 0.02,
'drag_n_drop_move_mouse_wait': 0.1,
'before_drag_wait': 0.2,
'before_drop_wait': 0.1,
'after_drag_n_drop_wait': 0.1,
'scroll_step_wait': 0.1,
'app_exit_timeout': 10.,
'app_exit_retry': .1,
}
assert(__default_timing['window_find_timeout'] >=
__default_timing['window_find_retry'] * 2)
_timings = __default_timing.copy()
_cur_speed = 1
def __getattribute__(self, attr):
"""Get the value for a particular timing"""
if attr in ['__dict__', '__members__', '__methods__', '__class__']:
return object.__getattribute__(self, attr)
if attr in dir(TimeConfig):
return object.__getattribute__(self, attr)
if attr in self.__default_timing:
return self._timings[attr]
else:
raise AttributeError("Unknown timing setting: {0}".format(attr))
def __setattr__(self, attr, value):
"""Set a particular timing"""
if attr == '_timings':
object.__setattr__(self, attr, value)
elif attr in self.__default_timing:
self._timings[attr] = value
else:
raise AttributeError("Unknown timing setting: {0}".format(attr))
def Fast(self):
"""Set fast timing values
Currently this changes the timing in the following ways:
timeouts = 1 second
waits = 0 seconds
retries = .001 seconds (minimum!)
(if existing times are faster then keep existing times)
"""
for setting in self.__default_timing:
# set timeouts to the min of the current speed or 1 second
if "_timeout" in setting:
self._timings[setting] = \
min(1, self._timings[setting])
if "_wait" in setting:
self._timings[setting] = self._timings[setting] / 2
elif setting.endswith("_retry"):
self._timings[setting] = 0.001
#self._timings['app_start_timeout'] = .5
def Slow(self):
"""Set slow timing values
Currently this changes the timing in the following ways:
timeouts = default timeouts * 10
waits = default waits * 3
retries = default retries * 3
(if existing times are slower then keep existing times)
"""
for setting in self.__default_timing:
if "_timeout" in setting:
self._timings[setting] = max(
self.__default_timing[setting] * 10,
self._timings[setting])
if "_wait" in setting:
self._timings[setting] = max(
self.__default_timing[setting] * 3,
self._timings[setting])
elif setting.endswith("_retry"):
self._timings[setting] = max(
self.__default_timing[setting] * 3,
self._timings[setting])
if self._timings[setting] < .2:
self._timings[setting] = .2
def Defaults(self):
"""Set all timings to the default time"""
self._timings = self.__default_timing.copy()
Timings = TimeConfig()
#=========================================================================
class TimeoutError(RuntimeError):
pass
#=========================================================================
if six.PY3:
_clock_func = time.perf_counter
else:
_clock_func = time.clock
def timestamp():
"""Get a precise timestamp"""
return _clock_func()
#=========================================================================
def always_wait_until(timeout,
retry_interval,
value=True,
op=operator.eq):
"""Decorator to call wait_until(...) every time for a decorated function/method"""
def wait_until_decorator(func):
"""Callable object that must be returned by the @always_wait_until decorator"""
@wraps(func)
def wrapper(*args, **kwargs):
"""pre-callback, target function call and post-callback"""
return wait_until(timeout, retry_interval,
func, value, op, *args, **kwargs)
return wrapper
return wait_until_decorator
#=========================================================================
def wait_until(timeout,
retry_interval,
func,
value=True,
op=operator.eq,
*args, **kwargs):
r"""
Wait until ``op(function(*args, **kwargs), value)`` is True or until timeout expires
* **timeout** how long the function will try the function
* **retry_interval** how long to wait between retries
* **func** the function that will be executed
* **value** the value to be compared against (defaults to True)
* **op** the comparison function (defaults to equality)\
* **args** optional arguments to be passed to func when called
* **kwargs** optional keyword arguments to be passed to func when called
Returns the return value of the function
If the operation times out then the return value of the the function
is in the 'function_value' attribute of the raised exception.
e.g. ::
try:
# wait a maximum of 10.5 seconds for the
# the objects item_count() method to return 10
# in increments of .5 of a second
wait_until(10.5, .5, self.item_count, 10)
except TimeoutError as e:
print("timed out")
"""
start = timestamp()
func_val = func(*args, **kwargs)
# while the function hasn't returned what we are waiting for
while not op(func_val, value):
# find out how much of the time is left
time_left = timeout - (timestamp() - start)
# if we have to wait some more
if time_left > 0:
# wait either the retry_interval or else the amount of
# time until the timeout expires (whichever is less)
time.sleep(min(retry_interval, time_left))
func_val = func(*args, **kwargs)
else:
err = TimeoutError("timed out")
err.function_value = func_val
raise err
return func_val
# Non PEP-8 alias
WaitUntil = deprecated(wait_until)
#=========================================================================
def always_wait_until_passes(timeout,
retry_interval,
exceptions=(Exception)):
"""Decorator to call wait_until_passes(...) every time for a decorated function/method"""
def wait_until_passes_decorator(func):
"""Callable object that must be returned by the @always_wait_until_passes decorator"""
@wraps(func)
def wrapper(*args, **kwargs):
"""pre-callback, target function call and post-callback"""
return wait_until_passes(timeout, retry_interval,
func, exceptions, *args, **kwargs)
return wrapper
return wait_until_passes_decorator
#=========================================================================
def wait_until_passes(timeout,
retry_interval,
func,
exceptions=(Exception),
*args, **kwargs):
"""
Wait until ``func(*args, **kwargs)`` does not raise one of the exceptions
* **timeout** how long the function will try the function
* **retry_interval** how long to wait between retries
* **func** the function that will be executed
* **exceptions** list of exceptions to test against (default: Exception)
* **args** optional arguments to be passed to func when called
* **kwargs** optional keyword arguments to be passed to func when called
Returns the return value of the function
If the operation times out then the original exception raised is in
the 'original_exception' attribute of the raised exception.
e.g. ::
try:
# wait a maximum of 10.5 seconds for the
# window to be found in increments of .5 of a second.
            # Print a message and re-raise the original exception if never found.
wait_until_passes(10.5, .5, self.Exists, (ElementNotFoundError))
except TimeoutError as e:
print("timed out")
            raise e
"""
start = timestamp()
# keep trying until the timeout is passed
while True:
try:
# Call the function with any arguments
func_val = func(*args, **kwargs)
# if no exception is raised then we are finished
break
# An exception was raised - so wait and try again
except exceptions as e:
# find out how much of the time is left
time_left = timeout - (timestamp() - start)
# if we have to wait some more
if time_left > 0:
# wait either the retry_interval or else the amount of
# time until the timeout expires (whichever is less)
time.sleep(min(retry_interval, time_left))
else:
# Raise a TimeoutError - and put the original exception
# inside it
err = TimeoutError()
err.original_exception = e
raise err
# return the function value
return func_val
# Non PEP-8 alias
WaitUntilPasses = deprecated(wait_until_passes)
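
#=========================================================================
# Illustrative sketch, not part of pywinauto: wrapping a flaky callable with
# wait_until_passes(). `read_status` stands for any function that may raise
# IOError until a resource becomes available; the Timings values are the
# module defaults defined above.
def _example_retry(read_status):
    return wait_until_passes(
        Timings.exists_timeout,   # total time to keep trying
        Timings.exists_retry,     # pause between attempts
        read_status,              # callable under test
        (IOError,))               # exceptions that trigger a retry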
| 32.780435 | 94 | 0.619007 | [
"MIT"
] | snakyhuman/auto-tests | auto1/venv/Lib/site-packages/pywinauto/timings.py | 15,079 | Python |
from corvus.structures import Handler, Exchange, Loop, Update
import corvutils.pyparsing as pp
import os, sys, subprocess, shutil #, resource
import re
from scipy.interpolate import CubicSpline
from scipy.integrate import quad
from scipy.signal import convolve
import numpy as np
# Debug: FDV
import pprint
pp_debug = pprint.PrettyPrinter(indent=4)
# Define dictionary of implemented calculations
implemented = {}
strlistkey = lambda L:','.join(sorted(L))
subs = lambda L:[{L[j] for j in range(len(L)) if 1<<j&k} for k in range(1,1<<len(L))]
#for s in subs(['cell_vectors', 'cell_struct_xyz_red', 'cell_scaling_iso', 'cell_scaling_abc', 'number_density']):
# key = strlistkey(s)
# autodesc = 'Get ' + ', '.join(s) + ' using cif2cell'
# cost = 10
# implemented[key] = {'type':'Exchange','out':list(s),'req':['cif_input'],
# 'desc':autodesc,'cost':cost}
implemented['mbxanes'] = {'type':'Exchange','out':['mbxanes'],'cost':0,
'req':['xanes_cfavg','spectralFunction'],'desc':'Calculate many-body xanes from xanes and spectral function.'}
#'req':['xanes','spectal_function'],'desc':'Calculate supercell from cif input.'}
class mbconv(Handler):
def __str__(self):
return 'mbconv Handler'
@staticmethod
def canProduce(output):
if isinstance(output, list) and output and isinstance(output[0], str):
return strlistkey(output) in implemented
elif isinstance(output, str):
return output in implemented
else:
raise TypeError('Output should be token or list of tokens')
@staticmethod
def requiredInputFor(output):
if isinstance(output, list) and output and isinstance(output[0], str):
unresolved = {o for o in output if not mbconv.canProduce(o)}
canProduce = (o for o in output if mbconv.canProduce(o))
additionalInput = (set(implemented[o]['req']) for o in canProduce)
return list(set.union(unresolved,*additionalInput))
elif isinstance(output, str):
if output in implemented:
return implemented[output]['req']
else:
return [output]
else:
raise TypeError('Output should be token or list of tokens')
@staticmethod
def cost(output):
if isinstance(output, list) and output and isinstance(output[0], str):
key = strlistkey(output)
elif isinstance(output, str):
key = output
else:
raise TypeError('Output should be token or list of tokens')
if key not in implemented:
raise LookupError('Corvus cannot currently produce ' + key + ' using FEFF')
return implemented[key]['cost']
@staticmethod
def sequenceFor(output,inp=None):
if isinstance(output, list) and output and isinstance(output[0], str):
key = strlistkey(output)
elif isinstance(output, str):
key = output
else:
raise TypeError('Output should be token of list of tokens')
if key not in implemented:
raise LookupError('Corvus cannot currently produce ' + key + ' using FEFF')
f = lambda subkey : implemented[key][subkey]
required = f('req')
# JJK - Need to add requirements of internal workflow here.
if 'mbconv' in list(inp.keys()):
required.extend()
        if f('type') == 'Exchange':
return Exchange(mbconv, f('req'), f('out'), cost=f('cost'), desc=f('desc'))
@staticmethod
def prep(config):
subdir = config['pathprefix'] + str(config['xcIndex']) + '_MBXANES'
xcDir = os.path.join(config['cwd'], subdir)
# Make new output directory if if doesn't exist
if not os.path.exists(xcDir):
os.mkdir(xcDir)
# Store current Exchange directory in configuration
config['xcDir'] = xcDir
#@staticmethod
#def setDefaults(input,target):
@staticmethod
def run(config, input, output):
# Loop over targets in output.
if 'mbxanes' in output:
# In future use file_reader handler to read in XANES and spectral function if already calculated.
w = np.array(input.get('xanes_cfavg')[0])
mu0= np.array(input.get('xanes_cfavg')[1])
wsf= np.flip(-1.0*np.array(input.get('spectralFunction')[0]))
sf = np.flip(np.array(input.get('spectralFunction')[1]))
# Interpolate both XANES and spectral function onto an even grid
#w, mu0 = np.loadtxt('xanes.dat',usecols = (0,1)).T
#wsf,sf = np.loadtxt('spfcn.dat',usecols = (0,1)).T
min_diff = np.amin(np.ediff1d(w))
min_diff = min(min_diff,np.amin(np.ediff1d(wsf)))
mu0_cs = CubicSpline(w,mu0)
spfcn_cs = CubicSpline(wsf,sf)
# Use larger of two ranges to specify range
w_terp = np.arange(w[0],w[-1],min_diff)
wsf_terp = np.arange(wsf[0],wsf[-1],min_diff)
mu0_terp = mu0_cs(w_terp)
spfcn_terp = spfcn_cs(wsf_terp)
mu_mb = convolve(mu0_terp,spfcn_terp,mode='full')*min_diff
# If extra broadening is requested, perform a convolution of that as well.
if 'mbconv.extra_broadening' in input:
gam = input['mbconv.extra_broadening'][0][0]
A_br = gam/np.pi*1.0/(wsf_terp**2 + gam**2)
mu_mb = np.convolve(mu_mb,A_br,mode='same')*min_diff
scale=w_terp[-1] - w_terp[0] + wsf_terp[-1] - wsf_terp[0]
first = w_terp[0] + wsf_terp[0]
w_terp = np.linspace(0.0,scale,mu_mb.size)
w_terp = w_terp + first
mu0_terp = mu0_cs(w_terp)
output['mbxanes'] = [w_terp,mu_mb]
np.savetxt('mbxanes.dat',np.array([w_terp, mu_mb, mu0_terp]).transpose())
@staticmethod
def cleanup(config):
pass
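
# --- Illustrative sketch, not part of the Corvus handler: the core of the
# many-body convolution performed in run(), mu_mb(w) = integral A(w') mu0(w - w') dw',
# for spectra already interpolated onto the same uniform grid spacing.
# Inputs are placeholders; np and convolve are the module-level imports above.
def example_mb_convolution(w, mu0, wsf, spfcn):
    dw = w[1] - w[0]  # uniform grid spacing assumed
    mu_mb = convolve(mu0, spfcn, mode='full') * dw
    span = (w[-1] - w[0]) + (wsf[-1] - wsf[0])
    w_mb = np.linspace(0.0, span, mu_mb.size) + (w[0] + wsf[0])
    return w_mb, mu_mb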
| 38.429487 | 134 | 0.604003 | [
"BSD-3-Clause"
] | times-software/Corvus | corvus/mbconv.py | 5,995 | Python |
# https://adventofcode.com/2020/day/14
import itertools
import re
SAMPLE_PATH = "../../input/2020-14-sample.txt"
INPUT_PATH = "../../input/2020-14-input.txt"
def get_data(filename):
with open(filename) as file:
data = file.read().split("\n\n")
data = [block.splitlines() for block in data]
if len(data) == 1:
return data[0]
return data
def part_1(program):
memory = {}
mask_to_0 = 0
mask_to_1 = 0
for line in program:
if line[:4] == "mask":
mask = line[7:]
mask_to_1 = int("".join(m if m == "1" else "0" for m in mask), 2)
mask_to_0 = int("".join(m if m == "0" else "1" for m in mask), 2)
else:
address, value = (int(x) for x in re.findall(r"(\d+)", line))
memory[address] = (value | mask_to_1) & mask_to_0
return sum(memory.values())
def part_2(program):
memory = {}
mask_to_1 = 0
mask_float = ""
n_floats = 0
for line in program:
if line[:4] == "mask":
mask = line[7:]
mask_to_1 = int("".join(m if m == "1" else "0" for m in mask), 2)
n_floats = mask.count("X")
mask_float = "".join("{}" if m == "X" else "0" for m in mask)
else:
address, value = (int(x) for x in re.findall(r"(\d+)", line))
address = address | mask_to_1
for bits in itertools.product("01", repeat=n_floats):
modified_mask = mask_float.format(*bits)
memory[address ^ int(modified_mask, 2)] = value
return sum(memory.values())
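
# Illustrative sketch, not needed for the solution: how part_2 expands one
# masked address into all concrete addresses. `address` is assumed to have
# already been OR-ed with the mask's 1-bits, as done in part_2().
def example_expand_floating(address, mask):
    n_floats = mask.count("X")
    mask_float = "".join("{}" if m == "X" else "0" for m in mask)
    for bits in itertools.product("01", repeat=n_floats):
        yield address ^ int(mask_float.format(*bits), 2)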
if __name__ == "__main__":
sample_data = get_data(SAMPLE_PATH)
assert part_1(sample_data[0]) == 165
assert part_2(sample_data[1]) == 208
challenge_data = get_data(INPUT_PATH)
print(part_1(challenge_data)) # 4297467072083
print(part_2(challenge_data)) # 5030603328768
| 29.453125 | 77 | 0.564987 | [
"MIT"
] | BastiHz/Advent_of_Code | python/2020/day14.py | 1,885 | Python |
import copy
from unittest import mock
import matplotlib
import pytest
from matplotlib import pyplot as plt
from matplotlib._pylab_helpers import Gcf
@pytest.fixture(autouse=True)
def mpl_test_settings(qt_module, mpl_test_settings):
"""
Ensure qt_module fixture is *first* fixture.
We override the `mpl_test_settings` fixture and depend on the `qt_module`
fixture first. It is very important that it is first, because it skips
tests when Qt is not available, and if not, then the main
`mpl_test_settings` fixture will try to switch backends before the skip can
be triggered.
"""
pass
@pytest.fixture
def qt_module(request):
backend, = request.node.get_closest_marker('backend').args
if backend == 'Qt4Agg':
try:
import PyQt4
# RuntimeError if PyQt5 already imported.
except (ImportError, RuntimeError):
try:
import PySide
except ImportError:
pytest.skip("Failed to import a Qt4 binding.")
elif backend == 'Qt5Agg':
try:
import PyQt5
# RuntimeError if PyQt4 already imported.
except (ImportError, RuntimeError):
try:
import PySide2
except ImportError:
pytest.skip("Failed to import a Qt5 binding.")
else:
raise ValueError('Backend marker has unknown value: ' + backend)
qt_compat = pytest.importorskip('matplotlib.backends.qt_compat')
QtCore = qt_compat.QtCore
if backend == 'Qt4Agg':
try:
py_qt_ver = int(QtCore.PYQT_VERSION_STR.split('.')[0])
except AttributeError:
py_qt_ver = QtCore.__version_info__[0]
if py_qt_ver != 4:
pytest.skip(reason='Qt4 is not available')
from matplotlib.backends.backend_qt4 import (
MODIFIER_KEYS, SUPER, ALT, CTRL, SHIFT)
elif backend == 'Qt5Agg':
from matplotlib.backends.backend_qt5 import (
MODIFIER_KEYS, SUPER, ALT, CTRL, SHIFT)
mods = {}
keys = {}
for name, index in zip(['Alt', 'Control', 'Shift', 'Super'],
[ALT, CTRL, SHIFT, SUPER]):
_, mod, key = MODIFIER_KEYS[index]
mods[name + 'Modifier'] = mod
keys[name + 'Key'] = key
return QtCore, mods, keys
@pytest.fixture
def qt_key(request):
QtCore, _, keys = request.getfixturevalue('qt_module')
if request.param.startswith('Key'):
return getattr(QtCore.Qt, request.param)
else:
return keys[request.param]
@pytest.fixture
def qt_mods(request):
QtCore, mods, _ = request.getfixturevalue('qt_module')
result = QtCore.Qt.NoModifier
for mod in request.param:
result |= mods[mod]
return result
@pytest.mark.parametrize('backend', [
# Note: the value is irrelevant; the important part is the marker.
pytest.param('Qt4Agg', marks=pytest.mark.backend('Qt4Agg')),
pytest.param('Qt5Agg', marks=pytest.mark.backend('Qt5Agg')),
])
def test_fig_close(backend):
# save the state of Gcf.figs
init_figs = copy.copy(Gcf.figs)
# make a figure using pyplot interface
fig = plt.figure()
# simulate user clicking the close button by reaching in
# and calling close on the underlying Qt object
fig.canvas.manager.window.close()
# assert that we have removed the reference to the FigureManager
# that got added by plt.figure()
assert init_figs == Gcf.figs
@pytest.mark.backend('Qt5Agg')
def test_fig_signals(qt_module):
# Create a figure
fig = plt.figure()
# Access QtCore
QtCore = qt_module[0]
# Access signals
import signal
event_loop_signal = None
# Callback to fire during event loop: save SIGINT handler, then exit
def fire_signal_and_quit():
# Save event loop signal
nonlocal event_loop_signal
event_loop_signal = signal.getsignal(signal.SIGINT)
# Request event loop exit
QtCore.QCoreApplication.exit()
# Timer to exit event loop
QtCore.QTimer.singleShot(0, fire_signal_and_quit)
# Save original SIGINT handler
original_signal = signal.getsignal(signal.SIGINT)
# Use our own SIGINT handler to be 100% sure this is working
def CustomHandler(signum, frame):
pass
signal.signal(signal.SIGINT, CustomHandler)
# mainloop() sets SIGINT, starts Qt event loop (which triggers timer and
# exits) and then mainloop() resets SIGINT
matplotlib.backends.backend_qt5._BackendQT5.mainloop()
# Assert: signal handler during loop execution is signal.SIG_DFL
assert event_loop_signal == signal.SIG_DFL
# Assert: current signal handler is the same as the one we set before
assert CustomHandler == signal.getsignal(signal.SIGINT)
# Reset SIGINT handler to what it was before the test
signal.signal(signal.SIGINT, original_signal)
@pytest.mark.parametrize(
'qt_key, qt_mods, answer',
[
('Key_A', ['ShiftModifier'], 'A'),
('Key_A', [], 'a'),
('Key_A', ['ControlModifier'], 'ctrl+a'),
('Key_Aacute', ['ShiftModifier'],
'\N{LATIN CAPITAL LETTER A WITH ACUTE}'),
('Key_Aacute', [],
'\N{LATIN SMALL LETTER A WITH ACUTE}'),
('ControlKey', ['AltModifier'], 'alt+control'),
('AltKey', ['ControlModifier'], 'ctrl+alt'),
('Key_Aacute', ['ControlModifier', 'AltModifier', 'SuperModifier'],
'ctrl+alt+super+\N{LATIN SMALL LETTER A WITH ACUTE}'),
('Key_Backspace', [], 'backspace'),
('Key_Backspace', ['ControlModifier'], 'ctrl+backspace'),
('Key_Play', [], None),
],
indirect=['qt_key', 'qt_mods'],
ids=[
'shift',
'lower',
'control',
'unicode_upper',
'unicode_lower',
'alt_control',
'control_alt',
'modifier_order',
'backspace',
'backspace_mod',
'non_unicode_key',
]
)
@pytest.mark.parametrize('backend', [
# Note: the value is irrelevant; the important part is the marker.
pytest.param('Qt4Agg', marks=pytest.mark.backend('Qt4Agg')),
pytest.param('Qt5Agg', marks=pytest.mark.backend('Qt5Agg')),
])
def test_correct_key(backend, qt_key, qt_mods, answer):
"""
Make a figure
Send a key_press_event event (using non-public, qtX backend specific api)
Catch the event
Assert sent and caught keys are the same
"""
qt_canvas = plt.figure().canvas
event = mock.Mock()
event.isAutoRepeat.return_value = False
event.key.return_value = qt_key
event.modifiers.return_value = qt_mods
def receive(event):
assert event.key == answer
qt_canvas.mpl_connect('key_press_event', receive)
qt_canvas.keyPressEvent(event)
@pytest.mark.backend('Qt5Agg')
def test_dpi_ratio_change():
"""
Make sure that if _dpi_ratio changes, the figure dpi changes but the
widget remains the same physical size.
"""
prop = 'matplotlib.backends.backend_qt5.FigureCanvasQT._dpi_ratio'
with mock.patch(prop, new_callable=mock.PropertyMock) as p:
p.return_value = 3
fig = plt.figure(figsize=(5, 2), dpi=120)
qt_canvas = fig.canvas
qt_canvas.show()
from matplotlib.backends.backend_qt5 import qApp
# Make sure the mocking worked
assert qt_canvas._dpi_ratio == 3
size = qt_canvas.size()
qt_canvas.manager.show()
qt_canvas.draw()
qApp.processEvents()
# The DPI and the renderer width/height change
assert fig.dpi == 360
assert qt_canvas.renderer.width == 1800
assert qt_canvas.renderer.height == 720
# The actual widget size and figure physical size don't change
assert size.width() == 600
assert size.height() == 240
# assert qt_canvas.get_width_height() == (600, 240)
# assert (fig.get_size_inches() == (5, 2)).all()
p.return_value = 2
assert qt_canvas._dpi_ratio == 2
qt_canvas.draw()
qApp.processEvents()
# this second processEvents is required to fully run the draw.
# On `update` we notice the DPI has changed and trigger a
# resize event to refresh, the second processEvents is
# required to process that and fully update the window sizes.
qApp.processEvents()
# The DPI and the renderer width/height change
# assert fig.dpi == 240
# assert qt_canvas.renderer.width == 1200
# assert qt_canvas.renderer.height == 480
# The actual widget size and figure physical size don't change
assert size.width() == 600
assert size.height() == 240
# assert qt_canvas.get_width_height() == (600, 240)
# assert (fig.get_size_inches() == (5, 2)).all()
@pytest.mark.backend('Qt5Agg')
def test_subplottool():
fig, ax = plt.subplots()
with mock.patch(
"matplotlib.backends.backend_qt5.SubplotToolQt.exec_",
lambda self: None):
fig.canvas.manager.toolbar.configure_subplots()
@pytest.mark.backend('Qt5Agg')
def test_figureoptions():
fig, ax = plt.subplots()
ax.plot([1, 2])
ax.imshow([[1]])
ax.scatter(range(3), range(3), c=range(3))
with mock.patch(
"matplotlib.backends.qt_editor._formlayout.FormDialog.exec_",
lambda self: None):
fig.canvas.manager.toolbar.edit_parameters()
| 31.069307 | 79 | 0.641066 | [
"MIT"
] | qiujiangkun/mplopengl | tests/test_backend_qt.py | 9,414 | Python |
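# Standalone sketch of the PropertyMock pattern used in test_dpi_ratio_change above;
# the Widget class here is a made-up stand-in for illustration, not a matplotlib API.
from unittest import mock

class Widget:
    @property
    def dpi_ratio(self):
        return 1

with mock.patch.object(Widget, "dpi_ratio", new_callable=mock.PropertyMock) as prop:
    prop.return_value = 3
    assert Widget().dpi_ratio == 3    # every instance now reports the mocked value
    prop.return_value = 2
    assert Widget().dpi_ratio == 2    # changing return_value takes effect immediately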
import os
from indexing.pathanalyzer import PathAnalyzer
from indexing.pathanalyzerstore import PathAnalyzerStore
class Indexer:
"""
Traverses the given directory using the DFS algorithm. Allows registering different rules for handling different
file types and calls the associated PathAnalyzers and Collectors indirectly for each type.
"""
####################################################################################################################
# Constructor.
####################################################################################################################
def __init__(self, max_depth=10):
"""
Initializes attributes and checks the maximum depth provided.
Parameters
----------
max_depth : int
The maximum depth to look in.
"""
### Validate parameters.
if max_depth < 1:
raise Exception('max_depth must be greater than or equal to 1.')
### Attributes from outside.
self._max_depth = max_depth
### Private attributes.
# A collection of analyzers which handle different file types.
self._analyzers = []
# The depth we are currently in.
self._current_depth = 0
# The list of directories to index.
self._rules = {}
####################################################################################################################
# Public methods.
####################################################################################################################
def add_rule(self, directory, policy):
"""
Registers a new directory to index. If the directory is already registered, the given policy is added to its existing analyzer store.
Parameters
----------
directory : str
The directory to be indexed.
policy : IndexerPolicy
A policy that applies to this directory.
"""
analyzer = self._create_analyzer(policy)
analyzer_store = self._create_analyzerstore(directory)
analyzer_store.add_analyzer(policy.extensions, analyzer)
def index(self):
"""
Initializes filters, initiates indexing and after the indexing process has finished, cleans filters.
"""
for analyzer in self._analyzers:
analyzer.init_filters()
for directory, analyzer_store in self._rules.items():
if os.path.exists(directory):
self._scan_directory(directory, analyzer_store)
for analyzer in self._analyzers:
analyzer.clean_filters()
####################################################################################################################
# Auxiliary methods.
####################################################################################################################
def _analyze_file(self, current_path, analyzer_store):
current_path_without_extension, current_extension = os.path.splitext(current_path)
analyzer = analyzer_store.find_analyzer(current_extension)
if analyzer is not None:
analyzer.analyze(current_path_without_extension, current_extension)
def _create_analyzer(self, policy):
analyzer = PathAnalyzer(policy)
self._analyzers.append(analyzer)
return analyzer
def _create_analyzerstore(self, directory):
if directory not in self._rules:
self._rules[directory] = PathAnalyzerStore()
return self._rules[directory]
def _enter(self, directory):
"""
Indicates for the analyzers that we entered into the given directory.
Parameters
----------
directory : str
The directory we entered.
"""
for analyzer in self._analyzers:
analyzer.enter(directory)
self._current_depth = self._current_depth + 1
def _leave(self):
"""
Indicates for the analyzers that we are leaving the last directory.
"""
for analyzer in self._analyzers:
analyzer.leave()
self._current_depth = self._current_depth - 1
def _scan_directory(self, path, analyzer_store):
"""
Does the real indexing. Iterates through the directory using DFS, and invokes the registered analyzers to
analyze and store the data.
Parameters
----------
path : str
The path to enumerate.
analyzer_store : PathAnalyzerStore
The PathAnalyzerStore to use.
"""
for current_file in os.listdir(path):
current_path = os.path.join(path, current_file)
if self._current_depth >= self._max_depth:
return
if os.path.isdir(current_path):
self._enter(current_file)
self._scan_directory(current_path, analyzer_store)
self._leave()
else:
self._analyze_file(current_path, analyzer_store)
| 32.496774 | 120 | 0.534247 | [
"MIT"
] | pgecsenyi/piepy | src/indexing/indexer.py | 5,037 | Python |
from win32com.propsys import propsys, pscon
print("propsys was imported (sorry - that is the extent of the tests,")
print("but see the shell folder_view demo, which uses this module)")
# that's all folks! | 51.75 | 72 | 0.753623 | [
"Apache-2.0"
] | Matchoc/python_env | python35/Lib/site-packages/win32comext/propsys/test/testpropsys.py | 207 | Python |
from __future__ import annotations
import logging
import os
import time
from functools import partial
from typing import Callable, Optional, Sequence, Union
import torch
from hivemind.averaging.control import AveragingStage, StepControl
from hivemind.compression import CompressionBase, NoCompression
from hivemind.dht import DHT
from hivemind.optim.grad_averager import GradientAverager
from hivemind.optim.grad_scaler import GradScaler
from hivemind.optim.progress_tracker import LocalTrainingProgress, ProgressTracker
from hivemind.optim.state_averager import (
LRSchedulerBase,
OptimizerFactory,
Parameters,
ParamGroups,
SchedulerFactory,
TorchOptimizer,
TrainingStateAverager,
)
from hivemind.utils import PerformanceEMA, get_dht_time, get_logger
logger = get_logger(__name__)
class Optimizer(torch.optim.Optimizer):
"""
hivemind.Optimizer wraps your regular PyTorch Optimizer for training collaboratively with peers.
By default, Optimizer is configured to be exactly **equivalent to synchronous training** with target_batch_size.
There are advanced options that make training semi-asynchronous (delay_optimizer_step and delay_grad_averaging)
or even fully asynchronous (use_local_updates=True).
:example: The Optimizer can be used as a drop-in replacement for a regular PyTorch Optimizer:
>>> model = transformers.AutoModel("albert-xxlarge-v2")
>>> dht = hivemind.DHT(initial_peers=INITIAL_PEERS, start=True)
>>> opt = hivemind.Optimizer(dht=dht, run_id="run_42", batch_size_per_step=4, target_batch_size=4096,
>>> params=model.parameters(), optimizer=lambda params: torch.optim.Adam(params))
>>> while True:
>>> loss = compute_loss_on_batch(model, batch_size=4)
>>> opt.zero_grad()
>>> loss.backward()
>>> opt.step() # <-- train collaboratively with any peers that use the same prefix (run_42)
By default, peers will perform the following steps:
* accumulate a minibatch of gradients towards the (global) target batch size, without updating parameters yet;
* after peers collectively accumulate target_batch_size, average gradients with peers and perform optimizer step;
* if your peer lags behind the rest of the swarm, it will download parameters and optimizer state from others;
Unlike regular training, your device may join midway through training, when other peers already made some progress.
For this reason, any learning rate schedulers, curriculum and other **time-dependent features should be based on**
``optimizer.local_epoch`` (and not the number of calls to opt.step). Otherwise, peers that joined training late
may end up having different learning rates. To do so automatically, specify ``scheduler=...`` parameter below.
:What is an epoch?: Optimizer uses the term ``epoch`` to describe intervals between synchronizations. One epoch
corresponds to processing a certain number of training samples (``target_batch_size``) in total across all peers.
Like in PyTorch LR Scheduler, **epoch does not necessarily correspond to a full pass over the training data.**
At the end of an epoch, peers perform synchronous actions such as averaging gradients for a global optimizer update,
updating the learning rate scheduler or simply averaging parameters (if using local updates).
The purpose of this is to ensure that changing the number of peers does not require changing hyperparameters.
For instance, if the number of peers doubles, they will run all-reduce more frequently to adjust for faster training.
:Configuration guide: This guide will help you set up your first collaborative training run. It covers the most
important basic options, but ignores features that require significant changes to the training code.
>>> dht = hivemind.DHT(initial_peers=INITIAL_PEERS, client_mode=IF_BEHIND_FIREWALL_OR_VERY_UNRELIABLE, start=True)
>>> opt = hivemind.Optimizer(
>>> dht=dht, run_id="a_unique_name_that_every_participant_will_see_when_training",
>>> batch_size_per_step=ACTUAL_BATCH_SIZE_OF_THIS_PEER, target_batch_size=LARGE_GLOBAL_BATCH,
>>> # ^--- Each global optimizer step will use gradients from 1x-1.1x of target_batch_size (due to latency);
>>> # It is recommended to train with very large batch sizes to reduce the % of time spent on communication.
>>>
>>> params=params, optimizer=lambda params: AnyPyTorchOptimizer(params, **hyperparams_for_target_batch_size),
>>> # tune learning rate for your target_batch_size. Here's a good reference: https://arxiv.org/abs/1904.00962
>>> scheduler=lambda opt: AnyPyTorchScheduler(opt, **hyperparams_for_target_batch_size),
>>> # scheduler.step will be called automatically each time when peers collectively accumulate target_batch_size
>>>
>>> offload_optimizer=True, # saves GPU memory, but increases RAM usage; Generally a good practice to use this.
>>> delay_grad_averaging=OPTIONAL, delay_optimizer_step=OPTIONAL, # train faster, but with 1 round of staleness;
>>> # setting both to True is equivalent to Delayed Parameter Updates (see https://arxiv.org/abs/2101.06840)
>>>
>>> grad_compression=hivemind.Float16Compression(), state_averaging_compression=hivemind.Float16Compression(),
>>> # ^-- it is usually fine to use pure 16-bit or even lower precision during communication with no precaution;
>>> # See hivemind/examples/albert for a working example of mixed 8/16-bit compression.
>>>
>>> matchmaking_time=15.0, # 3-5s for small local runs, 10-15s for training over the internet or with many peers
>>> averaging_timeout=60.0, # around 2x the actual time it takes to run all-reduce
>>> verbose=True # periodically report the training progress to the console (e.g. "Averaged with N peers")
>>> ) # and you're done!
:param dht: a running hivemind.DHT instance connected to other peers.
:param run_id: a unique identifier of this training run, used as a common prefix for all DHT keys.
**Note:** peers with the same run_id should *generally* train the same model and use compatible configurations.
Some options can be safely changed by individual peers: ``batch_size_per_step``, ``client_mode``, ``auxiliary``,
``reuse_grad_buffers``, ``offload_optimizer``, and ``verbose``. In some cases, other options may also be tuned
individually by each peer, but they should be changed with caution to avoid deadlocks or convergence issues.
:param target_batch_size: global batch size that must be accumulated before the swarm transitions to the next epoch.
The actual batch may be *slightly* larger due to asynchrony (e.g. peers submit more gradients in the last second).
:param batch_size_per_step: you should accumulate gradients over this many samples between calls to optimizer.step.
:param params: parameters or param groups for the optimizer; required if optimizer is a callable(params).
:param optimizer: a callable(parameters) -> pytorch.optim.Optimizer or a pre-initialized PyTorch optimizer.
**Note:** some advanced options like offload_optimizer, delay_optimizer_step, or delay_grad_averaging require
the callable form and will not work if hivemind.Optimizer is created with a pre-existing PyTorch Optimizer.
:param scheduler: callable(optimizer) -> PyTorch LRScheduler or a pre-initialized PyTorch scheduler.
The learning rate scheduler will adjust learning rate based on global epoch, not the number of
local calls to optimizer.step; this is required to keep different peers synchronized.
:param matchmaking_time: when looking for a group, wait for peers to join for up to this many seconds.
Increase if you see "averaged gradients with N peers" where N is below 0.9x the real size on >=25% of epochs.
When training with a low-latency network, decreasing matchmaking_time allows training with smaller batch sizes.
:param averaging_timeout: if an averaging step hangs for this long, it will be cancelled automatically.
Increase averaging_timeout if you see "Proceeding with local gradients" at least 25% of the time.
Do not set this timeout too high, as it may cause your optimizer to hang after some types of network errors.
:param allreduce_timeout: timeout for a single attempt to run all-reduce, default: equal to averaging_timeout.
:param load_state_timeout: wait for at most this many seconds before giving up on load_state_from_peers.
:param reuse_grad_buffers: if True, use model's .grad buffers for gradient accumulation.
This is more memory efficient, but it requires that the user does *NOT* call model/opt zero_grad at all
:param offload_optimizer: offload the optimizer to host memory, saving GPU memory for parameters and gradients
:param delay_optimizer_step: run optimizer in background, apply results in future .step; requires offload_optimizer
:param delay_grad_averaging: average gradients in background; requires offload_optimizer and delay_optimizer_step
:param delay_state_averaging: if enabled (default), average parameters and extra tensors in a background thread;
if set to False, average parameters synchronously within the corresponding hivemind.Optimizer.step call.
:param average_state_every: average state (parameters, chosen opt tensors) with peers every this many **epochs**.
Increasing this value reduces the communication overhead, but can cause parameters to diverge if too large.
The maximal average_state_every=num_epochs depends on how often peers diverge from each other. If peers
hardly ever skip averaging rounds, they can average state less frequently. In turn, network failures, lossy
gradient compression and local_updates cause parameters to diverge faster and require more frequent averaging.
:param use_local_updates: if enabled, peers will update parameters on each .step using local gradients;
if not enabled (default), accumulate gradients to target_batch_size, and then call .step with averaged gradients.
Even if use_local_updates=True, learning rate scheduler will still be called once per target_batch_size.
:param client_mode: if True, this peer will not accept incoming connections (firewall-compatible mode)
:param auxiliary: if True, optimizer.step will only assist other peers in averaging (for cpu-only workers)
:param grad_compression: compression strategy used for averaging gradients, default = no compression
:param state_averaging_compression: compression for averaging params and state tensors, default = no compression
:param load_state_compression: compression strategy for loading state from peers, default = no compression
:param average_opt_statistics: names of optimizer statistics from state dict that should be averaged with peers
:param extra_tensors: if specified, these extra tensors will also be averaged and shared in load_state_from_peers.
:param averager_opts: additional keyword arguments forwarded to both GradientAverager and TrainingStateAverager
:param tracker_opts: additional keyword arguments forwarded to ProgressTracker
:param performance_ema_alpha: moving average alpha in ProgressTracker, TrainingStateAverager and Optimizer
:param verbose: if True, report internal events such as accumulating gradients and running background tasks
:note: in large-scale training, peers will inevitably fail and you will see error messages. hivemind.Optimizer
is designed to recover from such failures, but will sometimes need a minute or two to re-adjust.
"""
def __init__(
self,
*,
dht: DHT,
run_id: str,
target_batch_size: int,
batch_size_per_step: Optional[int] = None,
optimizer: Union[TorchOptimizer, OptimizerFactory],
params: Optional[Union[Parameters, ParamGroups]] = None,
scheduler: Optional[Union[LRSchedulerBase, SchedulerFactory]] = None,
matchmaking_time: Optional[float] = 15.0,
averaging_timeout: Optional[float] = 60.0,
allreduce_timeout: Optional[float] = None,
next_chunk_timeout: Optional[float] = None,
load_state_timeout: float = 600.0,
reuse_grad_buffers: bool = False,
offload_optimizer: Optional[bool] = None,
delay_optimizer_step: Optional[bool] = None,
delay_grad_averaging: bool = False,
delay_state_averaging: bool = True,
average_state_every: int = 1,
use_local_updates: bool = False,
client_mode: bool = None,
auxiliary: bool = False,
grad_compression: CompressionBase = NoCompression(),
state_averaging_compression: CompressionBase = NoCompression(),
load_state_compression: CompressionBase = NoCompression(),
average_opt_statistics: Sequence[str] = (),
extra_tensors: Sequence[torch.Tensor] = (),
averager_opts: Optional[dict] = None,
tracker_opts: Optional[dict] = None,
performance_ema_alpha: float = 0.1,
shutdown_timeout: float = 5,
verbose: bool = False,
):
self._parent_pid = os.getpid()
client_mode = client_mode if client_mode is not None else dht.client_mode
delay_optimizer_step = delay_optimizer_step if delay_optimizer_step is not None else delay_grad_averaging
offload_optimizer = offload_optimizer if offload_optimizer is not None else (params is not None)
allreduce_timeout = allreduce_timeout if allreduce_timeout is not None else averaging_timeout
next_chunk_timeout = next_chunk_timeout if next_chunk_timeout is not None else matchmaking_time
assert not delay_grad_averaging or delay_optimizer_step, "delay_grad_averaging requires delay_optimizer_step"
assert not (client_mode and auxiliary), "Client-mode peers cannot serve as auxiliaries"
assert not auxiliary or batch_size_per_step is None, "Auxiliary peers should not accumulate batches"
if callable(optimizer) and params is not None:
if scheduler is not None and (not callable(scheduler) or isinstance(scheduler, LRSchedulerBase)):
raise ValueError("For this mode, please provide scheduler factory: callable(optimizer) -> scheduler")
elif all(hasattr(optimizer, attr) for attr in ("param_groups", "step", "zero_grad")):
if offload_optimizer or delay_optimizer_step or delay_grad_averaging:
raise ValueError(
"To enable offload_optimizer or delayed updates, please initialize Optimizer as "
"hivemind.Optimizer(..., params=params, optimizer=lambda params: create_opt(params)"
)
else:
raise ValueError(
"Please initialize the optimizer in one of the following two ways:\n"
"(A) hivemind.Optimizer(..., params=params, optimizer=lambda params: create_opt(params)\n"
"(B) hivemind.Optimizer(..., optimizer=pre_initialize_optimizer)"
)
if use_local_updates:
assert not reuse_grad_buffers, "if local_updates is True, gradients will not be accumulated"
assert not delay_grad_averaging, "if local_updates is True, gradients will not be averaged"
self.dht, self.run_id, self.client_mode, self.auxiliary = dht, run_id, client_mode, auxiliary
self.batch_size_per_step, self.target_batch_size = batch_size_per_step, target_batch_size
self.delay_state_averaging, self.average_state_every = delay_state_averaging, average_state_every
self.matchmaking_time, self.offload_optimizer = matchmaking_time, offload_optimizer
self.delay_grad_averaging, self.delay_optimizer_step = delay_grad_averaging, delay_optimizer_step
self.averaging_timeout, self.allreduce_timeout = averaging_timeout, allreduce_timeout
self.load_state_timeout, self.shutdown_timeout = load_state_timeout, shutdown_timeout
self.next_chunk_timeout = next_chunk_timeout
self.status_loglevel = logging.INFO if verbose else logging.DEBUG
self.scheduled_grads: Optional[StepControl] = None
self.scheduled_state: Optional[StepControl] = None
self.tracker = self._make_progress_tracker(
target_batch_size, performance_ema_alpha=performance_ema_alpha, **tracker_opts or {}
)
self.state_averager = self._make_state_averager(
optimizer=optimizer,
params=params,
scheduler=scheduler,
delta_rule_averaging=use_local_updates and self.delay_state_averaging,
compression=state_averaging_compression,
state_compression=load_state_compression,
average_opt_statistics=average_opt_statistics,
performance_ema_alpha=performance_ema_alpha,
extra_tensors=extra_tensors,
**averager_opts or {},
)
if not use_local_updates:
self.grad_averager = self._make_gradient_averager(
reuse_grad_buffers=reuse_grad_buffers, compression=grad_compression, **averager_opts or {}
)
else:
self.grad_averager = None
self._should_check_synchronization_on_update = True  # used in self._should_load_state_from_peers
self._schema_hash = self._compute_schema_hash()
self.delay_before_state_averaging = PerformanceEMA(alpha=performance_ema_alpha)
# measures the average time from the beginning of self._update_global_epoch to the call to state_averager
# used for pre-scheduling the averaging round in state_averager
self._step_supports_amp_scaling = reuse_grad_buffers
# note: the line above is used by pytorch AMP GradScaler to enable custom behavior needed when reusing gradient
# buffers over multiple steps (to avoid repeated unscaling). Without reuse_grad_buffers, this is not needed.
def _make_state_averager(self, **kwargs) -> TrainingStateAverager:
return TrainingStateAverager(
dht=self.dht,
prefix=f"{self.run_id}_state_averager",
min_matchmaking_time=self.matchmaking_time,
allreduce_timeout=self.allreduce_timeout,
shutdown_timeout=self.shutdown_timeout,
offload_optimizer=self.offload_optimizer,
custom_gradients=self.offload_optimizer,
status_loglevel=self.status_loglevel,
next_chunk_timeout=self.next_chunk_timeout,
client_mode=self.client_mode,
auxiliary=self.auxiliary,
start=True,
**kwargs,
)
def _make_gradient_averager(self, **kwargs) -> GradientAverager:
assert hasattr(self, "state_averager"), "must initialize state averager first"
grad_averager = GradientAverager(
dht=self.dht,
prefix=f"{self.run_id}_grad_averager",
parameters=self.state_averager.main_parameters,
min_matchmaking_time=self.matchmaking_time,
allreduce_timeout=self.allreduce_timeout,
shutdown_timeout=self.shutdown_timeout,
next_chunk_timeout=self.next_chunk_timeout,
client_mode=self.client_mode,
auxiliary=self.auxiliary,
start=True,
**kwargs,
)
if self.offload_optimizer:
optimized_param_groups = self.state_averager.optimizer.param_groups
optimized_parameters = [param for group in optimized_param_groups for param in group["params"]]
with grad_averager.get_tensors() as averaged_gradients:
assert len(averaged_gradients) == len(optimized_parameters)
for opt_param, averaged_grad in zip(optimized_parameters, averaged_gradients):
opt_param.grad = averaged_grad
return grad_averager
def _make_progress_tracker(self, target_batch_size: int, **kwargs) -> ProgressTracker:
return ProgressTracker(
dht=self.dht,
prefix=self.run_id,
target_batch_size=target_batch_size,
client_mode=self.client_mode,
status_loglevel=self.status_loglevel,
start=True,
**kwargs,
)
def _compute_schema_hash(self) -> int:
optimized_param_groups = self.state_averager.optimizer.param_groups
optimized_parameters = [param for group in optimized_param_groups for param in group["params"]]
param_shapes = tuple(tuple(param.shape) for param in optimized_parameters)
# offloaded optimizer requires that gradient tensors are reused between iterations
grad_ids = tuple(id(param.grad) for param in optimized_parameters) if self.offload_optimizer else None
return hash((grad_ids, param_shapes))
def is_alive(self) -> bool:
return self.state_averager.is_alive()
@property
def local_epoch(self) -> int:
"""
This worker's current epoch, kept synchronized with peers. If peer's local_epoch lags behind others, it will
automatically re-synchronize by downloading state from another peer.
An epoch corresponds to accumulating target_batch_size across all active devices.
"""
return self.state_averager.local_epoch
@property
def local_progress(self) -> LocalTrainingProgress:
return self.tracker.local_progress
@property
def use_local_updates(self) -> bool:
return self.grad_averager is None
@property
def use_gradient_averaging(self) -> bool:
return self.grad_averager is not None
def step(
self,
closure: Optional[Callable[[], torch.Tensor]] = None,
batch_size: Optional[int] = None,
grad_scaler: Optional[GradScaler] = None,
):
"""
Update training progress after accumulating another local batch size. Depending on the configuration, this will
report progress to peers, run global or local optimizer step, average parameters or schedule background tasks.
:param closure: A closure that reevaluates the model and returns the loss.
:param batch_size: optional override for batch_size_per_step from init.
:param grad_scaler: if amp is enabled, this **must** be a hivemind-aware gradient scaler.
:note: this .step is different from normal pytorch optimizers in several key ways. See __init__ for details.
"""
if grad_scaler is not None and not isinstance(grad_scaler, GradScaler):
raise ValueError("hivemind.Optimizer requires a hivemind-aware gradient scaler (hivemind.GradScaler)")
if self.batch_size_per_step is None and batch_size is None and not self.auxiliary:
raise ValueError("Please either set batch_size_per_step parameter at init or when calling .step")
if self.auxiliary and (closure is not None or batch_size is not None or grad_scaler is not None):
raise ValueError("Auxiliary peers should not have batch size, run closures, or use grad_scaler")
batch_size = batch_size if batch_size is not None else self.batch_size_per_step
# if delayed updates finished before step, apply these updates; otherwise do nothing
self.state_averager.step(apply_delayed_updates=True)
loss = None
if closure is not None:
with torch.enable_grad():
loss = closure()
if not self.auxiliary and self._should_load_state_from_peers():
logger.log(self.status_loglevel, "Peer is out of sync")
self.load_state_from_peers()
return loss # local gradients were computed with out-of-sync parameters, must start over
if self.use_gradient_averaging:
# accumulate gradients toward target batch size, then aggregate with peers and run optimizer
if not self.auxiliary:
grads_are_valid = self._check_and_accumulate_gradients(batch_size, grad_scaler)
if not grads_are_valid:
return loss # local gradients were reset due to overflow, must start over
self._maybe_schedule_gradient_averaging()
self._maybe_schedule_state_averaging()
else:
# use_local_updates=True: update parameters on every step independently of other peers
if not self.auxiliary:
if grad_scaler is not None:
with grad_scaler.running_global_step():
assert grad_scaler.unscale_(self)
new_samples_accumulated = self.tracker.local_progress.samples_accumulated + batch_size
self.tracker.report_local_progress(self.local_epoch, new_samples_accumulated)
self._maybe_schedule_state_averaging()
self.state_averager.step(
increment_epoch=False,
optimizer_step=True,
delay_optimizer_step=self.delay_optimizer_step,
grad_scaler=grad_scaler,
)
if self.tracker.ready_to_update_epoch:
self._update_global_epoch(grad_scaler)
return loss
def _update_global_epoch(self, grad_scaler: Optional[GradScaler]) -> None:
"""Depending on the configuration: aggregate gradients and/or parameters, perform global optimizer step"""
assert self._schema_hash == self._compute_schema_hash(), "parameters or gradients changed during iteration"
_epoch_start_time = time.perf_counter()
with self.tracker.pause_updates():
wait_for_trigger = None
if self.use_gradient_averaging:
logger.log(self.status_loglevel, f"Beginning optimizer step #{self.local_epoch}")
if self.delay_optimizer_step:
self.state_averager.step(wait_for_delayed_updates=True)
began_averaging_gradients = self._begin_averaging_gradients(grad_scaler)
if not began_averaging_gradients:
# failed to start gradient averaging due to an internal error
self.grad_averager.load_accumulators_into_averager_()
elif self.delay_grad_averaging:
# if using delayed grad averaging, send this to state_averager as a pre-condition for optimizer step
wait_for_trigger = partial(self._average_gradients_and_load_into_optimizer, self.scheduled_grads)
else:
# delay_grad_averaging=False, average gradients immediately
self._average_gradients_and_load_into_optimizer(self.scheduled_grads)
next_epoch = max(self.local_epoch + 1, self.tracker.global_epoch)
swarm_not_empty = self.tracker.global_progress.num_peers > 1
should_perform_optimizer_step = not self.auxiliary and not self.use_local_updates
should_average_state = (
swarm_not_empty
and next_epoch % self.average_state_every == 0
and not self.state_averager.averaging_in_progress
)
if should_average_state and self.scheduled_state is not None:
if self.scheduled_state.triggered or self.scheduled_state.done():
logger.log(
self.status_loglevel,
f"Not using pre-scheduled group for state averaging because it"
f"was already used elsewhere: {self.scheduled_state}",
)
self.scheduled_state = None
self.delay_before_state_averaging.update(task_size=1, interval=time.perf_counter() - _epoch_start_time)
self.state_averager.step(
increment_epoch=True,
wait_for_trigger=wait_for_trigger,
optimizer_step=should_perform_optimizer_step,
delay_optimizer_step=self.delay_optimizer_step and should_perform_optimizer_step,
grad_scaler=grad_scaler,
averaging_round=should_average_state,
delay_averaging=self.delay_state_averaging and not self.auxiliary,
averaging_control=self.scheduled_state if should_average_state else None,
averaging_opts=dict(timeout=self.averaging_timeout) if should_average_state else None,
)
if not should_average_state and self.scheduled_state is not None and not self.scheduled_state.done():
self.scheduled_state.cancel()
self.scheduled_state = None
self.tracker.update_epoch(new_epoch=self.state_averager.local_epoch)
self._should_check_synchronization_on_update = True
# the above line ensures that peers check for *strict* synchronization once per epoch
if not self.client_mode:
self.state_averager.state_sharing_priority = self.local_epoch
if self.use_gradient_averaging and not self.auxiliary:
self.grad_averager.reset_accumulated_grads_()
if not self.client_mode:
self.grad_averager.state_sharing_priority = self.local_epoch
logger.log(self.status_loglevel, f"Transitioning to epoch {self.local_epoch}")
def _begin_averaging_gradients(self, grad_scaler: Optional[GradScaler]) -> bool:
"""Begin an all-reduce round to average gradients; return True if succeeded, False if failed"""
if grad_scaler is not None:
with grad_scaler.running_global_step():
assert grad_scaler.unscale_(self)
began_averaging_gradients = False
if self.scheduled_grads is not None and (self.scheduled_grads.triggered or self.scheduled_grads.done()):
logger.log(
self.status_loglevel,
f"Not using pre-scheduled group for state averaging because it"
f"was already used elsewhere: {self.scheduled_state}",
)
self.scheduled_grads = None
elif self.tracker.global_progress.num_peers > 1:
try:
self.scheduled_grads = self.grad_averager.step(
control=self.scheduled_grads, reset_accumulators=True, wait=False
)
began_averaging_gradients = True
except BaseException as e:
logger.exception(e)
if not began_averaging_gradients and self.scheduled_grads is not None and not self.scheduled_grads.done():
if self.tracker.global_progress.num_peers > 1:
logger.log(self.status_loglevel, f"Tagging along for a pre-scheduled gradient averaging round")
self._tag_along_with_zero_weight(self.scheduled_grads)
else:
logger.log(self.status_loglevel, f"Skipping pre-scheduled averaging round: there are no other peers")
self._load_local_gradients_into_optimizer()
self.scheduled_grads.cancel()
self.scheduled_grads = None
return began_averaging_gradients
def _check_and_accumulate_gradients(self, batch_size: int, grad_scaler: Optional[GradScaler]) -> bool:
"""Check if gradients are valid, accumulate and return True; otherwise, reset and return False"""
assert not self.use_local_updates and not self.auxiliary
if grad_scaler is not None and not grad_scaler.are_grads_finite(self):
logger.log(self.status_loglevel, "Encountered incorrect value in fp16 grads, resetting local gradients")
self.tracker.report_local_progress(self.local_epoch, samples_accumulated=0)
self.grad_averager.reset_accumulated_grads_()
return False
self.grad_averager.accumulate_grads_(batch_size)
self.tracker.report_local_progress(self.local_epoch, self.grad_averager.local_samples_accumulated)
return True
def _maybe_schedule_gradient_averaging(self) -> None:
"""If next epoch is coming soon, schedule the next gradient averaging round at the estimated end of epoch"""
assert self.use_gradient_averaging
if self.tracker.estimated_next_update_time - get_dht_time() <= self.matchmaking_time:
if self.scheduled_grads is None or self.scheduled_grads.triggered or self.scheduled_grads.done():
eta_seconds = self.tracker.estimated_next_update_time - get_dht_time()
eta_seconds = max(eta_seconds, self.grad_averager.matchmaking_kwargs["min_matchmaking_time"])
logger.log(self.status_loglevel, f"Pre-scheduling gradient averaging round in {eta_seconds:.2f} sec")
self.scheduled_grads = self.grad_averager.schedule_step(timeout=self.averaging_timeout)
def _maybe_schedule_state_averaging(self) -> None:
"""If next epoch is coming soon, schedule the next state averaging at estimated parameter averaging start"""
next_epoch = max(self.local_epoch + 1, self.tracker.global_epoch)
if next_epoch % self.average_state_every != 0:
return # averaging is not performed at this epoch
if self.state_averager.averaging_in_progress:
return # previous run is still in progress
if self.delay_before_state_averaging.num_updates == 0:
return # not enough data to accurately pre-schedule
estimated_time = self.tracker.estimated_next_update_time
estimated_time += self.delay_before_state_averaging.ema_seconds_per_sample
estimated_time += self.state_averager.delay_before_averaging.ema_seconds_per_sample
eta_seconds_to_averaging = estimated_time - get_dht_time()
if eta_seconds_to_averaging <= self.matchmaking_time:
if self.scheduled_state is None or self.scheduled_state.triggered or self.scheduled_state.done():
min_matchmaking_time = self.state_averager.matchmaking_kwargs["min_matchmaking_time"]
actual_seconds = max(eta_seconds_to_averaging, min_matchmaking_time)
logger.log(self.status_loglevel, f"Pre-scheduling state averaging round in {actual_seconds:.2f} sec")
self.scheduled_state = self.state_averager.schedule_step(
gather=next_epoch, timeout=self.averaging_timeout
)
def _average_gradients_and_load_into_optimizer(self, maybe_step_control: Optional[StepControl]):
"""Run gradient averaging; on success, feed averaged gradients into optimizer; else, use local gradients"""
assert self.use_gradient_averaging and (maybe_step_control is None or maybe_step_control.triggered)
averaged_gradients = False
try:
if maybe_step_control is not None:
group_info = maybe_step_control.result(self.averaging_timeout)
logger.log(self.status_loglevel, f"Averaged gradients with {len(group_info)} peers")
self._load_averaged_gradients_into_optimizer_()
averaged_gradients = True
else:
logger.log(self.status_loglevel, f"Skipped averaging: there are no other peers")
except BaseException as e:
logger.log(self.status_loglevel, f"Averaging gradients failed with {repr(e)}")
if not averaged_gradients:
self._load_local_gradients_into_optimizer()
def _load_averaged_gradients_into_optimizer_(self):
"""If required, load averaged gradients into optimizer; otherwise simply notify grad averager"""
assert self.use_gradient_averaging
if self.offload_optimizer:
pass # averaged gradients are already baked into optimizer, see _make_gradient_averager
else:
# copy averaged gradients into optimizer .grad buffers
optimized_param_groups = self.state_averager.optimizer.param_groups
optimized_parameters = [param for group in optimized_param_groups for param in group["params"]]
with torch.no_grad(), self.grad_averager.get_tensors() as averaged_gradients:
assert len(averaged_gradients) == len(optimized_parameters)
for opt_param, averaged_grad in zip(optimized_parameters, averaged_gradients):
opt_param.grad.copy_(averaged_grad, non_blocking=True)
self.grad_averager.notify_used_averaged_gradients()
def _load_local_gradients_into_optimizer(self):
"""Fallback to using local gradients in the optimizer (instead of averaged gradients)"""
logger.log(self.status_loglevel, f"Proceeding with local gradients")
self.grad_averager.load_accumulators_into_averager_()
# note: we load gradients into grad_averager even though there is only one peer because of two reasons:
# - if offload_optimizer, then we must load gradients onto the CPU gradient buffers used by the optimizer
# - if not offload_optimizer, we must un-scale gradients (divide them by the number of accumulation steps)
self._load_averaged_gradients_into_optimizer_()
def zero_grad(self, set_to_none: bool = False):
"""Reset gradients from model. If reuse_grad_buffers=True, this will raise an error."""
if self.use_gradient_averaging and self.grad_averager.reuse_grad_buffers:
raise ValueError(
f"When running {self.__class__.__name__} with reuse_grad_buffers=True, user should never "
f"call zero_grad manually. Gradients will be refreshed internally"
)
for param_group in self.param_groups:
for param in param_group["params"]:
if param.grad is None:
pass
elif set_to_none:
param.grad = None
else:
param.grad.zero_()
def _should_load_state_from_peers(self) -> bool:
"""
If true, peer will discard local progress and attempt to download state from peers.
This method allows peer to continue training in two cases:
- peer is on the same epoch as other collaborators - keep training normally
- peer was on the same epoch and accumulated some grads, but some collaborators
have just transitioned to the next epoch - this peer should also transition.
:note: The latter case occurs due to the lack of network synchrony: the first peer that
detects enough samples will transition to the next step and start counting samples anew.
Some other peers may take time before they check with DHT and observe that
- the global epoch is technically one epoch ahead of the current one and
- the remaining (non-transitioned) peers no longer have target_batch_size between them
If this is the case, peer should transition to the next epoch and does *not* need to re-load state.
"""
if self._should_check_synchronization_on_update and self.tracker.fetched_global_progress_this_epoch.is_set():
self._should_check_synchronization_on_update = False
return self.local_epoch != self.tracker.global_epoch # require exact synchronization once per step
return self.local_epoch < self.tracker.global_epoch - 1 # catch up if a peer just switched to next epoch
def is_synchronized_with_peers(self) -> bool:
"""Checks whether the current peer is up-to-date with others in terms of the epoch (step) number."""
return self.local_epoch >= self.tracker.global_epoch - 1
def load_state_from_peers(self, **kwargs):
"""
Attempt to load the newest collaboration state from other peers within the same run_id.
If successful, this will update parameters, optimizer state, local epoch and learning rate schedule in-place.
"""
# note: we tag along for the next all-reduce because the run may have already started and cancelling it
# will cause peers to restart matchmaking and may stall the entire collaboration for a few seconds.
if self.scheduled_grads is not None and not self.scheduled_grads.done():
self._tag_along_with_zero_weight(self.scheduled_grads)
self.scheduled_grads = None
self.state_averager.step(wait_for_delayed_updates=True)
with self.tracker.pause_updates():
while True:
try:
self.state_averager.load_state_from_peers(timeout=self.load_state_timeout, **kwargs)
break
except KeyboardInterrupt:
raise
except BaseException as e:
logger.exception(f"Failed to load state from peers: {e}, retrying ...")
continue
if self.tracker.global_epoch - 1 <= self.local_epoch < self.tracker.global_epoch:
logger.log(self.status_loglevel, f"Catching up with collaboration step {self.tracker.global_epoch}")
self.state_averager.local_epoch = self.tracker.global_epoch
self.tracker.report_local_progress(local_epoch=self.local_epoch, samples_accumulated=0)
if not self.client_mode:
self.state_averager.state_sharing_priority = self.local_epoch
if self.use_gradient_averaging:
self.grad_averager.reset_accumulated_grads_()
if not self.client_mode:
self.grad_averager.state_sharing_priority = self.local_epoch
def state_dict(self) -> dict:
state_dict = self.state_averager.optimizer.state_dict()
state_dict["state"]["local_epoch"] = self.local_epoch
return state_dict
def load_state_dict(self, state_dict: dict):
if "local_epoch" in state_dict["state"]:
self.state_averager.local_epoch = state_dict["state"].pop("local_epoch")
return self.state_averager.optimizer.load_state_dict(state_dict)
@property
def state(self):
return dict(self.state_averager.optimizer.state, local_epoch=self.local_epoch)
@property
def opt(self) -> TorchOptimizer:
return self.state_averager.optimizer
@property
def param_groups(self) -> ParamGroups:
next_index = 0
param_groups = tuple(dict(param_group) for param_group in self.state_averager.optimizer.param_groups)
for param_group in param_groups:
num_params = len(param_group["params"])
main_params_for_group = self.state_averager.main_parameters[next_index : next_index + num_params]
param_group["params"] = main_params_for_group
next_index += num_params
assert next_index == len(self.state_averager.main_parameters)
return param_groups
def add_param_group(self, param_group: dict) -> None:
raise ValueError(
f"{self.__class__.__name__} does not support calling add_param_group after creation. "
f"Please provide all parameter groups at init"
)
def __repr__(self):
return f"{self.__class__.__name__}(prefix={self.run_id}, epoch={self.local_epoch})"
def _tag_along_with_zero_weight(self, control: StepControl):
"""Wait for a running averaging round to finish with zero weight."""
if not control.triggered:
control.weight = 0
control.allow_allreduce()
if not control.done():
try:
control.result(self.averaging_timeout)
except BaseException as e:
logger.exception(e)
if not control.done():
control.cancel()
def shutdown(self):
logger.log(self.status_loglevel, "Sending goodbye to peers...")
self.tracker.shutdown(self.shutdown_timeout)
self.state_averager.step(wait_for_delayed_updates=True)
for scheduled_round in self.scheduled_grads, self.scheduled_state:
if scheduled_round is not None:
if scheduled_round.stage == AveragingStage.LOOKING_FOR_GROUP:
scheduled_round.cancel()
else:
self._tag_along_with_zero_weight(scheduled_round)
logger.log(self.status_loglevel, "Shutting down averagers...")
self.state_averager.shutdown()
if self.use_gradient_averaging:
self.grad_averager.shutdown()
logger.log(self.status_loglevel, f"{self.__class__.__name__} is shut down")
def __del__(self):
if self._parent_pid == os.getpid() and self.is_alive():
self.shutdown()
| 57.198718 | 123 | 0.701984 | [
"MIT"
] | MeshchaninovViacheslav/hivemind | hivemind/optim/optimizer.py | 44,615 | Python |
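# Back-of-the-envelope sketch, not hivemind code: one "epoch" (one global optimizer
# step) corresponds to roughly target_batch_size / (num_peers * batch_size_per_step)
# local opt.step calls per peer, plus a little slack because peers keep submitting
# gradients during matchmaking. The batch sizes below follow the docstring example;
# the peer count is an assumption for illustration.
target_batch_size = 4096
num_peers = 8
batch_size_per_step = 4
local_steps_per_epoch = target_batch_size / (num_peers * batch_size_per_step)
print(local_steps_per_epoch)   # -> 128.0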
'''A temporarily outdated visualization module.'''
import graphviz as gv
from .model import Model
def visualize(model: Model, structural_part=True, measurement_part=False,
view=True, filename=None, title=''):
"""Visualization of SEM model via graphviz library.
Keyword arguments:
model -- A SEM model.
structural_part -- Should structural part be visualised?
measurement_part -- Should measurement part be visualised?
view -- Should graph be displayed?
filename -- Filename/path.
title -- Title.
"""
g = gv.Digraph(format='jpg', graph_attr={'label': title})
if structural_part:
g.node_attr.update(color='red', shape='box')
for i, j in model.parameters['Beta']:
lval, rval = model.beta_names[0][i], model.beta_names[0][j]
g.edge(rval, lval)
if measurement_part:
g.node_attr.update(color='black', shape='circle')
for i, j in model.parameters['Lambda']:
lval, rval = model.lambda_names[0][i], model.lambda_names[0][j]
g.edge(lval, rval)
g.render(filename, view=view)
| 38.566667 | 75 | 0.623163 | [
"MIT"
] | YoungjuneKwon/forked-semopy | semopy/visualization.py | 1,157 | Python |
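# Hedged usage sketch: visualize() above only reads model.parameters['Beta'] / ['Lambda']
# and the matching beta_names / lambda_names lists, so a stub object with those
# attributes is enough to exercise it without fitting a real semopy Model. The stub
# layout below is inferred from the function body, not a semopy API, and rendering
# still requires the Graphviz binaries to be installed.
from types import SimpleNamespace

stub_model = SimpleNamespace(
    parameters={"Beta": [(0, 1)], "Lambda": []},   # one structural edge: x1 -> eta1
    beta_names=[["eta1", "x1"]],
    lambda_names=[[]],
)
visualize(stub_model, structural_part=True, measurement_part=False,
          view=False, filename="sem_graph", title="toy structural part")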
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .project_task_properties import ProjectTaskProperties
class MigrateSqlServerSqlDbTaskProperties(ProjectTaskProperties):
"""Properties for the task that migrates on-prem SQL Server databases to Azure
SQL Database.
Variables are only populated by the server, and will be ignored when
sending a request.
All required parameters must be populated in order to send to Azure.
:ivar errors: Array of errors. This is ignored if submitted.
:vartype errors: list[~azure.mgmt.datamigration.models.ODataError]
:ivar state: The state of the task. This is ignored if submitted. Possible
values include: 'Unknown', 'Queued', 'Running', 'Canceled', 'Succeeded',
'Failed', 'FailedInputValidation', 'Faulted'
:vartype state: str or ~azure.mgmt.datamigration.models.TaskState
:ivar commands: Array of command properties.
:vartype commands:
list[~azure.mgmt.datamigration.models.CommandProperties]
:param client_data: Key value pairs of client data to attach meta data
information to task
:type client_data: dict[str, str]
:param task_type: Required. Constant filled by server.
:type task_type: str
:param input: Task input
:type input:
~azure.mgmt.datamigration.models.MigrateSqlServerSqlDbTaskInput
:ivar output: Task output. This is ignored if submitted.
:vartype output:
list[~azure.mgmt.datamigration.models.MigrateSqlServerSqlDbTaskOutput]
"""
_validation = {
'errors': {'readonly': True},
'state': {'readonly': True},
'commands': {'readonly': True},
'task_type': {'required': True},
'output': {'readonly': True},
}
_attribute_map = {
'errors': {'key': 'errors', 'type': '[ODataError]'},
'state': {'key': 'state', 'type': 'str'},
'commands': {'key': 'commands', 'type': '[CommandProperties]'},
'client_data': {'key': 'clientData', 'type': '{str}'},
'task_type': {'key': 'taskType', 'type': 'str'},
'input': {'key': 'input', 'type': 'MigrateSqlServerSqlDbTaskInput'},
'output': {'key': 'output', 'type': '[MigrateSqlServerSqlDbTaskOutput]'},
}
def __init__(self, **kwargs):
super(MigrateSqlServerSqlDbTaskProperties, self).__init__(**kwargs)
self.input = kwargs.get('input', None)
self.output = None
self.task_type = 'Migrate.SqlServer.SqlDb'
| 41.637681 | 82 | 0.641142 | [
"MIT"
] | OlhaTkachenko/azure-sdk-for-python | sdk/datamigration/azure-mgmt-datamigration/azure/mgmt/datamigration/models/migrate_sql_server_sql_db_task_properties.py | 2,873 | Python |
import torch.nn as nn
class AwAWrapper(nn.Module):
def __init__(self, embeddingnet):
super(AwAWrapper, self).__init__()
self.embeddingnet = embeddingnet
| 16.181818 | 42 | 0.685393 | [
"MIT"
] | VisionLearningGroup/SANE | lib/nets/awa_net_wrapper.py | 178 | Python |
from setuptools import find_packages, setup
setup(
# Application info
name="pytorch_common",
version="1.5.3",
author="Mihir Rana",
author_email="[email protected]",
description="Repo for common PyTorch code",
long_description=open("README.md", "r", encoding="utf-8").read(),
long_description_content_type="text/markdown",
packages=find_packages(),
test_suite="tests",
# Packages that this package requires
install_requires=[
"numpy>=1.21.2",
"pandas>=1.3.4",
"matplotlib>=3.4.3",
"dask[dataframe]>=2021.11.1",
"toolz==0.10.0",
"scikit-learn>=1.0.1",
"dill>=0.3.4",
"munch>=2.5.0",
"locket==0.2.0",
],
# Optional dependencies
extras_require={"nlp": ["transformers>=4.15.0"]}, # for NLP related projects
# Add config and sql files to the package
# https://python-packaging.readthedocs.io/en/latest/non-code-files.html
include_package_data=True,
)
| 31.03125 | 81 | 0.623364 | [
"MIT"
] | ranamihir/pytorch_common | setup.py | 993 | Python |
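# Usage note for the extras_require entry above: the transformers dependency is only
# installed when the "nlp" extra is requested explicitly, e.g.
#   pip install pytorch_common[nlp]
# or, from a local checkout of the repository,
#   pip install -e ".[nlp]"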
import math
from torch import nn
import torch.nn.functional as F
from torch import distributions as pyd
class TanhTransform(pyd.transforms.Transform):
domain = pyd.constraints.real
codomain = pyd.constraints.interval(-1.0, 1.0)
bijective = True
sign = +1
def __init__(self, cache_size=1):
super().__init__(cache_size=cache_size)
@staticmethod
def atanh(x):
return 0.5 * (x.log1p() - (-x).log1p())
def __eq__(self, other):
return isinstance(other, TanhTransform)
def _call(self, x):
return x.tanh()
def _inverse(self, y):
# We do not clamp to the boundary here as it may degrade the performance of certain algorithms.
# one should use `cache_size=1` instead
return self.atanh(y)
def log_abs_det_jacobian(self, x, y):
# We use a formula that is more numerically stable, see details in the following link
# https://github.com/tensorflow/probability/commit/ef6bb176e0ebd1cf6e25c6b5cecdd2428c22963f#diff-e120f70e92e6741bca649f04fcd907b7
return 2. * (math.log(2.) - x - F.softplus(-2. * x))
class SquashedNormal(pyd.transformed_distribution.TransformedDistribution):
def __init__(self, loc, scale):
self.loc = loc
self.scale = scale
self.base_dist = pyd.Normal(loc, scale)
transforms = [TanhTransform()]
super().__init__(self.base_dist, transforms)
@property
def mean(self):
mu = self.loc
for tr in self.transforms:
mu = tr(mu)
return mu
| 30.27451 | 137 | 0.657383 | [
"MIT"
] | willwhitney/sac_dists | distributions/squashed_normal.py | 1,544 | Python |
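# Usage sketch, assuming PyTorch is installed: SquashedNormal above is a Normal pushed
# through tanh, so samples always land in (-1, 1) -- the usual construction for bounded
# continuous actions in SAC-style agents -- and log_prob includes the tanh Jacobian
# correction. The shapes and scale below are arbitrary choices for this example.
import torch

loc = torch.zeros(2)
scale = 0.5 * torch.ones(2)
dist = SquashedNormal(loc, scale)
action = dist.rsample()                      # reparameterized sample, each entry in (-1, 1)
log_prob = dist.log_prob(action).sum(-1)     # sum the per-dimension log-densities
print(action, log_prob, dist.mean)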
#!/usr/bin/python
#
# Copyright 2018-2021 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import os
import pandas as pd
import pytest
import tempfile
import uuid
from unittest.mock import patch
import altair as alt
import matplotlib.pyplot as plt
from bokeh.plotting import figure
from plotly import figure_factory
from polyaxon import settings
from polyaxon.constants.globals import DEFAULT, PLATFORM_DIST_CE
from polyaxon.containers.contexts import (
CONTEXT_ARTIFACTS_FORMAT,
CONTEXT_MOUNT_ARTIFACTS_FORMAT,
CONTEXT_MOUNT_RUN_OUTPUTS_FORMAT,
CONTEXT_OFFLINE_FORMAT,
CONTEXTS_OUTPUTS_SUBPATH_FORMAT,
)
from polyaxon.env_vars import getters
from polyaxon.env_vars.getters import get_run_info
from polyaxon.env_vars.keys import (
POLYAXON_KEYS_COLLECT_ARTIFACTS,
POLYAXON_KEYS_COLLECT_RESOURCES,
POLYAXON_KEYS_LOG_LEVEL,
POLYAXON_KEYS_RUN_INSTANCE,
)
from polyaxon.exceptions import PolyaxonClientException
from polyaxon.polyboard.artifacts import V1ArtifactKind
from polyaxon.polyboard.events import V1Events, get_asset_path, get_event_path
from polyaxon.polyboard.processors.writer import EventFileWriter, ResourceFileWriter
from polyaxon.tracking.run import Run
from polyaxon.utils.path_utils import create_path
from tests.utils import TestEnvVarsCase, tensor_np
@pytest.mark.tracking_mark
class TestRunTracking(TestEnvVarsCase):
def setUp(self):
super().setUp()
settings.CLIENT_CONFIG.is_managed = True
settings.CLIENT_CONFIG.is_offline = True
def test_get_collect_artifacts_return_false_out_cluster(self):
settings.CLIENT_CONFIG.is_managed = False
os.environ[POLYAXON_KEYS_COLLECT_ARTIFACTS] = "false"
assert getters.get_collect_artifacts() is False
def test_empty_collect_artifacts_path(self):
settings.CLIENT_CONFIG.is_managed = True
assert getters.get_collect_artifacts() is False
def test_valid_artifacts_path(self):
settings.CLIENT_CONFIG.is_managed = True
self.check_valid_value(
POLYAXON_KEYS_COLLECT_ARTIFACTS, getters.get_collect_artifacts, "true", True
)
def test_get_collect_resources_return_false_out_cluster(self):
settings.CLIENT_CONFIG.is_managed = False
os.environ[POLYAXON_KEYS_COLLECT_RESOURCES] = "false"
assert getters.get_collect_resources() is False
def test_empty_collect_resources_path(self):
settings.CLIENT_CONFIG.is_managed = True
assert getters.get_collect_resources() is False
def test_valid_resources_path(self):
settings.CLIENT_CONFIG.is_managed = True
self.check_valid_value(
POLYAXON_KEYS_COLLECT_RESOURCES, getters.get_collect_resources, "true", True
)
def test_get_log_level_out_cluster(self):
settings.CLIENT_CONFIG.is_managed = False
self.check_empty_value(POLYAXON_KEYS_LOG_LEVEL, getters.get_log_level)
def test_empty_log_level(self):
settings.CLIENT_CONFIG.is_managed = True
self.check_empty_value(POLYAXON_KEYS_LOG_LEVEL, getters.get_log_level)
def test_run_info_checks_is_managed(self):
settings.CLIENT_CONFIG.is_managed = False
with self.assertRaises(PolyaxonClientException):
get_run_info()
def test_empty_run_info(self):
self.check_raise_for_invalid_value(
POLYAXON_KEYS_RUN_INSTANCE, get_run_info, None, PolyaxonClientException
)
def test_non_valid_run_info(self):
self.check_raise_for_invalid_value(
POLYAXON_KEYS_RUN_INSTANCE,
get_run_info,
"something random",
PolyaxonClientException,
)
self.check_raise_for_invalid_value(
POLYAXON_KEYS_RUN_INSTANCE,
get_run_info,
"foo.bar",
PolyaxonClientException,
)
def test_dict_run_info(self):
uid = uuid.uuid4().hex
run_info = "user.project_bar.runs.{}".format(uid)
self.check_valid_value(
POLYAXON_KEYS_RUN_INSTANCE,
get_run_info,
run_info,
("user", "project_bar", uid),
)
@patch("polyaxon.managers.base.os.path.expanduser")
def test_run_init(self, expanduser):
expanduser.return_value = tempfile.mkdtemp()
settings.CLIENT_CONFIG.is_managed = False
settings.CLIENT_CONFIG.is_offline = False
with self.assertRaises(PolyaxonClientException):
Run()
# Uses default as owner in non CE
with self.assertRaises(PolyaxonClientException):
Run(project="test")
# Uses default as owner in CE
settings.CLIENT_CONFIG.is_offline = True
settings.CLI_CONFIG.installation = {"dist": PLATFORM_DIST_CE}
with patch("polyaxon.tracking.run.Run._set_exit_handler") as exit_mock:
run = Run(project="test", track_code=False, track_env=False)
assert exit_mock.call_count == 1
assert run.owner == DEFAULT
with patch("polyaxon.tracking.run.Run._set_exit_handler") as exit_mock:
run = Run(
owner="owner-test", project="test", track_code=False, track_env=False
)
assert exit_mock.call_count == 1
assert run.owner == "owner-test"
assert run.project == "test"
with patch("polyaxon.tracking.run.Run._set_exit_handler") as exit_mock:
run = Run(project="owner-test.test")
assert exit_mock.call_count == 1
assert run.owner == "owner-test"
assert run.project == "test"
settings.CLIENT_CONFIG.is_managed = True
settings.CLIENT_CONFIG.is_offline = False
with self.assertRaises(PolyaxonClientException):
Run()
settings.CLI_CONFIG.installation = None
# Uses default as owner in non CE
with self.assertRaises(PolyaxonClientException):
Run(project="test")
# Uses default as owner in CE
settings.CLIENT_CONFIG.is_offline = True
settings.CLI_CONFIG.installation = {"dist": PLATFORM_DIST_CE}
run = Run(project="test")
assert run.owner == DEFAULT
# FQN non CE
settings.CLI_CONFIG.installation = None
os.environ[POLYAXON_KEYS_RUN_INSTANCE] = "user.project_bar.runs.uid"
run = Run()
assert run.owner == "user"
assert run.project == "project_bar"
assert run.run_uuid == "uid"
# FQN CE
settings.CLI_CONFIG.installation = {"dist": PLATFORM_DIST_CE}
os.environ[POLYAXON_KEYS_RUN_INSTANCE] = "user.project_bar.runs.uid"
run = Run()
assert run.owner == "user"
assert run.project == "project_bar"
assert run.run_uuid == "uid"
def test_event_logger_from_non_managed_run(self):
settings.CLIENT_CONFIG.is_managed = False
settings.CLIENT_CONFIG.is_offline = False
with patch("polyaxon.tracking.run.Run._set_exit_handler") as exit_mock:
run = Run(
project="owner-test.test",
track_code=False,
track_env=False,
collect_artifacts=False,
auto_create=False,
)
assert exit_mock.call_count == 1
artifacts_context = CONTEXT_ARTIFACTS_FORMAT.format(run.run_uuid)
assert run.get_artifacts_path() == artifacts_context
assert run.get_outputs_path() == CONTEXTS_OUTPUTS_SUBPATH_FORMAT.format(
artifacts_context
)
assert run._event_logger is None
# Add run id
with patch("polyaxon.tracking.run.Run._set_exit_handler") as exit_mock:
run = Run(
project="owner-test.test",
run_uuid="uuid",
track_code=False,
track_env=False,
collect_artifacts=False,
)
assert exit_mock.call_count == 1
artifacts_context = CONTEXT_ARTIFACTS_FORMAT.format(run.run_uuid)
assert run.get_artifacts_path() == artifacts_context
assert run.get_outputs_path() == CONTEXTS_OUTPUTS_SUBPATH_FORMAT.format(
artifacts_context
)
assert run._event_logger is None
run.set_artifacts_path()
assert run.get_artifacts_path() == CONTEXT_MOUNT_ARTIFACTS_FORMAT.format("uuid")
assert run.get_outputs_path() == CONTEXT_MOUNT_RUN_OUTPUTS_FORMAT.format("uuid")
with patch("polyaxon.tracking.run.EventFileWriter") as mock_call:
run.set_run_event_logger()
assert mock_call.call_count == 1
with patch("polyaxon.tracking.run.ResourceFileWriter") as mock_call:
run.set_run_resource_logger()
assert mock_call.call_count == 1
# Set collect flag
os.environ[POLYAXON_KEYS_COLLECT_ARTIFACTS] = "true"
os.environ[POLYAXON_KEYS_COLLECT_RESOURCES] = "true"
settings.CLIENT_CONFIG.is_managed = True
with patch("polyaxon.tracking.run.EventFileWriter") as event_call:
with patch("polyaxon.tracking.run.ResourceFileWriter") as resource_call:
with patch("polyaxon.tracking.run.Run.refresh_data") as refresh_call:
with patch(
"polyaxon.tracking.run.Run._set_exit_handler"
) as exit_call:
run = Run(project="owner-test.test", run_uuid="uuid")
assert refresh_call.call_count == 1
assert event_call.call_count == 1
assert resource_call.call_count == 1
assert exit_call.call_count == 1
assert run.get_artifacts_path() == CONTEXT_MOUNT_ARTIFACTS_FORMAT.format("uuid")
assert run.get_outputs_path() == CONTEXT_MOUNT_RUN_OUTPUTS_FORMAT.format("uuid")
def test_event_logger_from_a_managed_run(self):
# Set managed flag
settings.CLIENT_CONFIG.is_managed = True
settings.CLIENT_CONFIG.is_offline = False
os.environ[POLYAXON_KEYS_RUN_INSTANCE] = "user.project_bar.runs.uid"
os.environ[POLYAXON_KEYS_COLLECT_ARTIFACTS] = "false"
os.environ[POLYAXON_KEYS_COLLECT_RESOURCES] = "false"
with patch("polyaxon.tracking.run.Run.refresh_data") as refresh_call:
run = Run()
assert refresh_call.call_count == 1
assert run.get_artifacts_path() == CONTEXT_MOUNT_ARTIFACTS_FORMAT.format("uid")
assert run.get_outputs_path() == CONTEXT_MOUNT_RUN_OUTPUTS_FORMAT.format("uid")
assert run._event_logger is None
# Set collect flag
os.environ[POLYAXON_KEYS_COLLECT_ARTIFACTS] = "true"
os.environ[POLYAXON_KEYS_COLLECT_RESOURCES] = "true"
# Add run id
with patch("polyaxon.tracking.run.Run.set_run_event_logger") as event_call:
with patch(
"polyaxon.tracking.run.Run.set_run_resource_logger"
) as resource_call:
with patch("polyaxon.tracking.run.Run.refresh_data") as refresh_call:
with patch(
"polyaxon.tracking.run.Run._set_exit_handler"
) as exit_call:
Run(project="test.test", run_uuid="uuid")
assert event_call.call_count == 1
assert resource_call.call_count == 1
assert refresh_call.call_count == 1
assert exit_call.call_count == 1
# Set run info
os.environ[POLYAXON_KEYS_RUN_INSTANCE] = "user.project_bar.runs.uid"
# Add run id
with patch("polyaxon.tracking.run.Run.set_run_event_logger") as event_call:
with patch(
"polyaxon.tracking.run.Run.set_run_resource_logger"
) as resource_call:
with patch("polyaxon.tracking.run.Run.refresh_data") as refresh_call:
Run()
assert event_call.call_count == 1
assert resource_call.call_count == 1
assert refresh_call.call_count == 1
def test_event_logger_from_an_offline_run(self):
# Set managed flag
settings.CLIENT_CONFIG.is_managed = False
settings.CLIENT_CONFIG.is_offline = True
os.environ[POLYAXON_KEYS_COLLECT_ARTIFACTS] = "false"
os.environ[POLYAXON_KEYS_COLLECT_RESOURCES] = "false"
with patch("polyaxon.tracking.run.Run._set_exit_handler") as exit_mock:
run = Run(project="test.test", run_uuid="uid")
assert exit_mock.call_count == 1
artifacts_path = CONTEXT_OFFLINE_FORMAT.format("uid")
assert run.get_artifacts_path() == artifacts_path
assert run.get_outputs_path() == CONTEXTS_OUTPUTS_SUBPATH_FORMAT.format(
artifacts_path
)
assert run._event_logger is None
# Set collect flag
os.environ[POLYAXON_KEYS_COLLECT_ARTIFACTS] = "true"
os.environ[POLYAXON_KEYS_COLLECT_RESOURCES] = "true"
# Add run id
with patch("polyaxon.tracking.run.Run.set_run_event_logger") as event_call:
with patch(
"polyaxon.tracking.run.Run.set_run_resource_logger"
) as resource_call:
with patch("polyaxon.tracking.run.Run._set_exit_handler") as exit_mock:
Run(project="test.test", run_uuid="uid")
assert exit_mock.call_count == 1
assert event_call.call_count == 1
assert resource_call.call_count == 1
@pytest.mark.tracking_mark
class TestRunLogging(TestEnvVarsCase):
def setUp(self):
super().setUp()
self.run_path = tempfile.mkdtemp()
self.run_outputs_path = tempfile.mkdtemp()
settings.CLIENT_CONFIG.is_managed = False
os.environ[POLYAXON_KEYS_COLLECT_ARTIFACTS] = "false"
os.environ[POLYAXON_KEYS_COLLECT_RESOURCES] = "false"
with patch("polyaxon.tracking.run.Run._set_exit_handler") as exit_mock:
self.run = Run(
project="owner.project",
track_env=False,
track_code=False,
auto_create=False,
)
assert exit_mock.call_count == 1
self.event_logger = EventFileWriter(run_path=self.run_path)
self.resource_logger = ResourceFileWriter(run_path=self.run_path)
self.run._artifacts_path = self.run_path
self.run._outputs_path = self.run_outputs_path
self.run._event_logger = self.event_logger
self.run._resource_logger = self.resource_logger
assert os.path.exists(get_event_path(self.run_path)) is True
assert os.path.exists(get_asset_path(self.run_path)) is True
@staticmethod
def touch(path):
with open(path, "w") as f:
f.write("test")
def test_log_empty_metric(self):
assert (
os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.METRIC))
is False
)
assert (
os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.METRIC))
is False
)
with patch("polyaxon.tracking.run.Run._log_has_metrics") as log_metrics:
self.run.log_metrics()
assert log_metrics.call_count == 1
self.event_logger.flush()
assert (
os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.METRIC))
is False
)
assert (
os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.METRIC))
is False
)
def test_log_single_metric(self):
assert (
os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.METRIC))
is False
)
assert (
os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.METRIC))
is False
)
with patch("polyaxon.tracking.run.Run._log_has_metrics") as log_metrics:
self.run.log_metrics(step=1, metric1=1.1)
assert log_metrics.call_count == 1
self.event_logger.flush()
assert (
os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.METRIC))
is False
)
assert (
os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.METRIC))
is True
)
events_file = get_event_path(
self.run_path, kind=V1ArtifactKind.METRIC, name="metric1"
)
assert os.path.exists(events_file) is True
results = V1Events.read(kind="metric", name="metric1", data=events_file)
assert len(results.df.values) == 1
def test_log_multiple_metrics(self):
assert (
os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.METRIC))
is False
)
assert (
os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.METRIC))
is False
)
with patch("polyaxon.tracking.run.Run._log_has_metrics") as log_metrics:
self.run.log_metrics(step=1, metric1=1.1, metric2=21.1)
assert log_metrics.call_count == 1
self.event_logger.flush()
assert (
os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.METRIC))
is False
)
assert (
os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.METRIC))
is True
)
events_file = get_event_path(
self.run_path, kind=V1ArtifactKind.METRIC, name="metric1"
)
assert os.path.exists(events_file) is True
results = V1Events.read(kind="metric", name="metric1", data=events_file)
assert len(results.df.values) == 1
events_file = get_event_path(
self.run_path, kind=V1ArtifactKind.METRIC, name="metric2"
)
assert os.path.exists(events_file) is True
results = V1Events.read(kind="metric", name="metric2", data=events_file)
assert len(results.df.values) == 1
with patch("polyaxon.tracking.run.Run._log_has_metrics") as log_metrics:
self.run.log_metrics(step=2, metric1=1.1, metric2=21.1, metric3=12.1)
assert log_metrics.call_count == 1
self.event_logger.flush()
assert (
os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.METRIC))
is False
)
assert (
os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.METRIC))
is True
)
events_file = get_event_path(
self.run_path, kind=V1ArtifactKind.METRIC, name="metric1"
)
assert os.path.exists(events_file) is True
results = V1Events.read(kind="metric", name="metric1", data=events_file)
assert len(results.df.values) == 2
events_file = get_event_path(
self.run_path, kind=V1ArtifactKind.METRIC, name="metric2"
)
assert os.path.exists(events_file) is True
results = V1Events.read(kind="metric", name="metric2", data=events_file)
assert len(results.df.values) == 2
events_file = get_event_path(
self.run_path, kind=V1ArtifactKind.METRIC, name="metric3"
)
assert os.path.exists(events_file) is True
results = V1Events.read(kind="metric", name="metric3", data=events_file)
assert len(results.df.values) == 1
def test_log_image_from_path(self):
assert (
os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.IMAGE))
is False
)
assert (
os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.IMAGE))
is False
)
image_file = tempfile.mkdtemp() + "/file.png"
self.touch(image_file)
with patch("polyaxon.tracking.run.Run._log_has_events") as log_image:
self.run.log_image(name="my_image", data=image_file)
assert log_image.call_count == 1
self.event_logger.flush()
assert (
os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.IMAGE))
is True
)
assert (
os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.IMAGE))
is True
)
events_file = get_event_path(
self.run_path, kind=V1ArtifactKind.IMAGE, name="my_image"
)
assert os.path.exists(events_file) is True
results = V1Events.read(kind="image", name="my_image", data=events_file)
assert len(results.df.values) == 1
asset_file = get_asset_path(
self.run_path, kind=V1ArtifactKind.IMAGE, name="my_image", ext="png"
)
assert os.path.exists(asset_file) is True
def test_log_image_from_path_with_step(self):
assert (
os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.IMAGE))
is False
)
assert (
os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.IMAGE))
is False
)
image_file = tempfile.mkdtemp() + "/file.png"
self.touch(image_file)
with patch("polyaxon.tracking.run.Run._log_has_events") as log_image:
self.run.log_image(name="my_image", data=image_file, step=1)
assert log_image.call_count == 1
self.event_logger.flush()
assert (
os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.IMAGE))
is True
)
assert (
os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.IMAGE))
is True
)
events_file = get_event_path(
self.run_path, kind=V1ArtifactKind.IMAGE, name="my_image"
)
assert os.path.exists(events_file) is True
results = V1Events.read(kind="image", name="my_image", data=events_file)
assert len(results.df.values) == 1
asset_file = get_asset_path(
self.run_path, kind=V1ArtifactKind.IMAGE, name="my_image", step=1, ext="png"
)
assert os.path.exists(asset_file) is True
def test_log_data_image(self):
assert (
os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.IMAGE))
is False
)
assert (
os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.IMAGE))
is False
)
with patch("polyaxon.tracking.run.Run._log_has_events") as log_image:
self.run.log_image(
name="my_image", data=tensor_np(shape=(1, 8, 8)), dataformats="CHW"
)
assert log_image.call_count == 1
self.event_logger.flush()
assert (
os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.IMAGE))
is True
)
assert (
os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.IMAGE))
is True
)
events_file = get_event_path(
self.run_path, kind=V1ArtifactKind.IMAGE, name="my_image"
)
assert os.path.exists(events_file) is True
results = V1Events.read(kind="image", name="my_image", data=events_file)
assert len(results.df.values) == 1
asset_file = get_asset_path(
self.run_path, kind=V1ArtifactKind.IMAGE, name="my_image", ext="png"
)
assert os.path.exists(asset_file) is True
def test_log_image_with_boxes(self):
assert (
os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.IMAGE))
is False
)
assert (
os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.IMAGE))
is False
)
image_file = tempfile.mkdtemp() + "/file.png"
self.touch(image_file)
with patch("polyaxon.tracking.run.Run._log_has_events") as log_image_with_boxes:
self.run.log_image_with_boxes(
name="my_image",
tensor_image=tensor_np(shape=(3, 32, 32)),
tensor_boxes=np.array([[10, 10, 40, 40]]),
)
assert log_image_with_boxes.call_count == 1
self.event_logger.flush()
assert (
os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.IMAGE))
is True
)
assert (
os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.IMAGE))
is True
)
events_file = get_event_path(
self.run_path, kind=V1ArtifactKind.IMAGE, name="my_image"
)
assert os.path.exists(events_file) is True
results = V1Events.read(kind="image", name="my_image", data=events_file)
assert len(results.df.values) == 1
asset_file = get_asset_path(
self.run_path, kind=V1ArtifactKind.IMAGE, name="my_image"
)
assert os.path.exists(asset_file) is True
def test_log_mpl_image(self):
assert (
os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.IMAGE))
is False
)
assert (
os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.IMAGE))
is False
)
figure, axes = plt.figure(), plt.gca()
circle1 = plt.Circle((0.2, 0.5), 0.2, color="r")
circle2 = plt.Circle((0.8, 0.5), 0.2, color="g")
axes.add_patch(circle1)
axes.add_patch(circle2)
plt.axis("scaled")
plt.tight_layout()
with patch("polyaxon.tracking.run.Run._log_has_events") as log_mpl_image:
self.run.log_mpl_image(name="figure", data=figure, step=1, close=False)
assert log_mpl_image.call_count == 1
assert plt.fignum_exists(figure.number) is True
self.event_logger.flush()
assert (
os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.IMAGE))
is True
)
assert (
os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.IMAGE))
is True
)
events_file = get_event_path(
self.run_path, kind=V1ArtifactKind.IMAGE, name="figure"
)
assert os.path.exists(events_file) is True
results = V1Events.read(kind="image", name="figure", data=events_file)
assert len(results.df.values) == 1
asset_file = get_asset_path(
self.run_path, kind=V1ArtifactKind.IMAGE, name="figure", step=1, ext="png"
)
assert os.path.exists(asset_file) is True
with patch("polyaxon.tracking.run.Run._log_has_events") as log_dashboard:
self.run.log_mpl_image(name="figure", data=figure, step=2)
assert log_dashboard.call_count == 1
assert plt.fignum_exists(figure.number) is False
self.event_logger.flush()
assert (
os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.IMAGE))
is True
)
assert (
os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.IMAGE))
is True
)
events_file = get_event_path(
self.run_path, kind=V1ArtifactKind.IMAGE, name="figure"
)
assert os.path.exists(events_file) is True
results = V1Events.read(kind="image", name="figure", data=events_file)
assert len(results.df.values) == 2
asset_file = get_asset_path(
self.run_path, kind=V1ArtifactKind.IMAGE, name="figure", step=1, ext="png"
)
assert os.path.exists(asset_file) is True
@pytest.mark.filterwarnings("ignore::FutureWarning")
def test_log_mpl_images(self):
assert (
os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.IMAGE))
is False
)
assert (
os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.IMAGE))
is False
)
figures = []
for i in range(5):
figure = plt.figure()
plt.plot([i * 1, i * 2, i * 3], label="Plot " + str(i))
plt.xlabel("X")
plt.xlabel("Y")
plt.legend()
plt.tight_layout()
figures.append(figure)
with patch("polyaxon.tracking.run.Run._log_has_events") as log_mpl_image:
self.run.log_mpl_image(name="figure", data=figures, step=1, close=False)
assert log_mpl_image.call_count == 1
assert all([plt.fignum_exists(figure.number) is True for figure in figures])
self.event_logger.flush()
assert (
os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.IMAGE))
is True
)
assert (
os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.IMAGE))
is True
)
events_file = get_event_path(
self.run_path, kind=V1ArtifactKind.IMAGE, name="figure"
)
assert os.path.exists(events_file) is True
results = V1Events.read(kind="image", name="figure", data=events_file)
assert len(results.df.values) == 1
with patch("polyaxon.tracking.run.Run._log_has_events") as log_mpl_image:
self.run.log_mpl_image(name="figure", data=figures, step=2)
assert log_mpl_image.call_count == 1
assert all([plt.fignum_exists(figure.number) is False for figure in figures])
self.event_logger.flush()
assert (
os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.IMAGE))
is True
)
assert (
os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.IMAGE))
is True
)
events_file = get_event_path(
self.run_path, kind=V1ArtifactKind.IMAGE, name="figure"
)
assert os.path.exists(events_file) is True
results = V1Events.read(kind="image", name="figure", data=events_file)
assert len(results.df.values) == 2
asset_file = get_asset_path(
self.run_path, kind=V1ArtifactKind.IMAGE, name="figure", step=1, ext="png"
)
assert os.path.exists(asset_file) is True
@pytest.mark.filterwarnings("ignore::RuntimeWarning")
def test_log_mpl_plotly(self):
assert (
os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.CHART))
is False
)
assert (
os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.CHART))
is False
)
figure, axes = plt.figure(), plt.gca()
circle1 = plt.Circle((0.2, 0.5), 0.2, color="r")
circle2 = plt.Circle((0.8, 0.5), 0.2, color="g")
axes.add_patch(circle1)
axes.add_patch(circle2)
plt.axis("scaled")
plt.tight_layout()
with patch("polyaxon.tracking.run.Run._log_has_events") as log_mpl_plotly_chart:
self.run.log_mpl_plotly_chart(name="figure", figure=figure, step=1)
assert log_mpl_plotly_chart.call_count == 1
self.event_logger.flush()
assert (
os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.CHART))
is False
)
assert (
os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.CHART))
is True
)
events_file = get_event_path(
self.run_path, kind=V1ArtifactKind.CHART, name="figure"
)
assert os.path.exists(events_file) is True
results = V1Events.read(kind="image", name="figure", data=events_file)
assert len(results.df.values) == 1
with patch("polyaxon.tracking.run.Run._log_has_events") as log_mpl_plotly_chart:
self.run.log_mpl_plotly_chart(name="figure", figure=figure, step=2)
assert log_mpl_plotly_chart.call_count == 1
assert plt.fignum_exists(figure.number) is False
self.event_logger.flush()
assert (
os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.CHART))
is False
)
assert (
os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.CHART))
is True
)
events_file = get_event_path(
self.run_path, kind=V1ArtifactKind.CHART, name="figure"
)
assert os.path.exists(events_file) is True
results = V1Events.read(kind="image", name="figure", data=events_file)
assert len(results.df.values) == 2
def test_log_video_from_path(self):
assert (
os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.VIDEO))
is False
)
assert (
os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.VIDEO))
is False
)
video_file = tempfile.mkdtemp() + "/video.gif"
self.touch(video_file)
with patch("polyaxon.tracking.run.Run._log_has_events") as log_video:
self.run.log_video(name="my_video", data=video_file)
assert log_video.call_count == 1
self.event_logger.flush()
assert (
os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.VIDEO))
is True
)
assert (
os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.VIDEO))
is True
)
events_file = get_event_path(
self.run_path, kind=V1ArtifactKind.VIDEO, name="my_video"
)
assert os.path.exists(events_file) is True
results = V1Events.read(kind="video", name="my_video", data=events_file)
assert len(results.df.values) == 1
asset_file = get_asset_path(
self.run_path, kind=V1ArtifactKind.VIDEO, name="my_video", ext="gif"
)
assert os.path.exists(asset_file) is True
def test_log_data_video(self):
assert (
os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.VIDEO))
is False
)
assert (
os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.VIDEO))
is False
)
with patch("polyaxon.tracking.run.Run._log_has_events") as log_dashboard:
self.run.log_video(name="my_video", data=tensor_np(shape=(4, 3, 1, 8, 8)))
assert log_dashboard.call_count == 1
self.event_logger.flush()
assert (
os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.VIDEO))
is True
)
assert (
os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.VIDEO))
is True
)
events_file = get_event_path(
self.run_path, kind=V1ArtifactKind.VIDEO, name="my_video"
)
assert os.path.exists(events_file) is True
results = V1Events.read(kind="video", name="my_video", data=events_file)
assert len(results.df.values) == 1
asset_file = get_asset_path(
self.run_path, kind=V1ArtifactKind.VIDEO, name="my_video", ext="gif"
)
assert os.path.exists(asset_file) is True
def test_log_audio_from_path(self):
assert (
os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.AUDIO))
is False
)
assert (
os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.AUDIO))
is False
)
audio_file = tempfile.mkdtemp() + "/audio.wav"
self.touch(audio_file)
with patch("polyaxon.tracking.run.Run._log_has_events") as log_audio:
self.run.log_audio(name="my_audio", data=audio_file)
assert log_audio.call_count == 1
self.event_logger.flush()
assert (
os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.AUDIO))
is True
)
assert (
os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.AUDIO))
is True
)
events_file = get_event_path(
self.run_path, kind=V1ArtifactKind.AUDIO, name="my_audio"
)
assert os.path.exists(events_file) is True
results = V1Events.read(kind="audio", name="my_audio", data=events_file)
assert len(results.df.values) == 1
asset_file = get_asset_path(
self.run_path, kind=V1ArtifactKind.AUDIO, name="my_audio", ext="wav"
)
assert os.path.exists(asset_file) is True
def test_log_data_audio(self):
assert (
os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.AUDIO))
is False
)
assert (
os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.AUDIO))
is False
)
with patch("polyaxon.tracking.run.Run._log_has_events") as log_audio:
self.run.log_audio(name="my_audio", data=tensor_np(shape=(42,)))
assert log_audio.call_count == 1
self.event_logger.flush()
assert (
os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.AUDIO))
is True
)
assert (
os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.AUDIO))
is True
)
events_file = get_event_path(
self.run_path, kind=V1ArtifactKind.AUDIO, name="my_audio"
)
assert os.path.exists(events_file) is True
results = V1Events.read(kind="audio", name="my_audio", data=events_file)
assert len(results.df.values) == 1
asset_file = get_asset_path(
self.run_path, kind=V1ArtifactKind.AUDIO, name="my_audio", ext="wav"
)
assert os.path.exists(asset_file) is True
def test_log_text(self):
assert (
os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.TEXT))
is False
)
assert (
os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.TEXT))
is False
)
with patch("polyaxon.tracking.run.Run._log_has_events") as log_text:
self.run.log_text(name="my_text", text="some text", step=1)
assert log_text.call_count == 1
self.event_logger.flush()
assert (
os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.TEXT))
is False
)
assert (
os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.TEXT))
is True
)
events_file = get_event_path(
self.run_path, kind=V1ArtifactKind.TEXT, name="my_text"
)
assert os.path.exists(events_file) is True
results = V1Events.read(kind="text", name="my_text", data=events_file)
assert len(results.df.values) == 1
def test_log_html(self):
assert (
os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.HTML))
is False
)
assert (
os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.HTML))
is False
)
with patch("polyaxon.tracking.run.Run._log_has_events") as log_html:
self.run.log_html(name="my_div", html="<div>test<div/>", step=1)
assert log_html.call_count == 1
self.event_logger.flush()
assert (
os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.HTML))
is False
)
assert (
os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.HTML))
is True
)
events_file = get_event_path(
self.run_path, kind=V1ArtifactKind.HTML, name="my_div"
)
assert os.path.exists(events_file) is True
results = V1Events.read(kind="html", name="my_div", data=events_file)
assert len(results.df.values) == 1
def test_log_histogram(self):
assert (
os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.HISTOGRAM))
is False
)
assert (
os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.HISTOGRAM))
is False
)
with patch("polyaxon.tracking.run.Run._log_has_events") as log_histogram:
self.run.log_histogram(
name="histo", values=tensor_np(shape=(1024,)), bins="auto", step=1
)
self.run.log_histogram(
name="histo", values=tensor_np(shape=(1024,)), bins="fd", step=1
)
self.run.log_histogram(
name="histo", values=tensor_np(shape=(1024,)), bins="doane", step=1
)
assert log_histogram.call_count == 3
self.event_logger.flush()
assert (
os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.HISTOGRAM))
is False
)
assert (
os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.HISTOGRAM))
is True
)
events_file = get_event_path(
self.run_path, kind=V1ArtifactKind.HISTOGRAM, name="histo"
)
assert os.path.exists(events_file) is True
results = V1Events.read(kind="histogram", name="histo", data=events_file)
assert len(results.df.values) == 3
def test_log_np_histogram(self):
assert (
os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.HISTOGRAM))
is False
)
assert (
os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.HISTOGRAM))
is False
)
values, counts = np.histogram(np.random.randint(255, size=(1000,)))
with patch("polyaxon.tracking.run.Run._log_has_events") as log_np_histogram:
self.run.log_np_histogram(
name="histo", values=values, counts=counts, step=1
)
assert log_np_histogram.call_count == 1
self.event_logger.flush()
assert (
os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.HISTOGRAM))
is False
)
assert (
os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.HISTOGRAM))
is True
)
events_file = get_event_path(
self.run_path, kind=V1ArtifactKind.HISTOGRAM, name="histo"
)
assert os.path.exists(events_file) is True
results = V1Events.read(kind="histogram", name="histo", data=events_file)
assert len(results.df.values) == 1
def test_log_model_file(self):
assert (
os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.MODEL))
is False
)
assert (
os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.MODEL))
is False
)
assert os.path.exists(self.run.get_outputs_path(V1ArtifactKind.MODEL)) is False
model_file = tempfile.mkdtemp() + "/model.pkl"
self.touch(model_file)
with patch("polyaxon.tracking.run.Run.log_model_ref") as log_model:
self.run.log_model(
name="my_model", path=model_file, framework="scikit", versioned=False
)
assert log_model.call_count == 1
self.event_logger.flush()
assert (
os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.MODEL))
is False
)
assert (
os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.MODEL))
is False
)
assert os.path.exists(self.run.get_outputs_path(V1ArtifactKind.MODEL)) is False
model_file = self.run.get_outputs_path("model.pkl")
assert os.path.exists(model_file) is True
def test_log_model_dir(self):
assert (
os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.MODEL))
is False
)
assert (
os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.MODEL))
is False
)
assert os.path.exists(self.run.get_outputs_path(V1ArtifactKind.MODEL)) is False
model_dir = tempfile.mkdtemp() + "/model"
create_path(model_dir)
model_file = model_dir + "/model.pkl"
self.touch(model_file)
weights_file = model_dir + "/weights"
self.touch(weights_file)
configs_file = model_dir + "/configs"
self.touch(configs_file)
with patch("polyaxon.tracking.run.Run.log_model_ref") as log_model:
self.run.log_model(
name="my_model", path=model_dir, framework="tensorflow", versioned=False
)
assert log_model.call_count == 1
self.event_logger.flush()
assert (
os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.MODEL))
is False
)
assert (
os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.MODEL))
is False
)
assert os.path.exists(self.run.get_outputs_path(V1ArtifactKind.MODEL)) is True
model_file = self.run.get_outputs_path(
"{}/{}".format(V1ArtifactKind.MODEL, "model.pkl")
)
assert os.path.exists(model_file) is True
def test_log_versioned_model_file(self):
assert (
os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.MODEL))
is False
)
assert (
os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.MODEL))
is False
)
model_file = tempfile.mkdtemp() + "/model.pkl"
self.touch(model_file)
with patch("polyaxon.tracking.run.Run._log_has_model") as log_model:
self.run.log_model(
name="my_model", path=model_file, framework="scikit", step=1
)
assert log_model.call_count == 1
self.event_logger.flush()
assert (
os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.MODEL))
is True
)
assert (
os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.MODEL))
is True
)
events_file = get_event_path(
self.run_path, kind=V1ArtifactKind.MODEL, name="my_model"
)
assert os.path.exists(events_file) is True
results = V1Events.read(kind="model", name="my_model", data=events_file)
assert len(results.df.values) == 1
asset_file = get_asset_path(
self.run_path, kind=V1ArtifactKind.MODEL, name="my_model_1", ext="pkl"
)
assert os.path.exists(asset_file) is True
def test_log_versioned_model_dir(self):
assert (
os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.MODEL))
is False
)
assert (
os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.MODEL))
is False
)
model_dir = tempfile.mkdtemp() + "/model"
create_path(model_dir)
model_file = model_dir + "/model.pkl"
self.touch(model_file)
weights_file = model_dir + "/weights"
self.touch(weights_file)
configs_file = model_dir + "/configs"
self.touch(configs_file)
with patch("polyaxon.tracking.run.Run._log_has_model") as log_model:
self.run.log_model(
name="my_model", path=model_dir, framework="tensorflow", step=1
)
assert log_model.call_count == 1
self.event_logger.flush()
assert (
os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.MODEL))
is True
)
assert (
os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.MODEL))
is True
)
events_file = get_event_path(
self.run_path, kind=V1ArtifactKind.MODEL, name="my_model"
)
assert os.path.exists(events_file) is True
results = V1Events.read(kind="model", name="my_model", data=events_file)
assert len(results.df.values) == 1
asset_file = get_asset_path(
self.run_path, kind=V1ArtifactKind.MODEL, name="my_model_1"
)
assert os.path.exists(asset_file) is True
def test_log_dataframe_ref(self):
assert (
os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.DATAFRAME))
is False
)
assert (
os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.DATAFRAME))
is False
)
model_file = tempfile.mkdtemp() + "/df.pkl"
self.touch(model_file)
with patch("polyaxon.tracking.run.Run.log_artifact_ref") as log_artifact_ref:
self.run.log_artifact(
name="dataframe",
path=model_file,
kind=V1ArtifactKind.DATAFRAME,
versioned=False,
)
assert log_artifact_ref.call_count == 1
self.event_logger.flush()
assert (
os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.DATAFRAME))
is False
)
assert (
os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.DATAFRAME))
is False
)
asset_file = self.run.get_outputs_path(rel_path="df.pkl")
assert os.path.exists(asset_file) is True
def test_log_dataframe(self):
assert (
os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.DATAFRAME))
is False
)
assert (
os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.DATAFRAME))
is False
)
df = pd.DataFrame(data=[])
with patch("polyaxon.tracking.run.Run._log_has_events") as log_dataframe:
self.run.log_dataframe(df=df, name="dataframe", content_type="csv")
assert log_dataframe.call_count == 1
self.event_logger.flush()
assert (
os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.DATAFRAME))
is True
)
assert (
os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.DATAFRAME))
is True
)
events_file = get_event_path(
self.run_path, kind=V1ArtifactKind.DATAFRAME, name="dataframe"
)
assert os.path.exists(events_file) is True
results = V1Events.read(kind="dataframe", name="dataframe", data=events_file)
assert len(results.df.values) == 1
asset_file = get_asset_path(
self.run_path, kind=V1ArtifactKind.DATAFRAME, name="dataframe", ext="csv"
)
assert os.path.exists(asset_file) is True
def test_log_artifact(self):
assert (
os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.TSV))
is False
)
assert (
os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.TSV))
is False
)
tsv_file = tempfile.mkdtemp() + "/file.tsv"
self.touch(tsv_file)
with patch("polyaxon.tracking.run.Run.log_artifact_ref") as log_artifact:
self.run.log_artifact(
name="file",
path=tsv_file,
kind=V1ArtifactKind.TSV,
versioned=False,
)
assert log_artifact.call_count == 1
self.event_logger.flush()
assert (
os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.TSV))
is False
)
assert (
os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.TSV))
is False
)
assert os.path.exists(self.run.get_outputs_path("file.tsv")) is True
def test_versioned_log_artifact(self):
assert (
os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.TSV))
is False
)
assert (
os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.TSV))
is False
)
tsv_file = tempfile.mkdtemp() + "/file.tsv"
self.touch(tsv_file)
with patch("polyaxon.tracking.run.Run._log_has_events") as log_artifact:
self.run.log_artifact(
name="file", path=tsv_file, kind=V1ArtifactKind.TSV, step=1
)
assert log_artifact.call_count == 1
self.event_logger.flush()
assert (
os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.TSV))
is True
)
assert (
os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.TSV))
is True
)
events_file = get_event_path(
self.run_path, kind=V1ArtifactKind.TSV, name="file"
)
assert os.path.exists(events_file) is True
results = V1Events.read(kind=V1ArtifactKind.TSV, name="file", data=events_file)
assert len(results.df.values) == 1
asset_file = get_asset_path(
self.run_path, kind=V1ArtifactKind.TSV, name="file_1", ext="tsv"
)
assert os.path.exists(asset_file) is True
def test_log_artifact_without_name(self):
assert (
os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.TSV))
is False
)
assert (
os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.TSV))
is False
)
tsv_file = tempfile.mkdtemp() + "/file.tsv"
self.touch(tsv_file)
with patch("polyaxon.tracking.run.Run._log_has_events") as log_artifact:
self.run.log_artifact(path=tsv_file, kind=V1ArtifactKind.TSV, step=1)
assert log_artifact.call_count == 1
self.event_logger.flush()
assert (
os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.TSV))
is True
)
assert (
os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.TSV))
is True
)
events_file = get_event_path(
self.run_path, kind=V1ArtifactKind.TSV, name="file"
)
assert os.path.exists(events_file) is True
results = V1Events.read(kind=V1ArtifactKind.TSV, name="file", data=events_file)
assert len(results.df.values) == 1
asset_file = get_asset_path(
self.run_path, kind=V1ArtifactKind.TSV, name="file_1", ext="tsv"
)
assert os.path.exists(asset_file) is True
def test_log_artifact_without_name_and_filename_with_several_dots(self):
assert (
os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.FILE))
is False
)
assert (
os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.FILE))
is False
)
tar_file = tempfile.mkdtemp() + "/file.tar.gz"
self.touch(tar_file)
with patch("polyaxon.tracking.run.Run._log_has_events") as log_artifact:
self.run.log_artifact(path=tar_file, kind=V1ArtifactKind.FILE, step=1)
assert log_artifact.call_count == 1
self.event_logger.flush()
assert (
os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.FILE))
is True
)
assert (
os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.FILE))
is True
)
events_file = get_event_path(
self.run_path, kind=V1ArtifactKind.FILE, name="file"
)
assert os.path.exists(events_file) is True
results = V1Events.read(kind=V1ArtifactKind.FILE, name="file", data=events_file)
assert len(results.df.values) == 1
asset_file = get_asset_path(
self.run_path, kind=V1ArtifactKind.FILE, name="file_1", ext="tar.gz"
)
assert os.path.exists(asset_file) is True
def test_log_versioned_artifacts(self):
assert (
os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.TSV))
is False
)
assert (
os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.TSV))
is False
)
assert (
os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.DATAFRAME))
is False
)
assert (
os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.DATAFRAME))
is False
)
tsv_file = tempfile.mkdtemp() + "/file.tsv"
self.touch(tsv_file)
with patch("polyaxon.tracking.run.Run._log_has_events") as log_artifact:
self.run.log_artifact(
name="file", path=tsv_file, kind=V1ArtifactKind.TSV, step=1
)
assert log_artifact.call_count == 1
pd_file = tempfile.mkdtemp() + "/dataframe"
self.touch(pd_file)
with patch("polyaxon.tracking.run.Run._log_has_events") as log_artifact:
self.run.log_artifact(
name="file2", path=pd_file, kind=V1ArtifactKind.DATAFRAME, step=1
)
assert log_artifact.call_count == 1
self.event_logger.flush()
assert (
os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.TSV))
is True
)
assert (
os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.TSV))
is True
)
assert (
os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.DATAFRAME))
is True
)
assert (
os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.DATAFRAME))
is True
)
events_file = get_event_path(
self.run_path, kind=V1ArtifactKind.TSV, name="file"
)
assert os.path.exists(events_file) is True
results = V1Events.read(kind=V1ArtifactKind.TSV, name="file", data=events_file)
assert len(results.df.values) == 1
events_file = get_event_path(
self.run_path, kind=V1ArtifactKind.DATAFRAME, name="file2"
)
assert os.path.exists(events_file) is True
        results = V1Events.read(
            kind=V1ArtifactKind.DATAFRAME, name="file2", data=events_file
        )
assert len(results.df.values) == 1
asset_file = get_asset_path(
self.run_path, kind=V1ArtifactKind.TSV, name="file_1", ext="tsv"
)
assert os.path.exists(asset_file) is True
asset_file = get_asset_path(
self.run_path, kind=V1ArtifactKind.DATAFRAME, name="file2_1"
)
assert os.path.exists(asset_file) is True
def test_log_charts(self):
x = [1, 2, 3, 4, 5]
y = [6, 7, 2, 4, 5]
bokeh_test = figure(
title="simple line example", x_axis_label="x", y_axis_label="y"
)
bokeh_test.line(x, y, line_width=2)
x1 = np.random.randn(200) - 2
x2 = np.random.randn(200)
x3 = np.random.randn(200) + 2
hist_data = [x1, x2, x3]
group_labels = ["Group 1", "Group 2", "Group 3"]
plotly_test = figure_factory.create_distplot(
hist_data, group_labels, bin_size=[0.1, 0.25, 0.5]
)
df1 = pd.DataFrame([["A", "B", "C", "D"], [28, 55, 43, 91]], index=["a", "b"]).T
alt_test = alt.Chart(df1).mark_bar().encode(x="a", y="b")
assert (
os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.CHART))
is False
)
assert (
os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.CHART))
is False
)
with patch("polyaxon.tracking.run.Run._log_has_events") as log_charts:
self.run.log_bokeh_chart(name="bokeh_test", figure=bokeh_test, step=1)
self.run.log_plotly_chart(name="plotly_test", figure=plotly_test, step=1)
self.run.log_altair_chart(name="alt_test", figure=alt_test, step=1)
assert log_charts.call_count == 3
self.event_logger.flush()
assert (
os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.CHART))
is False
)
assert (
os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.CHART))
is True
)
events_file = get_event_path(
self.run_path, kind=V1ArtifactKind.CHART, name="bokeh_test"
)
assert os.path.exists(events_file) is True
results = V1Events.read(
kind=V1ArtifactKind.CHART, name="bokeh_test", data=events_file
)
assert len(results.df.values) == 1
events_file = get_event_path(
self.run_path, kind=V1ArtifactKind.CHART, name="plotly_test"
)
assert os.path.exists(events_file) is True
results = V1Events.read(
kind=V1ArtifactKind.CHART, name="plotly_test", data=events_file
)
assert len(results.df.values) == 1
events_file = get_event_path(
self.run_path, kind=V1ArtifactKind.CHART, name="alt_test"
)
assert os.path.exists(events_file) is True
results = V1Events.read(
kind=V1ArtifactKind.CHART, name="alt_test", data=events_file
)
assert len(results.df.values) == 1
def test_log_curves(self):
x = [1, 2, 3, 4, 5]
y = [6, 7, 2, 4, 5]
with patch("polyaxon.tracking.run.Run._log_has_events") as log_curves:
self.run.log_roc_auc_curve(name="roc_test", fpr=x, tpr=y, auc=0.6, step=1)
self.run.log_pr_curve(
name="pr_test", precision=x, recall=y, average_precision=0.6, step=1
)
self.run.log_curve(name="curve_test", x=x, y=y, annotation=0.6, step=1)
assert log_curves.call_count == 3
self.event_logger.flush()
assert (
os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.CURVE))
is False
)
assert (
os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.CURVE))
is True
)
events_file = get_event_path(
self.run_path, kind=V1ArtifactKind.CURVE, name="roc_test"
)
assert os.path.exists(events_file) is True
results = V1Events.read(
kind=V1ArtifactKind.CURVE, name="roc_test", data=events_file
)
assert len(results.df.values) == 1
events_file = get_event_path(
self.run_path, kind=V1ArtifactKind.CURVE, name="pr_test"
)
assert os.path.exists(events_file) is True
results = V1Events.read(
kind=V1ArtifactKind.CHART, name="pr_test", data=events_file
)
assert len(results.df.values) == 1
events_file = get_event_path(
self.run_path, kind=V1ArtifactKind.CURVE, name="curve_test"
)
assert os.path.exists(events_file) is True
results = V1Events.read(
kind=V1ArtifactKind.CHART, name="curve_test", data=events_file
)
assert len(results.df.values) == 1
| 38.788544 | 88 | 0.620012 | [
"Apache-2.0"
] | AI-App/Polyaxon | src/tests/test_tracking/test_run_tracking.py | 63,652 | Python |
# Copyright 2014 Joe Cora.
# Revisions copyright 2017 Peter Cock.
# All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Objects to represent NEXUS standard data type matrix coding."""
from __future__ import print_function
import sys
from Bio._py3k import basestring
class NexusError(Exception):
pass
class StandardData(object):
"""Create a StandardData iterable object.
Each coding specifies t [type] => (std [standard], multi [multistate] or
uncer [uncertain]) and d [data]
"""
def __init__(self, data):
self._data = []
self._current_pos = 0
# Enforce string data requirement
if not isinstance(data, basestring):
raise NexusError("The coding data given to a StandardData object should be a string")
# Transfer each coding to a position within a sequence
multi_coding = False
uncertain_coding = False
coding_list = {'t': 'std', 'd': []}
for pos, coding in enumerate(data):
# Check if in a multiple coded or uncertain character
if multi_coding:
# End multicoding if close parenthesis
if coding == ')':
multi_coding = False
else:
# Add current coding to list and advance to next coding
coding_list['d'].append(coding)
continue
elif uncertain_coding:
# End multicoding if close parenthesis
if coding == '}':
uncertain_coding = False
else:
# Add current coding to list and advance to next coding
coding_list['d'].append(coding)
continue
else:
# Check if a multiple coded or uncertain character is starting
if coding == '(':
multi_coding = True
coding_list['t'] = 'multi'
continue
elif coding == '{':
uncertain_coding = True
coding_list['t'] = 'uncer'
continue
elif coding in [')', '}']:
                    raise NexusError('Improper character "' + coding +
                                     '" at position ' + str(pos) +
                                     ' of a coding sequence.')
else:
coding_list['d'].append(coding)
# Add character coding to data
self._data.append(coding_list.copy())
coding_list = {'t': 'std', 'd': []}
def __len__(self):
"""Returns the length of the coding, use len(my_coding)."""
return len(self._data)
def __getitem__(self, arg):
return self._data[arg]
def __iter__(self):
return self
def __next__(self):
try:
return_coding = self._data[self._current_pos]
except IndexError:
self._current_pos = 0
raise StopIteration
else:
self._current_pos += 1
return return_coding
if sys.version_info[0] < 3:
def next(self):
"""Deprecated Python 2 style alias for Python 3 style __next__ method."""
return self.__next__()
def raw(self):
"""Returns the full coding as a python list."""
return self._data
def __str__(self):
"""Returns the full coding as a python string, use str(my_coding)."""
str_return = ''
for coding in self._data:
if coding['t'] == 'multi':
str_return += '(' + ''.join(coding['d']) + ')'
elif coding['t'] == 'uncer':
str_return += '{' + ''.join(coding['d']) + '}'
else:
str_return += coding['d'][0]
return str_return
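# Illustrative usage sketch (added; not part of the original module). The
# coding string below is made up for demonstration purposes.
if __name__ == "__main__":
    example = StandardData("01(03){24}5")
    print(len(example))   # -> 5 coded positions
    print(str(example))   # -> round-trips to "01(03){24}5"
    for position in example:
        print(position)   # e.g. {'t': 'std', 'd': ['0']}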
| 33.728814 | 97 | 0.532161 | [
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | EnjoyLifeFund/macHighSierra-py36-pkgs | Bio/Nexus/StandardData.py | 3,980 | Python |
"""
Words analyses by Amardjia Amine.
Free software utility which allows you to find the most frequent phrases
and frequencies of words. French and English language texts are supported.
It also counts number of words, characters, the lexical density,
sentences...etc.
https://github.com/Layto888/Words-Analysis
Usage in command line: python words.py -f [filename.txt] -d [True/False]
-h : for help.
Usage example: python words.py -f test.txt -d True
"""
import argparse
import re
import sys
import time
import platform
import operator
from collections import Counter
from contextlib import redirect_stdout
MAX_DIPLAY = 10
FILE_LEXI = "lexi.wd"
FRENCH_LIST_LENGTH = 78
if platform.system() == 'Linux':
DEFAULT_CODEC = "ISO-8859-1"
elif platform.system() == 'Windows':
DEFAULT_CODEC = None
class Words:
def __init__(self, filename):
"""
Input : text file name
Do some operations to a text and return results.
"""
with open(filename, "r", encoding=DEFAULT_CODEC) as fp:
self.normal_text = fp.read().strip()
self.normalized_text = self.normalize_text(self.normal_text)
def all_characters_without_spaces(self, text):
""" count the total of characters without any space char """
return len(text) - self.number_spaces(text)
def differents_words_list(self, text):
""" select only the total of different words,
it's a set, return the set.
"""
return set(self.words_list(text))
def average_sentence_length(self, text):
""" count the average length of sentences
avg = words / sentences
"""
if len(self.words_list(text)) == 0:
return 0
else:
return len(self.words_list(text)) / len(self.sentence_split(text))
def max_sentence_length(self, text):
""" count and return the maximum length
of sentences list """
all_senteces = self.sentence_split(text)
try:
return (max(list(map(len, all_senteces))))
except Exception as e:
print(e)
return 0
def min_sentence_length(self, text):
""" count and return the minimum length
of sentences list """
all_senteces = self.sentence_split(text)
try:
return (min(list(map(len, all_senteces))))
except Exception as e:
print(e)
return 0
@staticmethod
def normalize_text(normal_text):
""" remove extra spaces if any in the text
and put it in lowercase, to normalize the input text.
"""
normalized_text = re.sub(' +', ' ', normal_text)
normalized_text = normalized_text.lower()
return normalized_text
@staticmethod
def number_spaces(text):
""" count the number of spaces in the text """
return text.count(' ')
@staticmethod
def words_list(text):
""" get all words in a list
return the list of words [a-zA-Z_].
"""
return re.findall("[a-zA-Z]+", text)
@staticmethod
def sentence_split(text):
""" split sentences into list of sentences.
return len(sentence_split(text)) to get the number of sentences.
"""
sentences = re.split('[.!?]', text)
# strip space from the sides.
sentences = [sentence.strip()
for sentence in sentences if len(sentence) > 1]
return sentences
# run the program
def run(filename, write_it):
wa = Words(filename)
# display text basic infos
if write_it:
with open("Output.txt", "w") as fp:
""" if argument value -w is equal to True redirect the output to
a text file if argument value -w is equal to False or not specified
the output will be redirected to the console """
with redirect_stdout(fp):
display(wa)
# display the top 'X' occurrences words
display_top_words(wa, MAX_DIPLAY)
display(wa)
display_top_words(wa, MAX_DIPLAY)
def display(wa):
"""Display all the stuffs on the screen"""
print('Total word count: {}'
.format(len(wa.words_list(wa.normalized_text))))
print('Number of different words: {}'
.format(len(wa.differents_words_list(wa.normalized_text))))
print('Total number of characters: {}'.format(len(wa.normal_text)))
print('Number of characters without spaces: {}'
.format(wa.all_characters_without_spaces(wa.normal_text)))
print('Number of spaces: {}'
.format(wa.number_spaces(wa.normal_text)))
print('Sentence count: {}'
.format(len(wa.sentence_split(wa.normalized_text))))
print('Average sentence length (Words): {0:.2f}'
.format(wa.average_sentence_length(wa.normalized_text)))
print('Max sentence length (Characters): {}'
.format(wa.max_sentence_length(wa.normalized_text)))
print('Min sentence length (Characters): {}'
.format(wa.min_sentence_length(wa.normalized_text)))
print('Lexical density: {0:.2f} %'
.format(lexical_density(wa.words_list(wa.normalized_text), FILE_LEXI)))
print('Language: {} \n'
.format(deduce_language(wa.words_list(wa.normalized_text), FILE_LEXI)))
def display_top_words(wa, max_display):
cp = 0
counts = Counter(wa.words_list(wa.normalized_text))
sorted_occurences = sorted(
counts.items(), key=operator.itemgetter(1), reverse=True)
    print('Top {} recurring words:\n'.format(max_display))
print('{0:<30}{1:<30}{2:<30}'.format('# Ref', 'Occurrence', 'Perc'))
for occurence in sorted_occurences:
cp += 1
if cp <= max_display:
print('{0:<30}{1:<30}{2:.2f} %'.format
(
occurence[0],
occurence[1],
(occurence[1] * 100) / len(wa.words_list(wa.normalized_text)))
)
else:
break
def lexical_density(words_list, lexi_file_name):
""" calculates the lexical density.
L_d = (N_lex / N) * 100
Where:
L_d = the analyzed text's lexical density
    N_lex = the number of lexical word tokens (nouns, adjectives, verbs, adverbs)
in the analyzed text.
N = the number of all tokens (total number of words) in the analyzed text.
"""
l_d = 0
n_lex = 0
n = 0
with open(lexi_file_name, "r", encoding=DEFAULT_CODEC) as fp:
lexical_words = fp.read()
lexical_words = lexical_words.split(',')
for word in lexical_words:
counter = words_list.count(word)
n_lex += counter
    n = len(words_list)
    if n == 0:
        # avoid division by zero on an empty text
        return 0
    l_d = (n_lex / n) * 100
    return l_d
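# Worked example of the lexical-density formula above (a sketch: the token list and
# the lexical words are assumptions, not the real contents of lexi.wd). With N = 8
# tokens of which N_lex = 2 are lexical words, L_d = (2 / 8) * 100 = 25.0 %.
def _example_lexical_density():
    tokens = ["the", "cat", "quickly", "ate", "the", "food", "on", "time"]
    lexical = ["cat", "food"]  # pretend these are the only matches found in the lexicon
    n_lex = sum(tokens.count(word) for word in lexical)
    return (n_lex / len(tokens)) * 100  # -> 25.0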
def deduce_language(words_list, lexi_file_name):
"""
This function will deduce language between French and English.
Using the lexical words found on the text.
"""
with open(lexi_file_name, "r", encoding=DEFAULT_CODEC) as fp:
lexical_words = fp.read()
lexical_words = lexical_words.split(',')
for word in words_list:
if word in lexical_words:
if lexical_words.index(word) <= FRENCH_LIST_LENGTH:
return 'French'
else:
return 'English'
return 'Not found'
def show_process_time(t1_start, t1_stop, t2_start, t2_stop):
"""
function to show elapsed time.
"""
print('\n')
print('Elapsed time: {0:.4f} [sec]'.format(t1_stop - t1_start))
print('CPU process time: {0:.4f} [sec]'.format(t2_stop - t2_start))
print('Done.')
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-f', '--file_name', default='test.txt', required=True,
help='The text file to analyze.', type=str)
    parser.add_argument('-d', '--display', action='store_true',
                        help='also write the output to a text file (Output.txt)')
args = parser.parse_args()
# compute time perf and time process
t1_start = time.perf_counter()
t2_start = time.process_time()
run(args.file_name, args.display)
t1_stop = time.perf_counter()
t2_stop = time.process_time()
show_process_time(t1_start, t1_stop, t2_start, t2_stop)
return 0
# main
if __name__ == '__main__':
sys.exit(main())
| 32.338346 | 85 | 0.601953 | [
"MIT"
] | Layto888/Words-Analysis | words.py | 8,602 | Python |
import logging as log
import time
from . import gitlab
from .approvals import Approvals
GET, POST, PUT, DELETE = gitlab.GET, gitlab.POST, gitlab.PUT, gitlab.DELETE
class MergeRequest(gitlab.Resource):
@classmethod
def create(cls, api, project_id, params):
merge_request_info = api.call(POST(
'/projects/{project_id}/merge_requests'.format(project_id=project_id),
params,
))
merge_request = cls(api, merge_request_info)
return merge_request
@classmethod
def search(cls, api, project_id, params):
merge_requests = api.collect_all_pages(GET(
'/projects/{project_id}/merge_requests'.format(project_id=project_id),
params,
))
return [cls(api, merge_request) for merge_request in merge_requests]
@classmethod
def fetch_by_iid(cls, project_id, merge_request_iid, api):
merge_request = cls(api, {'iid': merge_request_iid, 'project_id': project_id})
merge_request.refetch_info()
return merge_request
@classmethod
def fetch_all_open_for_user(cls, project_id, user_id, api, merge_order):
all_merge_request_infos = api.collect_all_pages(GET(
'/projects/{project_id}/merge_requests'.format(project_id=project_id),
{'state': 'opened', 'order_by': merge_order, 'sort': 'asc'},
))
my_merge_request_infos = [
mri for mri in all_merge_request_infos
if ((mri.get('assignee', {}) or {}).get('id') == user_id) or
(user_id in [assignee.get('id') for assignee in (mri.get('assignees', []) or [])])
]
return [cls(api, merge_request_info) for merge_request_info in my_merge_request_infos]
@property
def project_id(self):
return self.info['project_id']
@property
def iid(self):
return self.info['iid']
@property
def title(self):
return self.info['title']
@property
def state(self):
return self.info['state']
@property
def merge_status(self):
return self.info['merge_status']
@property
def rebase_in_progress(self):
return self.info.get('rebase_in_progress', False)
@property
def merge_error(self):
return self.info.get('merge_error')
@property
def assignee_ids(self):
if 'assignees' in self.info:
return [assignee.get('id') for assignee in (self.info['assignees'] or [])]
return [(self.info.get('assignee', {}) or {}).get('id')]
@property
def author_id(self):
return self.info['author'].get('id')
@property
def source_branch(self):
return self.info['source_branch']
@property
def target_branch(self):
return self.info['target_branch']
@property
def sha(self):
return self.info['sha']
@property
def squash(self):
return self.info.get('squash', False) # missing means auto-squash not supported
@property
def source_project_id(self):
return self.info['source_project_id']
@property
def target_project_id(self):
return self.info['target_project_id']
@property
def work_in_progress(self):
return self.info['work_in_progress']
@property
def approved_by(self):
return self.info['approved_by']
@property
def web_url(self):
return self.info['web_url']
@property
def force_remove_source_branch(self):
return self.info['force_remove_source_branch']
def refetch_info(self):
self._info = self._api.call(GET('/projects/{0.project_id}/merge_requests/{0.iid}'.format(self)))
def comment(self, message):
if self._api.version().release >= (9, 2, 2):
notes_url = '/projects/{0.project_id}/merge_requests/{0.iid}/notes'.format(self)
else:
# GitLab botched the v4 api before 9.2.2
notes_url = '/projects/{0.project_id}/merge_requests/{0.id}/notes'.format(self)
return self._api.call(POST(notes_url, {'body': message}))
def rebase(self):
self.refetch_info()
if not self.rebase_in_progress:
self._api.call(PUT(
'/projects/{0.project_id}/merge_requests/{0.iid}/rebase'.format(self),
))
else:
# We wanted to rebase and someone just happened to press the button for us!
log.info('A rebase was already in progress on the merge request!')
max_attempts = 30
wait_between_attempts_in_secs = 1
for _ in range(max_attempts):
self.refetch_info()
if not self.rebase_in_progress:
if self.merge_error:
raise MergeRequestRebaseFailed(self.merge_error)
return
time.sleep(wait_between_attempts_in_secs)
raise TimeoutError('Waiting for merge request to be rebased by GitLab')
def accept(self, remove_branch=False, sha=None, merge_when_pipeline_succeeds=True):
return self._api.call(PUT(
'/projects/{0.project_id}/merge_requests/{0.iid}/merge'.format(self),
dict(
should_remove_source_branch=remove_branch,
merge_when_pipeline_succeeds=merge_when_pipeline_succeeds,
sha=sha or self.sha, # if provided, ensures what is merged is what we want (or fails)
),
))
def close(self):
return self._api.call(PUT(
'/projects/{0.project_id}/merge_requests/{0.iid}'.format(self),
{'state_event': 'close'},
))
def assign_to(self, user_id):
return self._api.call(PUT(
'/projects/{0.project_id}/merge_requests/{0.iid}'.format(self),
{'assignee_id': user_id},
))
def unassign(self):
return self.assign_to(0)
def fetch_approvals(self):
        # 'id' needed for the GitLab 9.2.2 hack (see Approvals.refetch_info())
info = {'id': self.id, 'iid': self.iid, 'project_id': self.project_id}
approvals = Approvals(self.api, info)
approvals.refetch_info()
return approvals
def fetch_commits(self):
return self._api.call(GET('/projects/{0.project_id}/merge_requests/{0.iid}/commits'.format(self)))
class MergeRequestRebaseFailed(Exception):
pass
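# Minimal usage sketch (not part of the original module). It assumes `api` is an
# already-authenticated `gitlab.Api` instance and that project 42 has an open merge
# request with iid 7; both identifiers are placeholders for illustration only.
def _example_merge_flow(api):
    merge_request = MergeRequest.fetch_by_iid(project_id=42, merge_request_iid=7, api=api)
    merge_request.comment('Queued for merging')
    merge_request.rebase()  # blocks until GitLab finishes (or fails) the rebase
    approvals = merge_request.fetch_approvals()
    merge_request.accept(remove_branch=True, sha=merge_request.sha)
    return approvals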
| 31.187192 | 106 | 0.626125 | [
"BSD-3-Clause"
] | chmielowiec/marge-bot | marge/merge_request.py | 6,331 | Python |
import json
import logging
import socket
from roombapy.roomba_info import RoombaInfo
class RoombaDiscovery:
udp_bind_address = ""
udp_address = "<broadcast>"
udp_port = 5678
roomba_message = "irobotmcs"
amount_of_broadcasted_messages = 5
server_socket = None
log = None
def __init__(self):
"""Init discovery."""
self.server_socket = _get_socket()
self.log = logging.getLogger(__name__)
def find(self, ip=None):
if ip is not None:
return self.get(ip)
return self.get_all()
def get_all(self):
self._start_server()
self._broadcast_message(self.amount_of_broadcasted_messages)
robots = set()
while True:
response = self._get_response()
if response:
robots.add(response)
else:
break
return robots
def get(self, ip):
self._start_server()
self._send_message(ip)
return self._get_response(ip)
def _get_response(self, ip=None):
try:
while True:
raw_response, addr = self.server_socket.recvfrom(1024)
if ip is not None and addr[0] != ip:
continue
self.log.debug(
"Received response: %s, address: %s", raw_response, addr
)
data = raw_response.decode()
if self._is_from_irobot(data):
return _decode_data(data)
except socket.timeout:
self.log.info("Socket timeout")
return None
def _is_from_irobot(self, data):
if data == self.roomba_message:
return False
json_response = json.loads(data)
if (
"Roomba" in json_response["hostname"]
or "iRobot" in json_response["hostname"]
):
return True
return False
def _broadcast_message(self, amount):
for i in range(amount):
self.server_socket.sendto(
self.roomba_message.encode(), (self.udp_address, self.udp_port)
)
self.log.debug("Broadcast message sent: " + str(i))
def _send_message(self, udp_address):
self.server_socket.sendto(
self.roomba_message.encode(), (udp_address, self.udp_port)
)
self.log.debug("Message sent")
def _start_server(self):
self.server_socket.bind((self.udp_bind_address, self.udp_port))
self.log.debug("Socket server started, port %s", self.udp_port)
def _decode_data(data):
json_response = json.loads(data)
return RoombaInfo(
hostname=json_response["hostname"],
robot_name=json_response["robotname"],
ip=json_response["ip"],
mac=json_response["mac"],
firmware=json_response["sw"],
sku=json_response["sku"],
capabilities=json_response["cap"],
)
def _get_socket():
server_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
server_socket.settimeout(5)
return server_socket
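# Minimal usage sketch (not part of the original module): broadcast the discovery
# message on the local network and print whatever robots answer. The IP shown in the
# comment is a placeholder.
if __name__ == "__main__":
    discovery = RoombaDiscovery()
    for robot in discovery.get_all():  # or discovery.find("192.168.1.42") for one host
        print(robot)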
| 28.972477 | 79 | 0.594997 | [
"MIT"
] | Erelen-Laiquendi/roombapy | roombapy/discovery.py | 3,158 | Python |
import gdax
import os
import json
API_KEY = os.environ['GDAX_API_KEY']
API_SECRET = os.environ['GDAX_API_SECRET']
API_PASS = os.environ['GDAX_API_PASS']
def main():
    '''
    Cancels all open Litecoin (LTC-USD) orders.
    '''
client = gdax.AuthenticatedClient(API_KEY, API_SECRET, API_PASS)
r = client.cancel_all(product='LTC-USD')
print(json.dumps(r))
if __name__ == '__main__':
main()
| 18.809524 | 68 | 0.686076 | [
"MIT"
] | griswaldbrooks/gdax-tools | cancel.py | 395 | Python |
import pyupbit
import time
from datetime import datetime
# Prepare for initialization
def init_prepairing(investable_coins_map, all_market_codes, all_market_names, order_money):
    # Per-coin change rate vs. the previous day from the previous investment round
prev_coins_map = pyupbit.get_prev_dict(investable_coins_map, all_market_codes, all_market_names)
    # Fetch the list of coins worth investing in
investable_coins_map = get_investable_coin_map(all_market_codes, all_market_names)
slack_message = f"""
현재코인들 수익률 ::: {investable_coins_map}
이전코인들 수익률 ::: {prev_coins_map}
"""
pyupbit.send_message(pyupbit.get_slack_channel(), slack_message)
    # Pick the single coin to invest in
best_coin = get_best_coin_name(investable_coins_map, prev_coins_map)
    # Buy it
init(best_coin, order_money)
# Start from a state where the account holds no coins -> then start buying!
def init(best_coin='', order_money=0):
init_counter = 0
print(f"이번시간에 투자할 코인은? {best_coin}")
    # Buy the coin that looks most worth buying
response = pyupbit.order_best_coin(best_coin, order_money)
print(f'주문 결과 ::: {response} / uuid ::: {pyupbit.get_order_bid_uuid(response.json())}')
    # If the order succeeded, wait until the buy is filled
if 200 <= response.status_code <= 299:
        # After placing the buy order, wait until it is filled
while pyupbit.get_my_coin_info() is None:
            # Check once per second whether the buy has been filled
time.sleep(1)
init_counter = init_counter + 1
print('매수 체결 대기 중...')
if init_counter >= 30:
print(f'아직 사지지 않았습니다. 30초 후 다시 초기화 작업 시작합니다..')
                # If it takes too long, cancel the order and retry the buy after 30 seconds
pyupbit.cancel_order(pyupbit.get_order_bid_uuid(response.json()))
time.sleep(30)
init(best_coin, order_money)
    # If the order failed, retry it (after 10 seconds)
else:
print(f'재 주문 시도(10초 후 다시 초기화 작업 시작합니다.)...{response.status_code} / {response.json()}')
time.sleep(10)
init(best_coin, order_money)
# Query the list of coins that look investable
def get_investable_coin_map(market_codes=[], market_names=[]):
investable_coins_map = {}
i = 0
for code in market_codes:
        # coin = { coin code : % gap between the current price and the first resistance line }
coin = pyupbit.get_investable_coins(code, market_names[i])
if coin is not None:
investable_coins_map.update(coin)
time.sleep(0.3)
i = i + 1
return investable_coins_map
# From the investable coins, pick the one that looks best
def get_best_coin_name(investable_coins_map={}, prev_coins_map={}):
while True:
if dict(investable_coins_map):
print(f'original_map ::: {investable_coins_map}')
if dict(prev_coins_map):
print(f'prev_coins_map ::: {prev_coins_map}')
            # Drop coins whose current change rate is lower than their previous one
filtered_map = pyupbit.map_filtering(prev_coins_map, investable_coins_map)
print(f'original_map :: {investable_coins_map} / filtered_map :: {filtered_map}')
investable_coins_map = filtered_map
if dict(investable_coins_map):
            # investable_coins_map = { coin code : % gap between the current price and the first resistance line }
            # Sort the candidate coins by that gap, ascending
coins_map = sorted(investable_coins_map.items(), reverse=True, key=lambda item: item[1])
            # The coin with the smallest gap between current price and first resistance line
best_coin = list(coins_map[0])[0]
            # Gap between the current price and the first resistance line
coin_dynamic_rate = list(coins_map[0])[1]
slack_message = f"best_coin ::: {best_coin} / change_rate(현재가 - 1차 저항선) ::: {coin_dynamic_rate}%"
print(slack_message)
pyupbit.send_message(pyupbit.get_slack_channel(), slack_message)
return best_coin
else:
slack_message = f':meow_code: 살만한 코인이 없습니다.. 10분 후 다시 초기화 작업 시작합니다..'
print(slack_message)
time.sleep(600)
pyupbit.send_message(pyupbit.get_slack_channel(), slack_message)
return recursive_get_investable_coin_map(prev_coins_map)
# If nothing looks worth buying, re-query the coin list
def recursive_get_investable_coin_map(prev_coins_map={}):
    # All market codes
all_market_codes = pyupbit.all_market_names.view_market_codes()
    # All market names
all_market_names = pyupbit.all_market_names.view_market_names()
investable_coins_map = get_investable_coin_map(all_market_codes, all_market_names)
return get_best_coin_name(investable_coins_map, prev_coins_map)
# Frustration ("rage") score tracker (unused)
def calc_profit_score(rage_score=0, prev_profit_rate=0, current_profit_rate=0):
"""
매도 할 타이밍은 스코어가 5점 이상인 경우로 한다.
1. 절대 수익률이 100% 보다 높은 경우
- 직전 수익률 보다 떨어졌을 때(+)
rage_score = rage_score + minus_change_rate * 2
- 직전 수익률 보다 올라갔을 때(-)
rage_score = rage_score + minus_change_rate / 2
2. 절대 수익률이 100% 보다 낮은 경우는 그냥 97% 미만일 때 매도 처리(빡침 스코어는 계산)
- 직전 수익률 보다 떨어졌을 때(+)
rage_score = rage_score + minus_change_rate * 2
- 직전 수익률 보다 올라갔을 때(-)
rage_score = rage_score + minus_change_rate * 1.5
3. 빡침 스코어가 마이너스인 경우 0으로 처리
"""
    # Downward change (sign is flipped: positive means the rate dropped)
minus_change_rate = prev_profit_rate - current_profit_rate
    # Now compute the rage score!
    # Profit rate at or above 100%
if current_profit_rate >= 100:
        # Falling... (maybe just sell...)
if minus_change_rate >= 0:
rage_score = rage_score + minus_change_rate * 3
        # Rising! (let's go!!)
else:
rage_score = rage_score + minus_change_rate / 2
    # Profit rate below 100%
else:
        # Falling... (ouch..)
if minus_change_rate >= 0:
rage_score = rage_score + minus_change_rate * 2
        # Rising! (please!!)
else:
rage_score = rage_score + minus_change_rate * 2
slack_message = f'현재 점수는 ::: {round(rage_score, 2)} / 변동폭은 ::: {round(-minus_change_rate, 2)}% / 직전 수익률은 ::: {prev_profit_rate}% / 현재 수익률은 ::: {current_profit_rate}%'
print(slack_message)
if rage_score >= 6.5:
pyupbit.send_message(pyupbit.get_slack_channel(), slack_message)
elif rage_score < 0:
rage_score = 0
return rage_score
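# Worked example for the scoring code above (a sketch with made-up numbers, not part
# of the original strategy): starting from score 0 with the profit rate falling from
# 105% to 103%, minus_change_rate = 105 - 103 = 2; the rate is still >= 100 and
# falling, so the score becomes 0 + 2 * 3 = 6, which is above the sell threshold of 5.
def _example_calc_profit_score():
    return calc_profit_score(rage_score=0, prev_profit_rate=105, current_profit_rate=103)  # -> 6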
# Main sell / buy logic (unused)
def working(market='', my_investment={}, prev_profit_rate=100, score=0, has_minus_exp=False):
    # Fetch the coin's current state (minute candle)
coin_candle = pyupbit.view_candle_min(market)
    # Unit price at which I bought the coin
buy_unit_price = pyupbit.get_my_coin_unit_price(my_investment)
    # Cash left in my account
#krw_balance = pyupbit.get_my_krw_balance(my_investment)
    # Number of coins left in my account
#my_coin_balance = pyupbit.get_my_coin_total_amount(my_investment)
    # Current coin unit price
current_unit_price = pyupbit.get_current_coin_price(coin_candle)
    # Profit rate (100% = the unit price at purchase time)
profit_rate = pyupbit.get_profit_rate(current_unit_price, buy_unit_price)
    # Score (used to decide the sell timing)
score = calc_profit_score(score, prev_profit_rate, profit_rate)
slack_message1 = f"코인명 ::: {market}(현재빡침점수 : {round(score, 2)}), 매수단가 ::: {buy_unit_price}, 현재단가 ::: {current_unit_price}, 수익률 ::: {str(profit_rate)}%"
print(slack_message1)
if profit_rate < 100:
has_minus_exp = True
    # If the profit rate ever dipped below 100%, decide to sell based on the profit rate alone
if has_minus_exp and profit_rate >= 100:
pyupbit.sell_all()
pyupbit.send_message(pyupbit.get_slack_channel(), f'[구사일생으로 팔았음.-{str(datetime.today())}]' + slack_message1)
print('sell!!')
else:
        # Buy more when the coin is still worth buying and its price is slightly below my entry -> disabled for now!!
# if target_price >= current_unit_price and 99 >= profit_rate >= 97:
# if krw_balance >= 10000:
                # Additional-buy feature disabled
# available_coin_amount = pyupbit.get_possible_order_volume(coin_candle, 10000)
# pyupbit.order_10000(market, available_coin_amount, 'bid')
# pyupbit.send_message('#myinvestment', f'[Buying!!-{str(datetime.today())}]' + slack_message1)
# print('buy!!')
        # Sell/buy timing decision switched to the rage-score criterion!
if score > 5:
pyupbit.sell_all()
pyupbit.send_message(pyupbit.get_slack_channel(), f'[빡쳐서 팔았음!!-{str(datetime.today())}]' + slack_message1)
print('sell!!')
        # Sell when the profit rate looks like it will fall too far
elif profit_rate < 99:
pyupbit.sell_all()
pyupbit.send_message(pyupbit.get_slack_channel(), f'[하락해서 팔았음... -{str(datetime.today())}]' + slack_message1)
print('sell...')
        # In any other state
else:
print('thinking...')
    # Return profit rate, score and the below-100% flag
return [profit_rate, score, has_minus_exp]
# Compute coins that look promising (unused)
def get_rocketboosting_coins(candle_data, market_name):
d = candle_data
    # Market code
market = pyupbit.get_market(d)
    # Target coin price ( today's open + (yesterday's high - yesterday's low) * 0.5 )
target_price = pyupbit.get_target_price_to_buy(market)
    # Current coin unit price
current_price = pyupbit.get_current_coin_price(d)
    # Change rate vs. the previous day
change_rate = pyupbit.get_change_rate(d)
coin_info = pyupbit.get_coin_info_with_candle(d, market_name)
    # Keep only coins whose current price is above the target and whose opening price is above 1 KRW
if current_price >= target_price and pyupbit.get_today_opening_price(d) > 1:
print(f'대상 : {coin_info}')
pyupbit.send_message(pyupbit.get_slack_channel(), coin_info)
return {market: change_rate}
else:
#print(f'비대상 ::: {coin_info}')
return None
# Query the change-rate map for all coins (unused)
def get_coin_rate_map(market_codes=[]):
result_map = {}
for market in market_codes:
d = pyupbit.get_candle_data(market)
        # Change rate vs. the previous day
change_rate = pyupbit.get_change_rate(d)
result_map.update({market: change_rate})
time.sleep(0.2)
return result_map
# Build coin info from daily candle data
def get_coin_info_with_candle(d, market_name):
    # Market code
market = pyupbit.get_market(d)
    # Target coin price ( today's open + (yesterday's high - yesterday's low) * 0.5 )
target_price = pyupbit.get_target_price_to_buy(market)
    # Current coin unit price
current_price = pyupbit.get_current_coin_price(d)
    # Today's opening price
today_open_price = pyupbit.get_today_opening_price(d)
    # Yesterday's high
prev_high_price = pyupbit.get_yesterday_high_price(d)
    # Yesterday's low
prev_low_price = pyupbit.get_yesterday_low_price(d)
    # Pivot (base) line
standard_price = pyupbit.calc_standard_line(prev_high_price, prev_low_price, today_open_price)
    # First support line
first_low_price = pyupbit.first_lower_line(standard_price, prev_high_price)
    # Second support line
second_low_price = pyupbit.second_lower_line(standard_price, prev_high_price, prev_low_price)
    # First resistance line
first_high_price = pyupbit.first_higher_line(standard_price, prev_low_price)
    # Second resistance line
second_high_price = pyupbit.second_higher_line(standard_price, prev_high_price, prev_low_price)
coin_info = f"""
현재시간 : {datetime.today()}
코인명: {market} ({market_name}:{str(pyupbit.get_change_rate(d))}%)
opening_p:{str(pyupbit.get_today_opening_price(d))}
high_p(오늘[어제]):{str(pyupbit.get_today_high_price(d))}[{str(pyupbit.get_yesterday_high_price(d))}]
low_p(오늘[어제]):{str(pyupbit.get_today_low_price(d))}[{str(pyupbit.get_yesterday_low_price(d))}]
prev_p:{str(pyupbit.get_yesterday_close_price(d))}
change_p:{str(pyupbit.get_change_price(d))}
기준선 : {standard_price}
1차 지지선 : {first_low_price}
2차 지지선 : {second_low_price}
1차 저항선 : {first_high_price}
2차 저항선 : {second_high_price}
목표가 : {first_high_price}
현재가 : {current_price}
"""
return coin_info
# Compute the target coin price (unused)
def get_target_price_to_buy(market="KRW-BTC"):
d = pyupbit.get_candle_data(market)
return d[0]['opening_price'] + (d[1]['high_price'] - d[1]['low_price']) * 0.5
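# Worked example of the target-price rule above (made-up candle values, not from the
# real API): with today's open at 100, yesterday's high at 120 and low at 80,
# the target price is 100 + (120 - 80) * 0.5 = 120.
def _example_target_price(today_open=100, prev_high=120, prev_low=80):
    return today_open + (prev_high - prev_low) * 0.5  # -> 120.0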
"""
맵 객체 값으로 나쁜 코인 필터링(수익률 필터링)
직전 수익률과 현재 수익률 기준으로
투자 하지 말아야 할 코인들 필터링(직전 보다 현재 가격이 같거나 높은 코인들.. old_value <= new_value)
"""
# Filter out bad coins
def map_filtering(original_map, new_map):
bad_arr = []
for old_key, old_value in original_map.items():
if old_key in new_map:
new_value = new_map[old_key]
            # This comparison operator is the important part!
if old_value >= new_value:
bad_arr.append(old_key)
print(f'나쁜코인목록 ::: {bad_arr}')
for old_key in bad_arr:
new_map.pop(old_key, None)
return new_map
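# Illustrative sketch of the filtering rule above (made-up rates, hypothetical market
# codes): "KRW-BTC" is dropped because its previous rate (3.0) is >= its current
# rate (2.5), while "KRW-ETH" improved and survives.
def _example_map_filtering():
    previous = {"KRW-BTC": 3.0, "KRW-ETH": 1.0}
    current = {"KRW-BTC": 2.5, "KRW-ETH": 1.5}
    return map_filtering(previous, current)  # -> {"KRW-ETH": 1.5}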
| 38.770492 | 170 | 0.644228 | [
"BSD-2-Clause"
] | snj830526/py_autoinvestment | pyupbit/strategy.py | 14,197 | Python |
import itertools
import multiprocessing as mp
import os
import pickle
import random
import string
import tempfile
from concurrent.futures import ProcessPoolExecutor
from copy import copy
from functools import partial
from unittest import mock
import numpy as np
import pandas as pd
import pytest
import dask
import dask.dataframe as dd
from dask import delayed
from dask.base import compute_as_if_collection
from dask.dataframe._compat import PANDAS_GT_120, assert_categorical_equal, tm
from dask.dataframe.shuffle import (
_noop,
maybe_buffered_partd,
partitioning_index,
rearrange_by_column,
rearrange_by_divisions,
remove_nans,
shuffle,
)
from dask.dataframe.utils import assert_eq, make_meta
from dask.optimization import cull
dsk = {
("x", 0): pd.DataFrame({"a": [1, 2, 3], "b": [1, 4, 7]}, index=[0, 1, 3]),
("x", 1): pd.DataFrame({"a": [4, 5, 6], "b": [2, 5, 8]}, index=[5, 6, 8]),
("x", 2): pd.DataFrame({"a": [7, 8, 9], "b": [3, 6, 9]}, index=[9, 9, 9]),
}
meta = make_meta(
{"a": "i8", "b": "i8"}, index=pd.Index([], "i8"), parent_meta=pd.DataFrame()
)
d = dd.DataFrame(dsk, "x", meta, [0, 4, 9, 9])
full = d.compute()
CHECK_FREQ = {}
if dd._compat.PANDAS_GT_110:
CHECK_FREQ["check_freq"] = False
shuffle_func = shuffle # conflicts with keyword argument
def test_shuffle(shuffle_method):
s = shuffle_func(d, d.b, shuffle=shuffle_method)
assert isinstance(s, dd.DataFrame)
assert s.npartitions == d.npartitions
x = dask.get(s.dask, (s._name, 0))
y = dask.get(s.dask, (s._name, 1))
assert not (set(x.b) & set(y.b)) # disjoint
assert set(s.dask).issuperset(d.dask)
assert shuffle_func(d, d.b)._name == shuffle_func(d, d.b)._name
def test_default_partitions():
assert shuffle(d, d.b).npartitions == d.npartitions
def test_shuffle_npartitions(shuffle_method):
df = pd.DataFrame({"x": np.random.random(100)})
ddf = dd.from_pandas(df, npartitions=10)
s = shuffle(ddf, ddf.x, shuffle=shuffle_method, npartitions=17, max_branch=4)
sc = s.compute()
assert s.npartitions == 17
assert set(s.dask).issuperset(set(ddf.dask))
assert len(sc) == len(df)
assert list(s.columns) == list(df.columns)
assert set(map(tuple, sc.values.tolist())) == set(map(tuple, df.values.tolist()))
def test_shuffle_npartitions_lt_input_partitions(shuffle_method):
df = pd.DataFrame({"x": np.random.random(100)})
ddf = dd.from_pandas(df, npartitions=20)
s = shuffle(ddf, ddf.x, shuffle=shuffle_method, npartitions=5, max_branch=2)
sc = s.compute()
assert s.npartitions == 5
assert set(s.dask).issuperset(set(ddf.dask))
assert len(sc) == len(df)
assert list(s.columns) == list(df.columns)
assert set(map(tuple, sc.values.tolist())) == set(map(tuple, df.values.tolist()))
def test_index_with_non_series(shuffle_method):
from dask.dataframe.tests.test_multi import list_eq
list_eq(
shuffle(d, d.b, shuffle=shuffle_method), shuffle(d, "b", shuffle=shuffle_method)
)
def test_index_with_dataframe(shuffle_method):
res1 = shuffle(d, d[["b"]], shuffle=shuffle_method).compute()
res2 = shuffle(d, ["b"], shuffle=shuffle_method).compute()
res3 = shuffle(d, "b", shuffle=shuffle_method).compute()
assert sorted(res1.values.tolist()) == sorted(res2.values.tolist())
assert sorted(res1.values.tolist()) == sorted(res3.values.tolist())
def test_shuffle_from_one_partition_to_one_other(shuffle_method):
df = pd.DataFrame({"x": [1, 2, 3]})
a = dd.from_pandas(df, 1)
for i in [1, 2]:
b = shuffle(a, "x", npartitions=i, shuffle=shuffle_method)
assert len(a.compute(scheduler="sync")) == len(b.compute(scheduler="sync"))
def test_shuffle_empty_partitions(shuffle_method):
df = pd.DataFrame({"x": [1, 2, 3] * 10})
ddf = dd.from_pandas(df, npartitions=3)
s = shuffle(ddf, ddf.x, npartitions=6, shuffle=shuffle_method)
parts = compute_as_if_collection(dd.DataFrame, s.dask, s.__dask_keys__())
for p in parts:
assert s.columns == p.columns
df2 = pd.DataFrame(
{
"i32": np.array([1, 2, 3] * 3, dtype="int32"),
"f32": np.array([None, 2.5, 3.5] * 3, dtype="float32"),
"cat": pd.Series(["a", "b", "c"] * 3).astype("category"),
"obj": pd.Series(["d", "e", "f"] * 3),
"bool": np.array([True, False, True] * 3),
"dt": pd.Series(pd.date_range("20130101", periods=9)),
"dt_tz": pd.Series(pd.date_range("20130101", periods=9, tz="US/Eastern")),
"td": pd.Series(pd.timedelta_range("2000", periods=9)),
}
)
def test_partitioning_index():
res = partitioning_index(df2.i32, 3)
assert ((res < 3) & (res >= 0)).all()
assert len(np.unique(res)) > 1
assert (partitioning_index(df2.i32, 3) == partitioning_index(df2.i32, 3)).all()
res = partitioning_index(df2[["i32"]], 3)
assert ((res < 3) & (res >= 0)).all()
assert len(np.unique(res)) > 1
res = partitioning_index(df2[["cat", "bool", "f32"]], 2)
assert ((0 <= res) & (res < 2)).all()
res = partitioning_index(df2.index, 4)
assert ((res < 4) & (res >= 0)).all()
assert len(np.unique(res)) > 1
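# Illustrative sketch (not one of the original tests): partitioning_index assigns each
# row a partition id in [0, npartitions) based on a hash of its values, so equal
# values always land in the same output partition.
def test_partitioning_index_groups_equal_values():
    s = pd.Series([1, 2, 1, 2, 3])
    res = partitioning_index(s, 4)
    assert ((res >= 0) & (res < 4)).all()
    assert res[0] == res[2] and res[1] == res[3]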
def test_partitioning_index_categorical_on_values():
df = pd.DataFrame({"a": list(string.ascii_letters), "b": [1, 2, 3, 4] * 13})
df.a = df.a.astype("category")
df2 = df.copy()
df2.a = df2.a.cat.set_categories(list(reversed(df2.a.cat.categories)))
res = partitioning_index(df.a, 5)
res2 = partitioning_index(df2.a, 5)
assert (res == res2).all()
res = partitioning_index(df, 5)
res2 = partitioning_index(df2, 5)
assert (res == res2).all()
@pytest.mark.parametrize(
"npartitions", [1, 4, 7, pytest.param(23, marks=pytest.mark.slow)]
)
def test_set_index_general(npartitions, shuffle_method):
df = pd.DataFrame(
{"x": np.random.random(100), "y": np.random.random(100) // 0.2},
index=np.random.random(100),
)
ddf = dd.from_pandas(df, npartitions=npartitions)
assert_eq(df.set_index("x"), ddf.set_index("x", shuffle=shuffle_method))
assert_eq(df.set_index("y"), ddf.set_index("y", shuffle=shuffle_method))
assert_eq(df.set_index(df.x), ddf.set_index(ddf.x, shuffle=shuffle_method))
assert_eq(
df.set_index(df.x + df.y), ddf.set_index(ddf.x + ddf.y, shuffle=shuffle_method)
)
assert_eq(df.set_index(df.x + 1), ddf.set_index(ddf.x + 1, shuffle=shuffle_method))
assert_eq(df.set_index(df.index), ddf.set_index(ddf.index, shuffle=shuffle_method))
def test_set_index_self_index(shuffle_method):
df = pd.DataFrame(
{"x": np.random.random(100), "y": np.random.random(100) // 0.2},
index=np.random.random(100),
)
a = dd.from_pandas(df, npartitions=4)
b = a.set_index(a.index, shuffle=shuffle_method)
assert a is b
assert_eq(b, df.set_index(df.index))
def test_set_index_names(shuffle_method):
if shuffle_method == "disk":
pytest.xfail("dsk names in disk shuffle are not deterministic")
df = pd.DataFrame(
{"x": np.random.random(100), "y": np.random.random(100) // 0.2},
index=np.random.random(100),
)
ddf = dd.from_pandas(df, npartitions=4)
assert set(ddf.set_index("x", shuffle=shuffle_method).dask) == set(
ddf.set_index("x", shuffle=shuffle_method).dask
)
assert set(ddf.set_index("x", shuffle=shuffle_method).dask) != set(
ddf.set_index("y", shuffle=shuffle_method).dask
)
assert set(ddf.set_index("x", max_branch=4, shuffle=shuffle_method).dask) != set(
ddf.set_index("x", max_branch=3, shuffle=shuffle_method).dask
)
assert set(ddf.set_index("x", drop=True, shuffle=shuffle_method).dask) != set(
ddf.set_index("x", drop=False, shuffle=shuffle_method).dask
)
def test_set_index_2(shuffle_method):
df = dd.demo.make_timeseries(
"2000",
"2004",
{"value": float, "name": str, "id": int},
freq="2H",
partition_freq="1M",
seed=1,
)
df2 = df.set_index("name", shuffle=shuffle_method)
df2.value.sum().compute(scheduler="sync")
def test_set_index_3(shuffle_method):
df = pd.DataFrame(np.random.random((10, 2)), columns=["x", "y"])
ddf = dd.from_pandas(df, npartitions=5)
ddf2 = ddf.set_index(
"x", shuffle=shuffle_method, max_branch=2, npartitions=ddf.npartitions
)
df2 = df.set_index("x")
assert_eq(df2, ddf2)
assert ddf2.npartitions == ddf.npartitions
def test_shuffle_sort(shuffle_method):
df = pd.DataFrame({"x": [1, 2, 3, 2, 1], "y": [9, 8, 7, 1, 5]})
ddf = dd.from_pandas(df, npartitions=3)
df2 = df.set_index("x").sort_index()
ddf2 = ddf.set_index("x", shuffle=shuffle_method)
assert_eq(ddf2.loc[2:3], df2.loc[2:3])
@pytest.mark.parametrize("scheduler", ["threads", "processes"])
def test_rearrange(shuffle_method, scheduler):
df = pd.DataFrame({"x": np.random.random(10)})
ddf = dd.from_pandas(df, npartitions=4)
ddf2 = ddf.assign(_partitions=ddf.x % 4)
result = rearrange_by_column(
ddf2, "_partitions", max_branch=32, shuffle=shuffle_method
)
assert result.npartitions == ddf.npartitions
assert set(ddf.dask).issubset(result.dask)
# Every value in exactly one partition
a = result.compute(scheduler=scheduler)
get = dask.base.get_scheduler(scheduler=scheduler)
parts = get(result.dask, result.__dask_keys__())
for i in a._partitions.drop_duplicates():
assert sum(i in set(part._partitions) for part in parts) == 1
def test_rearrange_cleanup():
df = pd.DataFrame({"x": np.random.random(10)})
ddf = dd.from_pandas(df, npartitions=4)
ddf2 = ddf.assign(_partitions=ddf.x % 4)
tmpdir = tempfile.mkdtemp()
    with dask.config.set(temporary_directory=str(tmpdir)):
result = rearrange_by_column(ddf2, "_partitions", max_branch=32, shuffle="disk")
result.compute(scheduler="processes")
assert len(os.listdir(tmpdir)) == 0
def mock_shuffle_group_3(df, col, npartitions, p):
raise ValueError("Mock exception!")
def test_rearrange_disk_cleanup_with_exception():
# ensure temporary files are cleaned up when there's an internal exception.
with mock.patch("dask.dataframe.shuffle.shuffle_group_3", new=mock_shuffle_group_3):
df = pd.DataFrame({"x": np.random.random(10)})
ddf = dd.from_pandas(df, npartitions=4)
ddf2 = ddf.assign(_partitions=ddf.x % 4)
tmpdir = tempfile.mkdtemp()
        with dask.config.set(temporary_directory=str(tmpdir)):
with pytest.raises(ValueError, match="Mock exception!"):
result = rearrange_by_column(
ddf2, "_partitions", max_branch=32, shuffle="disk"
)
result.compute(scheduler="processes")
assert len(os.listdir(tmpdir)) == 0
def test_rearrange_by_column_with_narrow_divisions():
from dask.dataframe.tests.test_multi import list_eq
A = pd.DataFrame({"x": [1, 2, 3, 4, 5, 6], "y": [1, 1, 2, 2, 3, 4]})
a = dd.repartition(A, [0, 4, 5])
df = rearrange_by_divisions(a, "x", (0, 2, 5))
list_eq(df, a)
def test_maybe_buffered_partd(tmp_path):
import partd
f = maybe_buffered_partd()
p1 = f()
assert isinstance(p1.partd, partd.Buffer)
f2 = pickle.loads(pickle.dumps(f))
assert not f2.buffer
p2 = f2()
assert isinstance(p2.partd, partd.File)
f3 = maybe_buffered_partd(tempdir=tmp_path)
p3 = f3()
assert isinstance(p3.partd, partd.Buffer)
contents = list(tmp_path.iterdir())
assert len(contents) == 1
assert contents[0].suffix == ".partd"
assert contents[0].parent == tmp_path
f4 = pickle.loads(pickle.dumps(f3))
assert not f4.buffer
assert f4.tempdir == tmp_path
def test_set_index_with_explicit_divisions():
df = pd.DataFrame({"x": [4, 1, 2, 5]}, index=[10, 20, 30, 40])
ddf = dd.from_pandas(df, npartitions=2)
def throw(*args, **kwargs):
raise Exception()
with dask.config.set(get=throw):
ddf2 = ddf.set_index("x", divisions=[1, 3, 5])
assert ddf2.divisions == (1, 3, 5)
df2 = df.set_index("x")
assert_eq(ddf2, df2)
# Divisions must be sorted
with pytest.raises(ValueError):
ddf.set_index("x", divisions=[3, 1, 5])
def test_set_index_divisions_2():
df = pd.DataFrame({"x": [1, 2, 3, 4, 5, 6], "y": list("abdabd")})
ddf = dd.from_pandas(df, 2)
result = ddf.set_index("y", divisions=["a", "c", "d"])
assert result.divisions == ("a", "c", "d")
assert list(result.compute(scheduler="sync").index[-2:]) == ["d", "d"]
def test_set_index_divisions_compute():
d2 = d.set_index("b", divisions=[0, 2, 9], compute=False)
d3 = d.set_index("b", divisions=[0, 2, 9], compute=True)
assert_eq(d2, d3)
assert_eq(d2, full.set_index("b"))
assert_eq(d3, full.set_index("b"))
assert len(d2.dask) > len(d3.dask)
d4 = d.set_index(d.b, divisions=[0, 2, 9], compute=False)
d5 = d.set_index(d.b, divisions=[0, 2, 9], compute=True)
exp = full.copy()
exp.index = exp.b
assert_eq(d4, d5)
assert_eq(d4, exp)
assert_eq(d5, exp)
assert len(d4.dask) > len(d5.dask)
def test_set_index_divisions_sorted():
p1 = pd.DataFrame({"x": [10, 11, 12], "y": ["a", "a", "a"]})
p2 = pd.DataFrame({"x": [13, 14, 15], "y": ["b", "b", "c"]})
p3 = pd.DataFrame({"x": [16, 17, 18], "y": ["d", "e", "e"]})
ddf = dd.DataFrame(
{("x", 0): p1, ("x", 1): p2, ("x", 2): p3}, "x", p1, [None, None, None, None]
)
df = ddf.compute()
def throw(*args, **kwargs):
raise Exception("Shouldn't have computed")
with dask.config.set(get=throw):
res = ddf.set_index("x", divisions=[10, 13, 16, 18], sorted=True)
assert_eq(res, df.set_index("x"))
with dask.config.set(get=throw):
res = ddf.set_index("y", divisions=["a", "b", "d", "e"], sorted=True)
assert_eq(res, df.set_index("y"))
# with sorted=True, divisions must be same length as df.divisions
with pytest.raises(ValueError):
ddf.set_index("y", divisions=["a", "b", "c", "d", "e"], sorted=True)
# Divisions must be sorted
with pytest.raises(ValueError):
ddf.set_index("y", divisions=["a", "b", "d", "c"], sorted=True)
@pytest.mark.slow
def test_set_index_consistent_divisions():
# See https://github.com/dask/dask/issues/3867
df = pd.DataFrame(
{"x": np.random.random(100), "y": np.random.random(100) // 0.2},
index=np.random.random(100),
)
ddf = dd.from_pandas(df, npartitions=4)
ddf = ddf.clear_divisions()
ctx = mp.get_context("spawn")
with ProcessPoolExecutor(8, ctx) as pool:
func = partial(_set_index, df=ddf, idx="x")
divisions_set = set(pool.map(func, range(100)))
assert len(divisions_set) == 1
def _set_index(i, df, idx):
return df.set_index(idx).divisions
def test_set_index_reduces_partitions_small(shuffle_method):
df = pd.DataFrame({"x": np.random.random(100)})
ddf = dd.from_pandas(df, npartitions=50)
ddf2 = ddf.set_index("x", shuffle=shuffle_method, npartitions="auto")
assert ddf2.npartitions < 10
def make_part(n):
return pd.DataFrame({"x": np.random.random(n), "y": np.random.random(n)})
def test_set_index_reduces_partitions_large(shuffle_method):
nbytes = 1e6
nparts = 50
n = int(nbytes / (nparts * 8))
ddf = dd.DataFrame(
{("x", i): (make_part, n) for i in range(nparts)},
"x",
make_part(1),
[None] * (nparts + 1),
)
ddf2 = ddf.set_index(
"x", shuffle=shuffle_method, npartitions="auto", partition_size=nbytes
)
assert 1 < ddf2.npartitions < 20
def test_set_index_doesnt_increase_partitions(shuffle_method):
nparts = 2
nbytes = 1e6
n = int(nbytes / (nparts * 8))
ddf = dd.DataFrame(
{("x", i): (make_part, n) for i in range(nparts)},
"x",
make_part(1),
[None] * (nparts + 1),
)
ddf2 = ddf.set_index(
"x", shuffle=shuffle_method, npartitions="auto", partition_size=nbytes
)
assert ddf2.npartitions <= ddf.npartitions
def test_set_index_detects_sorted_data(shuffle_method):
df = pd.DataFrame({"x": range(100), "y": range(100)})
ddf = dd.from_pandas(df, npartitions=10, name="x", sort=False)
ddf2 = ddf.set_index("x", shuffle=shuffle_method)
assert len(ddf2.dask) < ddf.npartitions * 4
def test_set_index_sorts():
# https://github.com/dask/dask/issues/2288
vals = np.array(
[
1348550149000000000,
1348550149000000000,
1348558142000000000,
1348558142000000000,
1348585928000000000,
1348585928000000000,
1348600739000000000,
1348601706000000000,
1348600739000000000,
1348601706000000000,
1348614789000000000,
1348614789000000000,
1348621037000000000,
1348621038000000000,
1348621040000000000,
1348621037000000000,
1348621038000000000,
1348621040000000000,
1348637628000000000,
1348638159000000000,
1348638160000000000,
1348638159000000000,
1348638160000000000,
1348637628000000000,
1348646354000000000,
1348646354000000000,
1348659107000000000,
1348657111000000000,
1348659107000000000,
1348657111000000000,
1348672876000000000,
1348672876000000000,
1348682787000000000,
1348681985000000000,
1348682787000000000,
1348681985000000000,
1348728167000000000,
1348728167000000000,
1348730745000000000,
1348730745000000000,
1348750198000000000,
1348750198000000000,
1348750198000000000,
1348753539000000000,
1348753539000000000,
1348753539000000000,
1348754449000000000,
1348754449000000000,
1348761333000000000,
1348761554000000000,
1348761610000000000,
1348761333000000000,
1348761554000000000,
1348761610000000000,
1348782624000000000,
1348782624000000000,
1348782624000000000,
1348782624000000000,
]
)
vals = pd.to_datetime(vals, unit="ns")
breaks = [10, 36, 58]
dfs = []
for i in range(len(breaks)):
lo = sum(breaks[:i])
hi = sum(breaks[i : i + 1])
dfs.append(pd.DataFrame({"timestamp": vals[lo:hi]}, index=range(lo, hi)))
ddf = dd.concat(dfs).clear_divisions()
assert ddf.set_index("timestamp").index.compute().is_monotonic_increasing is True
@pytest.mark.parametrize(
"engine", ["pandas", pytest.param("cudf", marks=pytest.mark.gpu)]
)
def test_set_index(engine):
if engine == "cudf":
# NOTE: engine == "cudf" requires cudf/dask_cudf,
# will be skipped by non-GPU CI.
dask_cudf = pytest.importorskip("dask_cudf")
dsk = {
("x", 0): pd.DataFrame({"a": [1, 2, 3], "b": [4, 2, 6]}, index=[0, 1, 3]),
("x", 1): pd.DataFrame({"a": [4, 5, 6], "b": [3, 5, 8]}, index=[5, 6, 8]),
("x", 2): pd.DataFrame({"a": [7, 8, 9], "b": [9, 1, 8]}, index=[9, 9, 9]),
}
d = dd.DataFrame(dsk, "x", meta, [0, 4, 9, 9])
if engine == "cudf":
d = dask_cudf.from_dask_dataframe(d)
full = d.compute()
d2 = d.set_index("b", npartitions=3)
assert d2.npartitions == 3
assert d2.index.name == "b"
assert_eq(d2, full.set_index("b"))
d3 = d.set_index(d.b, npartitions=3)
assert d3.npartitions == 3
assert d3.index.name == "b"
assert_eq(d3, full.set_index(full.b))
d4 = d.set_index("b")
assert d4.index.name == "b"
assert_eq(d4, full.set_index("b"))
d5 = d.set_index(["b"])
assert d5.index.name == "b"
assert_eq(d5, full.set_index(["b"]))
@pytest.mark.parametrize(
"engine", ["pandas", pytest.param("cudf", marks=pytest.mark.gpu)]
)
def test_set_index_interpolate(engine):
if engine == "cudf":
# NOTE: engine == "cudf" requires cudf/dask_cudf,
# will be skipped by non-GPU CI.
cudf = pytest.importorskip("cudf")
dask_cudf = pytest.importorskip("dask_cudf")
df = pd.DataFrame({"x": [4, 1, 1, 3, 3], "y": [1.0, 1, 1, 1, 2]})
if engine == "cudf":
gdf = cudf.from_pandas(df)
d = dask_cudf.from_cudf(gdf, npartitions=3)
else:
d = dd.from_pandas(df, 2)
d1 = d.set_index("x", npartitions=3)
assert d1.npartitions == 3
assert set(d1.divisions) == {1, 2, 4}
d2 = d.set_index("y", npartitions=3)
assert d2.divisions[0] == 1.0
assert 1.0 < d2.divisions[1] < d2.divisions[2] < 2.0
assert d2.divisions[3] == 2.0
@pytest.mark.parametrize(
"engine", ["pandas", pytest.param("cudf", marks=pytest.mark.gpu)]
)
def test_set_index_interpolate_int(engine):
if engine == "cudf":
# NOTE: engine == "cudf" requires cudf/dask_cudf,
# will be skipped by non-GPU CI.
cudf = pytest.importorskip("cudf")
dask_cudf = pytest.importorskip("dask_cudf")
L = sorted(list(range(0, 200, 10)) * 2)
df = pd.DataFrame({"x": 2 * L})
if engine == "cudf":
gdf = cudf.from_pandas(df)
d = dask_cudf.from_cudf(gdf, npartitions=2)
else:
d = dd.from_pandas(df, 2)
d1 = d.set_index("x", npartitions=10)
assert all(np.issubdtype(type(x), np.integer) for x in d1.divisions)
@pytest.mark.parametrize(
"engine", ["pandas", pytest.param("cudf", marks=pytest.mark.gpu)]
)
def test_set_index_interpolate_large_uint(engine):
if engine == "cudf":
# NOTE: engine == "cudf" requires cudf/dask_cudf,
# will be skipped by non-GPU CI.
cudf = pytest.importorskip("cudf")
dask_cudf = pytest.importorskip("dask_cudf")
"""This test is for #7304"""
df = pd.DataFrame(
{"x": np.array([612509347682975743, 616762138058293247], dtype=np.uint64)}
)
if engine == "cudf":
gdf = cudf.from_pandas(df)
d = dask_cudf.from_cudf(gdf, npartitions=2)
else:
d = dd.from_pandas(df, 1)
d1 = d.set_index("x", npartitions=1)
assert d1.npartitions == 1
assert set(d1.divisions) == {612509347682975743, 616762138058293247}
def test_set_index_timezone():
s_naive = pd.Series(pd.date_range("20130101", periods=3))
s_aware = pd.Series(pd.date_range("20130101", periods=3, tz="US/Eastern"))
df = pd.DataFrame({"tz": s_aware, "notz": s_naive})
d = dd.from_pandas(df, 2)
d1 = d.set_index("notz", npartitions=1)
s1 = pd.DatetimeIndex(s_naive.values, dtype=s_naive.dtype)
assert d1.divisions[0] == s_naive[0] == s1[0]
assert d1.divisions[-1] == s_naive[2] == s1[2]
# We currently lose "freq". Converting data with pandas-defined dtypes
# to numpy or pure Python can be lossy like this.
d2 = d.set_index("tz", npartitions=1)
s2 = pd.DatetimeIndex(s_aware, dtype=s_aware.dtype)
assert d2.divisions[0] == s2[0]
assert d2.divisions[-1] == s2[2]
assert d2.divisions[0].tz == s2[0].tz
assert d2.divisions[0].tz is not None
s2badtype = pd.DatetimeIndex(s_aware.values, dtype=s_naive.dtype)
if PANDAS_GT_120:
# starting with pandas 1.2.0, comparing equality of timestamps with different
# timezones returns False instead of raising an error
assert not d2.divisions[0] == s2badtype[0]
else:
with pytest.raises(TypeError):
d2.divisions[0] == s2badtype[0]
def test_set_index_npartitions():
# https://github.com/dask/dask/issues/6974
data = pd.DataFrame(
index=pd.Index(
["A", "A", "A", "A", "A", "A", "A", "A", "A", "B", "B", "B", "C"]
)
)
data = dd.from_pandas(data, npartitions=2)
output = data.reset_index().set_index("index", npartitions=1)
assert output.npartitions == 1
@pytest.mark.parametrize("unit", ["ns", "us"])
def test_set_index_datetime_precision(unit):
# https://github.com/dask/dask/issues/6864
df = pd.DataFrame(
[
[1567703791155681, 1],
[1567703792155681, 2],
[1567703790155681, 0],
[1567703793155681, 3],
],
columns=["ts", "rank"],
)
df.ts = pd.to_datetime(df.ts, unit=unit)
ddf = dd.from_pandas(df, npartitions=2)
ddf = ddf.set_index("ts")
assert_eq(ddf, df.set_index("ts"))
@pytest.mark.parametrize("drop", [True, False])
def test_set_index_drop(drop):
pdf = pd.DataFrame(
{
"A": list("ABAABBABAA"),
"B": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
"C": [1, 2, 3, 2, 1, 3, 2, 4, 2, 3],
}
)
ddf = dd.from_pandas(pdf, 3)
assert_eq(ddf.set_index("A", drop=drop), pdf.set_index("A", drop=drop))
assert_eq(ddf.set_index("B", drop=drop), pdf.set_index("B", drop=drop))
assert_eq(ddf.set_index("C", drop=drop), pdf.set_index("C", drop=drop))
assert_eq(ddf.set_index(ddf.A, drop=drop), pdf.set_index(pdf.A, drop=drop))
assert_eq(ddf.set_index(ddf.B, drop=drop), pdf.set_index(pdf.B, drop=drop))
assert_eq(ddf.set_index(ddf.C, drop=drop), pdf.set_index(pdf.C, drop=drop))
# numeric columns
pdf = pd.DataFrame(
{
0: list("ABAABBABAA"),
1: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
2: [1, 2, 3, 2, 1, 3, 2, 4, 2, 3],
}
)
ddf = dd.from_pandas(pdf, 3)
assert_eq(ddf.set_index(0, drop=drop), pdf.set_index(0, drop=drop))
assert_eq(ddf.set_index(2, drop=drop), pdf.set_index(2, drop=drop))
def test_set_index_raises_error_on_bad_input():
df = pd.DataFrame({"a": [1, 2, 3, 4, 5, 6, 7], "b": [7, 6, 5, 4, 3, 2, 1]})
ddf = dd.from_pandas(df, 2)
msg = r"Dask dataframe does not yet support multi-indexes"
with pytest.raises(NotImplementedError) as err:
ddf.set_index(["a", "b"])
assert msg in str(err.value)
with pytest.raises(NotImplementedError) as err:
ddf.set_index([["a", "b"]])
assert msg in str(err.value)
with pytest.raises(NotImplementedError) as err:
ddf.set_index([["a"]])
assert msg in str(err.value)
def test_set_index_sorted_true():
df = pd.DataFrame({"x": [1, 2, 3, 4], "y": [10, 20, 20, 40], "z": [4, 3, 2, 1]})
a = dd.from_pandas(df, 2, sort=False)
assert not a.known_divisions
b = a.set_index("x", sorted=True)
assert b.known_divisions
assert set(a.dask).issubset(set(b.dask))
for drop in [True, False]:
assert_eq(a.set_index("x", drop=drop), df.set_index("x", drop=drop))
assert_eq(
a.set_index(a.x, sorted=True, drop=drop), df.set_index(df.x, drop=drop)
)
assert_eq(
a.set_index(a.x + 1, sorted=True, drop=drop),
df.set_index(df.x + 1, drop=drop),
)
with pytest.raises(ValueError):
a.set_index(a.z, sorted=True)
def test_set_index_sorted_single_partition():
df = pd.DataFrame({"x": [1, 2, 3, 4], "y": [1, 0, 1, 0]})
ddf = dd.from_pandas(df, npartitions=1)
assert_eq(ddf.set_index("x", sorted=True), df.set_index("x"))
def test_set_index_sorted_min_max_same():
a = pd.DataFrame({"x": [1, 2, 3], "y": [0, 0, 0]})
b = pd.DataFrame({"x": [1, 2, 3], "y": [1, 1, 1]})
aa = delayed(a)
bb = delayed(b)
df = dd.from_delayed([aa, bb], meta=a)
assert not df.known_divisions
df2 = df.set_index("y", sorted=True)
assert df2.divisions == (0, 1, 1)
def test_set_index_empty_partition():
test_vals = [1, 2, 3]
converters = [int, float, str, lambda x: pd.to_datetime(x, unit="ns")]
for conv in converters:
df = pd.DataFrame(
[{"x": conv(i), "y": i} for i in test_vals], columns=["x", "y"]
)
ddf = dd.concat(
[
dd.from_pandas(df, npartitions=1),
dd.from_pandas(df[df.y > df.y.max()], npartitions=1),
]
)
assert any(ddf.get_partition(p).compute().empty for p in range(ddf.npartitions))
assert assert_eq(ddf.set_index("x"), df.set_index("x"))
def test_set_index_on_empty():
test_vals = [1, 2, 3, 4]
converters = [int, float, str, lambda x: pd.to_datetime(x, unit="ns")]
for converter in converters:
df = pd.DataFrame([{"x": converter(x), "y": x} for x in test_vals])
ddf = dd.from_pandas(df, npartitions=4)
assert ddf.npartitions > 1
ddf = ddf[ddf.y > df.y.max()].set_index("x")
expected_df = df[df.y > df.y.max()].set_index("x")
assert assert_eq(ddf, expected_df, **CHECK_FREQ)
assert ddf.npartitions == 1
def test_set_index_categorical():
# https://github.com/dask/dask/issues/5671
order = list(reversed(string.ascii_letters))
values = list(string.ascii_letters)
random.shuffle(values)
dtype = pd.api.types.CategoricalDtype(order, ordered=True)
df = pd.DataFrame({"A": pd.Categorical(values, dtype=dtype), "B": 1})
result = dd.from_pandas(df, npartitions=2).set_index("A")
assert len(result) == len(df)
# sorted with the metric defined by the Categorical
divisions = pd.Categorical(result.divisions, dtype=dtype)
assert_categorical_equal(divisions, divisions.sort_values())
def test_compute_divisions():
from dask.dataframe.shuffle import compute_and_set_divisions
df = pd.DataFrame(
{"x": [1, 2, 3, 4], "y": [10, 20, 20, 40], "z": [4, 3, 2, 1]},
index=[1, 3, 10, 20],
)
a = dd.from_pandas(df, 2, sort=False)
assert not a.known_divisions
b = compute_and_set_divisions(copy(a))
assert_eq(a, b, check_divisions=False)
assert b.known_divisions
def test_empty_partitions():
# See https://github.com/dask/dask/issues/2408
df = pd.DataFrame({"a": list(range(10))})
df["b"] = df["a"] % 3
df["c"] = df["b"].astype(str)
ddf = dd.from_pandas(df, npartitions=3)
ddf = ddf.set_index("b")
ddf = ddf.repartition(npartitions=3)
ddf.get_partition(0).compute()
assert_eq(ddf, df.set_index("b"))
ddf = ddf.set_index("c")
assert_eq(ddf, df.set_index("b").set_index("c"))
def test_remove_nans():
tests = [
((1, 1, 2), (1, 1, 2)),
((None, 1, 2), (1, 1, 2)),
((1, None, 2), (1, 2, 2)),
((1, 2, None), (1, 2, 2)),
((1, 2, None, None), (1, 2, 2, 2)),
((None, None, 1, 2), (1, 1, 1, 2)),
((1, None, None, 2), (1, 2, 2, 2)),
((None, 1, None, 2, None, 3, None), (1, 1, 2, 2, 3, 3, 3)),
]
converters = [
(int, np.nan),
(float, np.nan),
(str, np.nan),
(lambda x: pd.to_datetime(x, unit="ns"), np.datetime64("NaT")),
]
for conv, none_val in converters:
for inputs, expected in tests:
params = [none_val if x is None else conv(x) for x in inputs]
expected = [conv(x) for x in expected]
assert remove_nans(params) == expected
@pytest.mark.slow
def test_gh_2730():
large = pd.DataFrame({"KEY": np.arange(0, 50000)})
small = pd.DataFrame({"KEY": np.arange(25, 500)})
dd_left = dd.from_pandas(small, npartitions=3)
dd_right = dd.from_pandas(large, npartitions=257)
with dask.config.set(shuffle="tasks", scheduler="sync"):
dd_merged = dd_left.merge(dd_right, how="inner", on="KEY")
result = dd_merged.compute()
expected = large.merge(small, how="inner", on="KEY")
tm.assert_frame_equal(result.sort_values("KEY").reset_index(drop=True), expected)
@pytest.mark.parametrize("npartitions", [None, "auto"])
def test_set_index_does_not_repeat_work_due_to_optimizations(npartitions):
# Atomic counter
count = itertools.count()
def increment():
next(count)
def make_part(dummy, n):
return pd.DataFrame({"x": np.random.random(n), "y": np.random.random(n)})
nbytes = 1e6
nparts = 50
n = int(nbytes / (nparts * 8))
dsk = {("inc", i): (increment,) for i in range(nparts)}
dsk.update({("x", i): (make_part, ("inc", i), n) for i in range(nparts)})
ddf = dd.DataFrame(dsk, "x", make_part(None, 1), [None] * (nparts + 1))
ddf.set_index("x", npartitions=npartitions)
ntimes = next(count)
assert ntimes == nparts
def test_set_index_errors_with_inplace_kwarg():
df = pd.DataFrame({"a": [9, 8, 7], "b": [6, 5, 4], "c": [3, 2, 1]})
ddf = dd.from_pandas(df, npartitions=1)
ddf.set_index("a")
with pytest.raises(NotImplementedError):
ddf.set_index("a", inplace=True)
def test_set_index_timestamp():
df = pd.DataFrame({"A": pd.date_range("2000", periods=12, tz="US/Central"), "B": 1})
ddf = dd.from_pandas(df, 2)
divisions = (
pd.Timestamp("2000-01-01 00:00:00-0600", tz="US/Central"),
pd.Timestamp("2000-01-12 00:00:00-0600", tz="US/Central"),
)
# Note: `freq` is lost during round trip
df2 = df.set_index("A")
ddf_new_div = ddf.set_index("A", divisions=divisions)
for (ts1, ts2) in zip(divisions, ddf_new_div.divisions):
assert ts1.value == ts2.value
assert ts1.tz == ts2.tz
assert_eq(df2, ddf_new_div, **CHECK_FREQ)
assert_eq(df2, ddf.set_index("A"), **CHECK_FREQ)
@pytest.mark.parametrize("compression", [None, "ZLib"])
def test_disk_shuffle_with_compression_option(compression):
# test if dataframe shuffle works both with and without compression
with dask.config.set({"dataframe.shuffle-compression": compression}):
test_shuffle("disk")
@pytest.mark.parametrize("compression", ["UNKOWN_COMPRESSION_ALGO"])
def test_disk_shuffle_with_unknown_compression(compression):
# test if dask raises an error in case of fault config string
with dask.config.set({"dataframe.shuffle-compression": compression}):
with pytest.raises(
ImportError,
match=(
"Not able to import and load {} as compression algorithm."
"Please check if the library is installed and supported by Partd.".format(
compression
)
),
):
test_shuffle("disk")
def test_disk_shuffle_check_actual_compression():
# test if the compression switch is really respected by testing the size of the actual partd-data on disk
def generate_raw_partd_file(compression):
# generate and write a dummy dataframe to disk and return the raw data bytes
df1 = pd.DataFrame({"a": list(range(10000))})
df1["b"] = (df1["a"] * 123).astype(str)
with dask.config.set({"dataframe.shuffle-compression": compression}):
p1 = maybe_buffered_partd(buffer=False, tempdir=None)()
p1.append({"x": df1})
# get underlying filename from partd - depending on nested structure of partd object
filename = (
p1.partd.partd.filename("x") if compression else p1.partd.filename("x")
)
with open(filename, "rb") as f:
return f.read()
# get compressed and uncompressed raw data
uncompressed_data = generate_raw_partd_file(compression=None)
compressed_data = generate_raw_partd_file(compression="BZ2")
assert len(uncompressed_data) > len(compressed_data)
@pytest.mark.parametrize("ignore_index", [None, True, False])
@pytest.mark.parametrize(
"on", ["id", "name", ["id", "name"], pd.Series(["id", "name"])]
)
@pytest.mark.parametrize("max_branch", [None, 4])
def test_dataframe_shuffle_on_arg(on, ignore_index, max_branch, shuffle_method):
# Make sure DataFrame.shuffle API returns the same result
# whether the ``on`` argument is a list of column names,
# or a separate DataFrame with equivalent values...
df_in = dask.datasets.timeseries(
"2000",
"2001",
types={"value": float, "name": str, "id": int},
freq="2H",
partition_freq="1M",
seed=1,
)
if isinstance(on, str):
ext_on = df_in[[on]].copy()
else:
ext_on = df_in[on].copy()
df_out_1 = df_in.shuffle(
on, shuffle=shuffle_method, ignore_index=ignore_index, max_branch=max_branch
)
df_out_2 = df_in.shuffle(ext_on, shuffle=shuffle_method, ignore_index=ignore_index)
assert_eq(df_out_1, df_out_2, check_index=(not ignore_index))
# disk shuffling doesn't support ignore_index
if ignore_index and shuffle_method == "tasks":
assert df_out_1.index.dtype != df_in.index.dtype
else:
assert df_out_1.index.dtype == df_in.index.dtype
def test_set_index_overlap():
A = pd.DataFrame({"key": [1, 2, 3, 4, 4, 5, 6, 7], "value": list("abcd" * 2)})
a = dd.from_pandas(A, npartitions=2)
a = a.set_index("key", sorted=True)
b = a.repartition(divisions=a.divisions)
assert_eq(a, b)
def test_set_index_overlap_2():
data = pd.DataFrame(
index=pd.Index(
["A", "A", "A", "A", "A", "A", "A", "A", "A", "B", "B", "B", "C"],
name="index",
)
)
ddf1 = dd.from_pandas(data, npartitions=2)
ddf2 = ddf1.reset_index().repartition(8).set_index("index", sorted=True)
assert_eq(ddf1, ddf2)
assert ddf2.npartitions == 8
def test_shuffle_hlg_layer():
# This test checks that the `ShuffleLayer` HLG Layer
# is used (as expected) for a multi-stage shuffle.
ddf = dd.from_pandas(
pd.DataFrame({"a": np.random.randint(0, 10, 100)}), npartitions=10
)
# Disk-based shuffle doesn't use HLG layers at the moment, so we only test tasks
ddf_shuffled = ddf.shuffle("a", max_branch=3, shuffle="tasks")
keys = [(ddf_shuffled._name, i) for i in range(ddf_shuffled.npartitions)]
# Cull the HLG
dsk = ddf_shuffled.__dask_graph__()
dsk_culled = dsk.cull(set(keys))
assert isinstance(dsk_culled, dask.highlevelgraph.HighLevelGraph)
# Ensure we have ShuffleLayers
assert any(
isinstance(layer, dd.shuffle.ShuffleLayer) for layer in dsk.layers.values()
)
# Check that the ShuffleLayers are non-materialized
for layer in dsk.layers.values():
if isinstance(layer, dd.shuffle.ShuffleLayer):
assert not hasattr(layer, "_cached_dict")
# Make sure HLG culling reduces the graph size
assert len(dsk_culled) < len(dsk)
# Check ShuffleLayer names
for name, layer in dsk.layers.items():
if isinstance(layer, dd.shuffle.ShuffleLayer):
assert name.startswith("shuffle-")
# Since we already culled the HLG,
# culling the dictionary should not change the graph
dsk_dict = dict(dsk_culled)
dsk_dict_culled, _ = cull(dsk_dict, keys)
assert dsk_dict_culled == dsk_dict
@pytest.mark.parametrize(
"npartitions",
[
10, # ShuffleLayer
1, # SimpleShuffleLayer
],
)
def test_shuffle_hlg_layer_serialize(npartitions):
ddf = dd.from_pandas(
pd.DataFrame({"a": np.random.randint(0, 10, 100)}), npartitions=npartitions
)
# Disk-based shuffle doesn't use HLG layers at the moment, so we only test tasks
ddf_shuffled = ddf.shuffle("a", max_branch=3, shuffle="tasks")
# Ensure shuffle layers can be serialized and don't result in
# the underlying low-level graph being materialized
dsk = ddf_shuffled.__dask_graph__()
for layer in dsk.layers.values():
if not isinstance(layer, dd.shuffle.SimpleShuffleLayer):
continue
assert not hasattr(layer, "_cached_dict")
layer_roundtrip = pickle.loads(pickle.dumps(layer))
assert type(layer_roundtrip) == type(layer)
assert not hasattr(layer_roundtrip, "_cached_dict")
assert layer_roundtrip.keys() == layer.keys()
def test_set_index_nan_partition():
d[d.a > 3].set_index("a") # Set index with 1 null partition
d[d.a > 1].set_index("a", sorted=True) # Set sorted index with 0 null partitions
a = d[d.a > 3].set_index("a", sorted=True) # Set sorted index with 1 null partition
assert_eq(a, a)
@pytest.mark.parametrize("ascending", [True, False])
@pytest.mark.parametrize("by", ["a", "b"])
@pytest.mark.parametrize("nelem", [10, 500])
def test_sort_values(nelem, by, ascending):
np.random.seed(0)
df = pd.DataFrame()
df["a"] = np.ascontiguousarray(np.arange(nelem)[::-1])
df["b"] = np.arange(100, nelem + 100)
ddf = dd.from_pandas(df, npartitions=10)
# run on single-threaded scheduler for debugging purposes
with dask.config.set(scheduler="single-threaded"):
got = ddf.sort_values(by=by, ascending=ascending)
expect = df.sort_values(by=by, ascending=ascending)
dd.assert_eq(got, expect, check_index=False)
@pytest.mark.parametrize("ascending", [True, False, [False, True], [True, False]])
@pytest.mark.parametrize("by", [["a", "b"], ["b", "a"]])
@pytest.mark.parametrize("nelem", [10, 500])
def test_sort_values_single_partition(nelem, by, ascending):
np.random.seed(0)
df = pd.DataFrame()
df["a"] = np.ascontiguousarray(np.arange(nelem)[::-1])
df["b"] = np.arange(100, nelem + 100)
ddf = dd.from_pandas(df, npartitions=1)
# run on single-threaded scheduler for debugging purposes
with dask.config.set(scheduler="single-threaded"):
got = ddf.sort_values(by=by, ascending=ascending)
expect = df.sort_values(by=by, ascending=ascending)
dd.assert_eq(got, expect, check_index=False)
@pytest.mark.parametrize("na_position", ["first", "last"])
@pytest.mark.parametrize("ascending", [True, False])
@pytest.mark.parametrize("by", ["a", "b"])
@pytest.mark.parametrize("nparts", [1, 5])
@pytest.mark.parametrize(
"data",
[
{
"a": list(range(50)) + [None] * 50 + list(range(50, 100)), # type: ignore
"b": [None] * 100 + list(range(100, 150)), # type: ignore
},
{
"a": list(range(15)) + [None] * 5, # type: ignore
"b": list(reversed(range(20))),
},
],
)
def test_sort_values_with_nulls(data, nparts, by, ascending, na_position):
df = pd.DataFrame(data)
ddf = dd.from_pandas(df, npartitions=nparts)
# run on single-threaded scheduler for debugging purposes
with dask.config.set(scheduler="single-threaded"):
got = ddf.sort_values(by=by, ascending=ascending, na_position=na_position)
expect = df.sort_values(by=by, ascending=ascending, na_position=na_position)
dd.assert_eq(got, expect, check_index=False)
def test_shuffle_values_raises():
df = pd.DataFrame({"a": [1, 3, 2]})
ddf = dd.from_pandas(df, npartitions=3)
with pytest.raises(
ValueError, match="na_position must be either 'first' or 'last'"
):
ddf.sort_values(by="a", na_position="invalid")
def test_shuffle_by_as_list():
df = pd.DataFrame({"a": [1, 3, 2]})
ddf = dd.from_pandas(df, npartitions=3)
with dask.config.set(scheduler="single-threaded"):
got = ddf.sort_values(by=["a"], npartitions="auto", ascending=True)
expect = pd.DataFrame({"a": [1, 2, 3]})
dd.assert_eq(got, expect, check_index=False)
def test_noop():
assert _noop(1, None) == 1
assert _noop("test", None) == "test"
@pytest.mark.parametrize("by", [["a", "b"], ["b", "a"]])
@pytest.mark.parametrize("nparts", [1, 10])
def test_sort_values_custom_function(by, nparts):
df = pd.DataFrame({"a": [1, 2, 3] * 20, "b": [4, 5, 6, 7] * 15})
ddf = dd.from_pandas(df, npartitions=nparts)
def f(partition, by_columns, ascending, na_position, **kwargs):
return partition.sort_values(
by_columns, ascending=ascending, na_position=na_position
)
# run on single-threaded scheduler for debugging purposes
with dask.config.set(scheduler="single-threaded"):
got = ddf.sort_values(
by=by[0], sort_function=f, sort_function_kwargs={"by_columns": by}
)
expect = df.sort_values(by=by)
dd.assert_eq(got, expect, check_index=False)
def test_sort_values_bool_ascending():
df = pd.DataFrame({"a": [1, 2, 3] * 20, "b": [4, 5, 6, 7] * 15})
ddf = dd.from_pandas(df, npartitions=10)
# attempt to sort with list of ascending booleans
with pytest.raises(NotImplementedError):
ddf.sort_values(by="a", ascending=[True, False])
| 32.982156 | 109 | 0.624355 | [
"BSD-3-Clause"
] | alikefia/dask | dask/dataframe/tests/test_shuffle.py | 44,361 | Python |
from ecsclient.common.other import user_info
user_info = user_info
| 17 | 44 | 0.838235 | [
"Apache-2.0"
] | bcgov/nr-dell-objectstore_admin | ecsclient/v2/other/__init__.py | 68 | Python |
from storage_bucket.bucket import get_bucket
from storage_bucket.client import get_client
from storage_bucket.create import create_bucket
from storage_bucket.delete import delete_bucket
from storage_bucket.delete_file import delete_file
from storage_bucket.download_file import download_file
from storage_bucket.list import list_bucket_names, list_buckets
from storage_bucket.list_files import list_files
from storage_bucket.upload_file import upload_file
__all__ = [ # noqa: WPS410
'get_bucket',
'get_client',
'create_bucket',
'delete_file',
'delete_bucket',
'download_file',
'list_files',
'list_bucket_names',
'list_buckets',
'upload_file',
]
| 29.869565 | 63 | 0.79476 | [
"MIT"
] | thomasborgen/storage-bucket | storage_bucket/__init__.py | 687 | Python |
import os
from dataclasses import dataclass
from tequila import TequilaException, BitString, TequilaWarning
from tequila.hamiltonian import QubitHamiltonian
from tequila.wavefunction import QubitWaveFunction
from tequila.hamiltonian.paulis import Sp, Sm, Qp, Qm
from tequila.circuit import QCircuit, gates, _gates_impl
from tequila.objective.objective import Variable, Variables, ExpectationValue
from tequila.simulators.simulator_api import simulate
from tequila.utils import to_float
from tequila.objective import assign_variable
from .encodings import known_encodings
import typing, numpy, numbers, copy
from itertools import product
# if you are experiencing import errors you need to update openfermion
# required is version >= 1.0
# otherwise replace with from openfermion.hamiltonians import MolecularData
import openfermion
from openfermion.chem import MolecularData
import warnings
@dataclass
class ActiveSpaceData:
active_orbitals: list # active orbitals (spatial, c1)
reference_orbitals: list # reference orbitals (spatial, c1)
def __str__(self):
result = "Active Space Data:\n"
result += "{key:15} : {value:15} \n".format(key="active_orbitals", value=str(self.active_orbitals))
result += "{key:15} : {value:15} \n".format(key="reference_orbitals",
value=str(self.reference_orbitals))
result += "{key:15} : {value:15} \n".format(key="frozen_docc", value=str(self.frozen_docc))
result += "{key:15} : {value:15} \n".format(key="frozen_uocc", value=str(self.frozen_uocc))
return result
@property
def frozen_reference_orbitals(self):
return [i for i in self.reference_orbitals if i not in self.active_orbitals]
@property
def active_reference_orbitals(self):
return [i for i in self.reference_orbitals if i in self.active_orbitals]
class FermionicGateImpl(gates.QubitExcitationImpl):
# keep the overview in circuits
def __init__(self, generator, p0, transformation, *args, **kwargs):
super().__init__(generator=generator, target=generator.qubits, p0=p0, *args, **kwargs)
self._name = "FermionicExcitation"
self.transformation=transformation
def compile(self):
return gates.Trotterized(generator=self.generator, control=self.control, angle=self.parameter, steps=1)
def prepare_product_state(state: BitString) -> QCircuit:
"""Small convenience function
Parameters
----------
state :
product state encoded into a bitstring
state: BitString :
Returns
-------
type
unitary circuit which prepares the product state
"""
result = QCircuit()
for i, v in enumerate(state.array):
if v == 1:
result += gates.X(target=i)
return result
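# Illustrative usage sketch (not part of the original source): preparing the product
# state |0110> from a BitString; only the occupied positions receive an X gate.
# >>> ref = BitString.from_array([0, 1, 1, 0])
# >>> circuit = prepare_product_state(ref)   # X gates on qubits 1 and 2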
@dataclass
class ParametersQC:
"""Specialization of ParametersHamiltonian"""
basis_set: str = None # Quantum chemistry basis set
geometry: str = None # geometry of the underlying molecule (units: Angstrom!),
# this can be a filename leading to an .xyz file or the geometry given as a string
description: str = ""
multiplicity: int = 1
charge: int = 0
name: str = None
@property
def n_electrons(self, *args, **kwargs):
return self.get_nuc_charge() - self.charge
def get_nuc_charge(self):
return sum(self.get_atom_number(name=atom) for atom in self.get_atoms())
def get_atom_number(self, name):
atom_numbers={"h":1, "he":2, "li":3, "be":4, "b":5, "c":6, "n":7, "o":8, "f":9, "ne":10, "na":11, "mg":12, "al":13, "si":14, "ph":15, "s":16, "cl":17, "ar":18}
if name.lower() in atom_numbers:
return atom_numbers[name.lower()]
try:
import periodictable as pt
atom=name.lower()
atom[0]=atom[0].upper()
element = pt.elements.symbol(atom)
return element.number()
except:
raise TequilaException("can not assign atomic number to element {}\npip install periodictable will fix it".format(atom))
def get_atoms(self):
return [x[0] for x in self.get_geometry()]
def __post_init__(self,*args, **kwargs):
if self.name is None and self.geometry is None:
raise TequilaException("no geometry or name given to molecule\nprovide geometry=filename.xyz or geometry=`h 0.0 0.0 0.0\\n...`\nor name=whatever with file whatever.xyz being present")
# auto naming
if self.name is None:
if ".xyz" in self.geometry:
self.name=self.geometry.split(".xyz")[0]
if self.description is None:
coord, description = self.read_xyz_from_file()
self.description=description
else:
atoms=self.get_atoms()
atom_names=sorted(list(set(atoms)), key=lambda x: self.get_atom_number(x), reverse=True)
if self.name is None:
drop_ones=lambda x: "" if x==1 else x
self.name="".join(["{}{}".format(x,drop_ones(atoms.count(x))) for x in atom_names])
self.name = self.name.lower()
if self.geometry is None:
self.geometry=self.name+".xyz"
if ".xyz" in self.geometry and not os.path.isfile(self.geometry):
raise TequilaException("could not find file for molecular coordinates {}".format(self.geometry))
@property
def filename(self):
""" """
return "{}_{}".format(self.name, self.basis_set)
@property
def molecular_data_param(self) -> dict:
""":return: Give back all parameters for the MolecularData format from openfermion as dictionary"""
return {'basis': self.basis_set, 'geometry': self.get_geometry(), 'description': self.description,
'charge': self.charge, 'multiplicity': self.multiplicity, 'filename': self.filename
}
@staticmethod
def format_element_name(string):
"""OpenFermion uses case sensitive hash tables for chemical elements
I.e. you need to name Lithium: 'Li' and 'li' or 'LI' will not work
this convenience function does the naming
:return: first letter converted to upper rest to lower
Parameters
----------
string :
Returns
-------
"""
assert (len(string) > 0)
assert (isinstance(string, str))
fstring = string[0].upper() + string[1:].lower()
return fstring
@staticmethod
def convert_to_list(geometry):
"""Convert a molecular structure given as a string into a list suitable for openfermion
Parameters
----------
geometry :
a string specifying a mol. structure. E.g. geometry="h 0.0 0.0 0.0\n h 0.0 0.0 1.0"
Returns
-------
type
A list with the correct format for openfermion E.g return [ ['h',[0.0,0.0,0.0], [..]]
"""
result = []
# Remove blank lines
lines = [l for l in geometry.split("\n") if l]
for line in lines:
words = line.split()
# Pad coordinates
if len(words) < 4:
words += [0.0] * (4 - len(words))
try:
tmp = (ParametersQC.format_element_name(words[0]),
(float(words[1]), float(words[2]), float(words[3])))
result.append(tmp)
except ValueError:
print("get_geometry list unknown line:\n ", line, "\n proceed with caution!")
return result
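    # Illustrative sketch (not part of the original source): expected parsing behaviour
    # for a hypothetical H2 geometry string.
    # >>> ParametersQC.convert_to_list("h 0.0 0.0 0.0\n h 0.0 0.0 0.7")
    # [('H', (0.0, 0.0, 0.0)), ('H', (0.0, 0.0, 0.7))]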
def get_geometry_string(self) -> str:
"""returns the geometry as a string
:return: geometry string
Parameters
----------
Returns
-------
"""
if self.geometry.split('.')[-1] == 'xyz':
geomstring, comment = self.read_xyz_from_file(self.geometry)
if comment is not None:
self.description = comment
return geomstring
else:
return self.geometry
def get_geometry(self):
"""Returns the geometry
If a xyz filename was given the file is read out
otherwise it is assumed that the geometry was given as string
which is then reformatted as a list usable as input for openfermion
:return: geometry as list
e.g. [(h,(0.0,0.0,0.35)),(h,(0.0,0.0,-0.35))]
Units: Angstrom!
Parameters
----------
Returns
-------
"""
if self.geometry.split('.')[-1] == 'xyz':
geomstring, comment = self.read_xyz_from_file(self.geometry)
if self.description == '':
self.description = comment
return self.convert_to_list(geomstring)
elif self.geometry is not None:
return self.convert_to_list(self.geometry)
else:
raise Exception("Parameters.qc.geometry is None")
@staticmethod
def read_xyz_from_file(filename):
"""Read XYZ filetype for molecular structures
https://en.wikipedia.org/wiki/XYZ_file_format
Units: Angstrom!
Parameters
----------
filename :
return:
Returns
-------
"""
with open(filename, 'r') as file:
content = file.readlines()
natoms = int(content[0])
comment = str(content[1]).strip('\n')
coord = ''
for i in range(natoms):
coord += content[2 + i]
return coord, comment
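    # Illustrative sketch (not part of the original source): a minimal XYZ file has the
    # atom count in the first line, a comment line, then one "symbol x y z" row per atom:
    #   2
    #   hydrogen molecule (units: Angstrom)
    #   H 0.0 0.0 0.0
    #   H 0.0 0.0 0.7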
@dataclass
class ClosedShellAmplitudes:
""" """
tIjAb: numpy.ndarray = None
tIA: numpy.ndarray = None
def make_parameter_dictionary(self, threshold=1.e-8):
"""
Parameters
----------
threshold :
(Default value = 1.e-8)
Returns
-------
"""
variables = {}
if self.tIjAb is not None:
nvirt = self.tIjAb.shape[2]
nocc = self.tIjAb.shape[0]
assert (self.tIjAb.shape[1] == nocc and self.tIjAb.shape[3] == nvirt)
for (I, J, A, B), value in numpy.ndenumerate(self.tIjAb):
if not numpy.isclose(value, 0.0, atol=threshold):
variables[(nocc + A, I, nocc + B, J)] = value
if self.tIA is not None:
nocc = self.tIA.shape[0]
for (I, A), value, in numpy.ndenumerate(self.tIA):
if not numpy.isclose(value, 0.0, atol=threshold):
variables[(A + nocc, I)] = value
return dict(sorted(variables.items(), key=lambda x: numpy.abs(x[1]), reverse=True))
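    # Illustrative sketch (not part of the original source): keys are spatial-orbital
    # tuples (a, i, b, j) with virtual indices offset by the number of occupied orbitals.
    # >>> amps = ClosedShellAmplitudes(tIjAb=numpy.zeros((1, 1, 1, 1)))
    # >>> amps.tIjAb[0, 0, 0, 0] = 0.1
    # >>> amps.make_parameter_dictionary()   # -> {(1, 0, 1, 0): 0.1}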
@dataclass
class Amplitudes:
"""Coupled-Cluster Amplitudes
We adopt the Psi4 notation for consistency
I,A for alpha
i,a for beta
Parameters
----------
Returns
-------
"""
@classmethod
def from_closed_shell(cls, cs: ClosedShellAmplitudes):
"""
Initialize from closed-shell Amplitude structure
Parameters
----------
cs: ClosedShellAmplitudes :
Returns
-------
"""
tijab = cs.tIjAb - numpy.einsum("ijab -> ijba", cs.tIjAb, optimize='greedy')
return cls(tIjAb=cs.tIjAb, tIA=cs.tIA, tiJaB=cs.tIjAb, tia=cs.tIA, tijab=tijab, tIJAB=tijab)
tIjAb: numpy.ndarray = None
tIA: numpy.ndarray = None
tiJaB: numpy.ndarray = None
tijab: numpy.ndarray = None
tIJAB: numpy.ndarray = None
tia: numpy.ndarray = None
def make_parameter_dictionary(self, threshold=1.e-8):
"""
Parameters
----------
threshold :
(Default value = 1.e-8)
Neglect amplitudes below the threshold
Returns
-------
Dictionary of tequila variables (hash is in the style of (a,i,b,j))
"""
variables = {}
if self.tIjAb is not None:
nvirt = self.tIjAb.shape[2]
nocc = self.tIjAb.shape[0]
assert (self.tIjAb.shape[1] == nocc and self.tIjAb.shape[3] == nvirt)
            for (I, j, A, b), value in numpy.ndenumerate(self.tIjAb):
                if not numpy.isclose(value, 0.0, atol=threshold):
                    variables[(2 * (nocc + A), 2 * I, 2 * (nocc + b) + 1, 2 * j + 1)] = value
            for (i, J, a, B), value in numpy.ndenumerate(self.tiJaB):
                if not numpy.isclose(value, 0.0, atol=threshold):
                    variables[(2 * (nocc + a) + 1, 2 * i + 1, 2 * (nocc + B), 2 * J)] = value
            for (i, j, a, b), value in numpy.ndenumerate(self.tijab):
                if not numpy.isclose(value, 0.0, atol=threshold):
                    variables[(2 * (nocc + a) + 1, 2 * i + 1, 2 * (nocc + b) + 1, 2 * j + 1)] = value
            for (I, J, A, B), value in numpy.ndenumerate(self.tIJAB):
                if not numpy.isclose(value, 0.0, atol=threshold):
                    variables[(2 * (nocc + A), 2 * I, 2 * (nocc + B), 2 * J)] = value
        if self.tIA is not None:
            nocc = self.tIjAb.shape[0]
            assert (self.tia.shape[0] == nocc)
            for (I, A), value in numpy.ndenumerate(self.tIA):
                if not numpy.isclose(value, 0.0, atol=threshold):
                    variables[(2 * (A + nocc), 2 * I)] = value
            for (i, a), value in numpy.ndenumerate(self.tia):
                if not numpy.isclose(value, 0.0, atol=threshold):
                    variables[(2 * (a + nocc) + 1, 2 * i + 1)] = value
return variables
class NBodyTensor:
""" Convenience class for handling N-body tensors """
class Ordering:
def __init__(self, scheme):
if hasattr(scheme, "_scheme"):
scheme = scheme._scheme
elif hasattr(scheme, "scheme"):
scheme = scheme.scheme
self._scheme = self.assign_scheme(scheme)
def assign_scheme(self, scheme):
if scheme is None:
return "chem"
else:
scheme = str(scheme)
if scheme.lower() in ["mulliken", "chem", "c", "1122"]:
return "chem"
elif scheme.lower() in ["dirac", "phys", "p", "1212"]:
return "phys"
elif scheme.lower() in ["openfermion", "of", "o", "1221"]:
return "of"
else:
raise TequilaException(
"Unknown two-body tensor scheme {}. Supported are dirac, mulliken, and openfermion".format(scheme))
def is_phys(self):
return self._scheme == "phys"
def is_chem(self):
return self._scheme == "chem"
def is_of(self):
return self._scheme == "of"
def __init__(self, elems: numpy.ndarray = None, active_indices: list = None, ordering: str = None,
size_full: int = None):
"""
Parameters
----------
elems: Tensor data as numpy array
active_indices: List of active indices in total ordering
ordering: Ordering scheme for two body tensors
"dirac" or "phys": <12|g|12>
.. math::
g_{pqrs} = \\int d1 d2 p(1)q(2) g(1,2) r(1)s(2)
"mulliken" or "chem": (11|g|22)
.. math::
g_{pqrs} = \\int d1 d2 p(1)r(2) g(1,2) q(1)s(2)
"openfermion":
.. math:: [12|g|21]
g_{gqprs} = \\int d1 d2 p(1)q(2) g(1,2) s(1)r(2)
size_full
"""
# Set elements
self.elems = elems
# Active indices only as list of indices (e.g. spatial orbital indices), not as a dictionary of irreducible
# representations
if active_indices is not None:
self.active_indices = active_indices
self._passive_indices = None
self._full_indices = None
self._indices_set: bool = False
# Determine order of tensor
# Assume, that tensor is entered in desired shape, not as flat array.
self.order = len(self.elems.shape)
# Can use size_full < self.elems.shape[0] -> 'full' space is to be considered a subspace as well
if size_full is None:
self._size_full = self.elems.shape[0]
else:
self._size_full = size_full
# 2-body tensors (<=> order 4) currently allow reordering
if self.order == 4:
self.ordering = self.Ordering(ordering)
else:
if ordering is not None:
raise Exception("Ordering only implemented for tensors of order 4 / 2-body tensors.")
self.ordering = None
def sub_lists(self, idx_lists: list = None) -> numpy.ndarray:
"""
Get subspace of tensor by a set of index lists
according to hPQ.sub_lists(idx_lists=[p, q]) = [hPQ for P in p and Q in q]
This essentially is an implementation of a non-contiguous slicing using numpy.take
Parameters
----------
idx_lists :
List of lists, each defining the desired subspace per axis
Size needs to match order of tensor, and lists successively correspond to axis=0,1,2,...,N
Returns
-------
out :
Sliced tensor as numpy.ndarray
"""
# Check if index list has correct size
if len(idx_lists) != self.order:
raise Exception("Need to pass an index list for each dimension!" +
" Length of idx_lists needs to match order of tensor.")
# Perform slicing via numpy.take
out = self.elems
for ax in range(self.order):
if idx_lists[ax] is not None: # None means, we want the full space in this direction
out = numpy.take(out, idx_lists[ax], axis=ax)
return out
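    # Illustrative sketch (not part of the original source): non-contiguous slicing of a
    # 4x4 one-body tensor down to rows {0, 2} while keeping all columns.
    # >>> h = NBodyTensor(elems=numpy.arange(16).reshape(4, 4))
    # >>> h.sub_lists(idx_lists=[[0, 2], None]).shape
    # (2, 4)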
def set_index_lists(self):
""" Set passive and full index lists based on class inputs """
tmp_size = self._size_full
if self._size_full is None:
tmp_size = self.elems.shape[0]
self._passive_indices = [i for i in range(tmp_size)
if i not in self.active_indices]
self._full_indices = [i for i in range(tmp_size)]
def sub_str(self, name: str) -> numpy.ndarray:
"""
Get subspace of tensor by a string
Currently is able to resolve an active space, named 'a', full space 'f', and the complement 'p' = 'f' - 'a'.
Full space in this context may also be smaller than actual tensor dimension.
The specification of active space in this context only allows to pick a set from a list of orbitals, and
is not able to resolve an active space from irreducible representations.
Example for one-body tensor:
hPQ.sub_lists(name='ap') = [hPQ for P in active_indices and Q in _passive_indices]
Parameters
----------
name :
String specifying the desired subspace, elements need to be a (active), f (full), p (full - active)
Returns
-------
out :
Sliced tensor as numpy.ndarray
"""
if not self._indices_set:
self.set_index_lists()
self._indices_set = True
if name is None:
raise Exception("No name specified.")
if len(name) != self.order:
raise Exception("Name does not match order of the tensor.")
if self.active_indices is None:
raise Exception("Need to set an active space in order to call this function.")
idx_lists = []
# Parse name as string of space indices
for char in name:
if char.lower() == 'a':
idx_lists.append(self.active_indices)
elif char.lower() == 'p':
idx_lists.append(self._passive_indices)
elif char.lower() == 'f':
if self._size_full is None:
idx_lists.append(None)
else:
idx_lists.append(self._full_indices)
else:
raise Exception("Need to specify a valid letter (a,p,f).")
out = self.sub_lists(idx_lists)
return out
def reorder(self, to: str = 'of'):
"""
Function to reorder tensors according to some convention.
Parameters
----------
to :
Ordering scheme of choice.
'openfermion', 'of' (default) :
openfermion - ordering, corresponds to integrals of the type
                h^pq_rs = int p(1)* q(2)* O(1,2) r(2) s(1)
with operators a^pq_rs = a^p a^q a_r a_s (a^p == a^dagger_p)
currently needed for dependencies on openfermion-library
'chem', 'c' :
quantum chemistry ordering, collect particle terms,
more convenient for real-space methods
h^pq_rs = int p(1) q(1) O(1,2) r(2) s(2)
This is output by psi4
'phys', 'p' :
typical physics ordering, integrals of type
h^pq_rs = int p(1)* q(2)* O(1,2) r(1) s(2)
with operators a^pq_rs = a^p a^q a_s a_r
Returns
-------
"""
if self.order != 4:
raise Exception('Reordering currently only implemented for two-body tensors.')
to = self.Ordering(to)
if self.ordering == to:
return self
elif self.ordering.is_chem():
if to.is_of():
self.elems = numpy.einsum("psqr -> pqrs", self.elems, optimize='greedy')
elif to.is_phys():
self.elems = numpy.einsum("prqs -> pqrs", self.elems, optimize='greedy')
elif self.ordering.is_of():
if to.is_chem():
self.elems = numpy.einsum("pqrs -> psqr", self.elems, optimize='greedy')
elif to.is_phys():
self.elems = numpy.einsum("pqrs -> pqsr", self.elems, optimize='greedy')
elif self.ordering.is_phys():
if to.is_chem():
self.elems = numpy.einsum("pqrs -> prqs", self.elems, optimize='greedy')
elif to.is_of():
self.elems = numpy.einsum("pqsr -> pqrs", self.elems, optimize='greedy')
return self
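    # Illustrative sketch (not part of the original source): converting chemist-ordered
    # integrals (pq|g|rs) into the openfermion convention used by do_make_molecule below.
    # >>> g_chem = NBodyTensor(elems=numpy.zeros((2, 2, 2, 2)), ordering="mulliken")
    # >>> g_of = g_chem.reorder(to="openfermion").elems   # applies "psqr -> pqrs"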
class QuantumChemistryBase:
def __init__(self, parameters: ParametersQC,
transformation: typing.Union[str, typing.Callable] = None,
active_orbitals: list = None,
*args,
**kwargs):
self.parameters = parameters
if "molecule" in kwargs:
self.molecule = kwargs["molecule"]
else:
self.molecule = self.make_molecule(*args, **kwargs)
assert (parameters.basis_set.lower() == self.molecule.basis.lower())
assert (parameters.multiplicity == self.molecule.multiplicity)
assert (parameters.charge == self.molecule.charge)
self.active_space = None
if active_orbitals is not None:
self.active_space = self._make_active_space_data(active_orbitals=active_orbitals)
self.transformation = self._initialize_transformation(transformation=transformation, *args, **kwargs)
self._rdm1 = None
self._rdm2 = None
def _initialize_transformation(self, transformation=None, *args, **kwargs):
if transformation is None:
transformation = "JordanWigner"
# filter out arguments to the transformation
trafo_args = {k.split("__")[1]: v for k, v in kwargs.items() if
(hasattr(k, "lower") and "transformation__" in k.lower())}
trafo_args["n_electrons"] = self.n_electrons
trafo_args["n_orbitals"] = self.n_orbitals
if hasattr(transformation, "upper"):
# format to conventions
transformation = transformation.replace("_", "").replace("-", "").upper()
encodings = known_encodings()
if transformation in encodings:
transformation = encodings[transformation](**trafo_args)
else:
raise TequilaException(
"Unkown Fermion-to-Qubit encoding {}. Try something like: {}".format(transformation,
list(encodings.keys())))
return transformation
def _make_active_space_data(self, active_orbitals, reference=None):
"""
Small helper function
Internal use only
Parameters
----------
active_orbitals: dictionary :
list: Give a list of spatial orbital indices
i.e. occ = [0,1,3] means that spatial orbital 0, 1 and 3 are used
reference: (Default value=None)
List of orbitals which form the reference
Can be given in the same format as active_orbitals
If given as None then the first N_electron/2 orbitals are taken
for closed-shell systems.
Returns
-------
Dataclass with active indices and reference indices (in spatial notation)
"""
if active_orbitals is None:
return None
if reference is None:
# auto assignment only for closed-shell
assert (self.n_electrons % 2 == 0)
reference = sorted([i for i in range(self.n_electrons // 2)])
return ActiveSpaceData(active_orbitals=sorted(active_orbitals),
reference_orbitals=sorted(reference))
@classmethod
def from_openfermion(cls, molecule: openfermion.MolecularData,
transformation: typing.Union[str, typing.Callable] = None,
*args,
**kwargs):
"""
        Initialize directly from an openfermion MolecularData object
Parameters
----------
molecule
The openfermion molecule
Returns
-------
The Tequila molecule
"""
parameters = ParametersQC(basis_set=molecule.basis, geometry=molecule.geometry,
description=molecule.description, multiplicity=molecule.multiplicity,
charge=molecule.charge)
return cls(parameters=parameters, transformation=transformation, molecule=molecule, *args, **kwargs)
def make_excitation_generator(self,
indices: typing.Iterable[typing.Tuple[int, int]],
form: str = None,
remove_constant_term: bool = True) -> QubitHamiltonian:
"""
Notes
----------
Creates the transformed hermitian generator of UCC type unitaries:
              M(a^\dagger_{a_0} a_{i_0} a^\dagger_{a_1} a_{i_1} ... - h.c.)
              where the qubit map M depends on self.transformation
Parameters
----------
indices : typing.Iterable[typing.Tuple[int, int]] :
            List of tuples [(a_0, i_0), (a_1, i_1), ... ] - recommended format, in spin-orbital notation (alpha: even numbers, beta: odd numbers)
can also be given as one big list: [a_0, i_0, a_1, i_1 ...]
form : str : (Default value None):
Manipulate the generator to involution or projector
set form='involution' or 'projector'
the default is no manipulation which gives the standard fermionic excitation operator back
remove_constant_term: bool: (Default value True):
by default the constant term in the qubit operator is removed since it has no effect on the unitary it generates
if the unitary is controlled this might not be true!
Returns
-------
type
1j*Transformed qubit excitation operator, depends on self.transformation
"""
if type(self.transformation).__name__ == "BravyiKitaevFast":
raise TequilaException(
"The Bravyi-Kitaev-Superfast transformation does not support general FermionOperators yet")
# check indices and convert to list of tuples if necessary
if len(indices) == 0:
raise TequilaException("make_excitation_operator: no indices given")
elif not isinstance(indices[0], typing.Iterable):
if len(indices) % 2 != 0:
raise TequilaException("make_excitation_generator: unexpected input format of indices\n"
"use list of tuples as [(a_0, i_0),(a_1, i_1) ...]\n"
"or list as [a_0, i_0, a_1, i_1, ... ]\n"
"you gave: {}".format(indices))
converted = [(indices[2 * i], indices[2 * i + 1]) for i in range(len(indices) // 2)]
else:
converted = indices
# convert everything to native python int
# otherwise openfermion will complain
converted = [(int(pair[0]), int(pair[1])) for pair in converted]
# convert to openfermion input format
ofi = []
dag = []
for pair in converted:
assert (len(pair) == 2)
ofi += [(int(pair[0]), 1),
(int(pair[1]), 0)] # openfermion does not take other types of integers like numpy.int64
dag += [(int(pair[0]), 0), (int(pair[1]), 1)]
op = openfermion.FermionOperator(tuple(ofi), 1.j) # 1j makes it hermitian
op += openfermion.FermionOperator(tuple(reversed(dag)), -1.j)
if isinstance(form, str) and form.lower() != 'fermionic':
# indices for all the Na operators
Na = [x for pair in converted for x in [(pair[0], 1), (pair[0], 0)]]
# indices for all the Ma operators (Ma = 1 - Na)
Ma = [x for pair in converted for x in [(pair[0], 0), (pair[0], 1)]]
# indices for all the Ni operators
Ni = [x for pair in converted for x in [(pair[1], 1), (pair[1], 0)]]
# indices for all the Mi operators
Mi = [x for pair in converted for x in [(pair[1], 0), (pair[1], 1)]]
# can gaussianize as projector or as involution (last is default)
if form.lower() == "p+":
op *= 0.5
op += openfermion.FermionOperator(Na + Mi, 0.5)
op += openfermion.FermionOperator(Ni + Ma, 0.5)
elif form.lower() == "p-":
op *= 0.5
op += openfermion.FermionOperator(Na + Mi, -0.5)
op += openfermion.FermionOperator(Ni + Ma, -0.5)
elif form.lower() == "g+":
op += openfermion.FermionOperator([], 1.0) # Just for clarity will be subtracted anyway
op += openfermion.FermionOperator(Na + Mi, -1.0)
op += openfermion.FermionOperator(Ni + Ma, -1.0)
elif form.lower() == "g-":
op += openfermion.FermionOperator([], -1.0) # Just for clarity will be subtracted anyway
op += openfermion.FermionOperator(Na + Mi, 1.0)
op += openfermion.FermionOperator(Ni + Ma, 1.0)
elif form.lower() == "p0":
# P0: we only construct P0 and don't keep the original generator
op = openfermion.FermionOperator([], 1.0) # Just for clarity will be subtracted anyway
op += openfermion.FermionOperator(Na + Mi, -1.0)
op += openfermion.FermionOperator(Ni + Ma, -1.0)
else:
raise TequilaException(
"Unknown generator form {}, supported are G, P+, P-, G+, G- and P0".format(form))
qop = self.transformation(op)
# remove constant terms
# they have no effect in the unitary (if not controlled)
if remove_constant_term:
qop.qubit_operator.terms[tuple()] = 0.0
# check if the operator is hermitian and cast coefficients to floats
# in order to avoid trouble with the simulation backends
assert qop.is_hermitian()
for k, v in qop.qubit_operator.terms.items():
qop.qubit_operator.terms[k] = to_float(v)
qop = qop.simplify()
if len(qop) == 0:
warnings.warn("Excitation generator is a unit operator.\n"
"Non-standard transformations might not work with general fermionic operators\n"
"indices = " + str(indices), category=TequilaWarning)
return qop
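    # Illustrative usage sketch (not part of the original source), assuming a molecule
    # instance `mol` of this class: hermitian generator of the paired double excitation
    # from spin-orbitals (0, 1) into (2, 3), i.e. i(a^2 a_0 a^3 a_1 - h.c.) after mapping.
    # >>> G = mol.make_excitation_generator(indices=[(2, 0), (3, 1)])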
def make_hardcore_boson_excitation_gate(self, indices, angle, control=None, assume_real=True, compile_options="optimize"):
target = []
for pair in indices:
assert len(pair) == 2
target += [pair[0], pair[1]]
consistency = [x < self.n_orbitals for x in target]
if not all(consistency):
raise TequilaException(
"make_hardcore_boson_excitation_gate: Inconsistencies in indices={}. Should be indexed from 0 ... n_orbitals={}".format(
indices, self.n_orbitals))
return gates.QubitExcitation(angle=angle, target=target, assume_real=assume_real, control=control, compile_options=compile_options)
def make_excitation_gate(self, indices, angle, control=None, assume_real=True, **kwargs):
"""
Initialize a fermionic excitation gate defined as
.. math::
e^{-i\\frac{a}{2} G}
with generator defines by the indices [(p0,q0),(p1,q1),...]
.. math::
G = i(\\prod_{k} a_{p_k}^\\dagger a_{q_k} - h.c.)
Parameters
----------
indices:
List of tuples that define the generator
angle:
Numeric or hashable type or tequila objective
control:
List of possible control qubits
assume_real:
Assume that the wavefunction will always stay real.
Will reduce potential gradient costs by a factor of 2
"""
generator = self.make_excitation_generator(indices=indices, remove_constant_term=control is None)
p0 = self.make_excitation_generator(indices=indices, form="P0", remove_constant_term=control is None)
return QCircuit.wrap_gate(
FermionicGateImpl(angle=angle, generator=generator, p0=p0, transformation=type(self.transformation).__name__.lower(), assume_real=assume_real, control=control, **kwargs))
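    # Illustrative usage sketch (not part of the original source), with a hypothetical
    # molecule instance `mol` and a variational angle named "a":
    # >>> U = mol.prepare_reference()
    # >>> U += mol.make_excitation_gate(indices=[(2, 0), (3, 1)], angle="a")
    # >>> E = ExpectationValue(H=mol.make_hamiltonian(), U=U)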
def make_molecule(self, *args, **kwargs) -> MolecularData:
"""Creates a molecule in openfermion format by running psi4 and extracting the data
Will check for previous outputfiles before running
Will not recompute if a file was found
Parameters
----------
parameters :
An instance of ParametersQC, which also holds an instance of ParametersPsi4 via parameters.psi4
The molecule will be saved in parameters.filename, if this file exists before the call the molecule will be imported from the file
Returns
-------
type
the molecule in openfermion.MolecularData format
"""
molecule = MolecularData(**self.parameters.molecular_data_param)
# try to load
do_compute = True
try:
import os
if os.path.exists(self.parameters.filename):
molecule.load()
do_compute = False
except OSError:
do_compute = True
if do_compute:
molecule = self.do_make_molecule(*args, **kwargs)
molecule.save()
return molecule
def do_make_molecule(self, *args, **kwargs):
"""
Parameters
----------
args
kwargs
Returns
-------
"""
# integrals need to be passed in base class
assert ("one_body_integrals" in kwargs)
assert ("two_body_integrals" in kwargs)
one_body_integrals = kwargs["one_body_integrals"]
two_body_integrals = kwargs["two_body_integrals"]
# tequila assumes "openfermion" ordering, integrals can however be passed
# down in other orderings, but it needs to be indicated by keyword
if "ordering" in kwargs:
two_body_integrals = NBodyTensor(two_body_integrals, ordering=kwargs["ordering"])
two_body_integrals.reorder(to="openfermion")
two_body_integrals = two_body_integrals.elems
if "nuclear_repulsion" in kwargs:
nuclear_repulsion = kwargs["nuclear_repulsion"]
else:
nuclear_repulsion = 0.0
warnings.warn("No nuclear_repulsion given for custom molecule, setting to zero", category=TequilaWarning)
if ("n_orbitals" in kwargs):
n_orbitals = kwargs["n_orbitals"]
else:
n_orbitals = one_body_integrals.shape[0]
for i in [0, 1, 2, 3]:
assert n_orbitals == two_body_integrals.shape[i]
molecule = MolecularData(**self.parameters.molecular_data_param)
molecule.one_body_integrals = one_body_integrals
molecule.two_body_integrals = two_body_integrals
molecule.nuclear_repulsion = nuclear_repulsion
molecule.n_orbitals = n_orbitals
if "n_electrons" in kwargs:
molecule.n_electrons = kwargs["n_electrons"]
molecule.save()
return molecule
@property
def n_orbitals(self) -> int:
""" """
if self.active_space is None:
return self.molecule.n_orbitals
else:
return len(self.active_space.active_orbitals)
@property
def n_electrons(self) -> int:
""" """
if self.active_space is None:
return self.molecule.n_electrons
else:
return 2 * len(self.active_space.active_reference_orbitals)
def make_hamiltonian(self, occupied_indices=None, active_indices=None, threshold=1.e-8) -> QubitHamiltonian:
""" """
if occupied_indices is None and self.active_space is not None:
occupied_indices = self.active_space.frozen_reference_orbitals
if active_indices is None and self.active_space is not None:
active_indices = self.active_space.active_orbitals
fop = openfermion.transforms.get_fermion_operator(
self.molecule.get_molecular_hamiltonian(occupied_indices, active_indices))
try:
qop = self.transformation(fop)
except TypeError:
qop = self.transformation(openfermion.transforms.get_interaction_operator(fop))
qop.is_hermitian()
return qop
def make_hardcore_boson_hamiltonian(self):
if not self.transformation.up_then_down:
warnings.warn(
"Hardcore-Boson Hamiltonian without reordering will result in non-consecutive Hamiltonians that are eventually not be combinable with other features of tequila. Try transformation=\'ReorderedJordanWigner\' or similar for more consistency",
TequilaWarning)
# integrate with QubitEncoding at some point
n_orbitals = self.n_orbitals
c, obt, tbt = self.get_integrals()
h = numpy.zeros(shape=[n_orbitals] * 2)
g = numpy.zeros(shape=[n_orbitals] * 2)
for p in range(n_orbitals):
h[p, p] += 2 * obt[p, p]
for q in range(n_orbitals):
h[p, q] += + tbt[p, p, q, q]
if p != q:
g[p, q] += 2 * tbt[p, q, q, p] - tbt[p, q, p, q]
H = c
for p in range(n_orbitals):
for q in range(n_orbitals):
up = p
uq = q
H += h[p, q] * Sm(up) * Sp(uq) + g[p, q] * Sm(up) * Sp(up) * Sm(uq) * Sp(uq)
return H
def make_molecular_hamiltonian(self):
if self.active_space:
return self.molecule.get_molecular_hamiltonian(occupied_indices=self.active_space.frozen_reference_orbitals,
active_indices=self.active_space.active_orbitals)
else:
return self.molecule.get_molecular_hamiltonian()
def get_integrals(self, two_body_ordering="openfermion"):
"""
Returns
-------
Tuple with:
constant part (nuclear_repulsion + possible integrated parts from active-spaces)
one_body_integrals
two_body_integrals
"""
if self.active_space is not None and len(self.active_space.frozen_reference_orbitals) > 0:
c, h1, h2 = self.molecule.get_active_space_integrals(active_indices=self.active_space.active_orbitals,
occupied_indices=self.active_space.frozen_reference_orbitals)
else:
c = 0.0
h1 = self.molecule.one_body_integrals
h2 = self.molecule.two_body_integrals
c += self.molecule.nuclear_repulsion
h2 = NBodyTensor(h2, ordering="openfermion")
h2 = h2.reorder(to=two_body_ordering).elems
return c, h1, h2
def compute_one_body_integrals(self):
""" convenience function """
c, h1, h2 = self.get_integrals()
return h1
def compute_two_body_integrals(self, two_body_ordering="openfermion"):
""" """
c, h1, h2 = self.get_integrals(two_body_ordering=two_body_ordering)
return h2
def compute_constant_part(self):
c, h1, h2 = self.get_integrals()
return c
def compute_ccsd_amplitudes(self) -> ClosedShellAmplitudes:
""" """
raise Exception("BaseClass Method")
def prepare_reference(self, state=None, *args, **kwargs):
"""
Returns
-------
A tequila circuit object which prepares the reference of this molecule in the chosen transformation
"""
if state is None:
assert self.n_electrons %2 == 0
state = [0]*(self.n_orbitals*2)
for i in range(self.n_electrons):
state[i]=1
reference_state = BitString.from_array(self.transformation.map_state(state=state))
U = prepare_product_state(reference_state)
# prevent trace out in direct wfn simulation
U.n_qubits = self.n_orbitals*2 # adapt when tapered transformations work
return U
def prepare_hardcore_boson_reference(self):
# HF state in the HCB representation (paired electrons)
U = gates.X(target=[i for i in range(self.n_electrons // 2)])
U.n_qubits = self.n_orbitals
return U
def hcb_to_me(self, U=None):
"""
Transform a circuit in the hardcore-boson encoding (HCB)
to the encoding of this molecule
HCB is supposed to be encoded on the first n_orbitals qubits
Parameters
----------
U: HCB circuit (using the alpha qubits)
Returns
-------
"""
if U is None:
U = QCircuit()
# consistency
consistency = [x < self.n_orbitals for x in U.qubits]
if not all(consistency):
warnings.warn(
"hcb_to_me: given circuit is not defined on the first {} qubits. Is this a HCB circuit?".format(
self.n_orbitals))
# map to alpha qubits
alpha_map = {k: self.transformation.up(k) for k in range(self.n_orbitals)}
alpha_U = U.map_qubits(qubit_map=alpha_map)
UX = self.transformation.hcb_to_me()
if UX is None:
raise TequilaException(
"transformation={} has no hcb_to_me function implemented".format(self.transformation))
return alpha_U + UX
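    # Illustrative usage sketch (not part of the original source): build a pair ansatz in
    # the hardcore-boson picture and map it back to this molecule's encoding, mirroring
    # the optimized branch of make_upccgsd_ansatz below.
    # >>> U_hcb = mol.prepare_hardcore_boson_reference()
    # >>> U_hcb += mol.make_hardcore_boson_excitation_gate(indices=[(0, 1)], angle="a")
    # >>> U = mol.hcb_to_me(U=U_hcb)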
def get_pair_specific_indices(self,
pair_info: str = None,
include_singles: bool = True,
general_excitations: bool = True) -> list:
"""
Assuming a pair-specific model, create a pair-specific index list
to be used in make_upccgsd_ansatz(indices = ... )
Excite from a set of references (i) to any pair coming from (i),
i.e. any (i,j)/(j,i). If general excitations are allowed, also
allow excitations from pairs to appendant pairs and reference.
Parameters
----------
pair_info
            file or list including information about the pair structure
            references are given as a single number, pairs as two digits
            example as file: "0,1,11,11,00,10" (hand over the file name);
            in the file, the first row is skipped (assumed to contain descriptive text)
            as list: ['0', '1', '11', '11', '00', '10']
~> two reference orbitals 0 and 1,
then two orbitals from pair 11, one from 00, one mixed 10
include_singles
include single excitations
general_excitations
allow general excitations
Returns
-------
list of indices with pair-specific ansatz
"""
if pair_info is None:
raise TequilaException("Need to provide some pair information.")
# If pair-information given on file, load (layout see above)
if isinstance(pair_info, str):
pairs = numpy.loadtxt(pair_info, dtype=str, delimiter=",", skiprows=1)
elif isinstance(pair_info, list):
pairs = pair_info
        else:
raise TequilaException("Pair information needs to be contained in a list or filename.")
connect = [[]] * len(pairs)
# determine "connectivity"
generalized = 0
for idx, p in enumerate(pairs):
if len(p) == 1:
connect[idx] = [i for i in range(len(pairs))
if ((len(pairs[i]) == 2) and (str(idx) in pairs[i]))]
elif (len(p) == 2) and general_excitations:
connect[idx] = [i for i in range(len(pairs))
if (((p[0] in pairs[i]) or (p[1] in pairs[i]) or str(i) in p)
and not (i == idx))]
elif len(p) > 2:
raise TequilaException("Invalid reference of pair id.")
# create generating indices from connectivity
indices = []
for i, to in enumerate(connect):
for a in to:
indices.append(((2 * i, 2 * a), (2 * i + 1, 2 * a + 1)))
if include_singles:
indices.append(((2 * i, 2 * a)))
indices.append(((2 * i + 1, 2 * a + 1)))
return indices
def format_excitation_indices(self, idx):
"""
Consistent formatting of excitation indices
idx = [(p0,q0),(p1,q1),...,(pn,qn)]
sorted as: p0<p1<pn and pi<qi
:param idx: list of index tuples describing a single(!) fermionic excitation
:return: tuple-list of index tuples
"""
idx = [tuple(sorted(x)) for x in idx]
idx = sorted(idx, key=lambda x: x[0])
return tuple(idx)
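    # Illustrative sketch (not part of the original source), with a molecule instance `mol`:
    # >>> mol.format_excitation_indices([(3, 1), (2, 0)])
    # ((0, 2), (1, 3))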
def make_upccgsd_indices(self, key, reference_orbitals=None, *args, **kwargs):
if reference_orbitals is None:
reference_orbitals = [i for i in range(self.n_electrons // 2)]
indices = []
# add doubles in hcb encoding
if hasattr(key, "lower") and key.lower() == "ladder":
# ladder structure of the pair excitations
# ensures local connectivity
indices = [[(n, n + 1)] for n in range(self.n_orbitals - 1)]
elif hasattr(key, "lower") and "g" not in key.lower():
indices = [[(n, m)] for n in reference_orbitals for m in range(self.n_orbitals) if
n < m and m not in reference_orbitals]
elif hasattr(key, "lower") and "g" in key.lower():
indices = [[(n, m)] for n in range(self.n_orbitals) for m in range(self.n_orbitals) if n < m]
else:
raise TequilaException("Unknown recipe: {}".format(key))
indices = [self.format_excitation_indices(idx) for idx in indices]
return indices
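    # Illustrative sketch (not part of the original source), assuming mol.n_orbitals == 4:
    # the "ladder" recipe keeps only nearest-neighbour pair excitations.
    # >>> mol.make_upccgsd_indices(key="ladder")
    # [((0, 1),), ((1, 2),), ((2, 3),)]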
def make_hardcore_boson_upccgd_layer(self,
indices: list = "UpCCGD",
label: str = None,
assume_real: bool = True,
*args, **kwargs):
if hasattr(indices, "lower"):
indices = self.make_upccgsd_indices(key=indices.lower())
UD = QCircuit()
for idx in indices:
UD += self.make_hardcore_boson_excitation_gate(indices=idx, angle=(idx, "D", label),
assume_real=assume_real)
return UD
def make_ansatz(self, name:str, *args, **kwargs):
name = name.lower()
if name.strip()=="":
return QCircuit()
if "+" in name:
U = QCircuit()
subparts = name.split("+")
U = self.make_ansatz(name=subparts[0], *args ,**kwargs)
if "include_reference" in kwargs:
kwargs.pop("include_reference")
if "hcb_optimization" in kwargs:
kwargs.pop("hcb_optimization")
for subpart in subparts[1:]:
U += self.make_ansatz(name=subpart, *args, include_reference=False, hcb_optimization=False, **kwargs)
return U
if name=="uccsd":
return self.make_uccsd_ansatz(*args, **kwargs)
elif "d" in name or "s" in name:
return self.make_upccgsd_ansatz(name=name, *args, **kwargs)
else:
raise TequilaException("unknown ansatz with name={}".format(name))
def make_upccgsd_ansatz(self,
include_reference: bool = True,
name: str = "UpCCGSD",
label: str = None,
order: int = None,
assume_real: bool = True,
hcb_optimization: bool = None,
spin_adapt_singles: bool = True,
neglect_z = False,
*args, **kwargs):
"""
UpGCCSD Ansatz similar as described by Lee et. al.
Parameters
----------
include_singles
include singles excitations. Is overwritten if indices are a string (i.e. indices=UpCCGSD will always include singles, UpCCGD will not)
include_reference
include the HF reference state as initial state
indices
pass custom defined set of indices from which the ansatz will be created
List of tuples of tuples spin-indices e.g. [((2*p,2*q),(2*p+1,2*q+1)), ...]
label
An additional label that is set with the variables
default is None and no label will be set: variables names will be
(x, (p,q)) for x in range(order)
with a label the variables will be named
(label, (x, (p,q)))
order
Order of the ansatz (default is 1)
determines how often the ordering gets repeated
parameters of repeating layers are independent
assume_real
assume a real wavefunction (that is always the case if the reference state is real)
reduces potential gradient costs from 4 to 2
Returns
-------
UpGCCSD ansatz
"""
name = name.upper()
if ("A" in name) and neglect_z is None:
neglect_z = True
else:
neglect_z = False
if order is None:
try:
if "-" in name:
order = int(name.split("-")[0])
else:
order = 1
except:
order = 1
indices = self.make_upccgsd_indices(key=name)
# check if the used qubit encoding has a hcb transformation
have_hcb_trafo = self.transformation.hcb_to_me() is not None
# consistency checks for optimization
if have_hcb_trafo and hcb_optimization is None:
hcb_optimization = True
if "HCB" in name:
hcb_optimization = True
if hcb_optimization and not have_hcb_trafo and "HCB" not in name:
raise TequilaException(
"use_hcb={} but transformation={} has no \'hcb_to_me\' function. Try transformation=\'ReorderedJordanWigner\'".format(
hcb_optimization, self.transformation))
if "S" in name and "HCB" in name:
if "HCB" in name and "S" in name:
raise Exception(
"name={}, Singles can't be realized without mapping back to the standard encoding leave S or HCB out of the name".format(
name))
# first layer
if not hcb_optimization:
U = QCircuit()
if include_reference:
U = self.prepare_reference()
U += self.make_upccgsd_layer(include_singles="S" in name, indices=indices, assume_real=assume_real,
label=(label, 0), spin_adapt_singles=spin_adapt_singles, *args, **kwargs)
else:
U = QCircuit()
if include_reference:
U = self.prepare_hardcore_boson_reference()
U += self.make_hardcore_boson_upccgd_layer(indices=indices, assume_real=assume_real, label=(label, 0),
*args, **kwargs)
if "HCB" not in name:
U = self.hcb_to_me(U=U)
if "S" in name:
U += self.make_upccgsd_singles(indices=indices, assume_real=assume_real, label=(label, 0),
spin_adapt_singles=spin_adapt_singles, neglect_z=neglect_z, *args, **kwargs)
for k in range(1, order):
U += self.make_upccgsd_layer(include_singles="S" in name, indices=indices, label=(label, k),
spin_adapt_singles=spin_adapt_singles, neglect_z=neglect_z)
return U
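    # Illustrative usage sketch (not part of the original source), with a hypothetical
    # molecule instance `mol`:
    # >>> U = mol.make_upccgsd_ansatz(name="UpCCGSD", order=1)
    # >>> E = ExpectationValue(H=mol.make_hamiltonian(), U=U)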
def make_upccgsd_layer(self, indices, include_singles=True, include_doubles=True, assume_real=True, label=None,
spin_adapt_singles: bool = True, angle_transform=None, mix_sd=False, neglect_z=False, *args, **kwargs):
U = QCircuit()
for idx in indices:
assert len(idx) == 1
idx = idx[0]
angle = (tuple([idx]), "D", label)
if include_doubles:
if "jordanwigner" in self.transformation.name.lower() and not self.transformation.up_then_down:
# we can optimize with qubit excitations for the JW representation
target=[self.transformation.up(idx[0]), self.transformation.up(idx[1]), self.transformation.down(idx[0]), self.transformation.down(idx[1])]
U += gates.QubitExcitation(angle=angle, target=target, assume_real=assume_real, **kwargs)
else:
U += self.make_excitation_gate(angle=angle,
indices=((2 * idx[0], 2 * idx[1]), (2 * idx[0] + 1, 2 * idx[1] + 1)),
assume_real=assume_real, **kwargs)
if include_singles and mix_sd:
U += self.make_upccgsd_singles(indices=[idx], assume_real=assume_real, label=label,
spin_adapt_singles=spin_adapt_singles, angle_transform=angle_transform, neglect_z=neglect_z)
if include_singles and not mix_sd:
U += self.make_upccgsd_singles(indices=indices, assume_real=assume_real, label=label,
spin_adapt_singles=spin_adapt_singles, angle_transform=angle_transform, neglect_z=neglect_z)
return U
def make_upccgsd_singles(self, indices="UpCCGSD", spin_adapt_singles=True, label=None, angle_transform=None,
assume_real=True, neglect_z=False, *args, **kwargs):
if neglect_z and "jordanwigner" not in self.transformation.name.lower():
raise TequilaException("neglegt-z approximation in UpCCGSD singles needs the (Reversed)JordanWigner representation")
if hasattr(indices, "lower"):
indices = self.make_upccgsd_indices(key=indices)
U = QCircuit()
for idx in indices:
assert len(idx) == 1
idx = idx[0]
if spin_adapt_singles:
angle = (idx, "S", label)
if angle_transform is not None:
angle = angle_transform(angle)
if neglect_z:
targeta=[self.transformation.up(idx[0]), self.transformation.up(idx[1])]
targetb=[self.transformation.down(idx[0]), self.transformation.down(idx[1])]
U += gates.QubitExcitation(angle=angle, target=targeta, assume_real=assume_real, **kwargs)
U += gates.QubitExcitation(angle=angle, target=targetb, assume_real=assume_real, **kwargs)
else:
U += self.make_excitation_gate(angle=angle, indices=[(2 * idx[0], 2 * idx[1])], assume_real=assume_real, **kwargs)
U += self.make_excitation_gate(angle=angle, indices=[(2 * idx[0] + 1, 2 * idx[1] + 1)],
assume_real=assume_real, **kwargs)
else:
angle1 = (idx, "SU", label)
angle2 = (idx, "SD", label)
if angle_transform is not None:
angle1 = angle_transform(angle1)
angle2 = angle_transform(angle2)
if neglect_z:
targeta=[self.transformation.up(idx[0]), self.transformation.up(idx[1])]
targetb=[self.transformation.down(idx[0]), self.transformation.down(idx[1])]
                    U += gates.QubitExcitation(angle=angle1, target=targeta, assume_real=assume_real, **kwargs)
                    U += gates.QubitExcitation(angle=angle2, target=targetb, assume_real=assume_real, **kwargs)
else:
U += self.make_excitation_gate(angle=angle1, indices=[(2 * idx[0], 2 * idx[1])],
assume_real=assume_real, **kwargs)
U += self.make_excitation_gate(angle=angle2, indices=[(2 * idx[0] + 1, 2 * idx[1] + 1)],
assume_real=assume_real, **kwargs)
return U
def make_uccsd_ansatz(self, trotter_steps: int=1,
initial_amplitudes: typing.Union[str, Amplitudes, ClosedShellAmplitudes] = "mp2",
include_reference_ansatz=True,
parametrized=True,
threshold=1.e-8,
add_singles=None,
*args, **kwargs) -> QCircuit:
"""
Parameters
----------
initial_amplitudes :
initial amplitudes given as ManyBodyAmplitudes structure or as string
where 'mp2', 'cc2' or 'ccsd' are possible initializations
include_reference_ansatz :
Also do the reference ansatz (prepare closed-shell Hartree-Fock) (Default value = True)
parametrized :
Initialize with variables, otherwise with static numbers (Default value = True)
trotter_steps: int :
initial_amplitudes: typing.Union[str :
Amplitudes :
ClosedShellAmplitudes] :
(Default value = "cc2")
Returns
-------
type
Parametrized QCircuit
"""
if hasattr(initial_amplitudes, "lower"):
if initial_amplitudes.lower() == "mp2" and add_singles is None:
add_singles=True
elif initial_amplitudes is not None and add_singles is not None:
warnings.warn("make_uccsd_anstatz: add_singles has no effect when explicit amplitudes are passed down", TequilaWarning)
elif add_singles is None:
add_singles=True
if self.n_electrons % 2 != 0:
raise TequilaException("make_uccsd_ansatz currently only for closed shell systems")
nocc = self.n_electrons // 2
nvirt = self.n_orbitals - nocc
Uref = QCircuit()
if include_reference_ansatz:
Uref = self.prepare_reference()
amplitudes = initial_amplitudes
if hasattr(initial_amplitudes, "lower"):
if initial_amplitudes.lower() == "mp2":
amplitudes = self.compute_mp2_amplitudes()
elif initial_amplitudes.lower() == "ccsd":
amplitudes = self.compute_ccsd_amplitudes()
else:
try:
amplitudes = self.compute_amplitudes(method=initial_amplitudes.lower())
except Exception as exc:
raise TequilaException(
"{}\nDon't know how to initialize \'{}\' amplitudes".format(exc, initial_amplitudes))
if amplitudes is None:
tia=None
if add_singles: tia=numpy.zeros(shape=[nocc, nvirt])
amplitudes = ClosedShellAmplitudes(
tIjAb=numpy.zeros(shape=[nocc, nocc, nvirt, nvirt]),
tIA=tia)
closed_shell = isinstance(amplitudes, ClosedShellAmplitudes)
indices = {}
if not isinstance(amplitudes, dict):
amplitudes = amplitudes.make_parameter_dictionary(threshold=threshold)
amplitudes = dict(sorted(amplitudes.items(), key=lambda x: numpy.fabs(x[1]), reverse=True))
for key, t in amplitudes.items():
assert (len(key) % 2 == 0)
if not numpy.isclose(t, 0.0, atol=threshold):
if closed_shell:
if len(key) == 2 and add_singles:
# singles
angle=2.0*t
if parametrized:
angle=2.0*Variable(name=key)
idx_a = (2*key[0], 2*key[1])
idx_b = (2*key[0]+1, 2*key[1]+1)
indices[idx_a]=angle
indices[idx_b]=angle
else:
assert len(key)==4
angle=2.0*t
if parametrized:
angle=2.0*Variable(name=key)
idx_abab=(2 * key[0] + 1, 2 * key[1] + 1, 2 * key[2], 2 * key[3])
indices[idx_abab]=angle
if key[0]!=key[2] and key[1]!=key[3]:
idx_aaaa=(2 * key[0], 2 * key[1], 2 * key[2], 2 * key[3])
idx_bbbb=(2 * key[0] + 1, 2 * key[1] + 1, 2 * key[2]+1, 2 * key[3]+1)
partner = tuple([key[2], key[1], key[0], key[3]])
anglex=2.0*(t - amplitudes[partner])
if parametrized:
anglex=2.0*(Variable(name=key) - Variable(partner))
indices[idx_aaaa]=anglex
indices[idx_bbbb]=anglex
else:
raise Exception("only closed-shell supported, please assemble yourself .... sorry :-)")
UCCSD = QCircuit()
factor = 1.0 / trotter_steps
for step in range(trotter_steps):
for idx, angle in indices.items():
UCCSD += self.make_excitation_gate(indices=idx, angle=factor * angle)
if hasattr(initial_amplitudes,"lower") and initial_amplitudes.lower()=="mp2" and parametrized and add_singles:
            # mp2 has no singles, need to initialize them here (if not parametrized, initializing them as 0.0 makes no sense though)
UCCSD += self.make_upccgsd_layer(indices="upccsd", include_singles=True, include_doubles=False)
return Uref + UCCSD
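    # Illustrative usage sketch (not part of the original source), with a hypothetical
    # molecule instance `mol`: a single-trotter-step UCCSD circuit seeded with MP2 amplitudes.
    # >>> U = mol.make_uccsd_ansatz(trotter_steps=1, initial_amplitudes="mp2")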
def compute_amplitudes(self, method: str, *args, **kwargs):
"""
Compute closed-shell CC amplitudes
Parameters
----------
method :
coupled-cluster methods like cc2, ccsd, cc3, ccsd(t)
Success might depend on backend
got an extra function for MP2
*args :
**kwargs :
Returns
-------
"""
raise TequilaException("compute amplitudes: Needs to be overwritten by backend")
def compute_mp2_amplitudes(self) -> ClosedShellAmplitudes:
"""
Compute closed-shell mp2 amplitudes
.. math::
            t(a,i,b,j) = g(a,i,b,j)/(e(i) + e(j) - e(a) - e(b))
:return:
Parameters
----------
Returns
-------
"""
g = self.molecule.two_body_integrals
fij = self.molecule.orbital_energies
nocc = self.molecule.n_electrons // 2 # this is never the active space
ei = fij[:nocc]
ai = fij[nocc:]
abgij = g[nocc:, nocc:, :nocc, :nocc]
amplitudes = abgij * 1.0 / (
ei.reshape(1, 1, -1, 1) + ei.reshape(1, 1, 1, -1) - ai.reshape(-1, 1, 1, 1) - ai.reshape(1, -1, 1, 1))
E = 2.0 * numpy.einsum('abij,abij->', amplitudes, abgij) - numpy.einsum('abji,abij', amplitudes, abgij,
optimize='greedy')
self.molecule.mp2_energy = E + self.molecule.hf_energy
return ClosedShellAmplitudes(tIjAb=numpy.einsum('abij -> ijab', amplitudes, optimize='greedy'))
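    # Illustrative note (not part of the original source): the energy assembled above is the
    # standard closed-shell MP2 expression E_MP2 = sum t * (2 * g_direct - g_exchange),
    # evaluated with the two einsum contractions over the virtual-virtual-occupied-occupied
    # block abgij of the integrals.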
def compute_cis_amplitudes(self):
"""
Compute the CIS amplitudes of the molecule
"""
@dataclass
class ResultCIS:
""" """
omegas: typing.List[numbers.Real] # excitation energies [omega0, ...]
amplitudes: typing.List[ClosedShellAmplitudes] # corresponding amplitudes [x_{ai}_0, ...]
def __getitem__(self, item):
return (self.omegas[item], self.amplitudes[item])
def __len__(self):
return len(self.omegas)
g = self.molecule.two_body_integrals
fij = self.molecule.orbital_energies
nocc = self.n_alpha_electrons
nvirt = self.n_orbitals - nocc
pairs = []
for i in range(nocc):
for a in range(nocc, nocc + nvirt):
pairs.append((a, i))
M = numpy.ndarray(shape=[len(pairs), len(pairs)])
for xx, x in enumerate(pairs):
eia = fij[x[0]] - fij[x[1]]
a, i = x
for yy, y in enumerate(pairs):
b, j = y
delta = float(y == x)
gpart = 2.0 * g[a, i, b, j] - g[a, i, j, b]
M[xx, yy] = eia * delta + gpart
omega, xvecs = numpy.linalg.eigh(M)
# convert amplitudes to ndarray sorted by excitation energy
nex = len(omega)
amplitudes = []
for ex in range(nex):
t = numpy.ndarray(shape=[nvirt, nocc])
exvec = xvecs[ex]
for xx, x in enumerate(pairs):
a, i = x
t[a - nocc, i] = exvec[xx]
amplitudes.append(ClosedShellAmplitudes(tIA=t))
return ResultCIS(omegas=list(omega), amplitudes=amplitudes)
@property
def rdm1(self):
"""
        Returns RDM1 if computed with the compute_rdms function before
"""
if self._rdm1 is not None:
return self._rdm1
else:
print("1-RDM has not been computed. Return None for 1-RDM.")
return None
@property
def rdm2(self):
"""
        Returns RDM2 if computed with the compute_rdms function before
This is returned in Dirac (physics) notation by default (can be changed in compute_rdms with keyword)!
"""
if self._rdm2 is not None:
return self._rdm2
else:
print("2-RDM has not been computed. Return None for 2-RDM.")
return None
def compute_rdms(self, U: QCircuit = None, variables: Variables = None, spin_free: bool = True,
get_rdm1: bool = True, get_rdm2: bool = True, ordering="dirac"):
"""
Computes the one- and two-particle reduced density matrices (rdm1 and rdm2) given
a unitary U. This method uses the standard ordering in physics as denoted below.
Note, that the representation of the density matrices depends on the qubit transformation
used. The Jordan-Wigner encoding corresponds to 'classical' second quantized density
matrices in the occupation picture.
We only consider real orbitals and thus real-valued RDMs.
The matrices are set as private members _rdm1, _rdm2 and can be accessed via the properties rdm1, rdm2.
.. math :
\\text{rdm1: } \\gamma^p_q = \\langle \\psi | a^p a_q | \\psi \\rangle
= \\langle U 0 | a^p a_q | U 0 \\rangle
\\text{rdm2: } \\gamma^{pq}_{rs} = \\langle \\psi | a^p a^q a_s a_r | \\psi \\rangle
= \\langle U 0 | a^p a^q a_s a_r | U 0 \\rangle
Parameters
----------
U :
Quantum Circuit to achieve the desired state \\psi = U |0\\rangle, non-optional
variables :
If U is parametrized, then need to hand over a set of fixed variables
spin_free :
Set whether matrices should be spin-free (summation over spin) or defined by spin-orbitals
get_rdm1, get_rdm2 :
Set whether either one or both rdm1, rdm2 should be computed. If both are needed at some point,
it is recommended to compute them at once.
Returns
-------
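        rdm1 and/or rdm2 :
            The matrices requested via get_rdm1/get_rdm2 (also stored in
            self._rdm1, self._rdm2).
        A minimal usage sketch (assuming `U` prepares the target state and
        `angles` holds its variables; the names are illustrative)::
            rdm1, rdm2 = molecule.compute_rdms(U=U, variables=angles, spin_free=True)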
"""
# Check whether unitary circuit is not 0
if U is None:
raise TequilaException('Need to specify a Quantum Circuit.')
# Check whether transformation is BKSF.
# Issue here: when a single operator acts only on a subset of qubits, BKSF might not yield the correct
# transformation, because it computes the number of qubits incorrectly in this case.
# A hotfix such as for symmetry_conserving_bravyi_kitaev would require deeper changes, thus omitted for now
if type(self.transformation).__name__ == "BravyiKitaevFast":
raise TequilaException(
"The Bravyi-Kitaev-Superfast transformation does not support general FermionOperators yet.")
# Set up number of spin-orbitals and molecular orbitals respectively
n_SOs = 2 * self.n_orbitals
n_MOs = self.n_orbitals
def _get_of_op(operator_tuple):
""" Returns operator given by a operator tuple as OpenFermion - Fermion operator """
op = openfermion.FermionOperator(operator_tuple)
return op
def _get_qop_hermitian(of_operator) -> QubitHamiltonian:
""" Returns Hermitian part of Fermion operator as QubitHamiltonian """
qop = self.transformation(of_operator)
#qop = QubitHamiltonian(self.transformation(of_operator))
real, imag = qop.split(hermitian=True)
if real:
return real
            else:
raise TequilaException(
"Qubit Hamiltonian does not have a Hermitian part. Operator ={}".format(of_operator))
def _build_1bdy_operators_spinful() -> list:
""" Returns spinful one-body operators as a symmetry-reduced list of QubitHamiltonians """
# Exploit symmetry pq = qp
ops = []
for p in range(n_SOs):
for q in range(p + 1):
op_tuple = ((p, 1), (q, 0))
op = _get_of_op(op_tuple)
ops += [op]
return ops
def _build_2bdy_operators_spinful() -> list:
""" Returns spinful two-body operators as a symmetry-reduced list of QubitHamiltonians """
# Exploit symmetries pqrs = -pqsr = -qprs = qpsr
# and = rspq
ops = []
for p in range(n_SOs):
for q in range(p):
for r in range(n_SOs):
for s in range(r):
if p * n_SOs + q >= r * n_SOs + s:
op_tuple = ((p, 1), (q, 1), (s, 0), (r, 0))
op = _get_of_op(op_tuple)
ops += [op]
return ops
def _build_1bdy_operators_spinfree() -> list:
""" Returns spinfree one-body operators as a symmetry-reduced list of QubitHamiltonians """
# Exploit symmetry pq = qp (not changed by spin-summation)
ops = []
for p in range(n_MOs):
for q in range(p + 1):
# Spin aa
op_tuple = ((2 * p, 1), (2 * q, 0))
op = _get_of_op(op_tuple)
# Spin bb
op_tuple = ((2 * p + 1, 1), (2 * q + 1, 0))
op += _get_of_op(op_tuple)
ops += [op]
return ops
def _build_2bdy_operators_spinfree() -> list:
""" Returns spinfree two-body operators as a symmetry-reduced list of QubitHamiltonians """
# Exploit symmetries pqrs = qpsr (due to spin summation, '-pqsr = -qprs' drops out)
# and = rspq
ops = []
for p, q, r, s in product(range(n_MOs), repeat=4):
if p * n_MOs + q >= r * n_MOs + s and (p >= q or r >= s):
# Spin aaaa
op_tuple = ((2 * p, 1), (2 * q, 1), (2 * s, 0), (2 * r, 0)) if (p != q and r != s) else '0.0 []'
op = _get_of_op(op_tuple)
# Spin abab
op_tuple = ((2 * p, 1), (2 * q + 1, 1), (2 * s + 1, 0), (2 * r, 0)) if (
2 * p != 2 * q + 1 and 2 * r != 2 * s + 1) else '0.0 []'
op += _get_of_op(op_tuple)
# Spin baba
op_tuple = ((2 * p + 1, 1), (2 * q, 1), (2 * s, 0), (2 * r + 1, 0)) if (
2 * p + 1 != 2 * q and 2 * r + 1 != 2 * s) else '0.0 []'
op += _get_of_op(op_tuple)
# Spin bbbb
op_tuple = ((2 * p + 1, 1), (2 * q + 1, 1), (2 * s + 1, 0), (2 * r + 1, 0)) if (
p != q and r != s) else '0.0 []'
op += _get_of_op(op_tuple)
ops += [op]
return ops
def _assemble_rdm1(evals) -> numpy.ndarray:
"""
Returns spin-ful or spin-free one-particle RDM built by symmetry conditions
Same symmetry with or without spin, so we can use the same function
"""
N = n_MOs if spin_free else n_SOs
rdm1 = numpy.zeros([N, N])
ctr: int = 0
for p in range(N):
for q in range(p + 1):
rdm1[p, q] = evals[ctr]
# Symmetry pq = qp
rdm1[q, p] = rdm1[p, q]
ctr += 1
return rdm1
def _assemble_rdm2_spinful(evals) -> numpy.ndarray:
""" Returns spin-ful two-particle RDM built by symmetry conditions """
ctr: int = 0
rdm2 = numpy.zeros([n_SOs, n_SOs, n_SOs, n_SOs])
for p in range(n_SOs):
for q in range(p):
for r in range(n_SOs):
for s in range(r):
if p * n_SOs + q >= r * n_SOs + s:
rdm2[p, q, r, s] = evals[ctr]
# Symmetry pqrs = rspq
rdm2[r, s, p, q] = rdm2[p, q, r, s]
ctr += 1
# Further permutational symmetries due to anticommutation relations
for p in range(n_SOs):
for q in range(p):
for r in range(n_SOs):
for s in range(r):
rdm2[p, q, s, r] = -1 * rdm2[p, q, r, s] # pqrs = -pqsr
rdm2[q, p, r, s] = -1 * rdm2[p, q, r, s] # pqrs = -qprs
rdm2[q, p, s, r] = rdm2[p, q, r, s] # pqrs = qpsr
return rdm2
def _assemble_rdm2_spinfree(evals) -> numpy.ndarray:
""" Returns spin-free two-particle RDM built by symmetry conditions """
ctr: int = 0
rdm2 = numpy.zeros([n_MOs, n_MOs, n_MOs, n_MOs])
for p, q, r, s in product(range(n_MOs), repeat=4):
if p * n_MOs + q >= r * n_MOs + s and (p >= q or r >= s):
rdm2[p, q, r, s] = evals[ctr]
# Symmetry pqrs = rspq
rdm2[r, s, p, q] = rdm2[p, q, r, s]
ctr += 1
# Further permutational symmetry: pqrs = qpsr
for p, q, r, s in product(range(n_MOs), repeat=4):
if p >= q or r >= s:
rdm2[q, p, s, r] = rdm2[p, q, r, s]
return rdm2
# Build operator lists
qops = []
if spin_free:
qops += _build_1bdy_operators_spinfree() if get_rdm1 else []
qops += _build_2bdy_operators_spinfree() if get_rdm2 else []
else:
qops += _build_1bdy_operators_spinful() if get_rdm1 else []
qops += _build_2bdy_operators_spinful() if get_rdm2 else []
# Transform operator lists to QubitHamiltonians
qops = [_get_qop_hermitian(op) for op in qops]
# Compute expected values
evals = simulate(ExpectationValue(H=qops, U=U, shape=[len(qops)]), variables=variables)
# Assemble density matrices
# If self._rdm1, self._rdm2 exist, reset them if they are of the other spin-type
def _reset_rdm(rdm):
if rdm is not None:
if spin_free and rdm.shape[0] != n_MOs:
return None
if not spin_free and rdm.shape[0] != n_SOs:
return None
return rdm
self._rdm1 = _reset_rdm(self._rdm1)
self._rdm2 = _reset_rdm(self._rdm2)
# Split expectation values in 1- and 2-particle expectation values
if get_rdm1:
len_1 = n_MOs * (n_MOs + 1) // 2 if spin_free else n_SOs * (n_SOs + 1) // 2
else:
len_1 = 0
evals_1, evals_2 = evals[:len_1], evals[len_1:]
# Build matrices using the expectation values
self._rdm1 = _assemble_rdm1(evals_1) if get_rdm1 else self._rdm1
if spin_free:
self._rdm2 = _assemble_rdm2_spinfree(evals_2) if get_rdm2 else self._rdm2
else:
self._rdm2 = _assemble_rdm2_spinful(evals_2) if get_rdm2 else self._rdm2
if get_rdm2:
rdm2 = NBodyTensor(elems=self.rdm2, ordering="dirac")
rdm2.reorder(to=ordering)
rdm2 = rdm2.elems
self._rdm2 = rdm2
if get_rdm1:
if get_rdm2:
return self.rdm1, self.rdm2
else:
return self.rdm1
elif get_rdm2:
return self.rdm2
else:
warnings.warn("compute_rdms called with instruction to not compute?", TequilaWarning)
def rdm_spinsum(self, sum_rdm1: bool = True, sum_rdm2: bool = True) -> tuple:
"""
Given the spin-ful 1- and 2-particle reduced density matrices, compute the spin-free RDMs by spin summation.
Parameters
----------
sum_rdm1, sum_rdm2 :
If set to true, perform spin summation on rdm1, rdm2
Returns
-------
rdm1_spinsum, rdm2_spinsum :
The desired spin-free matrices
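        In index form, the summation performed below is (sketch):
            rdm1_spinsum[p, q] = rdm1[2p, 2q] + rdm1[2p+1, 2q+1]
        and analogously rdm2 is accumulated over its four same-spin and
        mixed-spin blocks.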
"""
n_MOs = self.n_orbitals
rdm1_spinsum = None
rdm2_spinsum = None
# Spin summation on rdm1
if sum_rdm1:
# Check whether spin-rdm2 exists
if self._rdm1 is None:
raise TequilaException("The spin-RDM for the 1-RDM does not exist!")
# Check whether existing rdm1 is in spin-orbital basis
if self._rdm1.shape[0] != 2 * n_MOs:
raise TequilaException("The existing RDM needs to be in spin-orbital basis, it is already spin-free!")
# Do summation
rdm1_spinsum = numpy.zeros([n_MOs, n_MOs])
for p in range(n_MOs):
for q in range(p + 1):
rdm1_spinsum[p, q] += self._rdm1[2 * p, 2 * q]
rdm1_spinsum[p, q] += self._rdm1[2 * p + 1, 2 * q + 1]
for p in range(n_MOs):
for q in range(p):
rdm1_spinsum[q, p] = rdm1_spinsum[p, q]
# Spin summation on rdm2
if sum_rdm2:
# Check whether spin-rdm2 exists
if self._rdm2 is None:
raise TequilaException("The spin-RDM for the 2-RDM does not exist!")
# Check whether existing rdm2 is in spin-orbital basis
if self._rdm2.shape[0] != 2 * n_MOs:
raise TequilaException("The existing RDM needs to be in spin-orbital basis, it is already spin-free!")
# Do summation
rdm2_spinsum = numpy.zeros([n_MOs, n_MOs, n_MOs, n_MOs])
for p, q, r, s in product(range(n_MOs), repeat=4):
rdm2_spinsum[p, q, r, s] += self._rdm2[2 * p, 2 * q, 2 * r, 2 * s]
rdm2_spinsum[p, q, r, s] += self._rdm2[2 * p + 1, 2 * q, 2 * r + 1, 2 * s]
rdm2_spinsum[p, q, r, s] += self._rdm2[2 * p, 2 * q + 1, 2 * r, 2 * s + 1]
rdm2_spinsum[p, q, r, s] += self._rdm2[2 * p + 1, 2 * q + 1, 2 * r + 1, 2 * s + 1]
return rdm1_spinsum, rdm2_spinsum
def perturbative_f12_correction(self, rdm1: numpy.ndarray = None, rdm2: numpy.ndarray = None,
gamma: float = 1.4, n_ri: int = None,
external_info: dict = None, **kwargs) -> float:
"""
Computes the spin-free [2]_R12 correction, needing only the 1- and 2-RDM of a reference method
Requires either 1-RDM, 2-RDM or information to compute them in kwargs
Parameters
----------
rdm1 :
1-electron reduced density matrix
rdm2 :
2-electron reduced density matrix
gamma :
f12-exponent, for a correlation factor f_12 = -1/gamma * exp[-gamma*r_12]
n_ri :
            dimensionality of the RI-basis; specify only if you want to truncate the available RI-basis
if None, then the maximum available via tensors / basis-set is used
must not be larger than size of available RI-basis, and not smaller than size of OBS
for n_ri==dim(OBS), the correction returns zero
external_info :
for usage in qc_base, need to provide information where to find one-body tensor f12-tensor <rs|f_12|pq>;
pass dictionary with {"f12_filename": where to find f12-tensor, "scheme": ordering scheme of tensor}
kwargs :
e.g. RDM-information via {"U": QCircuit, "variables": optimal angles}, needs to be passed if rdm1,rdm2 not
yet computed
Returns
-------
the f12 correction for the energy
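        A minimal usage sketch (assuming the RDMs were computed beforehand, e.g.
        via compute_rdms; the names are illustrative)::
            delta_f12 = molecule.perturbative_f12_correction(rdm1=rdm1, rdm2=rdm2, gamma=1.4)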
"""
from .f12_corrections._f12_correction_base import ExplicitCorrelationCorrection
correction = ExplicitCorrelationCorrection(mol=self, rdm1=rdm1, rdm2=rdm2, gamma=gamma,
n_ri=n_ri, external_info=external_info, **kwargs)
return correction.compute()
def __str__(self) -> str:
result = str(type(self)) + "\n"
result += "Qubit Encoding\n"
result += str(self.transformation) + "\n\n"
result += "Parameters\n"
for k, v in self.parameters.__dict__.items():
result += "{key:15} : {value:15} \n".format(key=str(k), value=str(v))
result += "\n"
return result
| 41.208475 | 255 | 0.553811 | [
"MIT"
] | CopperHu/tequila | src/tequila/quantumchemistry/qc_base.py | 84,601 | Python |
#!/usr/bin/env python
#
# svnadmin_tests.py: testing the 'svnadmin' tool.
#
# Subversion is a tool for revision control.
# See http://subversion.apache.org for more information.
#
# ====================================================================
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
######################################################################
# General modules
import os
import logging
import re
import shutil
import sys
import threading
import time
import gzip
logger = logging.getLogger()
# Our testing module
import svntest
from svntest.verify import SVNExpectedStdout, SVNExpectedStderr
from svntest.verify import SVNUnexpectedStderr
from svntest.verify import UnorderedOutput
from svntest.main import SVN_PROP_MERGEINFO
# (abbreviation)
Skip = svntest.testcase.Skip_deco
SkipUnless = svntest.testcase.SkipUnless_deco
XFail = svntest.testcase.XFail_deco
Issues = svntest.testcase.Issues_deco
Issue = svntest.testcase.Issue_deco
Wimp = svntest.testcase.Wimp_deco
SkipDumpLoadCrossCheck = svntest.testcase.SkipDumpLoadCrossCheck_deco
Item = svntest.wc.StateItem
def read_rep_cache(repo_dir):
"""Return the rep-cache contents as a dict {hash: (rev, index, ...)}.
"""
db_path = os.path.join(repo_dir, 'db', 'rep-cache.db')
db1 = svntest.sqlite3.connect(db_path)
schema1 = db1.execute("pragma user_version").fetchone()[0]
# Can't test newer rep-cache schemas with an old built-in SQLite; see the
# documentation of STMT_CREATE_SCHEMA_V2 in ../../libsvn_fs_fs/rep-cache-db.sql
if schema1 >= 2 and svntest.sqlite3.sqlite_version_info < (3, 8, 2):
raise svntest.Failure("Can't read rep-cache schema %d using old "
"Python-SQLite version %s < (3,8,2)" %
(schema1,
svntest.sqlite3.sqlite_version_info))
content = { row[0]: row[1:] for row in
db1.execute("select * from rep_cache") }
return content
def check_hotcopy_bdb(src, dst):
"Verify that the SRC BDB repository has been correctly copied to DST."
### TODO: This function should be extended to verify all hotcopied files,
### not just compare the output of 'svnadmin dump'. See check_hotcopy_fsfs().
exit_code, origout, origerr = svntest.main.run_svnadmin("dump", src,
'--quiet')
exit_code, backout, backerr = svntest.main.run_svnadmin("dump", dst,
'--quiet')
if origerr or backerr or origout != backout:
raise svntest.Failure
def check_hotcopy_fsfs_fsx(src, dst):
# Walk the source and compare all files to the destination
for src_dirpath, src_dirs, src_files in os.walk(src):
# Verify that the current directory exists in the destination
dst_dirpath = src_dirpath.replace(src, dst)
if not os.path.isdir(dst_dirpath):
raise svntest.Failure("%s does not exist in hotcopy "
"destination" % dst_dirpath)
# Verify that all dirents in the current directory also exist in source
for dst_dirent in os.listdir(dst_dirpath):
# Ignore auto-created empty lock files as they may or may not
      # be present and are neither required by nor harmful to
      # the destination repository.
if dst_dirent == 'pack-lock':
continue
if dst_dirent == 'write-lock':
continue
# Ignore auto-created rep-cache.db-journal file
if dst_dirent == 'rep-cache.db-journal':
continue
src_dirent = os.path.join(src_dirpath, dst_dirent)
if not os.path.exists(src_dirent):
raise svntest.Failure("%s does not exist in hotcopy "
"source" % src_dirent)
# Compare all files in this directory
for src_file in src_files:
# Ignore auto-created empty lock files as they may or may not
      # be present and are neither required by nor harmful to
      # the destination repository.
if src_file == 'pack-lock':
continue
if src_file == 'write-lock':
continue
# Ignore auto-created rep-cache.db-journal file
if src_file == 'rep-cache.db-journal':
continue
src_path = os.path.join(src_dirpath, src_file)
dst_path = os.path.join(dst_dirpath, src_file)
if not os.path.isfile(dst_path):
raise svntest.Failure("%s does not exist in hotcopy "
"destination" % dst_path)
# Special case for db/uuid: Only the UUID in the first line needs
# to match. Source and target must have the same number of lines
# (due to having the same format).
if src_path == os.path.join(src, 'db', 'uuid'):
lines1 = open(src_path, 'rb').read().split(b"\n")
lines2 = open(dst_path, 'rb').read().split(b"\n")
if len(lines1) != len(lines2):
raise svntest.Failure("%s differs in number of lines"
% dst_path)
if lines1[0] != lines2[0]:
raise svntest.Failure("%s contains different uuid: '%s' vs. '%s'"
% (dst_path, lines1[0], lines2[0]))
continue
# Special case for rep-cache: It will always differ in a byte-by-byte
# comparison, so compare db tables instead.
if src_file == 'rep-cache.db':
db1 = svntest.sqlite3.connect(src_path)
db2 = svntest.sqlite3.connect(dst_path)
schema1 = db1.execute("pragma user_version").fetchone()[0]
schema2 = db2.execute("pragma user_version").fetchone()[0]
if schema1 != schema2:
raise svntest.Failure("rep-cache schema differs: '%s' vs. '%s'"
% (schema1, schema2))
# Can't test newer rep-cache schemas with an old built-in SQLite.
if schema1 >= 2 and svntest.sqlite3.sqlite_version_info < (3, 8, 2):
continue
rows1 = []
rows2 = []
for row in db1.execute("select * from rep_cache order by hash"):
rows1.append(row)
for row in db2.execute("select * from rep_cache order by hash"):
rows2.append(row)
if len(rows1) != len(rows2):
raise svntest.Failure("number of rows in rep-cache differs")
for i in range(len(rows1)):
if rows1[i] != rows2[i]:
raise svntest.Failure("rep-cache row %i differs: '%s' vs. '%s'"
% (i, rows1[i], rows2[i]))
continue
# Special case for revprop-generation: It will always be zero in
# the hotcopy destination (i.e. a fresh cache generation)
if src_file == 'revprop-generation':
f2 = open(dst_path, 'r')
revprop_gen = int(f2.read().strip())
if revprop_gen != 0:
raise svntest.Failure("Hotcopy destination has non-zero " +
"revprop generation")
continue
f1 = open(src_path, 'rb')
f2 = open(dst_path, 'rb')
while True:
offset = 0
BUFSIZE = 1024
buf1 = f1.read(BUFSIZE)
buf2 = f2.read(BUFSIZE)
if not buf1 or not buf2:
if not buf1 and not buf2:
# both at EOF
break
elif buf1:
raise svntest.Failure("%s differs at offset %i" %
(dst_path, offset))
elif buf2:
raise svntest.Failure("%s differs at offset %i" %
(dst_path, offset))
if len(buf1) != len(buf2):
raise svntest.Failure("%s differs in length" % dst_path)
for i in range(len(buf1)):
if buf1[i] != buf2[i]:
raise svntest.Failure("%s differs at offset %i"
% (dst_path, offset))
offset += 1
f1.close()
f2.close()
def check_hotcopy_fsfs(src, dst):
"Verify that the SRC FSFS repository has been correctly copied to DST."
check_hotcopy_fsfs_fsx(src, dst)
def check_hotcopy_fsx(src, dst):
"Verify that the SRC FSX repository has been correctly copied to DST."
check_hotcopy_fsfs_fsx(src, dst)
#----------------------------------------------------------------------
# How we currently test 'svnadmin' --
#
# 'svnadmin create': Create an empty repository, test that the
# root node has a proper created-revision,
# because there was once a bug where it
# didn't.
#
# Note also that "svnadmin create" is tested
# implicitly every time we run a python test
# script. (An empty repository is always
# created and then imported into; if this
# subcommand failed catastrophically, every
# test would fail and we would know instantly.)
#
# 'svnadmin createtxn'
# 'svnadmin rmtxn': See below.
#
# 'svnadmin lstxns': We don't care about the contents of transactions;
# we only care that they exist or not.
# Therefore, we can simply parse transaction headers.
#
# 'svnadmin dump': A couple regression tests that ensure dump doesn't
# error out, and one to check that the --quiet option
# really does what it's meant to do. The actual
# contents of the dump aren't verified at all.
#
# ### TODO: someday maybe we could parse the contents of trees too.
#
######################################################################
# Helper routines
def get_txns(repo_dir):
"Get the txn names using 'svnadmin lstxns'."
exit_code, output_lines, error_lines = svntest.main.run_svnadmin('lstxns',
repo_dir)
  txns = sorted([x.strip() for x in output_lines])
return txns
def patch_format(repo_dir, shard_size):
"""Rewrite the format of the FSFS or FSX repository REPO_DIR so
that it would use sharding with SHARDS revisions per shard."""
format_path = os.path.join(repo_dir, "db", "format")
contents = open(format_path, 'rb').read()
processed_lines = []
for line in contents.split(b"\n"):
if line.startswith(b"layout "):
processed_lines.append(("layout sharded %d" % shard_size).encode())
else:
processed_lines.append(line)
new_contents = b"\n".join(processed_lines)
os.chmod(format_path, svntest.main.S_ALL_RW)
with open(format_path, 'wb') as f:
f.write(new_contents)
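# For reference, patch_format() only rewrites the 'layout ...' line of db/format,
# e.g. (sketch) a line such as 'layout sharded 1000' becomes 'layout sharded <shard_size>'.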
def is_sharded(repo_dir):
"""Return whether the FSFS repository REPO_DIR is sharded."""
format_path = os.path.join(repo_dir, "db", "format")
contents = open(format_path, 'rb').read()
for line in contents.split(b"\n"):
if line.startswith(b"layout sharded"):
return True
return False
def load_and_verify_dumpstream(sbox, expected_stdout, expected_stderr,
revs, check_props, dump, *varargs):
"""Load the array of lines passed in DUMP into the current tests'
repository and verify the repository content using the array of
wc.States passed in REVS. If CHECK_PROPS is True, check properties
of each rev's items. VARARGS are optional arguments passed to the
'load' command."""
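  # Typical invocation, as used by the tests further below (shown as a comment
  # for reference):
  #   load_and_verify_dumpstream(sbox, [], [], dumpfile_revisions, False,
  #                              dumpfile, '--ignore-uuid')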
dump = svntest.main.ensure_list(dump)
exit_code, output, errput = svntest.main.run_command_stdin(
svntest.main.svnadmin_binary, expected_stderr, 0, True, dump,
'load', '--quiet', sbox.repo_dir, *varargs)
if expected_stdout:
if expected_stdout is svntest.verify.AnyOutput:
if len(output) == 0:
raise SVNExpectedStdout
else:
svntest.verify.compare_and_display_lines(
"Standard output", "STDOUT:", expected_stdout, output)
if expected_stderr:
if expected_stderr is svntest.verify.AnyOutput:
if len(errput) == 0:
raise SVNExpectedStderr
else:
svntest.verify.compare_and_display_lines(
"Standard error output", "STDERR:", expected_stderr, errput)
# The expected error occurred, so don't try to verify the result
return
if revs:
# verify revs as wc states
for rev in range(len(revs)):
svntest.actions.run_and_verify_svn(svntest.verify.AnyOutput, [],
"update", "-r%s" % (rev+1),
sbox.wc_dir)
rev_tree = revs[rev]
svntest.actions.verify_disk(sbox.wc_dir, rev_tree, check_props)
def load_dumpstream(sbox, dump, *varargs):
"Load dump text without verification."
return load_and_verify_dumpstream(sbox, None, None, None, False, dump,
*varargs)
class FSFS_Index:
"""Manages indexes of a rev file in a FSFS format 7 repository.
The interface returns P2L information and allows for item offsets
and lengths to be modified. """
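  # Usage sketch (mirrors how set_changed_path_list() below uses this class):
  #   idx = FSFS_Index(sbox, revision)
  #   offset, length, item_type = idx.get_item(1)
  #   idx.modify_item(1, offset, new_length)  # rewrites the on-disk indexes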
def __init__(self, sbox, revision):
self.by_item = { }
self.revision = revision
self.repo_dir = sbox.repo_dir
self._read()
def _read(self):
""" Read P2L index using svnfsfs. """
exit_code, output, errput = svntest.main.run_svnfsfs('dump-index',
'-r' + str(self.revision),
self.repo_dir)
svntest.verify.verify_outputs("Error while dumping index",
[], errput, [], [])
svntest.verify.verify_exit_code(None, exit_code, 0)
self.by_item.clear()
for line in output:
values = line.split()
if len(values) >= 4 and values[0] != 'Start':
item = int(values[4])
self.by_item[item] = values
def _write(self):
""" Rewrite indexes using svnfsfs. """
by_offset = {}
for key in self.by_item:
values = self.by_item[key]
by_offset[int(values[0], 16)] = values
lines = []
for (offset, values) in sorted(by_offset.items()):
values = by_offset[offset]
line = values[0] + ' ' + values[1] + ' ' + values[2] + ' ' + \
             values[3] + ' ' + values[4] + '\n'
lines.append(line.encode())
exit_code, output, errput = svntest.main.run_command_stdin(
svntest.main.svnfsfs_binary, 0, 0, False, lines,
'load-index', self.repo_dir)
svntest.verify.verify_outputs("Error while rewriting index",
output, errput, [], [])
svntest.verify.verify_exit_code(None, exit_code, 0)
def get_item(self, item):
""" Return offset, length and type of ITEM. """
values = self.by_item[item]
offset = int(values[0], 16)
len = int(values[1], 16)
type = values[2]
return (offset, len, type)
def modify_item(self, item, offset, len):
""" Modify offset and length of ITEM. """
values = self.by_item[item]
values[0] = '%x' % offset
values[1] = '%x' % len
self._write()
def repo_format(sbox):
""" Return the repository format number for SBOX."""
format_file = open(os.path.join(sbox.repo_dir, "db", "format"))
format = int(format_file.read()[:1])
format_file.close()
return format
def set_changed_path_list(sbox, revision, changes):
""" Replace the changed paths list in the revision file REVISION in SBOX
with the text CHANGES."""
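  # Sketch of the format 7+ revision file layout handled below:
  #   [noderev/rep data][changed-path list][L2P index][P2L index][footer][footer-length byte]
  # where the footer is b'<l2p offset> <l2p checksum> <p2l offset> <p2l checksum>'
  # and the final byte stores the footer's own length.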
idx = None
# read full file
fp = open(fsfs_file(sbox.repo_dir, 'revs', str(revision)), 'r+b')
contents = fp.read()
length = len(contents)
if repo_format(sbox) < 7:
# replace the changed paths list
header = contents[contents.rfind(b'\n', length - 64, length - 1):]
body_len = int(header.split(b' ')[1])
else:
# read & parse revision file footer
    footer_length = contents[length - 1]
if isinstance(footer_length, str):
footer_length = ord(footer_length)
footer = contents[length - footer_length - 1:length-1]
l2p_offset = int(footer.split(b' ')[0])
l2p_checksum = footer.split(b' ')[1]
p2l_offset = int(footer.split(b' ')[2])
p2l_checksum = footer.split(b' ')[3]
idx = FSFS_Index(sbox, revision)
(offset, item_len, item_type) = idx.get_item(1)
# split file contents
body_len = offset
indexes = contents[l2p_offset:length - footer_length - 1]
# construct new footer, include indexes as are
file_len = body_len + len(changes) + 1
p2l_offset += file_len - l2p_offset
header = str(file_len).encode() + b' ' + l2p_checksum + b' ' \
+ str(p2l_offset).encode() + b' ' + p2l_checksum
header += bytes([len(header)])
header = b'\n' + indexes + header
contents = contents[:body_len] + changes + header
# set new contents
fp.seek(0)
fp.write(contents)
fp.truncate()
fp.close()
if repo_format(sbox) >= 7:
idx.modify_item(1, offset, len(changes) + 1)
######################################################################
# Tests
#----------------------------------------------------------------------
# dump stream tests need a dump file
def clean_dumpfile():
return \
[ b"SVN-fs-dump-format-version: 2\n\n",
b"UUID: 668cc64a-31ed-0310-8ccb-b75d75bb44e3\n\n",
b"Revision-number: 0\n",
b"Prop-content-length: 56\n",
b"Content-length: 56\n\n",
b"K 8\nsvn:date\nV 27\n2005-01-08T21:48:13.838745Z\nPROPS-END\n\n\n",
b"Revision-number: 1\n",
b"Prop-content-length: 98\n",
b"Content-length: 98\n\n",
b"K 7\nsvn:log\nV 0\n\nK 10\nsvn:author\nV 4\nerik\n",
b"K 8\nsvn:date\nV 27\n2005-01-08T21:51:16.313791Z\nPROPS-END\n\n\n",
b"Node-path: A\n",
b"Node-kind: file\n",
b"Node-action: add\n",
b"Prop-content-length: 35\n",
b"Text-content-length: 5\n",
b"Text-content-md5: e1cbb0c3879af8347246f12c559a86b5\n",
b"Content-length: 40\n\n",
b"K 12\nsvn:keywords\nV 2\nId\nPROPS-END\ntext\n\n\n"]
dumpfile_revisions = \
[ svntest.wc.State('', { 'A' : svntest.wc.StateItem(contents="text\n") }) ]
#----------------------------------------------------------------------
def extra_headers(sbox):
"loading of dumpstream with extra headers"
sbox.build(empty=True)
dumpfile = clean_dumpfile()
dumpfile[3:3] = \
[ b"X-Comment-Header: Ignored header normally not in dump stream\n" ]
load_and_verify_dumpstream(sbox,[],[], dumpfile_revisions, False, dumpfile,
'--ignore-uuid')
#----------------------------------------------------------------------
# Ensure loading continues after skipping a bit of unknown extra content.
def extra_blockcontent(sbox):
"load success on oversized Content-length"
sbox.build(empty=True)
dumpfile = clean_dumpfile()
# Replace "Content-length" line with two lines
dumpfile[8:9] = \
[ b"Extra-content-length: 10\n",
b"Content-length: 108\n\n" ]
# Insert the extra content after "PROPS-END\n"
dumpfile[11] = dumpfile[11][:-2] + b"extra text\n\n\n"
load_and_verify_dumpstream(sbox,[],[], dumpfile_revisions, False, dumpfile,
'--ignore-uuid')
#----------------------------------------------------------------------
def inconsistent_headers(sbox):
"load failure on undersized Content-length"
sbox.build(empty=True)
dumpfile = clean_dumpfile()
dumpfile[-2] = b"Content-length: 30\n\n"
load_and_verify_dumpstream(sbox, [], svntest.verify.AnyOutput,
dumpfile_revisions, False, dumpfile)
#----------------------------------------------------------------------
# Test for issue #2729: Datestamp-less revisions in dump streams do
# not remain so after load
@Issue(2729)
def empty_date(sbox):
"preserve date-less revisions in load"
sbox.build(empty=True)
dumpfile = clean_dumpfile()
# Replace portions of the revision data to drop the svn:date revprop.
dumpfile[7:11] = \
[ b"Prop-content-length: 52\n",
b"Content-length: 52\n\n",
b"K 7\nsvn:log\nV 0\n\nK 10\nsvn:author\nV 4\nerik\nPROPS-END\n\n\n"
]
load_and_verify_dumpstream(sbox,[],[], dumpfile_revisions, False, dumpfile,
'--ignore-uuid')
# Verify that the revision still lacks the svn:date property.
svntest.actions.run_and_verify_svn([], '.*(E195011|E200017).*svn:date',
"propget", "--revprop", "-r1", "svn:date",
sbox.wc_dir)
#----------------------------------------------------------------------
def dump_copied_dir(sbox):
"'svnadmin dump' on copied directory"
sbox.build()
wc_dir = sbox.wc_dir
repo_dir = sbox.repo_dir
old_C_path = os.path.join(wc_dir, 'A', 'C')
new_C_path = os.path.join(wc_dir, 'A', 'B', 'C')
svntest.main.run_svn(None, 'cp', old_C_path, new_C_path)
sbox.simple_commit(message='log msg')
exit_code, output, errput = svntest.main.run_svnadmin("dump", repo_dir)
if svntest.verify.compare_and_display_lines(
"Output of 'svnadmin dump' is unexpected.",
'STDERR', ["* Dumped revision 0.\n",
"* Dumped revision 1.\n",
"* Dumped revision 2.\n"], errput):
raise svntest.Failure
#----------------------------------------------------------------------
def dump_move_dir_modify_child(sbox):
"'svnadmin dump' on modified child of copied dir"
sbox.build()
wc_dir = sbox.wc_dir
repo_dir = sbox.repo_dir
B_path = os.path.join(wc_dir, 'A', 'B')
Q_path = os.path.join(wc_dir, 'A', 'Q')
svntest.main.run_svn(None, 'cp', B_path, Q_path)
svntest.main.file_append(os.path.join(Q_path, 'lambda'), 'hello')
sbox.simple_commit(message='log msg')
exit_code, output, errput = svntest.main.run_svnadmin("dump", repo_dir)
svntest.verify.compare_and_display_lines(
"Output of 'svnadmin dump' is unexpected.",
'STDERR', ["* Dumped revision 0.\n",
"* Dumped revision 1.\n",
"* Dumped revision 2.\n"], errput)
exit_code, output, errput = svntest.main.run_svnadmin("dump", "-r",
"0:HEAD", repo_dir)
svntest.verify.compare_and_display_lines(
"Output of 'svnadmin dump' is unexpected.",
'STDERR', ["* Dumped revision 0.\n",
"* Dumped revision 1.\n",
"* Dumped revision 2.\n"], errput)
#----------------------------------------------------------------------
def dump_quiet(sbox):
"'svnadmin dump --quiet'"
sbox.build(create_wc = False)
exit_code, dump, errput = svntest.main.run_svnadmin("dump", sbox.repo_dir,
'--quiet')
svntest.verify.compare_and_display_lines(
"Output of 'svnadmin dump --quiet' is unexpected.",
'STDERR', [], errput)
#----------------------------------------------------------------------
def hotcopy_dot(sbox):
"'svnadmin hotcopy PATH .'"
sbox.build()
backup_dir, backup_url = sbox.add_repo_path('backup')
os.mkdir(backup_dir)
cwd = os.getcwd()
os.chdir(backup_dir)
svntest.actions.run_and_verify_svnadmin(
None, [],
"hotcopy", os.path.join(cwd, sbox.repo_dir), '.')
os.chdir(cwd)
if svntest.main.is_fs_type_fsfs():
check_hotcopy_fsfs(sbox.repo_dir, backup_dir)
if svntest.main.is_fs_type_bdb():
check_hotcopy_bdb(sbox.repo_dir, backup_dir)
if svntest.main.is_fs_type_fsx():
check_hotcopy_fsx(sbox.repo_dir, backup_dir)
#----------------------------------------------------------------------
# This test is redundant for FSFS. The hotcopy_dot and hotcopy_incremental
# tests cover this check for FSFS already.
@SkipUnless(svntest.main.is_fs_type_bdb)
def hotcopy_format(sbox):
"'svnadmin hotcopy' checking db/format file"
sbox.build()
backup_dir, backup_url = sbox.add_repo_path('backup')
exit_code, output, errput = svntest.main.run_svnadmin("hotcopy",
sbox.repo_dir,
backup_dir)
if errput:
logger.warn("Error: hotcopy failed")
raise svntest.Failure
# verify that the db/format files are the same
fp = open(os.path.join(sbox.repo_dir, "db", "format"))
contents1 = fp.read()
fp.close()
fp2 = open(os.path.join(backup_dir, "db", "format"))
contents2 = fp2.read()
fp2.close()
if contents1 != contents2:
logger.warn("Error: db/format file contents do not match after hotcopy")
raise svntest.Failure
#----------------------------------------------------------------------
def setrevprop(sbox):
"setlog, setrevprop, delrevprop; bypass hooks"
sbox.build()
# Try a simple log property modification.
iota_path = os.path.join(sbox.wc_dir, "iota")
mu_path = sbox.ospath('A/mu')
svntest.actions.run_and_verify_svnadmin([], [],
"setlog", sbox.repo_dir, "-r0",
"--bypass-hooks",
iota_path)
# Make sure it fails without --bypass-hooks. (We haven't called
# svntest.actions.enable_revprop_changes().)
#
# Note that we attempt to set the log message to a different value than the
# successful call.
svntest.actions.run_and_verify_svnadmin([], svntest.verify.AnyOutput,
"setlog", sbox.repo_dir, "-r0",
mu_path)
# Verify that the revprop value matches what we set when retrieved
# through the client.
svntest.actions.run_and_verify_svn([ "This is the file 'iota'.\n", "\n" ],
[], "propget", "--revprop", "-r0",
"svn:log", sbox.wc_dir)
# Try an author property modification.
foo_path = os.path.join(sbox.wc_dir, "foo")
svntest.main.file_write(foo_path, "foo")
exit_code, output, errput = svntest.main.run_svnadmin("setrevprop",
sbox.repo_dir,
"-r0", "svn:author",
foo_path)
if errput:
logger.warn("Error: 'setrevprop' failed")
raise svntest.Failure
# Verify that the revprop value matches what we set when retrieved
# through the client.
svntest.actions.run_and_verify_svn([ "foo\n" ], [], "propget",
"--revprop", "-r0", "svn:author",
sbox.wc_dir)
# Delete the property.
svntest.actions.run_and_verify_svnadmin([], [],
"delrevprop", "-r0", sbox.repo_dir,
"svn:author")
svntest.actions.run_and_verify_svnlook([], ".*E200017.*svn:author.*",
"propget", "--revprop", "-r0",
sbox.repo_dir, "svn:author")
def verify_windows_paths_in_repos(sbox):
"verify a repository containing paths like 'c:hi'"
# setup a repo with a directory 'c:hi'
sbox.build(create_wc = False)
repo_url = sbox.repo_url
chi_url = sbox.repo_url + '/c:hi'
svntest.actions.run_and_verify_svn(None, [],
'mkdir', '-m', 'log_msg',
chi_url)
exit_code, output, errput = svntest.main.run_svnadmin("verify",
sbox.repo_dir)
if errput:
raise SVNUnexpectedStderr(errput)
  # Unfortunately, some backends need to do more checks than others,
  # resulting in different progress output.
if svntest.main.is_fs_log_addressing():
svntest.verify.compare_and_display_lines(
"Error while running 'svnadmin verify'.",
'STDOUT', ["* Verifying metadata at revision 0 ...\n",
"* Verifying repository metadata ...\n",
"* Verified revision 0.\n",
"* Verified revision 1.\n",
"* Verified revision 2.\n"], output)
elif svntest.main.fs_has_rep_sharing() and not svntest.main.is_fs_type_bdb():
svntest.verify.compare_and_display_lines(
"Error while running 'svnadmin verify'.",
'STDOUT', ["* Verifying repository metadata ...\n",
"* Verified revision 0.\n",
"* Verified revision 1.\n",
"* Verified revision 2.\n"], output)
else:
svntest.verify.compare_and_display_lines(
"Error while running 'svnadmin verify'.",
'STDOUT', ["* Verified revision 0.\n",
"* Verified revision 1.\n",
"* Verified revision 2.\n"], output)
#----------------------------------------------------------------------
# Returns the filename of the rev or revprop file (according to KIND)
# numbered REV in REPO_DIR, which must be in the first shard if we're
# using a sharded repository.
def fsfs_file(repo_dir, kind, rev):
if svntest.main.options.server_minor_version >= 5:
if svntest.main.options.fsfs_sharding is None:
if svntest.main.is_fs_type_fsx():
rev = 'r' + rev
return os.path.join(repo_dir, 'db', kind, '0', rev)
else:
shard = int(rev) // svntest.main.options.fsfs_sharding
if svntest.main.is_fs_type_fsx():
rev = 'r' + rev
path = os.path.join(repo_dir, 'db', kind, str(shard), rev)
if svntest.main.options.fsfs_packing is None or kind == 'revprops':
# we don't pack revprops
return path
elif os.path.exists(path):
# rev exists outside a pack file.
return path
else:
# didn't find the plain file; assume it's in a pack file
return os.path.join(repo_dir, 'db', kind, ('%d.pack' % shard), 'pack')
else:
return os.path.join(repo_dir, 'db', kind, rev)
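# For reference, fsfs_file() returns paths of the following shapes (sketch,
# assuming --fsfs-sharding=2 and revision 3):
#   unsharded:       <repo>/db/revs/3
#   sharded, plain:  <repo>/db/revs/1/3       (FSX: .../db/revs/1/r3)
#   sharded, packed: <repo>/db/revs/1.pack/pack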
@SkipUnless(svntest.main.is_fs_type_fsfs)
def verify_incremental_fsfs(sbox):
"""svnadmin verify detects corruption dump can't"""
if svntest.main.options.fsfs_version is not None and \
svntest.main.options.fsfs_version not in [4, 6]:
raise svntest.Skip("Unsupported prepackaged repository version")
# setup a repo with a directory 'c:hi'
# use physical addressing as this is hard to provoke with logical addressing
sbox.build(create_wc = False,
minor_version = min(svntest.main.options.server_minor_version,8))
repo_url = sbox.repo_url
E_url = sbox.repo_url + '/A/B/E'
# Create A/B/E/bravo in r2.
svntest.actions.run_and_verify_svn(None, [],
'mkdir', '-m', 'log_msg',
E_url + '/bravo')
# Corrupt r2's reference to A/C by replacing "dir 7-1.0.r1/1568" with
# "dir 7-1.0.r1/1569" (increment offset) and updating the checksum for
# this directory listing to "c9b5a2d26473a4e28088673dda9df804" so that
# the listing itself is valid.
r2 = fsfs_file(sbox.repo_dir, 'revs', '2')
if r2.endswith('pack'):
raise svntest.Skip("Test doesn't handle packed revisions")
fp = open(r2, 'wb')
fp.write(b"""id: 0-2.0.r2/0
type: dir
count: 0
cpath: /A/B/E/bravo
copyroot: 0 /
PLAIN
K 5
alpha
V 17
file 3-1.0.r1/719
K 4
beta
V 17
file 4-1.0.r1/840
K 5
bravo
V 14
dir 0-2.0.r2/0
END
ENDREP
id: 2-1.0.r2/181
type: dir
pred: 2-1.0.r1/1043
count: 1
text: 2 69 99 99 f63001f7fddd1842d8891474d0982111
cpath: /A/B/E
copyroot: 0 /
PLAIN
K 1
E
V 16
dir 2-1.0.r2/181
K 1
F
V 17
dir 5-1.0.r1/1160
K 6
lambda
V 17
file 6-1.0.r1/597
END
ENDREP
id: 1-1.0.r2/424
type: dir
pred: 1-1.0.r1/1335
count: 1
text: 2 316 95 95 bccb66379b4f825dac12b50d80211bae
cpath: /A/B
copyroot: 0 /
PLAIN
K 1
B
V 16
dir 1-1.0.r2/424
K 1
C
V 17
dir 7-1.0.r1/1569
K 1
D
V 17
dir 8-1.0.r1/3061
K 2
mu
V 18
file i-1.0.r1/1451
END
ENDREP
id: 0-1.0.r2/692
type: dir
pred: 0-1.0.r1/3312
count: 1
text: 2 558 121 121 c9b5a2d26473a4e28088673dda9df804
cpath: /A
copyroot: 0 /
PLAIN
K 1
A
V 16
dir 0-1.0.r2/692
K 4
iota
V 18
file j-1.0.r1/3428
END
ENDREP
id: 0.0.r2/904
type: dir
pred: 0.0.r1/3624
count: 2
text: 2 826 65 65 e44e4151d0d124533338619f082c8c9a
cpath: /
copyroot: 0 /
_0.0.t1-1 add false false /A/B/E/bravo
904 1031
""")
fp.close()
exit_code, output, errput = svntest.main.run_svnadmin("verify", "-r2",
sbox.repo_dir)
svntest.verify.verify_outputs(
message=None, actual_stdout=output, actual_stderr=errput,
expected_stdout=None,
expected_stderr=".*Found malformed header '[^']*' in revision file"
"|.*Missing id field in node-rev.*")
#----------------------------------------------------------------------
# Helper for two test functions.
def corrupt_and_recover_db_current(sbox, minor_version=None):
"""Build up a MINOR_VERSION sandbox and test different recovery scenarios
with missing, out-of-date or even corrupt db/current files. Recovery should
behave the same way with all values of MINOR_VERSION, hence this helper
containing the common code that allows us to check it."""
sbox.build(minor_version=minor_version)
current_path = os.path.join(sbox.repo_dir, 'db', 'current')
# Commit up to r3, so we can test various recovery scenarios.
svntest.main.file_append(os.path.join(sbox.wc_dir, 'iota'), 'newer line\n')
sbox.simple_commit(message='log msg')
svntest.main.file_append(os.path.join(sbox.wc_dir, 'iota'), 'newest line\n')
sbox.simple_commit(message='log msg')
# Remember the contents of the db/current file.
expected_current_contents = open(current_path).read()
# Move aside the current file for r3.
os.rename(os.path.join(sbox.repo_dir, 'db','current'),
os.path.join(sbox.repo_dir, 'db','was_current'))
# Run 'svnadmin recover' and check that the current file is recreated.
exit_code, output, errput = svntest.main.run_svnadmin("recover",
sbox.repo_dir)
if errput:
raise SVNUnexpectedStderr(errput)
actual_current_contents = open(current_path).read()
svntest.verify.compare_and_display_lines(
"Contents of db/current is unexpected.",
'db/current', expected_current_contents, actual_current_contents)
# Now try writing db/current to be one rev lower than it should be.
svntest.main.file_write(current_path, '2\n')
# Run 'svnadmin recover' and check that the current file is fixed.
exit_code, output, errput = svntest.main.run_svnadmin("recover",
sbox.repo_dir)
if errput:
raise SVNUnexpectedStderr(errput)
actual_current_contents = open(current_path).read()
svntest.verify.compare_and_display_lines(
"Contents of db/current is unexpected.",
'db/current', expected_current_contents, actual_current_contents)
# Now try writing db/current to be *two* revs lower than it should be.
svntest.main.file_write(current_path, '1\n')
# Run 'svnadmin recover' and check that the current file is fixed.
exit_code, output, errput = svntest.main.run_svnadmin("recover",
sbox.repo_dir)
if errput:
raise SVNUnexpectedStderr(errput)
actual_current_contents = open(current_path).read()
svntest.verify.compare_and_display_lines(
"Contents of db/current is unexpected.",
'db/current', expected_current_contents, actual_current_contents)
# Now try writing db/current to be fish revs lower than it should be.
#
# Note: I'm not actually sure it's wise to recover from this, but
# detecting it would require rewriting fs_fs.c:get_youngest() to
# check the actual contents of its buffer, since atol() will happily
# convert "fish" to 0.
svntest.main.file_write(current_path, 'fish\n')
# Run 'svnadmin recover' and check that the current file is fixed.
exit_code, output, errput = svntest.main.run_svnadmin("recover",
sbox.repo_dir)
if errput:
raise SVNUnexpectedStderr(errput)
actual_current_contents = open(current_path).read()
svntest.verify.compare_and_display_lines(
"Contents of db/current is unexpected.",
'db/current', expected_current_contents, actual_current_contents)
@SkipUnless(svntest.main.is_fs_type_fsfs)
def fsfs_recover_db_current(sbox):
"fsfs recover db/current"
corrupt_and_recover_db_current(sbox)
@SkipUnless(svntest.main.is_fs_type_fsfs)
def fsfs_recover_old_db_current(sbox):
"fsfs recover db/current --compatible-version=1.3"
# Around trunk@1573728, 'svnadmin recover' wrongly errored out
# for the --compatible-version=1.3 repositories with missing or
# invalid db/current file:
# svnadmin: E160006: No such revision 1
corrupt_and_recover_db_current(sbox, minor_version=3)
#----------------------------------------------------------------------
@Issue(2983)
def load_with_parent_dir(sbox):
"'svnadmin load --parent-dir' reparents mergeinfo"
## See https://issues.apache.org/jira/browse/SVN-2983. ##
sbox.build(empty=True)
dumpfile_location = os.path.join(os.path.dirname(sys.argv[0]),
'svnadmin_tests_data',
'mergeinfo_included.dump')
dumpfile = svntest.actions.load_dumpfile(dumpfile_location)
# Create 'sample' dir in sbox.repo_url, and load the dump stream there.
svntest.actions.run_and_verify_svn(['Committing transaction...\n',
'Committed revision 1.\n'],
[], "mkdir", sbox.repo_url + "/sample",
"-m", "Create sample dir")
load_dumpstream(sbox, dumpfile, '--parent-dir', '/sample')
# Verify the svn:mergeinfo properties for '--parent-dir'
svntest.actions.run_and_verify_svn([sbox.repo_url +
"/sample/branch - /sample/trunk:5-7\n"],
[], 'propget', 'svn:mergeinfo', '-R',
sbox.repo_url + '/sample/branch')
svntest.actions.run_and_verify_svn([sbox.repo_url +
"/sample/branch1 - " +
"/sample/branch:6-9\n"],
[], 'propget', 'svn:mergeinfo', '-R',
sbox.repo_url + '/sample/branch1')
# Create 'sample-2' dir in sbox.repo_url, and load the dump stream again.
# This time, don't include a leading slash on the --parent-dir argument.
# See issue #3547.
svntest.actions.run_and_verify_svn(['Committing transaction...\n',
'Committed revision 11.\n'],
[], "mkdir", sbox.repo_url + "/sample-2",
"-m", "Create sample-2 dir")
load_dumpstream(sbox, dumpfile, '--parent-dir', 'sample-2')
# Verify the svn:mergeinfo properties for '--parent-dir'.
svntest.actions.run_and_verify_svn([sbox.repo_url +
"/sample-2/branch - " +
"/sample-2/trunk:15-17\n"],
[], 'propget', 'svn:mergeinfo', '-R',
sbox.repo_url + '/sample-2/branch')
svntest.actions.run_and_verify_svn([sbox.repo_url +
"/sample-2/branch1 - " +
"/sample-2/branch:16-19\n"],
[], 'propget', 'svn:mergeinfo', '-R',
sbox.repo_url + '/sample-2/branch1')
#----------------------------------------------------------------------
def set_uuid(sbox):
"test 'svnadmin setuuid'"
sbox.build(create_wc=False)
# Squirrel away the original repository UUID.
exit_code, output, errput = svntest.main.run_svnlook('uuid', sbox.repo_dir)
if errput:
raise SVNUnexpectedStderr(errput)
orig_uuid = output[0].rstrip()
# Try setting a new, bogus UUID.
svntest.actions.run_and_verify_svnadmin(None, '^.*Malformed UUID.*$',
'setuuid', sbox.repo_dir, 'abcdef')
# Try generating a brand new UUID.
svntest.actions.run_and_verify_svnadmin([], None,
'setuuid', sbox.repo_dir)
exit_code, output, errput = svntest.main.run_svnlook('uuid', sbox.repo_dir)
if errput:
raise SVNUnexpectedStderr(errput)
new_uuid = output[0].rstrip()
if new_uuid == orig_uuid:
logger.warn("Error: new UUID matches the original one")
raise svntest.Failure
# Now, try setting the UUID back to the original value.
svntest.actions.run_and_verify_svnadmin([], None,
'setuuid', sbox.repo_dir, orig_uuid)
exit_code, output, errput = svntest.main.run_svnlook('uuid', sbox.repo_dir)
if errput:
raise SVNUnexpectedStderr(errput)
new_uuid = output[0].rstrip()
if new_uuid != orig_uuid:
logger.warn("Error: new UUID doesn't match the original one")
raise svntest.Failure
#----------------------------------------------------------------------
@Issue(3020)
def reflect_dropped_renumbered_revs(sbox):
"reflect dropped renumbered revs in svn:mergeinfo"
## See https://issues.apache.org/jira/browse/SVN-3020. ##
sbox.build(empty=True)
dumpfile_location = os.path.join(os.path.dirname(sys.argv[0]),
'svndumpfilter_tests_data',
'with_merges.dump')
dumpfile = svntest.actions.load_dumpfile(dumpfile_location)
# Create 'toplevel' dir in sbox.repo_url
svntest.actions.run_and_verify_svn(['Committing transaction...\n',
'Committed revision 1.\n'],
[], "mkdir", sbox.repo_url + "/toplevel",
"-m", "Create toplevel dir")
# Load the dump stream in sbox.repo_url
load_dumpstream(sbox, dumpfile)
# Load the dump stream in toplevel dir
load_dumpstream(sbox, dumpfile, '--parent-dir', '/toplevel')
# Verify the svn:mergeinfo properties
url = sbox.repo_url
expected_output = svntest.verify.UnorderedOutput([
url + "/trunk - /branch1:5-9\n",
url + "/toplevel/trunk - /toplevel/branch1:14-18\n",
])
svntest.actions.run_and_verify_svn(expected_output, [],
'propget', 'svn:mergeinfo', '-R',
sbox.repo_url)
#----------------------------------------------------------------------
@SkipUnless(svntest.main.is_fs_type_fsfs)
@Issue(2992)
def fsfs_recover_handle_missing_revs_or_revprops_file(sbox):
"""fsfs recovery checks missing revs / revprops files"""
# Set up a repository containing the greek tree.
sbox.build()
# Commit up to r3, so we can test various recovery scenarios.
svntest.main.file_append(os.path.join(sbox.wc_dir, 'iota'), 'newer line\n')
sbox.simple_commit(message='log msg')
svntest.main.file_append(os.path.join(sbox.wc_dir, 'iota'), 'newest line\n')
sbox.simple_commit(message='log msg')
rev_3 = fsfs_file(sbox.repo_dir, 'revs', '3')
rev_was_3 = rev_3 + '.was'
# Move aside the revs file for r3.
os.rename(rev_3, rev_was_3)
# Verify 'svnadmin recover' fails when youngest has a revprops
# file but no revs file.
exit_code, output, errput = svntest.main.run_svnadmin("recover",
sbox.repo_dir)
if svntest.verify.verify_outputs(
"Output of 'svnadmin recover' is unexpected.", None, errput, None,
".*Expected current rev to be <= %s but found 3"
# For example, if svntest.main.fsfs_sharding == 2, then rev_3 would
# be the pack file for r2:r3, and the error message would report "<= 1".
% (rev_3.endswith('pack') and '[012]' or '2')):
raise svntest.Failure
# Restore the r3 revs file, thus repairing the repository.
os.rename(rev_was_3, rev_3)
revprop_3 = fsfs_file(sbox.repo_dir, 'revprops', '3')
revprop_was_3 = revprop_3 + '.was'
# Move aside the revprops file for r3.
os.rename(revprop_3, revprop_was_3)
# Verify 'svnadmin recover' fails when youngest has a revs file
# but no revprops file (issue #2992).
exit_code, output, errput = svntest.main.run_svnadmin("recover",
sbox.repo_dir)
if svntest.verify.verify_outputs(
"Output of 'svnadmin recover' is unexpected.", None, errput, None,
".*Revision 3 has a revs file but no revprops file"):
raise svntest.Failure
# Restore the r3 revprops file, thus repairing the repository.
os.rename(revprop_was_3, revprop_3)
# Change revprops file to a directory for revision 3
os.rename(revprop_3, revprop_was_3)
os.mkdir(revprop_3)
# Verify 'svnadmin recover' fails when youngest has a revs file
# but revprops file is not a file (another aspect of issue #2992).
exit_code, output, errput = svntest.main.run_svnadmin("recover",
sbox.repo_dir)
if svntest.verify.verify_outputs(
"Output of 'svnadmin recover' is unexpected.", None, errput, None,
".*Revision 3 has a non-file where its revprops file should be.*"):
raise svntest.Failure
# Restore the r3 revprops file, thus repairing the repository.
os.rmdir(revprop_3)
os.rename(revprop_was_3, revprop_3)
#----------------------------------------------------------------------
@Skip(svntest.main.tests_use_prepackaged_repository)
def create_in_repo_subdir(sbox):
"'svnadmin create /path/to/repo/subdir'"
sbox.build(create_wc=False, empty=True)
repo_dir = sbox.repo_dir
success = False
try:
# This should fail
subdir = os.path.join(repo_dir, 'Z')
svntest.main.create_repos(subdir)
except svntest.main.SVNRepositoryCreateFailure:
success = True
if not success:
raise svntest.Failure
cwd = os.getcwd()
success = False
try:
# This should fail, too
subdir = os.path.join(repo_dir, 'conf')
os.chdir(subdir)
svntest.main.create_repos('Z')
os.chdir(cwd)
except svntest.main.SVNRepositoryCreateFailure:
success = True
os.chdir(cwd)
if not success:
raise svntest.Failure
@SkipUnless(svntest.main.is_fs_type_fsfs)
@SkipDumpLoadCrossCheck()
def verify_with_invalid_revprops(sbox):
"svnadmin verify detects invalid revprops file"
sbox.build(create_wc=False, empty=True)
repo_dir = sbox.repo_dir
# Run a test verify
exit_code, output, errput = svntest.main.run_svnadmin("verify",
sbox.repo_dir)
if errput:
raise SVNUnexpectedStderr(errput)
if svntest.verify.verify_outputs(
"Output of 'svnadmin verify' is unexpected.", None, output, None,
".*Verified revision 0*"):
raise svntest.Failure
# Empty the revprops file
rp_file = open(os.path.join(repo_dir, 'db', 'revprops', '0', '0'), 'w')
rp_file.write('')
rp_file.close()
exit_code, output, errput = svntest.main.run_svnadmin("verify",
sbox.repo_dir)
if svntest.verify.verify_outputs(
"Output of 'svnadmin verify' is unexpected.", None, errput, None,
".*svnadmin: E200002:.*"):
raise svntest.Failure
#----------------------------------------------------------------------
# Even *more* testing for issue #3020 'Reflect dropped/renumbered
# revisions in svn:mergeinfo data during svnadmin load'
#
# Full or incremental dump-load cycles should result in the same
# mergeinfo in the loaded repository.
#
# Given a repository 'SOURCE-REPOS' with mergeinfo, and a repository
# 'TARGET-REPOS' (which may or may not be empty), either of the following
# methods to move 'SOURCE-REPOS' to 'TARGET-REPOS' should result in
# the same mergeinfo on 'TARGET-REPOS':
#
# 1) Dump -r1:HEAD from 'SOURCE-REPOS' and load it in one shot to
# 'TARGET-REPOS'.
#
# 2) Dump 'SOURCE-REPOS' in a series of incremental dumps and load
# each of them to 'TARGET-REPOS'.
#
# See https://issues.apache.org/jira/browse/SVN-3020#desc13
@Issue(3020)
def dont_drop_valid_mergeinfo_during_incremental_loads(sbox):
"don't filter mergeinfo revs from incremental dump"
# Create an empty repos.
sbox.build(empty=True)
# PART 1: Load a full dump to an empty repository.
#
# The test repository used here, 'mergeinfo_included_full.dump', is
# this repos:
# __________________________________________
# | |
# | ____________________________|_____
# | | | |
# trunk---r2---r3-----r5---r6-------r8---r9---------------> | |
# r1 | | | | | |
# initial | | | |______ | |
# import copy | copy | merge merge
# | | | merge (r5) (r8)
# | | | (r9) | |
# | | | | | |
# | | V V | |
# | | branches/B2-------r11---r12----> | |
# | | r7 |____| | |
# | | | | |
# | merge |___ | |
# | (r6) | | |
# | |_________________ | | |
# | | merge | |
# | | (r11-12) | |
# | | | | |
# V V V | |
# branches/B1-------------------r10--------r13--> | |
# r4 | |
# | V V
# branches/B1/B/E------------------------------r14---r15->
#
#
# The mergeinfo on this repos@15 is:
#
# Properties on 'branches/B1':
# svn:mergeinfo
# /branches/B2:11-12
# /trunk:6,9
# Properties on 'branches/B1/B/E':
# svn:mergeinfo
# /branches/B2/B/E:11-12
# /trunk/B/E:5-6,8-9
# Properties on 'branches/B2':
# svn:mergeinfo
# /trunk:9
dumpfile_location = os.path.join(os.path.dirname(sys.argv[0]),
'svnadmin_tests_data',
'mergeinfo_included_full.dump')
dumpfile_full = svntest.actions.load_dumpfile(dumpfile_location)
load_dumpstream(sbox, dumpfile_full, '--ignore-uuid')
# Check that the mergeinfo is as expected.
url = sbox.repo_url + '/branches/'
expected_output = svntest.verify.UnorderedOutput([
url + "B1 - /branches/B2:11-12\n",
"/trunk:6,9\n",
url + "B2 - /trunk:9\n",
url + "B1/B/E - /branches/B2/B/E:11-12\n",
"/trunk/B/E:5-6,8-9\n"])
svntest.actions.run_and_verify_svn(expected_output, [],
'propget', 'svn:mergeinfo', '-R',
sbox.repo_url)
  # PART 2: Load a series of incremental dumps to an empty repository.
#
# Incrementally dump the repository into three dump files:
dump_file_r1_10 = sbox.get_tempname("r1-10-dump")
exit_code, output, errput = svntest.main.run_svnadmin(
'dump', sbox.repo_dir, '-r1:10')
dump_fp = open(dump_file_r1_10, 'wb')
dump_fp.writelines(output)
dump_fp.close()
dump_file_r11_13 = sbox.get_tempname("r11-13-dump")
exit_code, output, errput = svntest.main.run_svnadmin(
'dump', sbox.repo_dir, '--incremental', '-r11:13')
dump_fp = open(dump_file_r11_13, 'wb')
dump_fp.writelines(output)
dump_fp.close()
dump_file_r14_15 = sbox.get_tempname("r14-15-dump")
exit_code, output, errput = svntest.main.run_svnadmin(
'dump', sbox.repo_dir, '--incremental', '-r14:15')
dump_fp = open(dump_file_r14_15, 'wb')
dump_fp.writelines(output)
dump_fp.close()
# Blow away the current repos and create an empty one in its place.
sbox.build(empty=True)
# Load the three incremental dump files in sequence.
load_dumpstream(sbox, svntest.actions.load_dumpfile(dump_file_r1_10),
'--ignore-uuid')
load_dumpstream(sbox, svntest.actions.load_dumpfile(dump_file_r11_13),
'--ignore-uuid')
load_dumpstream(sbox, svntest.actions.load_dumpfile(dump_file_r14_15),
'--ignore-uuid')
# Check the mergeinfo, we use the same expected output as before,
# as it (duh!) should be exactly the same as when we loaded the
# repos in one shot.
svntest.actions.run_and_verify_svn(expected_output, [],
'propget', 'svn:mergeinfo', '-R',
sbox.repo_url)
# Now repeat the above two scenarios, but with an initially non-empty target
# repository. First, try the full dump-load in one shot.
#
  # PART 3: Load a full dump to a non-empty repository.
#
# Reset our sandbox.
sbox.build(empty=True)
# Load this skeleton repos into the empty target:
#
# Projects/ (Added r1)
# README (Added r2)
# Project-X (Added r3)
# Project-Y (Added r4)
# Project-Z (Added r5)
# docs/ (Added r6)
# README (Added r6)
dumpfile_location = os.path.join(os.path.dirname(sys.argv[0]),
'svnadmin_tests_data',
'skeleton_repos.dump')
dumpfile_skeleton = svntest.actions.load_dumpfile(dumpfile_location)
load_dumpstream(sbox, dumpfile_skeleton, '--ignore-uuid')
# Load 'svnadmin_tests_data/mergeinfo_included_full.dump' in one shot:
load_dumpstream(sbox, dumpfile_full, '--parent-dir', 'Projects/Project-X',
'--ignore-uuid')
# Check that the mergeinfo is as expected. This is exactly the
# same expected mergeinfo we previously checked, except that the
  # revisions are all offset +6 to reflect the revisions already in
# the skeleton target before we began loading and the leading source
# paths are adjusted by the --parent-dir:
#
# Properties on 'branches/B1':
# svn:mergeinfo
# /Projects/Project-X/branches/B2:17-18
# /Projects/Project-X/trunk:12,15
# Properties on 'branches/B1/B/E':
# svn:mergeinfo
# /Projects/Project-X/branches/B2/B/E:17-18
# /Projects/Project-X/trunk/B/E:11-12,14-15
# Properties on 'branches/B2':
# svn:mergeinfo
# /Projects/Project-X/trunk:15
url = sbox.repo_url + '/Projects/Project-X/branches/'
expected_output = svntest.verify.UnorderedOutput([
url + "B1 - /Projects/Project-X/branches/B2:17-18\n",
"/Projects/Project-X/trunk:12,15\n",
url + "B2 - /Projects/Project-X/trunk:15\n",
url + "B1/B/E - /Projects/Project-X/branches/B2/B/E:17-18\n",
"/Projects/Project-X/trunk/B/E:11-12,14-15\n"])
svntest.actions.run_and_verify_svn(expected_output, [],
'propget', 'svn:mergeinfo', '-R',
sbox.repo_url)
# PART 4: Load a series of incremental dumps to a non-empty repository.
#
# Reset our sandbox.
sbox.build(empty=True)
# Load this skeleton repos into the empty target:
load_dumpstream(sbox, dumpfile_skeleton, '--ignore-uuid')
# Load the three incremental dump files in sequence.
load_dumpstream(sbox, svntest.actions.load_dumpfile(dump_file_r1_10),
'--parent-dir', 'Projects/Project-X', '--ignore-uuid')
load_dumpstream(sbox, svntest.actions.load_dumpfile(dump_file_r11_13),
'--parent-dir', 'Projects/Project-X', '--ignore-uuid')
load_dumpstream(sbox, svntest.actions.load_dumpfile(dump_file_r14_15),
'--parent-dir', 'Projects/Project-X', '--ignore-uuid')
# Check the resulting mergeinfo. We expect the exact same results
# as Part 3.
# See https://issues.apache.org/jira/browse/SVN-3020#desc16.
svntest.actions.run_and_verify_svn(expected_output, [],
'propget', 'svn:mergeinfo', '-R',
sbox.repo_url)
@SkipUnless(svntest.main.is_posix_os)
@Issue(2591)
def hotcopy_symlink(sbox):
"'svnadmin hotcopy' replicates symlink"
## See https://issues.apache.org/jira/browse/SVN-2591. ##
# Create a repository.
sbox.build(create_wc=False, empty=True)
original_repo = sbox.repo_dir
hotcopy_repo, hotcopy_url = sbox.add_repo_path('hotcopy')
# Create a file, a dir and a missing path outside the repository.
svntest.main.safe_rmtree(sbox.wc_dir, 1)
os.mkdir(sbox.wc_dir)
external_file_path = os.path.join(sbox.wc_dir, "file")
svntest.main.file_write(external_file_path, "An existing file")
external_dir_path = os.path.join(sbox.wc_dir, "dir")
os.mkdir(external_dir_path)
external_missing_path = os.path.join(sbox.wc_dir, "missing")
# Symlink definitions: base name -> target relpath.
# Check both existing and nonexistent targets.
# Check targets both within and outside the source repository.
symlinks = [
('in_repos_file', 'format'),
('in_repos_dir', 'conf'),
('in_repos_missing', 'missing'),
('external_file', os.path.join('..', '..', '..', external_file_path)),
('external_dir', os.path.join('..', '..', '..', external_dir_path)),
('external_missing', os.path.join('..', '..', '..', external_missing_path)),
]
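# Inside a freshly created repository, 'format' is a file and 'conf' is a
# directory, while 'missing' does not exist; this gives us one target of
# each kind inside the repository.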
# Create symlinks within the repository directory.
for name, target_relpath in symlinks:
target_path = os.path.join(original_repo, target_relpath)
target_abspath = os.path.abspath(target_path)
# Create two symlinks to each target - one relative, one absolute.
symlink_path = os.path.join(original_repo, name)
os.symlink(target_relpath, symlink_path + '_rel')
os.symlink(target_abspath, symlink_path + '_abs')
svntest.actions.run_and_verify_svnadmin(
None, [],
"hotcopy", original_repo, hotcopy_repo)
# Check if the symlinks were copied correctly.
for name, target_relpath in symlinks:
target_path = os.path.join(original_repo, target_relpath)
target_abspath = os.path.abspath(target_path)
# Check two symlinks to each target - one relative, one absolute.
symlink_path = os.path.join(hotcopy_repo, name)
if os.readlink(symlink_path + '_rel') != target_relpath:
raise svntest.Failure
if os.readlink(symlink_path + '_abs') != target_abspath:
raise svntest.Failure
def load_bad_props(sbox):
"svnadmin load with invalid svn: props"
dump_str = b"""SVN-fs-dump-format-version: 2
UUID: dc40867b-38f6-0310-9f5f-f81aa277e06f
Revision-number: 0
Prop-content-length: 56
Content-length: 56
K 8
svn:date
V 27
2005-05-03T19:09:41.129900Z
PROPS-END
Revision-number: 1
Prop-content-length: 99
Content-length: 99
K 7
svn:log
V 3
\n\r\n
K 10
svn:author
V 2
pl
K 8
svn:date
V 27
2005-05-03T19:10:19.975578Z
PROPS-END
Node-path: file
Node-kind: file
Node-action: add
Prop-content-length: 10
Text-content-length: 5
Text-content-md5: e1cbb0c3879af8347246f12c559a86b5
Content-length: 15
PROPS-END
text
"""
sbox.build(empty=True)
# Try to load the dumpstream, expecting a failure (because of mixed EOLs).
exp_err = svntest.verify.RegexListOutput(['svnadmin: E125005:.*',
'svnadmin: E125005:.*',
'svnadmin: E125017:.*'],
match_all=False)
load_and_verify_dumpstream(sbox, [], exp_err, dumpfile_revisions,
False, dump_str, '--ignore-uuid')
# Now try it again bypassing prop validation. (This interface takes
# care of the removal and recreation of the original repository.)
svntest.actions.load_repo(sbox, dump_str=dump_str,
bypass_prop_validation=True)
# Getting the property should fail.
svntest.actions.run_and_verify_svn(None, 'svn: E135000: ',
'pg', 'svn:log', '--revprop', '-r1',
sbox.repo_url)
# Now try it again with prop normalization.
svntest.actions.load_repo(sbox, dump_str=dump_str,
bypass_prop_validation=False,
normalize_props=True)
# We should get the expected property value.
exit_code, output, _ = svntest.main.run_svn(None, 'pg', 'svn:log',
'--revprop', '-r1',
'--no-newline',
sbox.repo_url)
svntest.verify.verify_exit_code(None, exit_code, 0)
if output != ['\n', '\n']:
raise svntest.Failure("Unexpected property value %s" % output)
# This test intentionally corrupts a revision and assumes an FSFS
# repository. If you can make it work with BDB please do so.
# However, the verification triggered by this test is in the repos layer
# so it will trigger with either backend anyway.
@SkipUnless(svntest.main.is_fs_type_fsfs)
@SkipUnless(svntest.main.server_enforces_UTF8_fspaths_in_verify)
def verify_non_utf8_paths(sbox):
"svnadmin verify with non-UTF-8 paths"
if svntest.main.options.fsfs_version is not None and \
svntest.main.options.fsfs_version not in [4, 6]:
raise svntest.Skip("Unsupported prepackaged repository version")
dumpfile = clean_dumpfile()
# Corruption only possible in physically addressed revisions created
# with pre-1.6 servers.
sbox.build(empty=True,
minor_version=min(svntest.main.options.server_minor_version,8))
# Load the dumpstream
load_and_verify_dumpstream(sbox, [], [], dumpfile_revisions, False,
dumpfile, '--ignore-uuid')
# Replace the path 'A' in revision 1 with a non-UTF-8 sequence.
# This has been observed in repositories in the wild, though Subversion
# 1.6 and greater should prevent such filenames from entering the repository.
path1 = os.path.join(sbox.repo_dir, "db", "revs", "0", "1")
path_new = os.path.join(sbox.repo_dir, "db", "revs", "0", "1.new")
fp1 = open(path1, 'rb')
fp_new = open(path_new, 'wb')
for line in fp1.readlines():
if line == b"A\n":
# replace 'A' with a latin1 character -- the new path is not valid UTF-8
fp_new.write(b"\xE6\n")
elif line == b"text: 1 340 32 32 a6be7b4cf075fd39e6a99eb69a31232b\n":
# phys, PLAIN directories: fix up the representation checksum
fp_new.write(b"text: 1 340 32 32 f2e93e73272cac0f18fccf16f224eb93\n")
elif line == b"text: 1 340 44 32 a6be7b4cf075fd39e6a99eb69a31232b\n":
# phys, deltified directories: fix up the representation checksum
fp_new.write(b"text: 1 340 44 32 f2e93e73272cac0f18fccf16f224eb93\n")
elif line == b"cpath: /A\n":
# also fix up the 'created path' field
fp_new.write(b"cpath: /\xE6\n")
elif line == b"_0.0.t0-0 add-file true true /A\n":
# and another occurrence
fp_new.write(b"_0.0.t0-0 add-file true true /\xE6\n")
else:
fp_new.write(line)
fp1.close()
fp_new.close()
os.remove(path1)
os.rename(path_new, path1)
# Verify the repository, expecting failure
exit_code, output, errput = svntest.main.run_svnadmin("verify",
sbox.repo_dir)
svntest.verify.verify_outputs(
"Unexpected error while running 'svnadmin verify'.",
[], errput, None, ".*Path '.*' is not in UTF-8.*")
# Make sure the repository can still be dumped so that the
# encoding problem can be fixed in a dump/edit/load cycle.
expected_stderr = [
"* Dumped revision 0.\n",
"WARNING 0x0002: E160005: "
"While validating fspath '?\\E6': "
"Path '?\\E6' is not in UTF-8"
"\n",
"* Dumped revision 1.\n",
]
exit_code, output, errput = svntest.main.run_svnadmin("dump", sbox.repo_dir)
if svntest.verify.compare_and_display_lines(
"Output of 'svnadmin dump' is unexpected.",
'STDERR', expected_stderr, errput):
raise svntest.Failure
def test_lslocks_and_rmlocks(sbox):
"test 'svnadmin lslocks' and 'svnadmin rmlocks'"
sbox.build(create_wc=False)
iota_url = sbox.repo_url + '/iota'
lambda_url = sbox.repo_url + '/A/B/lambda'
exit_code, output, errput = svntest.main.run_svnadmin("lslocks",
sbox.repo_dir)
if exit_code or errput or output:
raise svntest.Failure("Error: 'lslocks' failed")
expected_output = svntest.verify.UnorderedRegexListOutput(
["'.*lambda' locked by user 'jrandom'.\n",
"'.*iota' locked by user 'jrandom'.\n"])
# Lock iota and A/B/lambda using svn client
svntest.actions.run_and_verify_svn(expected_output,
[], "lock", "-m", "Locking files",
iota_url, lambda_url)
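# Helper: the expected 'svnadmin lslocks' output block for a single
# locked path.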
def expected_output_list(path):
return [
"Path: " + path,
"UUID Token: opaquelocktoken:.*",
"Owner: jrandom",
"Created:.*",
"Expires:.*",
"Comment \(1 line\):",
"Locking files",
"\n", # empty line
]
# List all locks
exit_code, output, errput = svntest.main.run_svnadmin("lslocks",
sbox.repo_dir)
if errput:
raise SVNUnexpectedStderr(errput)
svntest.verify.verify_exit_code(None, exit_code, 0)
expected_output = svntest.verify.UnorderedRegexListOutput(
expected_output_list('/A/B/lambda') +
expected_output_list('/iota'))
svntest.verify.compare_and_display_lines('lslocks output mismatch',
'output',
expected_output, output)
# List lock in path /A
exit_code, output, errput = svntest.main.run_svnadmin("lslocks",
sbox.repo_dir,
"A")
if errput:
raise SVNUnexpectedStderr(errput)
expected_output = svntest.verify.RegexListOutput(
expected_output_list('/A/B/lambda'))
svntest.verify.compare_and_display_lines('lslocks output mismatch',
'output',
expected_output, output)
svntest.verify.verify_exit_code(None, exit_code, 0)
# Remove locks
exit_code, output, errput = svntest.main.run_svnadmin("rmlocks",
sbox.repo_dir,
"iota",
"A/B/lambda")
expected_output = UnorderedOutput(["Removed lock on '/iota'.\n",
"Removed lock on '/A/B/lambda'.\n"])
svntest.verify.verify_outputs(
"Unexpected output while running 'svnadmin rmlocks'.",
output, [], expected_output, None)
#----------------------------------------------------------------------
@Issue(3734)
def load_ranges(sbox):
"'svnadmin load --revision X:Y'"
## See https://issues.apache.org/jira/browse/SVN-3734. ##
sbox.build(empty=True)
dumpfile_location = os.path.join(os.path.dirname(sys.argv[0]),
'svnadmin_tests_data',
'skeleton_repos.dump')
dumplines = svntest.actions.load_dumpfile(dumpfile_location)
# Load our dumpfile, 2 revisions at a time, verifying that we have
# the correct youngest revision after each load.
load_dumpstream(sbox, dumplines, '-r0:2')
svntest.actions.run_and_verify_svnlook(['2\n'],
None, 'youngest', sbox.repo_dir)
load_dumpstream(sbox, dumplines, '-r3:4')
svntest.actions.run_and_verify_svnlook(['4\n'],
None, 'youngest', sbox.repo_dir)
load_dumpstream(sbox, dumplines, '-r5:6')
svntest.actions.run_and_verify_svnlook(['6\n'],
None, 'youngest', sbox.repo_dir)
# There are ordering differences in the property blocks, and pre-1.6
# repositories don't record SHA-1 checksums, so strip those lines from
# the expected dump.
if (svntest.main.options.server_minor_version < 6):
temp = []
for line in dumplines:
if not "Text-content-sha1:" in line:
temp.append(line)
expected_dump = UnorderedOutput(temp)
else:
expected_dump = UnorderedOutput(dumplines)
new_dumpdata = svntest.actions.run_and_verify_dump(sbox.repo_dir)
svntest.verify.compare_and_display_lines("Dump files", "DUMP",
expected_dump, new_dumpdata)
@SkipUnless(svntest.main.is_fs_type_fsfs)
def hotcopy_incremental(sbox):
"'svnadmin hotcopy --incremental PATH .'"
sbox.build()
backup_dir, backup_url = sbox.add_repo_path('backup')
os.mkdir(backup_dir)
cwd = os.getcwd()
for i in [1, 2, 3]:
os.chdir(backup_dir)
svntest.actions.run_and_verify_svnadmin(
None, [],
"hotcopy", "--incremental", os.path.join(cwd, sbox.repo_dir), '.')
os.chdir(cwd)
check_hotcopy_fsfs(sbox.repo_dir, backup_dir)
if i < 3:
sbox.simple_mkdir("newdir-%i" % i)
sbox.simple_commit()
@SkipUnless(svntest.main.is_fs_type_fsfs)
@SkipUnless(svntest.main.fs_has_pack)
def hotcopy_incremental_packed(sbox):
"'svnadmin hotcopy --incremental' with packing"
# Configure two files per shard to trigger packing.
sbox.build()
patch_format(sbox.repo_dir, shard_size=2)
backup_dir, backup_url = sbox.add_repo_path('backup')
os.mkdir(backup_dir)
cwd = os.getcwd()
# Pack revisions 0 and 1 if not already packed.
if not (svntest.main.is_fs_type_fsfs and svntest.main.options.fsfs_packing
and svntest.main.options.fsfs_sharding == 2):
svntest.actions.run_and_verify_svnadmin(
['Packing revisions in shard 0...done.\n'], [], "pack",
os.path.join(cwd, sbox.repo_dir))
# Commit 5 more revs, hotcopy and pack after each commit.
for i in [1, 2, 3, 4, 5]:
os.chdir(backup_dir)
svntest.actions.run_and_verify_svnadmin(
None, [],
"hotcopy", "--incremental", os.path.join(cwd, sbox.repo_dir), '.')
os.chdir(cwd)
check_hotcopy_fsfs(sbox.repo_dir, backup_dir)
if i < 5:
sbox.simple_mkdir("newdir-%i" % i)
sbox.simple_commit()
if (svntest.main.is_fs_type_fsfs and not svntest.main.options.fsfs_packing
and not i % 2):
expected_output = ['Packing revisions in shard %d...done.\n' % (i/2)]
else:
expected_output = []
svntest.actions.run_and_verify_svnadmin(
expected_output, [], "pack", os.path.join(cwd, sbox.repo_dir))
def locking(sbox):
"svnadmin lock tests"
sbox.build(create_wc=False)
comment_path = os.path.join(svntest.main.temp_dir, "comment")
svntest.main.file_write(comment_path, "dummy comment")
invalid_comment_path = os.path.join(svntest.main.temp_dir, "invalid_comment")
svntest.main.file_write(invalid_comment_path, "character is invalid")
# Test illegal character in comment file.
expected_error = ".*svnadmin: E130004:.*"
svntest.actions.run_and_verify_svnadmin(None,
expected_error, "lock",
sbox.repo_dir,
"iota", "jrandom",
invalid_comment_path)
# Test locking path with --bypass-hooks
expected_output = "'/iota' locked by user 'jrandom'."
svntest.actions.run_and_verify_svnadmin(expected_output,
None, "lock",
sbox.repo_dir,
"iota", "jrandom",
comment_path,
"--bypass-hooks")
# Remove lock
svntest.actions.run_and_verify_svnadmin(None,
None, "rmlocks",
sbox.repo_dir, "iota")
# Test locking path without --bypass-hooks
expected_output = "'/iota' locked by user 'jrandom'."
svntest.actions.run_and_verify_svnadmin(expected_output,
None, "lock",
sbox.repo_dir,
"iota", "jrandom",
comment_path)
# Test locking already locked path.
expected_error = ".*svnadmin: E160035:.*"
svntest.actions.run_and_verify_svnadmin(None,
expected_error, "lock",
sbox.repo_dir,
"iota", "jrandom",
comment_path)
# Test locking non-existent path.
expected_error = ".*svnadmin: E160013:.*"
svntest.actions.run_and_verify_svnadmin(None,
expected_error, "lock",
sbox.repo_dir,
"non-existent", "jrandom",
comment_path)
# Test locking a path while specifying a lock token.
expected_output = "'/A/D/G/rho' locked by user 'jrandom'."
lock_token = "opaquelocktoken:01234567-89ab-cdef-89ab-cdef01234567"
svntest.actions.run_and_verify_svnadmin(expected_output,
None, "lock",
sbox.repo_dir,
"A/D/G/rho", "jrandom",
comment_path, lock_token)
# Test unlocking a path, but provide the wrong lock token.
expected_error = ".*svnadmin: E160040:.*"
wrong_lock_token = "opaquelocktoken:12345670-9ab8-defc-9ab8-def01234567c"
svntest.actions.run_and_verify_svnadmin(None,
expected_error, "unlock",
sbox.repo_dir,
"A/D/G/rho", "jrandom",
wrong_lock_token)
# Test unlocking the path again, but this time provide the correct
# lock token.
expected_output = "'/A/D/G/rho' unlocked by user 'jrandom'."
svntest.actions.run_and_verify_svnadmin(expected_output,
None, "unlock",
sbox.repo_dir,
"A/D/G/rho", "jrandom",
lock_token)
# Install lock/unlock prevention hooks.
hook_path = svntest.main.get_pre_lock_hook_path(sbox.repo_dir)
svntest.main.create_python_hook_script(hook_path, 'import sys; sys.exit(1)')
hook_path = svntest.main.get_pre_unlock_hook_path(sbox.repo_dir)
svntest.main.create_python_hook_script(hook_path, 'import sys; sys.exit(1)')
# Test locking a path. Don't use --bypass-hooks, though, as we wish
# to verify that the hook script is really getting executed.
expected_error = ".*svnadmin: E165001:.*"
svntest.actions.run_and_verify_svnadmin(None,
expected_error, "lock",
sbox.repo_dir,
"iota", "jrandom",
comment_path)
# Fetch the lock token for our remaining locked path. (We didn't
# explicitly set it, so it will vary from test run to test run.)
exit_code, output, errput = svntest.main.run_svnadmin("lslocks",
sbox.repo_dir,
"iota")
iota_token = None
for line in output:
if line.startswith("UUID Token: opaquelocktoken:"):
iota_token = line[12:].rstrip()
break
if iota_token is None:
raise svntest.Failure("Unable to lookup lock token for 'iota'")
# Try to unlock a path while providing the correct lock token but
# with a preventative hook in place.
expected_error = ".*svnadmin: E165001:.*"
svntest.actions.run_and_verify_svnadmin(None,
expected_error, "unlock",
sbox.repo_dir,
"iota", "jrandom",
iota_token)
# Finally, use --bypass-hooks to unlock the path (again using the
# correct lock token).
expected_output = "'/iota' unlocked by user 'jrandom'."
svntest.actions.run_and_verify_svnadmin(expected_output,
None, "unlock",
"--bypass-hooks",
sbox.repo_dir,
"iota", "jrandom",
iota_token)
@SkipUnless(svntest.main.is_threaded_python)
@Issue(4129)
def mergeinfo_race(sbox):
"concurrent mergeinfo commits invalidate pred-count"
sbox.build()
# This test exercises two commit-time race condition bugs:
#
# (a) metadata corruption when concurrent commits change svn:mergeinfo (issue #4129)
# (b) false positive SVN_ERR_FS_CONFLICT error with httpv1 commits
# https://mail-archives.apache.org/mod_mbox/subversion-dev/201507.mbox/%[email protected]%3E
#
# Both bugs are timing-dependent and might not reproduce 100% of the time.
wc_dir = sbox.wc_dir
wc2_dir = sbox.add_wc_path('2')
## Create wc2.
svntest.main.run_svn(None, 'checkout', '-q', sbox.repo_url, wc2_dir)
## Some random edits.
svntest.main.run_svn(None, 'mkdir', sbox.ospath('d1', wc_dir))
svntest.main.run_svn(None, 'mkdir', sbox.ospath('d2', wc2_dir))
## Set random mergeinfo properties.
svntest.main.run_svn(None, 'ps', 'svn:mergeinfo', '/P:42', sbox.ospath('A', wc_dir))
svntest.main.run_svn(None, 'ps', 'svn:mergeinfo', '/Q:42', sbox.ospath('iota', wc2_dir))
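# Each worker thread commits one of the two working copies; the two
# commits are meant to race against each other.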
def makethread(some_wc_dir):
def worker():
svntest.main.run_svn(None, 'commit', '-mm', some_wc_dir)
return worker
t1 = threading.Thread(None, makethread(wc_dir))
t2 = threading.Thread(None, makethread(wc2_dir))
# t2 will trigger the issue #4129 sanity check in fs_fs.c
t1.start(); t2.start()
t1.join(); t2.join()
# Crude attempt to make sure everything worked.
# TODO: better way to catch exceptions in the thread
if svntest.actions.run_and_parse_info(sbox.repo_url)[0]['Revision'] != '3':
raise svntest.Failure("one or both commits failed")
@Issue(4213)
@Skip(svntest.main.is_fs_type_fsx)
def recover_old_empty(sbox):
"recover empty --compatible-version=1.3"
sbox.build(create_wc=False, empty=True, minor_version=3)
svntest.actions.run_and_verify_svnadmin(None, [],
"recover", sbox.repo_dir)
@SkipUnless(svntest.main.is_fs_type_fsfs)
def verify_keep_going(sbox):
"svnadmin verify --keep-going test"
# No support for modifying pack files
if svntest.main.options.fsfs_packing:
raise svntest.Skip('fsfs packing set')
sbox.build(create_wc = False)
repo_url = sbox.repo_url
B_url = sbox.repo_url + '/B'
C_url = sbox.repo_url + '/C'
# Create /B in r2 and /C in r3.
svntest.actions.run_and_verify_svn(None, [],
'mkdir', '-m', 'log_msg',
B_url)
svntest.actions.run_and_verify_svn(None, [],
'mkdir', '-m', 'log_msg',
C_url)
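# Corrupt r2 by overwriting the start of its rev file with junk.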
r2 = fsfs_file(sbox.repo_dir, 'revs', '2')
fp = open(r2, 'r+b')
fp.write(b"inserting junk to corrupt the rev")
fp.close()
exit_code, output, errput = svntest.main.run_svnadmin("verify",
"--keep-going",
sbox.repo_dir)
exp_out = svntest.verify.RegexListOutput([".*Verified revision 0.",
".*Verified revision 1.",
".*",
".*Summary.*",
".*r2: E160004:.*",
".*r2: E160004:.*",
".*r3: E160004:.*",
".*r3: E160004:.*"])
if (svntest.main.fs_has_rep_sharing()):
exp_out.insert(0, ".*Verifying.*metadata.*")
exp_err = svntest.verify.RegexListOutput([".*Error verifying revision 2.",
"svnadmin: E160004:.*",
"svnadmin: E160004:.*",
".*Error verifying revision 3.",
"svnadmin: E160004:.*",
"svnadmin: E160004:.*",
"svnadmin: E205012:.*"], False)
if (svntest.main.is_fs_log_addressing()):
exp_err.insert(0, ".*Error verifying repository metadata.")
exp_err.insert(1, "svnadmin: E160004:.*")
if svntest.verify.verify_outputs("Unexpected error while running 'svnadmin verify'.",
output, errput, exp_out, exp_err):
raise svntest.Failure
exit_code, output, errput = svntest.main.run_svnadmin("verify",
sbox.repo_dir)
if (svntest.main.is_fs_log_addressing()):
exp_out = svntest.verify.RegexListOutput([".*Verifying metadata at revision 0.*"])
else:
exp_out = svntest.verify.RegexListOutput([".*Verified revision 0.",
".*Verified revision 1."])
if (svntest.main.fs_has_rep_sharing()):
exp_out.insert(0, ".*Verifying repository metadata.*")
if (svntest.main.is_fs_log_addressing()):
exp_err = svntest.verify.RegexListOutput([
".*Error verifying repository metadata.",
"svnadmin: E160004:.*"], False)
else:
exp_err = svntest.verify.RegexListOutput([".*Error verifying revision 2.",
"svnadmin: E160004:.*",
"svnadmin: E160004:.*"], False)
if svntest.verify.verify_outputs("Unexpected error while running 'svnadmin verify'.",
output, errput, exp_out, exp_err):
raise svntest.Failure
exit_code, output, errput = svntest.main.run_svnadmin("verify",
"--quiet",
sbox.repo_dir)
if (svntest.main.is_fs_log_addressing()):
exp_err = svntest.verify.RegexListOutput([
".*Error verifying repository metadata.",
"svnadmin: E160004:.*"], False)
else:
exp_err = svntest.verify.RegexListOutput([".*Error verifying revision 2.",
"svnadmin: E160004:.*",
"svnadmin: E160004:.*"], False)
if svntest.verify.verify_outputs("Output of 'svnadmin verify' is unexpected.",
None, errput, None, exp_err):
raise svntest.Failure
# Don't leave a corrupt repository
svntest.main.safe_rmtree(sbox.repo_dir, True)
@SkipUnless(svntest.main.is_fs_type_fsfs)
def verify_keep_going_quiet(sbox):
"svnadmin verify --keep-going --quiet test"
# No support for modifying pack files
if svntest.main.options.fsfs_packing:
raise svntest.Skip('fsfs packing set')
sbox.build(create_wc = False)
repo_url = sbox.repo_url
B_url = sbox.repo_url + '/B'
C_url = sbox.repo_url + '/C'
# Create /B in r2 and /C in r3.
svntest.actions.run_and_verify_svn(None, [],
'mkdir', '-m', 'log_msg',
B_url)
svntest.actions.run_and_verify_svn(None, [],
'mkdir', '-m', 'log_msg',
C_url)
r2 = fsfs_file(sbox.repo_dir, 'revs', '2')
fp = open(r2, 'r+b')
fp.write(b"inserting junk to corrupt the rev")
fp.close()
exit_code, output, errput = svntest.main.run_svnadmin("verify",
"--keep-going",
"--quiet",
sbox.repo_dir)
exp_err = svntest.verify.RegexListOutput([".*Error verifying revision 2.",
"svnadmin: E160004:.*",
"svnadmin: E160004:.*",
".*Error verifying revision 3.",
"svnadmin: E160004:.*",
"svnadmin: E160004:.*",
"svnadmin: E205012:.*"], False)
# Insert another expected error from checksum verification
if (svntest.main.is_fs_log_addressing()):
exp_err.insert(0, ".*Error verifying repository metadata.")
exp_err.insert(1, "svnadmin: E160004:.*")
if svntest.verify.verify_outputs(
"Unexpected error while running 'svnadmin verify'.",
output, errput, None, exp_err):
raise svntest.Failure
# Don't leave a corrupt repository
svntest.main.safe_rmtree(sbox.repo_dir, True)
@SkipUnless(svntest.main.is_fs_type_fsfs)
def verify_invalid_path_changes(sbox):
"detect invalid changed path list entries"
# No support for modifying pack files
if svntest.main.options.fsfs_packing:
raise svntest.Skip('fsfs packing set')
sbox.build(create_wc = False)
repo_url = sbox.repo_url
# Create a number of revisions each adding a single path
for r in range(2,20):
svntest.actions.run_and_verify_svn(None, [],
'mkdir', '-m', 'log_msg',
sbox.repo_url + '/B' + str(r))
# modify every other revision to make sure that errors are not simply
# "carried over" but that all corruptions are detected independently
# add existing node
set_changed_path_list(sbox, 2,
b"_0.0.t1-1 add-dir false false /A\n\n")
# add into non-existent parent
set_changed_path_list(sbox, 4,
b"_0.0.t3-2 add-dir false false /C/X\n\n")
# del non-existent node
set_changed_path_list(sbox, 6,
b"_0.0.t5-2 delete-dir false false /C\n\n")
# del existent node of the wrong kind
#
# THIS WILL NOT BE DETECTED
# since neither the dump mechanism nor the file format cares about the
# types of deleted nodes
set_changed_path_list(sbox, 8,
b"_0.0.t7-2 delete-file false false /B3\n\n")
# copy from non-existent node
set_changed_path_list(sbox, 10,
b"_0.0.t9-2 add-dir false false /B10\n6 /B8\n")
# copy from existing node of the wrong kind
set_changed_path_list(sbox, 12,
b"_0.0.t11-2 add-file false false /B12\n9 /B8\n")
# modify non-existent node
set_changed_path_list(sbox, 14,
b"_0.0.t13-2 modify-file false false /A/D/H/foo\n\n")
# modify existent node of the wrong kind
set_changed_path_list(sbox, 16,
b"_0.0.t15-2 modify-file false false /B12\n\n")
# replace non-existent node
set_changed_path_list(sbox, 18,
b"_0.0.t17-2 replace-file false false /A/D/H/foo\n\n")
# find corruptions
exit_code, output, errput = svntest.main.run_svnadmin("verify",
"--keep-going",
sbox.repo_dir)
# Errors generated by FSFS when CHANGED_PATHS is not forced into emulation
exp_out1 = svntest.verify.RegexListOutput([".*Verified revision 0.",
".*Verified revision 1.",
".*Verified revision 3.",
".*Verified revision 5.",
".*Verified revision 7.",
".*Verified revision 8.",
".*Verified revision 9.",
".*Verified revision 11.",
".*Verified revision 13.",
".*Verified revision 15.",
".*Verified revision 17.",
".*Verified revision 19.",
".*",
".*Summary.*",
".*r2: E160020:.*",
".*r2: E160020:.*",
".*r4: E160013:.*",
".*r6: E160013:.*",
".*r6: E160013:.*",
".*r10: E160013:.*",
".*r10: E160013:.*",
".*r12: E145001:.*",
".*r12: E145001:.*",
".*r14: E160013:.*",
".*r14: E160013:.*",
".*r16: E145001:.*",
".*r16: E145001:.*",
".*r18: E160013:.*",
".*r18: E160013:.*"])
exp_err1 = svntest.verify.RegexListOutput([".*Error verifying revision 2.",
"svnadmin: E160020:.*",
"svnadmin: E160020:.*",
".*Error verifying revision 4.",
"svnadmin: E160013:.*",
".*Error verifying revision 6.",
"svnadmin: E160013:.*",
"svnadmin: E160013:.*",
".*Error verifying revision 10.",
"svnadmin: E160013:.*",
"svnadmin: E160013:.*",
".*Error verifying revision 12.",
"svnadmin: E145001:.*",
"svnadmin: E145001:.*",
".*Error verifying revision 14.",
"svnadmin: E160013:.*",
"svnadmin: E160013:.*",
".*Error verifying revision 16.",
"svnadmin: E145001:.*",
"svnadmin: E145001:.*",
".*Error verifying revision 18.",
"svnadmin: E160013:.*",
"svnadmin: E160013:.*",
"svnadmin: E205012:.*"], False)
# If CHANGED_PATHS is emulated, FSFS fails earlier, generating fewer
# of the same messages per revision.
exp_out2 = svntest.verify.RegexListOutput([".*Verified revision 0.",
".*Verified revision 1.",
".*Verified revision 3.",
".*Verified revision 5.",
".*Verified revision 7.",
".*Verified revision 8.",
".*Verified revision 9.",
".*Verified revision 11.",
".*Verified revision 13.",
".*Verified revision 15.",
".*Verified revision 17.",
".*Verified revision 19.",
".*",
".*Summary.*",
".*r2: E160020:.*",
".*r2: E160020:.*",
".*r4: E160013:.*",
".*r6: E160013:.*",
".*r10: E160013:.*",
".*r10: E160013:.*",
".*r12: E145001:.*",
".*r12: E145001:.*",
".*r14: E160013:.*",
".*r16: E145001:.*",
".*r16: E145001:.*",
".*r18: E160013:.*"])
exp_err2 = svntest.verify.RegexListOutput([".*Error verifying revision 2.",
"svnadmin: E160020:.*",
"svnadmin: E160020:.*",
".*Error verifying revision 4.",
"svnadmin: E160013:.*",
".*Error verifying revision 6.",
"svnadmin: E160013:.*",
".*Error verifying revision 10.",
"svnadmin: E160013:.*",
"svnadmin: E160013:.*",
".*Error verifying revision 12.",
"svnadmin: E145001:.*",
"svnadmin: E145001:.*",
".*Error verifying revision 14.",
"svnadmin: E160013:.*",
".*Error verifying revision 16.",
"svnadmin: E145001:.*",
"svnadmin: E145001:.*",
".*Error verifying revision 18.",
"svnadmin: E160013:.*",
"svnadmin: E205012:.*"], False)
# Determine which pattern to use.
# Note that index() will throw an exception if the string can't be found.
try:
rev6_line = errput.index('* Error verifying revision 6.\n')
rev10_line = errput.index('* Error verifying revision 10.\n')
error_count = 0
for line in errput[rev6_line+1:rev10_line]:
if "svnadmin: E" in line:
error_count = error_count + 1
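# A single error between the r6 and r10 markers means FSFS emulated
# CHANGED_PATHS, so the shorter expected pattern applies.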
if error_count == 1:
exp_out = exp_out2
exp_err = exp_err2
else:
exp_out = exp_out1
exp_err = exp_err1
except ValueError:
exp_out = exp_out1
exp_err = exp_err1
if (svntest.main.fs_has_rep_sharing()):
exp_out.insert(0, ".*Verifying.*metadata.*")
if svntest.main.options.fsfs_sharding is not None:
for x in range(0, 19 // svntest.main.options.fsfs_sharding):
exp_out.insert(0, ".*Verifying.*metadata.*")
if svntest.main.is_fs_log_addressing():
exp_out.insert(0, ".*Verifying.*metadata.*")
if svntest.verify.verify_outputs("Unexpected error while running 'svnadmin verify'.",
output, errput, exp_out, exp_err):
raise svntest.Failure
exit_code, output, errput = svntest.main.run_svnadmin("verify",
sbox.repo_dir)
exp_out = svntest.verify.RegexListOutput([".*Verified revision 0.",
".*Verified revision 1."])
exp_err = svntest.verify.RegexListOutput([".*Error verifying revision 2.",
"svnadmin: E160020:.*",
"svnadmin: E160020:.*"], False)
if (svntest.main.fs_has_rep_sharing()):
exp_out.insert(0, ".*Verifying.*metadata.*")
if svntest.main.options.fsfs_sharding is not None:
for x in range(0, 19 // svntest.main.options.fsfs_sharding):
exp_out.insert(0, ".*Verifying.*metadata.*")
if svntest.main.is_fs_log_addressing():
exp_out.insert(0, ".*Verifying.*metadata.*")
if svntest.verify.verify_outputs("Unexpected error while running 'svnadmin verify'.",
output, errput, exp_out, exp_err):
raise svntest.Failure
exit_code, output, errput = svntest.main.run_svnadmin("verify",
"--quiet",
sbox.repo_dir)
exp_out = []
exp_err = svntest.verify.RegexListOutput([".*Error verifying revision 2.",
"svnadmin: E160020:.*",
"svnadmin: E160020:.*"], False)
if svntest.verify.verify_outputs("Output of 'svnadmin verify' is unexpected.",
output, errput, exp_out, exp_err):
raise svntest.Failure
# Don't leave a corrupt repository
svntest.main.safe_rmtree(sbox.repo_dir, True)
def verify_denormalized_names(sbox):
"detect denormalized names and name collisions"
sbox.build(create_wc=False, empty=True)
dumpfile_location = os.path.join(os.path.dirname(sys.argv[0]),
'svnadmin_tests_data',
'normalization_check.dump')
load_dumpstream(sbox, svntest.actions.load_dumpfile(dumpfile_location))
exit_code, output, errput = svntest.main.run_svnadmin(
"verify", "--check-normalization", sbox.repo_dir)
expected_output_regex_list = [
".*Verified revision 0.",
".*Verified revision 1.",
".*Verified revision 2.",
".*Verified revision 3.",
# A/{Eacute}/{aring}lpha
"WARNING 0x0003: Duplicate representation of path 'A/.*/.*lpha'",
".*Verified revision 4.",
".*Verified revision 5.",
# Q/{aring}lpha
"WARNING 0x0004: Duplicate representation of path '/Q/.*lpha'"
# A/{Eacute}
" in svn:mergeinfo property of 'A/.*'",
".*Verified revision 6.",
".*Verified revision 7."]
# The BDB backend doesn't do global metadata verification.
if (svntest.main.fs_has_rep_sharing() and not svntest.main.is_fs_type_bdb()):
expected_output_regex_list.insert(0, ".*Verifying repository metadata.*")
if svntest.main.options.fsfs_sharding is not None:
for x in range(0, 7 // svntest.main.options.fsfs_sharding):
expected_output_regex_list.insert(0, ".*Verifying.*metadata.*")
if svntest.main.is_fs_log_addressing():
expected_output_regex_list.insert(0, ".* Verifying metadata at revision 0.*")
exp_out = svntest.verify.RegexListOutput(expected_output_regex_list)
exp_err = svntest.verify.ExpectedOutput([])
svntest.verify.verify_outputs(
"Unexpected error while running 'svnadmin verify'.",
output, errput, exp_out, exp_err)
@SkipUnless(svntest.main.is_fs_type_fsfs)
def fsfs_recover_old_non_empty(sbox):
"fsfs recover non-empty --compatible-version=1.3"
# Around trunk@1560210, 'svnadmin recover' wrongly errored out
# for the --compatible-version=1.3 Greek tree repository:
# svnadmin: E200002: Serialized hash missing terminator
sbox.build(create_wc=False, minor_version=3)
svntest.actions.run_and_verify_svnadmin(None, [], "recover",
sbox.repo_dir)
@SkipUnless(svntest.main.is_fs_type_fsfs)
def fsfs_hotcopy_old_non_empty(sbox):
"fsfs hotcopy non-empty --compatible-version=1.3"
# Around trunk@1560210, 'svnadmin hotcopy' wrongly errored out
# for the --compatible-version=1.3 Greek tree repository:
# svnadmin: E160006: No such revision 1
sbox.build(create_wc=False, minor_version=3)
backup_dir, backup_url = sbox.add_repo_path('backup')
svntest.actions.run_and_verify_svnadmin(None, [], "hotcopy",
sbox.repo_dir, backup_dir)
check_hotcopy_fsfs(sbox.repo_dir, backup_dir)
def load_ignore_dates(sbox):
"svnadmin load --ignore-dates"
# All revisions in the loaded repository should come after this time.
start_time = time.localtime()
time.sleep(1)
sbox.build(create_wc=False, empty=True)
dumpfile_location = os.path.join(os.path.dirname(sys.argv[0]),
'svnadmin_tests_data',
'skeleton_repos.dump')
dumpfile_skeleton = svntest.actions.load_dumpfile(dumpfile_location)
load_dumpstream(sbox, dumpfile_skeleton, '--ignore-dates')
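# With --ignore-dates the loaded revisions get fresh svn:date values
# (the time of the load), so every revision should postdate start_time.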
svntest.actions.run_and_verify_svnlook(['6\n'],
None, 'youngest', sbox.repo_dir)
for rev in range(1, 6):
exit_code, output, errput = svntest.main.run_svnlook('date', '-r', rev,
sbox.repo_dir)
if errput:
raise SVNUnexpectedStderr(errput)
rev_time = time.strptime(output[0].rstrip()[:19], '%Y-%m-%d %H:%M:%S')
if rev_time < start_time:
raise svntest.Failure("Revision time for r%d older than load start time\n"
" rev_time: %s\n"
" start_time: %s"
% (rev, str(rev_time), str(start_time)))
@SkipUnless(svntest.main.is_fs_type_fsfs)
def fsfs_hotcopy_old_with_id_changes(sbox):
"fsfs hotcopy old with node-id and copy-id changes"
# Around trunk@1573728, running 'svnadmin hotcopy' for the
# --compatible-version=1.3 repository with certain node-id and copy-id
# changes ended with mismatching db/current in source and destination:
#
# source: "2 l 1" destination: "2 k 1",
# "3 l 2" "3 4 2"
# (and so on...)
#
# We test this case by creating a --compatible-version=1.3 repository
# and committing things that result in node-id and copy-id changes.
# After every commit, we hotcopy the repository to a new destination
# and check whether the source of the backup and the backup itself are
# identical. We also maintain a separate --incremental backup, which
# is updated and checked after every commit.
sbox.build(create_wc=True, minor_version=3)
inc_backup_dir, inc_backup_url = sbox.add_repo_path('incremental-backup')
# r1 = Initial greek tree sandbox.
backup_dir, backup_url = sbox.add_repo_path('backup-after-r1')
svntest.actions.run_and_verify_svnadmin(None, [], "hotcopy",
sbox.repo_dir, backup_dir)
svntest.actions.run_and_verify_svnadmin(None, [], "hotcopy",
"--incremental",
sbox.repo_dir, inc_backup_dir)
check_hotcopy_fsfs(sbox.repo_dir, backup_dir)
check_hotcopy_fsfs(sbox.repo_dir, inc_backup_dir)
# r2 = Add a new property.
sbox.simple_propset('foo', 'bar', 'A/mu')
sbox.simple_commit(message='r2')
backup_dir, backup_url = sbox.add_repo_path('backup-after-r2')
svntest.actions.run_and_verify_svnadmin(None, [], "hotcopy",
sbox.repo_dir, backup_dir)
svntest.actions.run_and_verify_svnadmin(None, [], "hotcopy",
"--incremental",
sbox.repo_dir, inc_backup_dir)
check_hotcopy_fsfs(sbox.repo_dir, backup_dir)
check_hotcopy_fsfs(sbox.repo_dir, inc_backup_dir)
# r3 = Copy a file.
sbox.simple_copy('A/B/E', 'A/B/E1')
sbox.simple_commit(message='r3')
backup_dir, backup_url = sbox.add_repo_path('backup-after-r3')
svntest.actions.run_and_verify_svnadmin(None, [], "hotcopy",
sbox.repo_dir, backup_dir)
svntest.actions.run_and_verify_svnadmin(None, [], "hotcopy",
"--incremental",
sbox.repo_dir, inc_backup_dir)
check_hotcopy_fsfs(sbox.repo_dir, backup_dir)
check_hotcopy_fsfs(sbox.repo_dir, inc_backup_dir)
# r4 = Remove an existing file ...
sbox.simple_rm('A/D/gamma')
sbox.simple_commit(message='r4')
backup_dir, backup_url = sbox.add_repo_path('backup-after-r4')
svntest.actions.run_and_verify_svnadmin(None, [], "hotcopy",
sbox.repo_dir, backup_dir)
svntest.actions.run_and_verify_svnadmin(None, [], "hotcopy",
"--incremental",
sbox.repo_dir, inc_backup_dir)
check_hotcopy_fsfs(sbox.repo_dir, backup_dir)
check_hotcopy_fsfs(sbox.repo_dir, inc_backup_dir)
# r5 = ...and replace it with a new file here.
sbox.simple_add_text("This is the replaced file.\n", 'A/D/gamma')
sbox.simple_commit(message='r5')
backup_dir, backup_url = sbox.add_repo_path('backup-after-r5')
svntest.actions.run_and_verify_svnadmin(None, [], "hotcopy",
sbox.repo_dir, backup_dir)
svntest.actions.run_and_verify_svnadmin(None, [], "hotcopy",
"--incremental",
sbox.repo_dir, inc_backup_dir)
check_hotcopy_fsfs(sbox.repo_dir, backup_dir)
check_hotcopy_fsfs(sbox.repo_dir, inc_backup_dir)
# r6 = Add an entirely new file.
sbox.simple_add_text('This is an entirely new file.\n', 'A/C/mu1')
sbox.simple_commit(message='r6')
backup_dir, backup_url = sbox.add_repo_path('backup-after-r6')
svntest.actions.run_and_verify_svnadmin(None, [], "hotcopy",
sbox.repo_dir, backup_dir)
svntest.actions.run_and_verify_svnadmin(None, [], "hotcopy",
"--incremental",
sbox.repo_dir, inc_backup_dir)
check_hotcopy_fsfs(sbox.repo_dir, backup_dir)
check_hotcopy_fsfs(sbox.repo_dir, inc_backup_dir)
# r7 = Change the content of the existing file (this changeset does
# not bump the next-id and copy-id counters in the repository).
sbox.simple_append('A/mu', 'This is change in the existing file.\n')
sbox.simple_commit(message='r7')
backup_dir, backup_url = sbox.add_repo_path('backup-after-r7')
svntest.actions.run_and_verify_svnadmin(None, [], "hotcopy",
sbox.repo_dir, backup_dir)
svntest.actions.run_and_verify_svnadmin(None, [], "hotcopy",
"--incremental",
sbox.repo_dir, inc_backup_dir)
check_hotcopy_fsfs(sbox.repo_dir, backup_dir)
check_hotcopy_fsfs(sbox.repo_dir, inc_backup_dir)
@SkipUnless(svntest.main.fs_has_pack)
def verify_packed(sbox):
"verify packed with small shards"
# Configure two files per shard to trigger packing.
sbox.build()
patch_format(sbox.repo_dir, shard_size=2)
# Play with our greek tree. These changesets fall into two
# separate shards with r2 and r3 being in shard 1 ...
sbox.simple_append('iota', "Line.\n")
sbox.simple_append('A/D/gamma', "Another line.\n")
sbox.simple_commit(message='r2')
sbox.simple_propset('foo', 'bar', 'iota')
sbox.simple_propset('foo', 'baz', 'A/mu')
sbox.simple_commit(message='r3')
# ...and r4 and r5 being in shard 2.
sbox.simple_rm('A/C')
sbox.simple_copy('A/B/E', 'A/B/E1')
sbox.simple_move('A/mu', 'A/B/mu')
sbox.simple_commit(message='r4')
sbox.simple_propdel('foo', 'A/B/mu')
sbox.simple_commit(message='r5')
if svntest.main.is_fs_type_fsfs and svntest.main.options.fsfs_packing:
# With --fsfs-packing, everything is already packed and we
# can skip this part.
pass
else:
expected_output = ["Packing revisions in shard 0...done.\n",
"Packing revisions in shard 1...done.\n",
"Packing revisions in shard 2...done.\n"]
svntest.actions.run_and_verify_svnadmin(expected_output, [],
"pack", sbox.repo_dir)
if svntest.main.is_fs_log_addressing():
expected_output = ["* Verifying metadata at revision 0 ...\n",
"* Verifying metadata at revision 2 ...\n",
"* Verifying metadata at revision 4 ...\n",
"* Verifying repository metadata ...\n",
"* Verified revision 0.\n",
"* Verified revision 1.\n",
"* Verified revision 2.\n",
"* Verified revision 3.\n",
"* Verified revision 4.\n",
"* Verified revision 5.\n"]
else:
expected_output = ["* Verifying repository metadata ...\n",
"* Verified revision 0.\n",
"* Verified revision 1.\n",
"* Verified revision 2.\n",
"* Verified revision 3.\n",
"* Verified revision 4.\n",
"* Verified revision 5.\n"]
svntest.actions.run_and_verify_svnadmin(expected_output, [],
"verify", sbox.repo_dir)
# Test that 'svnadmin freeze' is nestable. (For example, this ensures it
# won't take system-global locks, only repository-scoped ones.)
#
# This could be useful to easily freeze a small number of repositories at once.
#
# ### We don't actually test that freeze takes a write lock anywhere (not even
# ### in C tests.)
def freeze_freeze(sbox):
"svnadmin freeze svnadmin freeze (some-cmd)"
sbox.build(create_wc=False, read_only=True)
second_repo_dir, _ = sbox.add_repo_path('backup')
svntest.actions.run_and_verify_svnadmin(None, [], "hotcopy",
sbox.repo_dir, second_repo_dir)
if svntest.main.is_fs_type_fsx() or \
(svntest.main.is_fs_type_fsfs() and \
svntest.main.options.server_minor_version < 9):
# FSFS repositories created with --compatible-version=1.8 and less
# erroneously share the filesystem data (locks, shared transaction
# data, ...) between hotcopy source and destination. This is fixed
# for new FS formats, but in order to avoid a deadlock for old formats,
# we have to manually assign a new UUID for the hotcopy destination.
# As of trunk@1618024, the same applies to FSX repositories.
svntest.actions.run_and_verify_svnadmin([], None,
'setuuid', second_repo_dir)
svntest.actions.run_and_verify_svnadmin(None, [],
'freeze', '--', sbox.repo_dir,
svntest.main.svnadmin_binary, 'freeze', '--', second_repo_dir,
sys.executable, '-c', 'True')
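# The outer freeze holds sbox.repo_dir frozen while running a nested
# 'svnadmin freeze' on second_repo_dir, which in turn runs a trivial
# Python command.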
arg_file = sbox.get_tempname()
svntest.main.file_write(arg_file,
"%s\n%s\n" % (sbox.repo_dir, second_repo_dir))
svntest.actions.run_and_verify_svnadmin(None, [],
'freeze', '-F', arg_file, '--',
sys.executable, '-c', 'True')
def verify_metadata_only(sbox):
"verify metadata only"
sbox.build(create_wc = False)
exit_code, output, errput = svntest.main.run_svnadmin("verify",
sbox.repo_dir,
"--metadata-only")
if errput:
raise SVNUnexpectedStderr(errput)
# Unfortunately, older formats aren't tested as thoroughly as newer ones,
# resulting in different progress output. BDB will do a full check but
# not produce any output.
if svntest.main.is_fs_log_addressing():
svntest.verify.compare_and_display_lines(
"Unexpected error while running 'svnadmin verify'.",
'STDOUT', ["* Verifying metadata at revision 0 ...\n",
"* Verifying repository metadata ...\n"], output)
elif svntest.main.fs_has_rep_sharing() \
and not svntest.main.is_fs_type_bdb():
svntest.verify.compare_and_display_lines(
"Unexpected error while running 'svnadmin verify'.",
'STDOUT', ["* Verifying repository metadata ...\n"], output)
else:
svntest.verify.compare_and_display_lines(
"Unexpected error while running 'svnadmin verify'.",
'STDOUT', [], output)
@Skip(svntest.main.is_fs_type_bdb)
def verify_quickly(sbox):
"verify quickly using metadata"
sbox.build(create_wc = False)
rev_file = open(fsfs_file(sbox.repo_dir, 'revs', '1'), 'r+b')
# corrupt the rev file by overwriting the byte at offset 8
rev_file.seek(8)
rev_file.write(b'#')
rev_file.close()
exit_code, output, errput = svntest.main.run_svnadmin("verify",
sbox.repo_dir,
"--metadata-only")
# unfortunately, some backends need to do more checks than others,
# resulting in different progress output
if svntest.main.is_fs_log_addressing():
exp_out = svntest.verify.RegexListOutput([])
exp_err = svntest.verify.RegexListOutput(["svnadmin: E160004:.*"], False)
else:
exp_out = svntest.verify.RegexListOutput([])
exp_err = svntest.verify.RegexListOutput([])
if (svntest.main.fs_has_rep_sharing()):
exp_out.insert(0, ".*Verifying.*metadata.*")
if svntest.verify.verify_outputs("Unexpected error while running 'svnadmin verify'.",
output, errput, exp_out, exp_err):
raise svntest.Failure
# Don't leave a corrupt repository
svntest.main.safe_rmtree(sbox.repo_dir, True)
@SkipUnless(svntest.main.is_fs_type_fsfs)
@SkipUnless(svntest.main.fs_has_pack)
def fsfs_hotcopy_progress(sbox):
"hotcopy progress reporting"
# Check how 'svnadmin hotcopy' reports progress for non-incremental
# and incremental scenarios. The progress output can be affected by
# the --fsfs-packing option, so skip the test if that is the case.
if svntest.main.options.fsfs_packing:
raise svntest.Skip('fsfs packing set')
# Create an empty repository, configure three files per shard.
sbox.build(create_wc=False, empty=True)
patch_format(sbox.repo_dir, shard_size=3)
inc_backup_dir, inc_backup_url = sbox.add_repo_path('incremental-backup')
# Nothing really exciting for the empty repository.
expected_full = [
"* Copied revision 0.\n"
]
expected_incremental = [
"* Copied revision 0.\n",
]
backup_dir, backup_url = sbox.add_repo_path('backup-0')
svntest.actions.run_and_verify_svnadmin(expected_full, [],
'hotcopy',
sbox.repo_dir, backup_dir)
svntest.actions.run_and_verify_svnadmin(expected_incremental, [],
'hotcopy', '--incremental',
sbox.repo_dir, inc_backup_dir)
# Commit three revisions. After this step we have a full shard
# (r0, r1, r2) and the second shard (r3) with a single revision.
for i in range(3):
svntest.actions.run_and_verify_svn(None, [], 'mkdir',
'-m', svntest.main.make_log_msg(),
sbox.repo_url + '/dir-%i' % i)
expected_full = [
"* Copied revision 0.\n",
"* Copied revision 1.\n",
"* Copied revision 2.\n",
"* Copied revision 3.\n",
]
expected_incremental = [
"* Copied revision 1.\n",
"* Copied revision 2.\n",
"* Copied revision 3.\n",
]
backup_dir, backup_url = sbox.add_repo_path('backup-1')
svntest.actions.run_and_verify_svnadmin(expected_full, [],
'hotcopy',
sbox.repo_dir, backup_dir)
svntest.actions.run_and_verify_svnadmin(expected_incremental, [],
'hotcopy', '--incremental',
sbox.repo_dir, inc_backup_dir)
# Pack everything (r3 is still unpacked) and hotcopy again. In this case,
# the --incremental output should track the incoming (r0, r1, r2) pack and
# should not mention r3, because it is already a part of the destination
# and is *not* a part of the incoming pack.
svntest.actions.run_and_verify_svnadmin(None, [], 'pack',
sbox.repo_dir)
expected_full = [
"* Copied revisions from 0 to 2.\n",
"* Copied revision 3.\n",
]
expected_incremental = [
"* Copied revisions from 0 to 2.\n",
]
backup_dir, backup_url = sbox.add_repo_path('backup-2')
svntest.actions.run_and_verify_svnadmin(expected_full, [],
'hotcopy',
sbox.repo_dir, backup_dir)
svntest.actions.run_and_verify_svnadmin(expected_incremental, [],
'hotcopy', '--incremental',
sbox.repo_dir, inc_backup_dir)
# Fill the second shard, pack again, commit several unpacked revisions
# on top of it. Rerun the hotcopy and check the progress output.
for i in range(4, 6):
svntest.actions.run_and_verify_svn(None, [], 'mkdir',
'-m', svntest.main.make_log_msg(),
sbox.repo_url + '/dir-%i' % i)
svntest.actions.run_and_verify_svnadmin(None, [], 'pack',
sbox.repo_dir)
for i in range(6, 8):
svntest.actions.run_and_verify_svn(None, [], 'mkdir',
'-m', svntest.main.make_log_msg(),
sbox.repo_url + '/dir-%i' % i)
expected_full = [
"* Copied revisions from 0 to 2.\n",
"* Copied revisions from 3 to 5.\n",
"* Copied revision 6.\n",
"* Copied revision 7.\n",
]
expected_incremental = [
"* Copied revisions from 3 to 5.\n",
"* Copied revision 6.\n",
"* Copied revision 7.\n",
]
backup_dir, backup_url = sbox.add_repo_path('backup-3')
svntest.actions.run_and_verify_svnadmin(expected_full, [],
'hotcopy',
sbox.repo_dir, backup_dir)
svntest.actions.run_and_verify_svnadmin(expected_incremental, [],
'hotcopy', '--incremental',
sbox.repo_dir, inc_backup_dir)
@SkipUnless(svntest.main.is_fs_type_fsfs)
def fsfs_hotcopy_progress_with_revprop_changes(sbox):
"incremental hotcopy progress with changed revprops"
# The progress output can be affected by the --fsfs-packing
# option, so skip the test if that is the case.
if svntest.main.options.fsfs_packing:
raise svntest.Skip('fsfs packing set')
# Create an empty repository, commit several revisions and hotcopy it.
sbox.build(create_wc=False, empty=True)
for i in range(6):
svntest.actions.run_and_verify_svn(None, [], 'mkdir',
'-m', svntest.main.make_log_msg(),
sbox.repo_url + '/dir-%i' % i)
expected_output = [
"* Copied revision 0.\n",
"* Copied revision 1.\n",
"* Copied revision 2.\n",
"* Copied revision 3.\n",
"* Copied revision 4.\n",
"* Copied revision 5.\n",
"* Copied revision 6.\n",
]
backup_dir, backup_url = sbox.add_repo_path('backup')
svntest.actions.run_and_verify_svnadmin(expected_output, [],
'hotcopy',
sbox.repo_dir, backup_dir)
# Amend a few log messages in the source, run the --incremental hotcopy.
# The progress output should only mention the corresponding revisions.
revprop_file = sbox.get_tempname()
svntest.main.file_write(revprop_file, "Modified log message.")
for i in [1, 3, 6]:
svntest.actions.run_and_verify_svnadmin(None, [],
'setrevprop',
sbox.repo_dir, '-r', i,
'svn:log', revprop_file)
expected_output = [
"* Copied revision 1.\n",
"* Copied revision 3.\n",
"* Copied revision 6.\n",
]
svntest.actions.run_and_verify_svnadmin(expected_output, [],
'hotcopy', '--incremental',
sbox.repo_dir, backup_dir)
@SkipUnless(svntest.main.is_fs_type_fsfs)
def fsfs_hotcopy_progress_old(sbox):
"hotcopy --compatible-version=1.3 progress"
sbox.build(create_wc=False, empty=True, minor_version=3)
inc_backup_dir, inc_backup_url = sbox.add_repo_path('incremental-backup')
# Nothing really exciting for the empty repository.
expected_full = [
"* Copied revision 0.\n"
]
expected_incremental = [
"* Copied revision 0.\n",
]
backup_dir, backup_url = sbox.add_repo_path('backup-0')
svntest.actions.run_and_verify_svnadmin(expected_full, [],
'hotcopy',
sbox.repo_dir, backup_dir)
svntest.actions.run_and_verify_svnadmin(expected_incremental, [],
'hotcopy', '--incremental',
sbox.repo_dir, inc_backup_dir)
# Commit three revisions, hotcopy and check the progress output.
for i in range(3):
svntest.actions.run_and_verify_svn(None, [], 'mkdir',
'-m', svntest.main.make_log_msg(),
sbox.repo_url + '/dir-%i' % i)
expected_full = [
"* Copied revision 0.\n",
"* Copied revision 1.\n",
"* Copied revision 2.\n",
"* Copied revision 3.\n",
]
expected_incremental = [
"* Copied revision 1.\n",
"* Copied revision 2.\n",
"* Copied revision 3.\n",
]
backup_dir, backup_url = sbox.add_repo_path('backup-1')
svntest.actions.run_and_verify_svnadmin(expected_full, [],
'hotcopy',
sbox.repo_dir, backup_dir)
svntest.actions.run_and_verify_svnadmin(expected_incremental, [],
'hotcopy', '--incremental',
sbox.repo_dir, inc_backup_dir)
@SkipUnless(svntest.main.fs_has_unique_freeze)
def freeze_same_uuid(sbox):
"freeze multiple repositories with same UUID"
sbox.build(create_wc=False)
first_repo_dir, _ = sbox.add_repo_path('first')
second_repo_dir, _ = sbox.add_repo_path('second')
# Test that 'svnadmin freeze A (svnadmin freeze B)' does not deadlock for
# new FSFS formats, even if 'A' and 'B' share the same UUID. Create two
# repositories by loading the same dump file, ...
svntest.main.create_repos(first_repo_dir)
svntest.main.create_repos(second_repo_dir)
dump_path = os.path.join(os.path.dirname(sys.argv[0]),
'svnadmin_tests_data',
'skeleton_repos.dump')
dump_contents = open(dump_path, 'rb').readlines()
svntest.actions.run_and_verify_load(first_repo_dir, dump_contents)
svntest.actions.run_and_verify_load(second_repo_dir, dump_contents)
# ...and execute the 'svnadmin freeze -F' command.
arg_file = sbox.get_tempname()
svntest.main.file_write(arg_file,
"%s\n%s\n" % (first_repo_dir, second_repo_dir))
svntest.actions.run_and_verify_svnadmin(None, None,
'freeze', '-F', arg_file, '--',
sys.executable, '-c', 'True')
@Skip(svntest.main.is_fs_type_fsx)
def upgrade(sbox):
"upgrade --compatible-version=1.3"
sbox.build(create_wc=False, minor_version=3)
svntest.actions.run_and_verify_svnadmin(None, [], "upgrade",
sbox.repo_dir)
# Does the repository work after upgrade?
svntest.actions.run_and_verify_svn(['Committing transaction...\n',
'Committed revision 2.\n'], [], 'mkdir',
'-m', svntest.main.make_log_msg(),
sbox.repo_url + '/dir')
def load_txdelta(sbox):
"exercising svn_txdelta_target on BDB"
sbox.build(empty=True)
# This dumpfile produced a BDB repository that generated checksum
# mismatches on read caused by the improper handling of
# svn_txdelta_target ops. The bug was fixed by r1640832.
dumpfile_location = os.path.join(os.path.dirname(sys.argv[0]),
'svnadmin_tests_data',
'load_txdelta.dump.gz')
dumpfile = gzip.open(dumpfile_location, "rb").readlines()
load_dumpstream(sbox, dumpfile)
# Verify would fail with a checksum mismatch:
# * Error verifying revision 14.
# svnadmin: E200014: MD5 checksum mismatch on representation 'r':
# expected: 5182e8876ed894dc7fe28f6ff5b2fee6
# actual: 5121f82875508863ad70daa8244e6947
exit_code, output, errput = svntest.main.run_svnadmin("verify", sbox.repo_dir)
if errput:
raise SVNUnexpectedStderr(errput)
if svntest.verify.verify_outputs(
"Output of 'svnadmin verify' is unexpected.", None, output, None,
".*Verified revision *"):
raise svntest.Failure
@Issues(4563)
def load_no_svndate_r0(sbox):
"load without svn:date on r0"
sbox.build(create_wc=False, empty=True)
# svn:date exists
svntest.actions.run_and_verify_svnlook([' svn:date\n'], [],
'proplist', '--revprop', '-r0',
sbox.repo_dir)
dump_old = [b"SVN-fs-dump-format-version: 2\n", b"\n",
b"UUID: bf52886d-358d-4493-a414-944a6e5ad4f5\n", b"\n",
b"Revision-number: 0\n",
b"Prop-content-length: 10\n",
b"Content-length: 10\n", b"\n",
b"PROPS-END\n", b"\n"]
svntest.actions.run_and_verify_load(sbox.repo_dir, dump_old)
# svn:date should have been removed
svntest.actions.run_and_verify_svnlook([], [],
'proplist', '--revprop', '-r0',
sbox.repo_dir)
# This is only supported for FSFS
# The port to FSX is still pending; BDB won't support it.
@SkipUnless(svntest.main.is_fs_type_fsfs)
def hotcopy_read_only(sbox):
"'svnadmin hotcopy' a read-only source repository"
sbox.build()
svntest.main.chmod_tree(sbox.repo_dir, 0, svntest.main.S_ALL_WRITE)
backup_dir, backup_url = sbox.add_repo_path('backup')
exit_code, output, errput = svntest.main.run_svnadmin("hotcopy",
sbox.repo_dir,
backup_dir)
# r/o repos are hard to clean up. Make it writable again.
svntest.main.chmod_tree(sbox.repo_dir, svntest.main.S_ALL_WRITE,
svntest.main.S_ALL_WRITE)
if errput:
logger.warn("Error: hotcopy failed")
raise SVNUnexpectedStderr(errput)
@SkipUnless(svntest.main.is_fs_type_fsfs)
@SkipUnless(svntest.main.fs_has_pack)
def fsfs_pack_non_sharded(sbox):
"'svnadmin pack' on a non-sharded repository"
# Create an old-format (1.3-compatible) repository, which predates sharding.
sbox.build(create_wc = False,
minor_version = min(svntest.main.options.server_minor_version,3))
# Skip for pre-cooked sharded repositories
if is_sharded(sbox.repo_dir):
raise svntest.Skip('sharded pre-cooked repository')
svntest.actions.run_and_verify_svnadmin(
None, [], "upgrade", sbox.repo_dir)
svntest.actions.run_and_verify_svnadmin(
['svnadmin: Warning - this repository is not sharded. Packing has no effect.\n'],
[], "pack", sbox.repo_dir)
def load_revprops(sbox):
"svnadmin load-revprops"
sbox.build(create_wc=False, empty=True)
dump_path = os.path.join(os.path.dirname(sys.argv[0]),
'svnadmin_tests_data',
'skeleton_repos.dump')
dump_contents = open(dump_path, 'rb').readlines()
load_and_verify_dumpstream(sbox, None, [], None, False, dump_contents)
svntest.actions.run_and_verify_svnlook(['Initial setup...\n', '\n'],
[], 'log', '-r1', sbox.repo_dir)
# After loading the dump, amend one of the log message in the repository.
input_file = sbox.get_tempname()
svntest.main.file_write(input_file, 'Modified log message...\n')
svntest.actions.run_and_verify_svnadmin([], [], 'setlog', '--bypass-hooks',
'-r1', sbox.repo_dir, input_file)
svntest.actions.run_and_verify_svnlook(['Modified log message...\n', '\n'],
[], 'log', '-r1', sbox.repo_dir)
# Load the same dump, but with 'svnadmin load-revprops'. Doing so should
# restore the log message to its original state.
svntest.main.run_command_stdin(svntest.main.svnadmin_binary, None, 0,
True, dump_contents, 'load-revprops',
sbox.repo_dir)
svntest.actions.run_and_verify_svnlook(['Initial setup...\n', '\n'],
[], 'log', '-r1', sbox.repo_dir)
def dump_revprops(sbox):
"svnadmin dump-revprops"
sbox.build(create_wc=False)
# Dump revprops only.
exit_code, dump_contents, errput = \
svntest.actions.run_and_verify_svnadmin(None, [], "dump-revprops", "-q",
sbox.repo_dir)
# We expect the dump to contain no path changes
for line in dump_contents:
if line.find(b"Node-path: ") > -1:
logger.warn("Error: path change found in revprops-only dump.")
raise svntest.Failure
# Remember the current log message for r1
exit_code, log_msg, errput = \
svntest.actions.run_and_verify_svnlook(None, [], 'log', '-r1',
sbox.repo_dir)
# Now, change the log message in the repository.
input_file = sbox.get_tempname()
svntest.main.file_write(input_file, 'Modified log message...\n')
svntest.actions.run_and_verify_svnadmin([], [], 'setlog', '--bypass-hooks',
'-r1', sbox.repo_dir, input_file)
svntest.actions.run_and_verify_svnlook(['Modified log message...\n', '\n'],
[], 'log', '-r1', sbox.repo_dir)
# Load the same dump with 'svnadmin load-revprops'. Doing so should
# restore the log message to its original state.
svntest.main.run_command_stdin(svntest.main.svnadmin_binary, None, 0,
True, dump_contents, 'load-revprops',
sbox.repo_dir)
svntest.actions.run_and_verify_svnlook(log_msg, [], 'log', '-r1',
sbox.repo_dir)
@XFail(svntest.main.is_fs_type_fsx)
@Issue(4598)
def dump_no_op_change(sbox):
"svnadmin dump with no-op changes"
sbox.build(create_wc=False, empty=True)
empty_file = sbox.get_tempname()
svntest.main.file_write(empty_file, '')
svntest.actions.run_and_verify_svnmucc(None, [],
'-U', sbox.repo_url,
'-m', svntest.main.make_log_msg(),
'put', empty_file, 'bar')
# Commit a no-op change.
svntest.actions.run_and_verify_svnmucc(None, [],
'-U', sbox.repo_url,
'-m', svntest.main.make_log_msg(),
'put', empty_file, 'bar')
# Dump and load the repository.
_, dump, _ = svntest.actions.run_and_verify_svnadmin(None, [],
'dump', '-q',
sbox.repo_dir)
sbox2 = sbox.clone_dependent()
sbox2.build(create_wc=False, empty=True)
load_and_verify_dumpstream(sbox2, None, [], None, False, dump)
# We expect svn log -v to yield identical results for both original and
  # reconstructed repositories. This used to fail as described in
# Issue 4598 (https://issues.apache.org/jira/browse/SVN-4598), at least
# around r1706415.
#
# Test svn log -v for r2:
_, expected, _ = svntest.actions.run_and_verify_svn(None, [], 'log', '-v',
'-r2', sbox.repo_url)
found = [True for line in expected if line.find('M /bar\n') != -1]
if not found:
raise svntest.Failure
svntest.actions.run_and_verify_svn(expected, [], 'log', '-v',
'-r2', sbox2.repo_url)
# Test svn log -v for /bar:
_, expected, _ = svntest.actions.run_and_verify_svn(None, [], 'log', '-v',
sbox.repo_url + '/bar')
found = [True for line in expected if line.find('M /bar\n') != -1]
if not found:
raise svntest.Failure
svntest.actions.run_and_verify_svn(expected, [], 'log', '-v',
sbox2.repo_url + '/bar')
@XFail(svntest.main.is_fs_type_bdb)
@XFail(svntest.main.is_fs_type_fsx)
@Issue(4623)
def dump_no_op_prop_change(sbox):
"svnadmin dump with no-op property change"
sbox.build(create_wc=False, empty=True)
empty_file = sbox.get_tempname()
svntest.main.file_write(empty_file, '')
svntest.actions.run_and_verify_svnmucc(None, [],
'-U', sbox.repo_url,
'-m', svntest.main.make_log_msg(),
'put', empty_file, 'bar',
'propset', 'pname', 'pval', 'bar')
# Commit a no-op property change.
svntest.actions.run_and_verify_svnmucc(None, [],
'-U', sbox.repo_url,
'-m', svntest.main.make_log_msg(),
'propset', 'pname', 'pval', 'bar')
# Dump and load the repository.
_, dump, _ = svntest.actions.run_and_verify_svnadmin(None, [],
'dump', '-q',
sbox.repo_dir)
sbox2 = sbox.clone_dependent()
sbox2.build(create_wc=False, empty=True)
load_and_verify_dumpstream(sbox2, None, [], None, False, dump)
# Test svn log -v for r2:
_, expected, _ = svntest.actions.run_and_verify_svn(None, [], 'log', '-v',
'-r2', sbox.repo_url)
found = [True for line in expected if line.find('M /bar\n') != -1]
if not found:
raise svntest.Failure
svntest.actions.run_and_verify_svn(expected, [], 'log', '-v',
'-r2', sbox2.repo_url)
# Test svn log -v for /bar:
_, expected, _ = svntest.actions.run_and_verify_svn(None, [], 'log', '-v',
sbox.repo_url + '/bar')
found = [True for line in expected if line.find('M /bar\n') != -1]
if not found:
raise svntest.Failure
svntest.actions.run_and_verify_svn(expected, [], 'log', '-v',
sbox2.repo_url + '/bar')
def load_no_flush_to_disk(sbox):
"svnadmin load --no-flush-to-disk"
sbox.build(empty=True)
  # Can't test the "not flushing to disk" part, but loading the
# dump should work.
dump = clean_dumpfile()
expected = [
svntest.wc.State('', {
'A' : svntest.wc.StateItem(contents="text\n",
props={'svn:keywords': 'Id'})
})
]
load_and_verify_dumpstream(sbox, [], [], expected, True, dump,
'--no-flush-to-disk', '--ignore-uuid')
def dump_to_file(sbox):
"svnadmin dump --file ARG"
sbox.build(create_wc=False, empty=False)
expected_dump = svntest.actions.run_and_verify_dump(sbox.repo_dir)
file = sbox.get_tempname()
svntest.actions.run_and_verify_svnadmin2([],
["* Dumped revision 0.\n",
"* Dumped revision 1.\n"],
0, 'dump', '--file', file,
sbox.repo_dir)
actual_dump = open(file, 'rb').readlines()
svntest.verify.compare_dump_files(None, None, expected_dump, actual_dump)
# Test that svnadmin dump --file overwrites existing files.
file = sbox.get_tempname()
svntest.main.file_write(file, '')
svntest.actions.run_and_verify_svnadmin2([],
["* Dumped revision 0.\n",
"* Dumped revision 1.\n"],
0, 'dump', '--file', file,
sbox.repo_dir)
actual_dump = open(file, 'rb').readlines()
svntest.verify.compare_dump_files(None, None, expected_dump, actual_dump)
def load_from_file(sbox):
"svnadmin load --file ARG"
sbox.build(empty=True)
file = sbox.get_tempname()
with open(file, 'wb') as f:
f.writelines(clean_dumpfile())
svntest.actions.run_and_verify_svnadmin2(None, [],
0, 'load', '--file', file,
'--ignore-uuid', sbox.repo_dir)
expected_tree = \
svntest.wc.State('', {
'A' : svntest.wc.StateItem(contents="text\n",
props={'svn:keywords': 'Id'})
})
svntest.actions.run_and_verify_svn(svntest.verify.AnyOutput, [],
'update', sbox.wc_dir)
svntest.actions.verify_disk(sbox.wc_dir, expected_tree, check_props=True)
def dump_exclude(sbox):
"svnadmin dump with excluded paths"
sbox.build(create_wc=False)
# Dump repository with /A/D/H and /A/B/E paths excluded.
_, dump, _ = svntest.actions.run_and_verify_svnadmin(None, [],
'dump', '-q',
'--exclude', '/A/D/H',
'--exclude', '/A/B/E',
sbox.repo_dir)
# Load repository from dump.
sbox2 = sbox.clone_dependent()
sbox2.build(create_wc=False, empty=True)
load_and_verify_dumpstream(sbox2, None, [], None, False, dump)
# Check log.
expected_output = svntest.verify.RegexListOutput([
'-+\\n',
'r1\ .*\n',
    # '/A/D/H' and '/A/B/E' are not added.
re.escape('Changed paths:\n'),
re.escape(' A /A\n'),
re.escape(' A /A/B\n'),
re.escape(' A /A/B/F\n'),
re.escape(' A /A/B/lambda\n'),
re.escape(' A /A/C\n'),
re.escape(' A /A/D\n'),
re.escape(' A /A/D/G\n'),
re.escape(' A /A/D/G/pi\n'),
re.escape(' A /A/D/G/rho\n'),
re.escape(' A /A/D/G/tau\n'),
re.escape(' A /A/D/gamma\n'),
re.escape(' A /A/mu\n'),
re.escape(' A /iota\n'),
'-+\\n'
])
svntest.actions.run_and_verify_svn(expected_output, [],
'log', '-v', '-q', sbox2.repo_url)
def dump_exclude_copysource(sbox):
"svnadmin dump with excluded copysource"
sbox.build(create_wc=False, empty=True)
# Create default repository structure.
svntest.actions.run_and_verify_svn(svntest.verify.AnyOutput, [], "mkdir",
sbox.repo_url + '/trunk',
sbox.repo_url + '/branches',
sbox.repo_url + '/tags',
"-m", "Create repository structure.")
# Create a branch.
svntest.actions.run_and_verify_svn(svntest.verify.AnyOutput, [], "copy",
sbox.repo_url + '/trunk',
sbox.repo_url + '/branches/branch1',
"-m", "Create branch.")
# Dump repository with /trunk excluded.
_, dump, _ = svntest.actions.run_and_verify_svnadmin(None, [],
'dump', '-q',
'--exclude', '/trunk',
sbox.repo_dir)
# Load repository from dump.
sbox2 = sbox.clone_dependent()
sbox2.build(create_wc=False, empty=True)
load_and_verify_dumpstream(sbox2, None, [], None, False, dump)
# Check log.
expected_output = svntest.verify.RegexListOutput([
'-+\\n',
'r2\ .*\n',
re.escape('Changed paths:\n'),
# Simple add, not copy.
re.escape(' A /branches/branch1\n'),
'-+\\n',
'r1\ .*\n',
# '/trunk' is not added.
re.escape('Changed paths:\n'),
re.escape(' A /branches\n'),
re.escape(' A /tags\n'),
'-+\\n'
])
svntest.actions.run_and_verify_svn(expected_output, [],
'log', '-v', '-q', sbox2.repo_url)
def dump_include(sbox):
"svnadmin dump with included paths"
sbox.build(create_wc=False, empty=True)
# Create a couple of directories.
# Note that we can't use greek tree as it contains only two top-level
# nodes. Including non top-level nodes (e.g. '--include /A/B/E') will
# produce unloadable dump for now.
svntest.actions.run_and_verify_svn(svntest.verify.AnyOutput, [], "mkdir",
sbox.repo_url + '/A',
sbox.repo_url + '/B',
sbox.repo_url + '/C',
"-m", "Create folder.")
# Dump repository with /A and /C paths included.
_, dump, _ = svntest.actions.run_and_verify_svnadmin(None, [],
'dump', '-q',
'--include', '/A',
'--include', '/C',
sbox.repo_dir)
# Load repository from dump.
sbox2 = sbox.clone_dependent()
sbox2.build(create_wc=False, empty=True)
load_and_verify_dumpstream(sbox2, None, [], None, False, dump)
# Check log.
expected_output = svntest.verify.RegexListOutput([
'-+\\n',
'r1\ .*\n',
# '/B' is not added.
re.escape('Changed paths:\n'),
re.escape(' A /A\n'),
re.escape(' A /C\n'),
'-+\\n'
])
svntest.actions.run_and_verify_svn(expected_output, [],
'log', '-v', '-q', sbox2.repo_url)
def dump_not_include_copysource(sbox):
"svnadmin dump with not included copysource"
sbox.build(create_wc=False, empty=True)
# Create default repository structure.
svntest.actions.run_and_verify_svn(svntest.verify.AnyOutput, [], "mkdir",
sbox.repo_url + '/trunk',
sbox.repo_url + '/branches',
sbox.repo_url + '/tags',
"-m", "Create repository structure.")
# Create a branch.
svntest.actions.run_and_verify_svn(svntest.verify.AnyOutput, [], "copy",
sbox.repo_url + '/trunk',
sbox.repo_url + '/branches/branch1',
"-m", "Create branch.")
# Dump repository with only /branches included.
_, dump, _ = svntest.actions.run_and_verify_svnadmin(None, [],
'dump', '-q',
'--include', '/branches',
sbox.repo_dir)
# Load repository from dump.
sbox2 = sbox.clone_dependent()
sbox2.build(create_wc=False, empty=True)
load_and_verify_dumpstream(sbox2, None, [], None, False, dump)
# Check log.
expected_output = svntest.verify.RegexListOutput([
'-+\\n',
'r2\ .*\n',
re.escape('Changed paths:\n'),
# Simple add, not copy.
re.escape(' A /branches/branch1\n'),
'-+\\n',
'r1\ .*\n',
# Only '/branches' is added in r1.
re.escape('Changed paths:\n'),
re.escape(' A /branches\n'),
'-+\\n'
])
svntest.actions.run_and_verify_svn(expected_output, [],
'log', '-v', '-q', sbox2.repo_url)
def dump_exclude_by_pattern(sbox):
"svnadmin dump with paths excluded by pattern"
sbox.build(create_wc=False, empty=True)
# Create a couple of directories.
svntest.actions.run_and_verify_svn(svntest.verify.AnyOutput, [], "mkdir",
sbox.repo_url + '/aaa',
sbox.repo_url + '/aab',
sbox.repo_url + '/aac',
sbox.repo_url + '/bbc',
"-m", "Create repository structure.")
# Dump with paths excluded by pattern.
_, dump, _ = svntest.actions.run_and_verify_svnadmin(None, [],
'dump', '-q',
'--exclude', '/aa?',
'--pattern',
sbox.repo_dir)
# Load repository from dump.
sbox2 = sbox.clone_dependent()
sbox2.build(create_wc=False, empty=True)
load_and_verify_dumpstream(sbox2, None, [], None, False, dump)
# Check log.
expected_output = svntest.verify.RegexListOutput([
'-+\\n',
'r1\ .*\n',
re.escape('Changed paths:\n'),
# Only '/bbc' is added in r1.
re.escape(' A /bbc\n'),
'-+\\n'
])
svntest.actions.run_and_verify_svn(expected_output, [],
'log', '-v', '-q', sbox2.repo_url)
def dump_include_by_pattern(sbox):
"svnadmin dump with paths included by pattern"
sbox.build(create_wc=False, empty=True)
# Create a couple of directories.
svntest.actions.run_and_verify_svn(svntest.verify.AnyOutput, [], "mkdir",
sbox.repo_url + '/aaa',
sbox.repo_url + '/aab',
sbox.repo_url + '/aac',
sbox.repo_url + '/bbc',
"-m", "Create repository structure.")
# Dump with paths included by pattern.
_, dump, _ = svntest.actions.run_and_verify_svnadmin(None, [],
'dump', '-q',
'--include', '/aa?',
'--pattern',
sbox.repo_dir)
# Load repository from dump.
sbox2 = sbox.clone_dependent()
sbox2.build(create_wc=False, empty=True)
load_and_verify_dumpstream(sbox2, None, [], None, False, dump)
# Check log.
expected_output = svntest.verify.RegexListOutput([
'-+\\n',
'r1\ .*\n',
# '/bbc' is not added.
re.escape('Changed paths:\n'),
re.escape(' A /aaa\n'),
re.escape(' A /aab\n'),
re.escape(' A /aac\n'),
'-+\\n'
])
svntest.actions.run_and_verify_svn(expected_output, [],
'log', '-v', '-q', sbox2.repo_url)
def dump_exclude_all_rev_changes(sbox):
"svnadmin dump with all revision changes excluded"
sbox.build(create_wc=False, empty=True)
# Create a couple of directories (r1).
svntest.actions.run_and_verify_svn(svntest.verify.AnyOutput, [], "mkdir",
sbox.repo_url + '/r1a',
sbox.repo_url + '/r1b',
sbox.repo_url + '/r1c',
"-m", "Revision 1.")
# Create a couple of directories (r2).
svntest.actions.run_and_verify_svn(svntest.verify.AnyOutput, [], "mkdir",
sbox.repo_url + '/r2a',
sbox.repo_url + '/r2b',
sbox.repo_url + '/r2c',
"-m", "Revision 2.")
# Create a couple of directories (r3).
svntest.actions.run_and_verify_svn(svntest.verify.AnyOutput, [], "mkdir",
sbox.repo_url + '/r3a',
sbox.repo_url + '/r3b',
sbox.repo_url + '/r3c',
"-m", "Revision 3.")
# Dump with paths excluded by pattern.
_, dump, _ = svntest.actions.run_and_verify_svnadmin(None, [],
'dump', '-q',
'--exclude', '/r2?',
'--pattern',
sbox.repo_dir)
# Load repository from dump.
sbox2 = sbox.clone_dependent()
sbox2.build(create_wc=False, empty=True)
load_and_verify_dumpstream(sbox2, None, [], None, False, dump)
# Check log. Revision properties ('svn:log' etc.) should be empty for r2.
expected_output = svntest.verify.RegexListOutput([
'-+\\n',
'r3 | jrandom | .* | 1 line\\n',
re.escape('Changed paths:'),
re.escape(' A /r3a'),
re.escape(' A /r3b'),
re.escape(' A /r3c'),
'',
re.escape('Revision 3.'),
'-+\\n',
re.escape('r2 | (no author) | (no date) | 1 line'),
'',
'',
'-+\\n',
'r1 | jrandom | .* | 1 line\\n',
re.escape('Changed paths:'),
re.escape(' A /r1a'),
re.escape(' A /r1b'),
re.escape(' A /r1c'),
'',
re.escape('Revision 1.'),
'-+\\n',
])
svntest.actions.run_and_verify_svn(expected_output, [],
'log', '-v', sbox2.repo_url)
def dump_invalid_filtering_option(sbox):
"dump with --include and --exclude simultaneously"
sbox.build(create_wc=False, empty=False)
# Attempt to dump repository with '--include' and '--exclude' options
# specified simultaneously.
expected_error = ".*: '--exclude' and '--include' options cannot be used " \
"simultaneously"
svntest.actions.run_and_verify_svnadmin(None, expected_error,
'dump', '-q',
'--exclude', '/A/D/H',
'--include', '/A/B/E',
sbox.repo_dir)
@Issue(4725)
def load_issue4725(sbox):
"""load that triggers issue 4725"""
sbox.build(empty=True)
sbox.simple_mkdir('subversion')
sbox.simple_commit()
sbox.simple_mkdir('subversion/trunk')
sbox.simple_mkdir('subversion/branches')
sbox.simple_commit()
sbox.simple_mkdir('subversion/trunk/src')
sbox.simple_commit()
_, dump, _ = svntest.actions.run_and_verify_svnadmin(None, [],
'dump', '-q',
sbox.repo_dir)
sbox2 = sbox.clone_dependent()
sbox2.build(create_wc=False, empty=True)
load_and_verify_dumpstream(sbox2, None, [], None, False, dump, '-M100')
@Issue(4767)
def dump_no_canonicalize_svndate(sbox):
"svnadmin dump shouldn't canonicalize svn:date"
sbox.build(create_wc=False, empty=True)
svntest.actions.enable_revprop_changes(sbox.repo_dir)
# set svn:date in a non-canonical format (not six decimal places)
propval = "2015-01-01T00:00:00.0Z"
svntest.actions.run_and_verify_svn(svntest.verify.AnyOutput, [],
"propset", "--revprop", "-r0", "svn:date",
propval,
sbox.repo_url)
dump_lines = svntest.actions.run_and_verify_dump(sbox.repo_dir)
assert propval.encode() + b'\n' in dump_lines
def check_recover_prunes_rep_cache(sbox, enable_rep_sharing):
"""Check 'recover' prunes the rep-cache while enable-rep-sharing is
true/false.
"""
# Remember the initial rep cache content.
rep_cache_r1 = read_rep_cache(sbox.repo_dir)
#print '\n'.join([h + ": " + repr(ref) for h, ref in rep_cache_r1.items()])
# Commit one new rep and check the rep-cache is extended.
sbox.simple_append('iota', 'New line.\n')
sbox.simple_commit()
rep_cache_r2 = read_rep_cache(sbox.repo_dir)
if not (len(rep_cache_r2) == len(rep_cache_r1) + 1):
raise svntest.Failure
fsfs_conf = svntest.main.get_fsfs_conf_file_path(sbox.repo_dir)
svntest.main.file_append(fsfs_conf,
# Add a newline in case the existing file doesn't
# end with one.
"\n"
"[rep-sharing]\n"
"enable-rep-sharing = %s\n"
% (('true' if enable_rep_sharing else 'false'),))
# Break r2 in such a way that 'recover' will discard it
head_rev_path = fsfs_file(sbox.repo_dir, 'revs', '2')
os.remove(head_rev_path)
current_path = os.path.join(sbox.repo_dir, 'db', 'current')
svntest.main.file_write(current_path, '1\n')
# Recover back to r1.
svntest.actions.run_and_verify_svnadmin(None, [],
"recover", sbox.repo_dir)
svntest.actions.run_and_verify_svnlook(['1\n'], [], 'youngest',
sbox.repo_dir)
# Check the rep-cache is pruned.
rep_cache_recovered = read_rep_cache(sbox.repo_dir)
if not (rep_cache_recovered == rep_cache_r1):
raise svntest.Failure
@Issue(4077)
@SkipUnless(svntest.main.is_fs_type_fsfs)
@SkipUnless(svntest.main.python_sqlite_can_read_without_rowid)
def recover_prunes_rep_cache_when_enabled(sbox):
"recover prunes rep cache when enabled"
sbox.build()
check_recover_prunes_rep_cache(sbox, enable_rep_sharing=True)
@Issue(4077)
@SkipUnless(svntest.main.is_fs_type_fsfs)
@SkipUnless(svntest.main.python_sqlite_can_read_without_rowid)
def recover_prunes_rep_cache_when_disabled(sbox):
"recover prunes rep cache when disabled"
sbox.build()
check_recover_prunes_rep_cache(sbox, enable_rep_sharing=False)
@Issue(4760)
def dump_include_copied_directory(sbox):
"include copied directory with nested nodes"
sbox.build(create_wc=False)
svntest.actions.run_and_verify_svn(svntest.verify.AnyOutput, [], "copy",
sbox.repo_url + '/A/D',
sbox.repo_url + '/COPY',
"-m", "Create branch.")
# Dump repository with only /COPY path included.
_, dump, _ = svntest.actions.run_and_verify_svnadmin(None, [],
'dump', '-q',
'--include', '/COPY',
sbox.repo_dir)
# Load repository from dump.
sbox2 = sbox.clone_dependent()
sbox2.build(create_wc=False, empty=True)
load_and_verify_dumpstream(sbox2, None, [], None, False, dump)
# Check log.
expected_output = svntest.verify.RegexListOutput([
'-+\\n',
'r2\ .*\n',
# Only '/COPY' is added
re.escape('Changed paths:\n'),
re.escape(' A /COPY'),
re.escape(' A /COPY/G'),
re.escape(' A /COPY/G/pi'),
re.escape(' A /COPY/G/rho'),
re.escape(' A /COPY/G/tau'),
re.escape(' A /COPY/H'),
re.escape(' A /COPY/H/chi'),
re.escape(' A /COPY/H/omega'),
re.escape(' A /COPY/H/psi'),
re.escape(' A /COPY/gamma'),
'-+\\n',
'r1\ .*\n',
'-+\\n'
])
svntest.actions.run_and_verify_svn(expected_output, [],
'log', '-v', '-q', sbox2.repo_url)
def load_normalize_node_props(sbox):
"svnadmin load --normalize node props"
dump_str = b"""SVN-fs-dump-format-version: 2
UUID: dc40867b-38f6-0310-9f5f-f81aa277e06f
Revision-number: 0
Prop-content-length: 56
Content-length: 56
K 8
svn:date
V 27
2005-05-03T19:09:41.129900Z
PROPS-END
Revision-number: 1
Prop-content-length: 99
Content-length: 99
K 7
svn:log
V 0
K 10
svn:author
V 2
pl
K 8
svn:date
V 27
2005-05-03T19:10:19.975578Z
PROPS-END
Node-path:
Node-kind: dir
Node-action: change
Prop-content-length: 32
Content-length: 32
K 10
svn:ignore
V 3
\n\r\n
PROPS-END
"""
sbox.build(empty=True)
# Try to load the dumpstream, expecting a failure (because of mixed
# EOLs in the svn:ignore property value).
exp_err = svntest.verify.RegexListOutput(['svnadmin: E125005:.*',
'svnadmin: E125017:.*'],
match_all=False)
load_and_verify_dumpstream(sbox, [], exp_err, dumpfile_revisions,
False, dump_str, '--ignore-uuid')
# Now try it again with prop normalization.
svntest.actions.load_repo(sbox, dump_str=dump_str,
bypass_prop_validation=False,
normalize_props=True)
# We should get the normalized property value.
exit_code, output, _ = svntest.main.run_svn(None, 'pg', 'svn:ignore',
'--no-newline',
sbox.repo_url)
svntest.verify.verify_exit_code(None, exit_code, 0)
if output != ['\n', '\n']:
raise svntest.Failure("Unexpected property value %s" % output)
########################################################################
# Run the tests
# list all tests here, starting with None:
test_list = [ None,
extra_headers,
extra_blockcontent,
inconsistent_headers,
empty_date,
dump_copied_dir,
dump_move_dir_modify_child,
dump_quiet,
hotcopy_dot,
hotcopy_format,
setrevprop,
verify_windows_paths_in_repos,
verify_incremental_fsfs,
fsfs_recover_db_current,
fsfs_recover_old_db_current,
load_with_parent_dir,
set_uuid,
reflect_dropped_renumbered_revs,
fsfs_recover_handle_missing_revs_or_revprops_file,
create_in_repo_subdir,
verify_with_invalid_revprops,
dont_drop_valid_mergeinfo_during_incremental_loads,
hotcopy_symlink,
load_bad_props,
verify_non_utf8_paths,
test_lslocks_and_rmlocks,
load_ranges,
hotcopy_incremental,
hotcopy_incremental_packed,
locking,
mergeinfo_race,
recover_old_empty,
verify_keep_going,
verify_keep_going_quiet,
verify_invalid_path_changes,
verify_denormalized_names,
fsfs_recover_old_non_empty,
fsfs_hotcopy_old_non_empty,
load_ignore_dates,
fsfs_hotcopy_old_with_id_changes,
verify_packed,
freeze_freeze,
verify_metadata_only,
verify_quickly,
fsfs_hotcopy_progress,
fsfs_hotcopy_progress_with_revprop_changes,
fsfs_hotcopy_progress_old,
freeze_same_uuid,
upgrade,
load_txdelta,
load_no_svndate_r0,
hotcopy_read_only,
fsfs_pack_non_sharded,
load_revprops,
dump_revprops,
dump_no_op_change,
dump_no_op_prop_change,
load_no_flush_to_disk,
dump_to_file,
load_from_file,
dump_exclude,
dump_exclude_copysource,
dump_include,
dump_not_include_copysource,
dump_exclude_by_pattern,
dump_include_by_pattern,
dump_exclude_all_rev_changes,
dump_invalid_filtering_option,
load_issue4725,
dump_no_canonicalize_svndate,
recover_prunes_rep_cache_when_enabled,
recover_prunes_rep_cache_when_disabled,
dump_include_copied_directory,
load_normalize_node_props,
]
if __name__ == '__main__':
svntest.main.run_tests(test_list)
# NOTREACHED
### End of file.
| 39.335353 | 118 | 0.561419 | [
"Apache-2.0"
] | auycro/subversion | subversion/tests/cmdline/svnadmin_tests.py | 162,337 | Python |
# -*- coding: utf-8 -*-
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration ------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx.ext.napoleon',
'sphinx.ext.todo',
]
# TODO: Please Read!
# Uncomment the below if you use native CircuitPython modules such as
# digitalio, micropython and busio. List the modules you use. Without it, the
# autodoc module docs will fail to generate with a warning.
autodoc_mock_imports = ["adafruit_register", "adafruit_bus_device"]
intersphinx_mapping = {
    'python': ('https://docs.python.org/3.4', None),
    'BusDevice': ('https://circuitpython.readthedocs.io/projects/busdevice/en/latest/', None),
    'Register': ('https://circuitpython.readthedocs.io/projects/register/en/latest/', None),
    'CircuitPython': ('https://circuitpython.readthedocs.io/en/latest/', None),
}
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Adafruit MPU6050 Library'
copyright = u'2019 Bryan Siepert'
author = u'Bryan Siepert'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'1.0'
# The full version, including alpha/beta/rc tags.
release = u'1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', '.env', 'CODE_OF_CONDUCT.md']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#
default_role = "any"
# If true, '()' will be appended to :func: etc. cross-reference text.
#
add_function_parentheses = True
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# If this is True, todo emits a warning for each TODO entry. The default is False.
todo_emit_warnings = True
napoleon_numpy_docstring = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only import and set the theme if we're building docs locally
try:
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path(), '.']
    except ImportError:
html_theme = 'default'
html_theme_path = ['.']
else:
html_theme_path = ['.']
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#
html_favicon = '_static/favicon.ico'
# Output file base name for HTML help builder.
htmlhelp_basename = 'AdafruitMpu6050Librarydoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'AdafruitMPU6050Library.tex', u'AdafruitMPU6050 Library Documentation',
author, 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'AdafruitMPU6050library', u'Adafruit MPU6050 Library Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'AdafruitMPU6050Library', u'Adafruit MPU6050 Library Documentation',
author, 'AdafruitMPU6050Library', 'One line description of project.',
'Miscellaneous'),
]
| 33.621118 | 324 | 0.689082 | [
"MIT"
] | FoamyGuy/Adafruit_CircuitPython_MPU6050 | docs/conf.py | 5,413 | Python |
#!/usr/bin/env python3
def selection_sort(lst):
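    """Sort lst in place by repeatedly swapping the smallest remaining element to the front; returns lst."""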
length = len(lst)
for i in range(length - 1):
least = i
for k in range(i + 1, length):
if lst[k] < lst[least]:
least = k
lst[least], lst[i] = (lst[i], lst[least])
return lst
print(selection_sort([5, 2, 4, 6, 1, 3])) | 23.571429 | 49 | 0.512121 | [
"MIT"
] | udohsolomon/LearnAlgorithms | sort/selection_sort.py | 330 | Python |
from setuptools import setup, find_packages
setup(
name='spot',
version='0.1',
packages=find_packages(),
include_package_data=True,
author="Steven Feltner",
author_email="[email protected]",
license="MIT",
install_requires=[
"Click",
"spotinst-sdk2",
"requests",
],
entry_points='''
[console_scripts]
spot-account-id=spot_account_id:cli
''',
) | 21.65 | 43 | 0.6097 | [
"Apache-2.0"
] | stevenfeltner/terraform-spotinst-data-get-accountid | scripts/setup.py | 433 | Python |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import json
import random
import re
import requests
import psutil
import pytest
from tests.common.custom_cluster_test_suite import CustomClusterTestSuite
from tests.shell.util import run_impala_shell_cmd
class TestWebPage(CustomClusterTestSuite):
@classmethod
def get_workload(cls):
return 'functional-query'
@classmethod
def setup_class(cls):
if cls.exploration_strategy() != 'exhaustive':
pytest.skip('runs only in exhaustive')
super(TestWebPage, cls).setup_class()
@pytest.mark.execute_serially
@CustomClusterTestSuite.with_args(
impalad_args="--enable_extended_memory_metrics=true"
)
def test_varz_hidden_variables(self):
"""Tests that modified hidden variables show up in /varz"""
response = requests.get("http://localhost:25000/varz?json")
assert response.status_code == requests.codes.ok
varz_json = json.loads(response.text)
flag = [e for e in varz_json["flags"]
if e["name"] == "enable_extended_memory_metrics"]
assert len(flag) == 1
assert flag[0]["default"] == "false"
assert flag[0]["current"] == "true"
assert flag[0]["experimental"]
@pytest.mark.execute_serially
@CustomClusterTestSuite.with_args(
impalad_args="--webserver_max_post_length_bytes=100"
)
def test_max_post_length(self):
"""Tests that the maximum length of a POST request that will be accepted"""
too_big_post_content = "c" * 10000
# POST that exceeds the limit
response = requests.post("http://localhost:25000/", too_big_post_content)
assert response.status_code == requests.codes.request_entity_too_large
# POST within the limit
# This is on a URI that does not understand POST and treats it like a GET.
ok_post_content = "c" * 100
response = requests.post("http://localhost:25000/", ok_post_content)
assert response.status_code == requests.codes.ok
@pytest.mark.execute_serially
@CustomClusterTestSuite.with_args()
def test_webserver_interface(self):
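    """Tests that the webserver is reachable on every interface by default and only on the
    interface given by --webserver_interface once that flag is set."""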
addrs = psutil.net_if_addrs()
print("net_if_addrs returned: %s" % addrs)
ip_matcher = re.compile("\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}")
ip_addrs = []
for addr in addrs:
for snic in addrs[addr]:
if ip_matcher.match(snic.address):
ip_addrs.append(snic.address)
# There must be at least one available interface on the machine.
assert len(ip_addrs) > 0, addrs
ports = ["25000", "25010", "25020"]
# With default args, the webserver should be accessible over all interfaces for all
# daemons.
for ip in ip_addrs:
for port in ports:
response = requests.get("http://%s:%s/" % (ip, port))
assert response.status_code == requests.codes.ok, ip
# Pick a random interface and restart with the webserver on that interface.
interface = random.choice(ip_addrs)
self._start_impala_cluster(["--impalad_args=--webserver_interface=%s" % interface])
    # Now the webserver should only be accessible over the chosen interface.
for ip in ip_addrs:
try:
response = requests.get("http://%s:25000/" % ip)
assert ip == interface
assert response.status_code == requests.codes.ok, ip
except requests.exceptions.ConnectionError:
assert ip != interface
@pytest.mark.execute_serially
@CustomClusterTestSuite.with_args(
impalad_args="--query_stmt_size=0"
)
def test_query_stmt_without_truncate(self):
"""Check if the full query string is displayed in the query list on the WebUI."""
    # The input query is a SELECT of a string made of 450 repetitions of 'x '.
query_select = "x " * 450
query = 'select "{0}"'.format(query_select)
# In the site there is an extra \ before the " so we need that in the expected
# response too.
expected = 'select \\"{0}\\"'.format(query_select)
self.execute_query(query)
response = requests.get("http://localhost:25000/queries?json")
response_json = response.text
assert expected in response_json, "No matching statement found in the queries site."
@pytest.mark.execute_serially
@CustomClusterTestSuite.with_args(
impalad_args="--query_stmt_size=10"
)
def test_query_stmt_with_custom_length(self):
"""Check if the partial query with the correct length is displayed in the query list
on the WebUI."""
    # The input query is a SELECT of a string made of 450 repetitions of 'x '.
query = 'select "{0}"'.format("x " * 450)
    # Search for the statement truncated to 10 characters. In the site there is an extra \
# before the " so we need that in the expected response too.
expected = 'select \\"x ...'
self.execute_query(query)
response = requests.get("http://localhost:25000/queries?json")
response_json = response.text
assert expected in response_json, "No matching statement found in the queries site."
# Checks if 'messages' exists/does not exist in 'result_stderr' based on the value of
# 'should_exist'
def _validate_shell_messages(self, result_stderr, messages, should_exist=True):
for msg in messages:
if should_exist:
assert msg in result_stderr, result_stderr
else:
assert msg not in result_stderr, result_stderr
@pytest.mark.execute_serially
@CustomClusterTestSuite.with_args(
impalad_args="--ping_expose_webserver_url=false"
)
def test_webserver_url_not_exposed(self, vector):
if vector.get_value('table_format').file_format != 'text':
pytest.skip('runs only for text table_format')
# If webserver url is not exposed, debug web urls shouldn't be printed out.
shell_messages = ["Query submitted at: ", "(Coordinator: ",
"Query progress can be monitored at: "]
query_shell_arg = '--query=select * from functional.alltypes'
# hs2
results = run_impala_shell_cmd(vector, [query_shell_arg])
self._validate_shell_messages(results.stderr, shell_messages, should_exist=False)
# beeswax
results = run_impala_shell_cmd(vector, ['--protocol=beeswax', query_shell_arg])
self._validate_shell_messages(results.stderr, shell_messages, should_exist=False)
# Even though webserver url is not exposed, it is still accessible.
page = requests.get('http://localhost:25000')
assert page.status_code == requests.codes.ok
| 40.732558 | 88 | 0.714673 | [
"Apache-2.0"
] | AlexanderSaydakov/impala | tests/custom_cluster/test_web_pages.py | 7,006 | Python |
import torch
import torch.nn as nn
import torch.nn.functional as F
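# Two-layer feed-forward block (Linear -> GELU -> Linear); the hidden width is dims scaled by a multiplier.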
class MLP(nn.Module):
def __init__(self, dims, multiplxer=4):
super(MLP, self).__init__()
hidden = int(dims * multiplxer)
self.out = nn.Sequential(
nn.Linear(dims, hidden),
nn.GELU(),
nn.Linear(hidden, dims)
)
def forward(self, x):
return self.out(x)
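# One MLP-Mixer layer: a token-mixing MLP applied across the sequence dimension, then a
# channel-mixing MLP, each preceded by LayerNorm and wrapped in a residual connection.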
class MixerLayer(nn.Module):
def __init__(self, seq, dims):
super(MixerLayer, self).__init__()
self.layer_norm1 = nn.LayerNorm(dims)
self.mlp1 = MLP(seq, multiplxer=0.5)
self.layer_norm2 = nn.LayerNorm(dims)
self.mlp2 = MLP(dims)
def forward(self, x):
out = self.layer_norm1(x).transpose(1, 2)
out = self.mlp1(out).transpose(1, 2)
out += x
out2 = self.layer_norm2(out)
out2 = self.mlp2(out2)
out2 += out
return out2
class MLPMixer(nn.Module):
def __init__(self, seq, in_dims, dims, patch=32, n_classes=10, N=12):
super(MLPMixer, self).__init__()
# self.embedding = nn.Linear(in_dims, dims)
self.embedding = nn.Conv2d(3, dims, kernel_size=patch, stride=patch)
self.layers = nn.ModuleList()
for _ in range(N):
self.layers.append(MixerLayer(seq, dims))
self.gap = nn.AdaptiveAvgPool1d(1)
self.fc = nn.Linear(dims, n_classes)
self.dims = dims
def forward(self, x):
out = self.embedding(x)
out = out.permute(0, 2, 3, 1).view(x.size(0), -1, self.dims)
for layer in self.layers:
out = layer(out)
out = out.mean(dim=1)
out = self.fc(out)
return out
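# Learnable per-channel scale and shift; ResMLP uses this in place of normalization layers.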
class Affine(nn.Module):
def __init__(self, dims):
super(Affine, self).__init__()
self.alpha = nn.Parameter(torch.ones(dims))
self.beta = nn.Parameter(torch.zeros(dims))
def forward(self, x):
return self.alpha * x + self.beta
class ResMLPBlock(nn.Module):
def __init__(self, nb_patches, dims, layerscale_init):
super(ResMLPBlock, self).__init__()
self.affine1 = Affine(dims)
self.affine2 = Affine(dims)
self.linear_patches = nn.Linear(nb_patches, nb_patches)
self.mlp_channels = MLP(dims)
self.layerscale1 = nn.Parameter(layerscale_init * torch.ones(dims))
self.layerscale2 = nn.Parameter(layerscale_init * torch.ones(dims))
def forward(self, x):
out1 = self.linear_patches(self.affine1(x).transpose(1, 2)).transpose(1, 2)
x = x + self.layerscale1 * out1
out2 = self.mlp_channels(self.affine2(x))
x = x + self.layerscale2 * out2
return x
class ResMLP(nn.Module):
def __init__(self, dims, layerscale_init=1e-4, size=224, patch=32, num_classes=10, N=12):
super(ResMLP, self).__init__()
n = (size * size) // patch ** 2
self.dims = dims
self.embedding = nn.Conv2d(3, dims, kernel_size=patch, stride=patch)
self.blocks = nn.ModuleList([ResMLPBlock(n, dims, layerscale_init) for _ in range(N)])
self.affine = Affine(dims)
self.out = nn.Linear(dims, num_classes)
def forward(self, x):
out = self.embedding(x).permute(0, 2, 3, 1).view(x.size(0), -1, self.dims)
for layer in self.blocks:
out = layer(out)
out = self.affine(out).mean(dim=1)
return self.out(out)
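# Small single-head scaled dot-product attention that projects back to half the FFN width;
# adding it to the spatial gating unit gives the attention-augmented (aMLP-style) variant of gMLP.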
class TinyAttention(nn.Module):
def __init__(self, d_ffn, d_attn=64):
super(TinyAttention, self).__init__()
self.qkv = nn.Linear(d_ffn, 3 * d_attn)
self.d_attn = d_attn
self.softmax = nn.Softmax(dim=-1)
self.out = nn.Linear(d_attn, d_ffn // 2)
def forward(self, x):
qkv = self.qkv(x)
q, k, v = torch.chunk(qkv, 3, dim=-1)
energy = torch.bmm(q, k.transpose(1, 2)) / (self.d_attn ** 0.5)
attn = self.softmax(energy)
out = torch.bmm(attn, v)
out = self.out(out)
return out
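# Spatial gating unit: splits the channels in half and gates one half with a learned token-mixing
# projection (Conv1d over the sequence) of the other; optionally adds the tiny-attention output
# to the gate before multiplying.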
class SpatialGatingUnit(nn.Module):
def __init__(self, seq_len, d_model, attn=True):
super(SpatialGatingUnit, self).__init__()
self.layer_norm = nn.LayerNorm(d_model // 2)
self.spatial = nn.Conv1d(seq_len, seq_len, kernel_size=1)
self.is_attn = attn
if self.is_attn:
self.attn = TinyAttention(d_model)
def forward(self, x):
if self.is_attn:
shortcut = x
shortcut = self.attn(shortcut)
u, v = torch.chunk(x, 2, dim=-1)
v = self.layer_norm(v)
v = self.spatial(v)
if self.is_attn:
v += shortcut
return u * v
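# gMLP block: LayerNorm, channel expansion to d_ffn, GELU, spatial gating (which halves the width),
# projection back to d_model, and a residual connection.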
class gMLPBlock(nn.Module):
def __init__(self, seq_len, d_model, d_ffn):
super(gMLPBlock, self).__init__()
self.layer_norm = nn.LayerNorm(d_model)
self.channel1 = nn.Linear(d_model, d_ffn)
self.sgu = SpatialGatingUnit(seq_len, d_ffn)
self.channel2 = nn.Linear(d_ffn // 2, d_model)
def forward(self, x):
shortcut = x
out = self.layer_norm(x)
out = self.channel1(out)
out = F.gelu(out)
out = self.sgu(out)
out = self.channel2(out)
return out + shortcut
class gMLP(nn.Module):
def __init__(self, seq_len, d_model, d_ffn, patch=32, N=12, n_classes=10):
super(gMLP, self).__init__()
self.d_model = d_model
self.embedding = nn.Conv2d(3, d_model, kernel_size=patch, stride=patch)
self.layers = nn.ModuleList([gMLPBlock(seq_len, d_model, d_ffn) for _ in range(N)])
self.out = nn.Linear(d_model, n_classes)
def forward(self, x):
out = self.embedding(x).permute(0, 2, 3, 1).view(x.size(0), -1, self.d_model)
for layer in self.layers:
out = layer(out)
out = out.mean(1)
out = self.out(out)
return out
def main():
    # Smoke test: two 224x224 RGB images -> (224 // 32) ** 2 = 49 patch tokens of width 32.
    x = torch.randn(2, 3, 224, 224)
    m = MLPMixer(49, 32, 32)
    m(x)
# if __name__ == '__main__':
# main()
| 28.698565 | 94 | 0.589196 | [
"MIT"
] | leaderj1001/Bag-of-MLP | model.py | 5,998 | Python |
import time
from datetime import datetime, timedelta
from urllib.parse import urljoin
import requests
from bs4 import BeautifulSoup
from flask.views import MethodView
from config.setting import BOT_TOKEN
from models import User
hitcon_zeroday_base_url = "https://zeroday.hitcon.org"
hitcon_zeroday_all_url = "https://zeroday.hitcon.org/vulnerability/all"
hitcon_zeroday_disclosed_url = "https://zeroday.hitcon.org/vulnerability/disclosed/"
def get_last_page_num(hitcon_zeroday_url):
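    """Return the number of the last page in the paginated listing, or 0 if it cannot be parsed."""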
r = requests.get(hitcon_zeroday_url)
soup = BeautifulSoup(r.text, 'html.parser')
try:
return int(soup.find("span", {"class": "last-page"}).text)
except Exception:
return 0
def get_report_info(report_url):
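    """Scrape a single report page and return its status/risk/type, but only if the report
    was last updated yesterday (UTC+8); otherwise return an empty dict."""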
r = requests.get(report_url)
if r.status_code != 200:
return {}
soup = BeautifulSoup(r.text, 'html.parser')
last_update_str = soup.find("div", {"class": "status-descr"}).text
last_update_date = datetime.strptime(last_update_str, r"Last Update : %Y/%m/%d").date()
    # Get UTC+8 datetime
now_utc_plus_8 = datetime.utcnow() + timedelta(hours=8)
# Get only yesterday's data
now_date = datetime.strptime((now_utc_plus_8 - timedelta(days=1)).strftime("%Y/%m/%d"), "%Y/%m/%d").date()
if now_date != last_update_date:
return {}
data = {
"status": soup.find("div", {"class": "status-label"}).text
}
for li in soup.find("div", {"class": "info"}).findAll("li"):
if "風險" in li.text:
data["risk"] = li.text.split(":")[-1]
elif "類型" in li.text:
data["type"] = li.text.split(":")[-1]
return data
def search_page(hitcon_zeroday_base_url, hitcon_zeroday_category_url):
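    """Walk the paginated listing and build Telegram message lines for reports updated yesterday,
    stopping once a whole page adds nothing new."""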
last_page_num = get_last_page_num(hitcon_zeroday_category_url)
msg_list = []
msg_list_len = len(msg_list)
for page_num in range(1, last_page_num+1):
page_url = urljoin(hitcon_zeroday_category_url, f"page/{page_num}")
r = requests.get(page_url)
if r.status_code != 200:
break
soup = BeautifulSoup(r.text, 'html.parser')
# parse all blocks
for li in soup.findAll("li", {"class": "strip"}):
a = li.find("h4").find("a")
report_url = urljoin(hitcon_zeroday_base_url, a["href"])
title = a.text
_data = get_report_info(report_url)
if _data:
msg_list.append(f"[[{_data['status']} - {_data['risk']}]] {_data['type']}")
msg_list.append(f"[{title}]({report_url})")
        # Stop paging once a full page added no new data.
if len(msg_list) == msg_list_len:
break
msg_list_len = len(msg_list)
return msg_list
def send_message(chat_id, msg):
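    # Send msg to the given chat via the Telegram Bot API (Markdown, link previews disabled).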
api_url = f"https://api.telegram.org/bot{BOT_TOKEN}/sendMessage?chat_id={chat_id}&parse_mode=Markdown&disable_web_page_preview=1&text={msg}"
requests.get(api_url)
class App(MethodView):
def get(self):
msg_list = search_page(hitcon_zeroday_base_url, hitcon_zeroday_all_url)
report_msg = "%0A".join(msg_list)
for user in User.get_all():
print(user, report_msg)
send_message(user["chat_id"], report_msg)
return "OK"
| 34.880435 | 144 | 0.642879 | [
"MIT"
] | zondaTW/gcp-tg-bot | apps/hitcon_zeroday.py | 3,221 | Python |
"""Module containing methods that allow to identify task goals."""
# Copyright (c) 2022, ABB
# All rights reserved.
#
# Redistribution and use in source and binary forms, with
# or without modification, are permitted provided that
# the following conditions are met:
#
# * Redistributions of source code must retain the
# above copyright notice, this list of conditions
# and the following disclaimer.
# * Redistributions in binary form must reproduce the
# above copyright notice, this list of conditions
# and the following disclaimer in the documentation
# and/or other materials provided with the
# distribution.
# * Neither the name of ABB nor the names of its
# contributors may be used to endorse or promote
# products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from typing import Any, List
from behaviors.common_behaviors import RSequence
from bt_learning.learning_from_demo.constraints_identification import contains_conflicting
from bt_learning.learning_from_demo.demonstration import Demonstration
from py_trees.trees import BehaviourTree
def goal_conditions_for_demo(
demo: Demonstration,
behaviors: Any
) -> List[str]:
"""
Infer the goal conditions of a single demonstration.
Args
----
demo: the demonstration to infer the goal of.
    behaviors: the behaviors, used to check for and remove conflicting conditions.
Returns
-------
goals: list of the goals inferred in the demonstration.
"""
goals = []
for i in range(len(demo)-1, -1, -1):
for condition in demo[i].postconditions():
if condition not in goals and not contains_conflicting(behaviors, goals, condition):
goals.append(condition)
goals.reverse()
return goals
def goal_tree(
goals: List[str],
behaviors: Any,
world_interface: Any
) -> BehaviourTree:
"""
    Construct a Behavior Tree starting from the goals.
Args
----
goals: list of all goals inferred from the demonstration.
    behaviors: behaviors in the demonstration, as defined in the robot_behaviors package.
world_interface: interface to the robot.
Returns
-------
tree: a Behavior Tree of goal conditions.
"""
tree = RSequence()
for goal in goals:
node, _ = behaviors.get_node_from_string(goal, world_interface, None)
tree.add_child(node)
return tree
| 34.329787 | 96 | 0.721103 | [
"BSD-3-Clause"
] | matiov/disambiguate-BT-execution | bt_learning/bt_learning/learning_from_demo/goal_identification.py | 3,227 | Python |
#!/usr/bin/env python
import sys
from loginas import __version__
assert sys.version >= "3.4", "Requires Python v3.4 or above."
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
setup(
name="django-loginas",
version=__version__,
author="Stochastic Technologies",
author_email="[email protected]",
long_description=open("README.md").read(),
long_description_content_type="text/markdown",
url="https://github.com/stochastic-technologies/django-loginas/",
description="""An app to add a "Log in as user" button in the Django user admin page.""",
license="BSD",
keywords="django",
zip_safe=False,
include_package_data=True,
packages=["loginas"],
package_dir={"loginas": "loginas"},
)
| 26.633333 | 93 | 0.707134 | [
"BSD-3-Clause"
] | pacahon/django-loginas | setup.py | 799 | Python |
import json
import crawlKoreaData_All as crawl1
import crawlKoreaData_Gyeonggi as crawl2
import crawlKoreaData_Seoul as crawl3
import LED_Display as LMD
import threading
from datetime import date, timedelta
import datetime
from matrix import *
today = date.today()
oneday = datetime.timedelta(days=1)
yesterday = today - oneday
third = today - oneday - oneday
four = today - oneday - oneday - oneday  # three days back, the baseline for the day-before-yesterday diff
a = str(today)
b = str(yesterday)
c = str(third)
d = str(four)
def LED_init():
thread=threading.Thread(target=LMD.main, args=())
thread.setDaemon(True)
thread.start()
return
crawl1.run()
crawl2.run()
crawl3.run()
def draw_matrix(array):
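    # Map each cell value of the 16x32 grid to an LED color code (0 turns the pixel off) and draw it.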
for x in range(16):
for y in range(32):
if array[x][y] == 1:
LMD.set_pixel(x,y,4) #BLUE
elif array[x][y] == 2:
LMD.set_pixel(x,y,2) #GREEN
elif array[x][y] == 3:
LMD.set_pixel(x,y,3) #YELLOW
elif array[x][y] == 4:
LMD.set_pixel(x,y,1) #RED
elif array[x][y] == 5:
LMD.set_pixel(x,y,5) #PINK
elif array[x][y] == 6:
LMD.set_pixel(x,y,6) #CYAN
elif array[x][y] == 7:
LMD.set_pixel(x,y,7) #WHITE
elif array[x][y] == 0:
LMD.set_pixel(x,y,0)
else:
continue
print()
array_screen = [
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]]
number_array = [
[[1,1,1,0],
[1,0,1,0],
[1,0,1,0],
[1,0,1,0],
[1,1,1,0]], #0
[[0,1,0,0],
[0,1,0,0],
[0,1,0,0],
[0,1,0,0],
[0,1,0,0]], #1
[[1,1,1,0],
[0,0,1,0],
[1,1,1,0],
[1,0,0,0],
[1,1,1,0]], #2
[[1,1,1,0],
[0,0,1,0],
[1,1,1,0],
[0,0,1,0],
[1,1,1,0]], #3
[[1,0,1,0],
[1,0,1,0],
[1,1,1,0],
[0,0,1,0],
[0,0,1,0]], #4
[[1,1,1,0],
[1,0,0,0],
[1,1,1,0],
[0,0,1,0],
[1,1,1,0]], #5
[[1,1,1,0],
[1,0,0,0],
[1,1,1,0],
[1,0,1,0],
[1,1,1,0]], #6
[[1,1,1,0],
[0,0,1,0],
[0,1,0,0],
[0,1,0,0],
[0,1,0,0]], #7
[[1,1,1,0],
[1,0,1,0],
[1,1,1,0],
[1,0,1,0],
[1,1,1,0]], #8
[[1,1,1,0],
[1,0,1,0],
[1,1,1,0],
[0,0,1,0],
[0,0,1,0]], #9
]
covid_array = [
[[0, 1, 1, 1, 1],
[1, 0, 0, 0, 0],
[1, 0, 0, 0, 0],
[1, 0, 0, 0, 0],
[0, 1, 1, 1, 1]], # C
[[0, 1, 1, 1, 0],
[1, 0, 0, 0, 1],
[1, 0, 0, 0, 1],
[1, 0, 0, 0, 1],
[0, 1, 1, 1, 0]], # O
[[1, 0, 0, 0, 1],
[1, 0, 0, 0, 1],
[0, 1, 0, 1, 0],
[0, 0, 1, 0, 0],
[0, 0, 1, 0, 0]], # V
[[0, 1, 1, 1, 0],
[0, 0, 1, 0, 0],
[0, 0, 1, 0, 0],
[0, 0, 1, 0, 0],
[0, 1, 1, 1, 0]], # I
[[1, 1, 1, 0, 0],
[1, 0, 0, 1, 0],
[1, 0, 0, 1, 0],
[1, 0, 0, 1, 0],
[1, 1, 1, 0, 0]] # D
]
arrow_array = [
[[0,1,0],
[1,1,1],
[0,1,0],
[0,1,0],
[0,1,0]]
]
comma_array = [
[[0,0],
[1,0],
[0,0],
[1,0],
[0,0]]
]
LED_init()
draw_matrix(array_screen); print()
################################TODAY###################################################
def today_compare_data(js_file1, js_file2, search_region ,confirmed_cmp, array):
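    # Diff the confirmed counts of the two files per region, then draw the searched region's
    # change (digits followed by an arrow glyph) on rows 0-4, starting at column 22.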
with open(js_file1, "r", encoding="utf-8") as f1:
with open(js_file2, "r", encoding="utf-8") as f2:
json_data_1 = json.load(f1)
json_data_2 = json.load(f2)
for i in range(0, len(json_data_1) - 1):
region = json_data_1[i]['지역이름']
confirmed_1 = json_data_1[i]['확진자수']
confirmed_2 = json_data_2[i]['확진자수']
cmp_data = confirmed_1 - confirmed_2
confirmed_cmp.append({
'지역이름' : region,
'전날비교' : cmp_data
})
for i in range(0,len(confirmed_cmp)):
if (confirmed_cmp[i]['지역이름']) == search_region:
list = [int(i) for i in str(confirmed_cmp[i]['전날비교'])]
for j in range(0,len(list)):
for x in range(5):
for y in range(22+4*j, 26+4*j):
array[x][y] = number_array[list[j]][x][y-22-4*j]
for x in range(5):
for y in range(21+4*len(list),21+4*len(list)+3):
array[x][y] = arrow_array[0][x][y-21-4*len(list)]
return confirmed_cmp
# Look up a region's confirmed count and render it on the LED matrix
def today_search_count(js_file,search_region,array):
with open (js_file,"r",encoding="utf-8") as f:
json_data = json.load(f)
for i in range(0,len(json_data)-1):
if (json_data[i]['지역이름']) == search_region:
print(json_data[i]['확진자수'])
list =[int(i) for i in str(json_data[i]['확진자수'])]
for j in range(0,len(list)):
for x in range(5):
for y in range(10+4*j,13+4*j):
array[x][y] = number_array[list[j]][x][y-4*j-10]
def today_date_print(array):
a_list = a[8:10]
list = [int(i) for i in str(a_list)]
for j in range(0,len(list)):
for x in range(5):
for y in range(0+4*j,4+4*j):
array[x][y] = number_array[list[j]][x][y-4*j]
for j in range(1):
for x in range(5):
for y in range(8,10):
array[x][y] = comma_array[0][x][y-8]
####################################################################################
################################YESTERDAY###################################################
def yesterday_compare_data(js_file1, js_file2, search_region ,confirmed_cmp, array):
with open(js_file1, "r", encoding="utf-8") as f1:
with open(js_file2, "r", encoding="utf-8") as f2:
json_data_1 = json.load(f1)
json_data_2 = json.load(f2)
for i in range(0, len(json_data_1) - 1):
region = json_data_1[i]['지역이름']
confirmed_1 = json_data_1[i]['확진자수']
confirmed_2 = json_data_2[i]['확진자수']
cmp_data = confirmed_1 - confirmed_2
confirmed_cmp.append({
'지역이름' : region,
'전날비교' : cmp_data
})
for i in range(0,len(confirmed_cmp)):
if (confirmed_cmp[i]['지역이름']) == search_region:
list = [int(i) for i in str(confirmed_cmp[i]['전날비교'])]
for j in range(0,len(list)):
for x in range(6,11):
for y in range(22+4*j, 26+4*j):
array[x][y] = number_array[list[j]][x-6][y-22-4*j]
for x in range(6,11):
for y in range(21+4*len(list),21+4*len(list)+3):
array[x][y] = arrow_array[0][x-6][y-21-4*len(list)]
return confirmed_cmp
# Look up a region's confirmed count and render it on the LED matrix
def yesterday_search_count(js_file,search_region,array):
with open (js_file,"r",encoding="utf-8") as f:
json_data = json.load(f)
for i in range(0,len(json_data)-1):
if (json_data[i]['지역이름']) == search_region:
print(json_data[i]['확진자수'])
list =[int(i) for i in str(json_data[i]['확진자수'])]
for j in range(0,len(list)):
for x in range(6,11):
for y in range(10+4*j,13+4*j):
array[x][y] = number_array[list[j]][x-6][y-4*j-10]
def yesterday_date_print(array):
b_list = b[8:10]
list = [int(i) for i in str(b_list)]
for j in range(0,len(list)):
for x in range(6,11):
for y in range(0+4*j,4+4*j):
array[x][y] = number_array[list[j]][x-6][y-4*j]
for j in range(1):
for x in range(6,11):
for y in range(8,10):
array[x][y] = comma_array[0][x-6][y-8]
################################BEFORE_YESTERDAY###################################
def b_yesterday_compare_data(js_file1, js_file2, search_region ,confirmed_cmp, array):
with open(js_file1, "r", encoding="utf-8") as f1:
with open(js_file2, "r", encoding="utf-8") as f2:
json_data_1 = json.load(f1)
json_data_2 = json.load(f2)
for i in range(0, len(json_data_1) - 1):
region = json_data_1[i]['지역이름']
confirmed_1 = json_data_1[i]['확진자수']
confirmed_2 = json_data_2[i]['확진자수']
cmp_data = confirmed_1 - confirmed_2
confirmed_cmp.append({
'지역이름' : region,
'전날비교' : cmp_data
})
for i in range(0,len(confirmed_cmp)):
if (confirmed_cmp[i]['지역이름']) == search_region:
list = [int(i) for i in str(confirmed_cmp[i]['전날비교'])]
for j in range(0,len(list)):
for x in range(11,16):
for y in range(22+4*j, 26+4*j):
array[x][y] = number_array[list[j]][x-11][y-22-4*j]
for x in range(11,16):
for y in range(21+4*len(list),21+4*len(list)+3):
array[x][y] = arrow_array[0][x-11][y-21-4*len(list)]
return confirmed_cmp
# Look up a region's confirmed count and render it on the LED matrix
def b_yesterday_search_count(js_file,search_region,array):
with open (js_file,"r",encoding="utf-8") as f:
json_data = json.load(f)
for i in range(0,len(json_data)-1):
if (json_data[i]['지역이름']) == search_region:
print(json_data[i]['확진자수'])
list =[int(i) for i in str(json_data[i]['확진자수'])]
for j in range(0,len(list)):
for x in range(11,16):
for y in range(10+4*j,13+4*j):
array[x][y] = number_array[list[j]][x-11][y-4*j-10]
def b_yesterday_date_print(array):
c_list = c[8:10]
list = [int(i) for i in str(c_list)]
for j in range(0,len(list)):
for x in range(11,16):
for y in range(0+4*j,4+4*j):
array[x][y] = number_array[list[j]][x-11][y-4*j]
for j in range(1):
for x in range(11,16):
for y in range(8,10):
array[x][y] = comma_array[0][x-11][y-8]
def main_UI(array):
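    # Draw the 'COVID' lettering onto the start screen.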
for j in range(0,5):
for x in range(2,7):
for y in range(1+4*j,5+4*j):
array[x][y] = covid_array[j][x-2][y-4*j-1]
def all_count(js_file,array):
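    # Draw the confirmed count from the first entry of the data file as digits on rows 10-14.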
with open (js_file,"r",encoding="utf-8") as f:
json_data = json.load(f)
list = [int(i) for i in str(json_data[0]['확진자수'])]
for j in range(0,len(list)):
for x in range(10,15):
for y in range(1+4*j,5+4*j):
array[x][y] = number_array[list[j]][x-10][y-4*j-1]
# Look up a region's day-over-day change in confirmed cases
def count_change(js_file,search_region):
with open (js_file,"r",encoding="utf-8") as f:
json_data = json.load(f)
for i in range(0,len(json_data)-1):
if (json_data[i]['지역이름']) == search_region:
return json_data[i]['전날비교']
def clear_array(array):
for i in range(16):
for j in range(32):
array[i][j] = 0
main_menu = 0
menu = 1
while(menu):
print("*****Menu*****")
print("1.All")
print("2.Seoul")
print("3.Gyeonggi")
print("4.Exit")
print("**************")
if main_menu == 0:
main_UI(array_screen)
file = 'koreaData_All' + '_' + a + '.js'
all_count(file, array_screen)
draw_matrix(array_screen);print()
compare_cmp = []
menu_choice = int(input("Select menu: "))
    # Each sub-menu below loops until the user enters 0 to go back
    while menu_choice == 1: # nationwide confirmed-case count
        js_file = 'koreaData_All'+ '_'+ a +'.js'
        js_file_yesterday = 'koreaData_All'+ '_'+ b +'.js'
        # The nationwide total is already drawn on the start screen; wait for 0 to go back.
        search_region = input("Enter 0 to return to the menu: ")
        if search_region == '0': # entering 0 returns to the menu
            compare_cmp = []
            main_menu = 0
            break
    while menu_choice == 2: # confirmed cases by district in Seoul
js_file = 'koreaData_Seoul'+ '_' + a + '.js'
js_file_yesterday = 'koreaData_Seoul'+ '_' + b + '.js'
js_file_b_yesterday = 'koreaData_Seoul'+ '_' + c + '.js'
js_file_b_b_yesterday = 'koreaData_Seoul'+ '_' + d + '.js'
        search_region = input("Enter a district (e.g. 종로구): ")
clear_array(array_screen)
draw_matrix(array_screen);print()
today_date_print(array_screen)
today_search_count(js_file,search_region,array_screen)
for x in range(5):
for y in range(22):
if array_screen[x][y] == 1:
array_screen[x][y] += 4
today_compare_data(js_file, js_file_yesterday, search_region, compare_cmp, array_screen)
yesterday_date_print(array_screen)
yesterday_search_count(js_file_yesterday,search_region,array_screen)
for x in range(6,11):
for y in range(22):
if array_screen[x][y] == 1:
array_screen[x][y] += 5
yesterday_compare_data(js_file_yesterday, js_file_b_yesterday, search_region, compare_cmp, array_screen)
b_yesterday_date_print(array_screen)
b_yesterday_search_count(js_file_b_yesterday,search_region,array_screen)
for x in range(11,16):
for y in range(22):
if array_screen[x][y] == 1:
array_screen[x][y] += 6
b_yesterday_compare_data(js_file_b_yesterday, js_file_b_b_yesterday, search_region, compare_cmp, array_screen)
draw_matrix(array_screen);print()
        if search_region == '0': # entering 0 returns to the menu
compare_cmp = []
main_menu = 0
break
    while menu_choice == 3: # search confirmed cases by Gyeonggi district
js_file = 'koreaData_Gyeonggi'+ '_'+ a + '.js'
js_file_yesterday = 'koreaData_Gyeonggi'+ '_'+ b + '.js'
js_file_b_yesterday = 'koreaData_Gyeonggi'+ '_' + c + '.js'
js_file_b_b_yesterday = 'koreaData_Gyeonggi'+ '_' + d + '.js'
search_region = input("지역을 입력하세요 (ex:수원): ")
clear_array(array_screen)
draw_matrix(array_screen);print()
today_date_print(array_screen)
today_search_count(js_file,search_region,array_screen)
today_compare_data(js_file, js_file_yesterday, search_region, compare_cmp, array_screen)
yesterday_date_print(array_screen)
yesterday_search_count(js_file_yesterday,search_region,array_screen)
yesterday_compare_data(js_file_yesterday, js_file_b_yesterday, search_region, compare_cmp, array_screen)
b_yesterday_date_print(array_screen)
b_yesterday_search_count(js_file_b_yesterday,search_region,array_screen)
b_yesterday_compare_data(js_file_b_yesterday, js_file_b_b_yesterday, search_region, compare_cmp, array_screen)
draw_matrix(array_screen);print()
        if search_region == '0': # entering 0 returns to the menu
compare_cmp = []
main_menu = 0
break
    if menu_choice == 4: # exit the menu
menu = 0
| 36.497817 | 119 | 0.47978 | [
"Apache-2.0"
] | ekqls3301/osscap2020 | crawling_update(LED)/main_s.py | 17,292 | Python |
# --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick and Xinlei Chen
# --------------------------------------------------------
"""Compute minibatch blobs for training a Fast R-CNN network."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import numpy.random as npr
import cv2
from model.config import cfg
from utils.blob import prep_im_for_blob, im_list_to_blob
import os
# import tensorflow as tf
def get_minibatch(roidb, num_classes):
"""Given a roidb, construct a minibatch sampled from it."""
num_images = len(roidb)
# Sample random scales to use for each image in this batch
random_scale_inds = npr.randint(0, high=len(cfg.TRAIN.SCALES),
size=num_images)
assert(cfg.TRAIN.BATCH_SIZE % num_images == 0), \
'num_images ({}) must divide BATCH_SIZE ({})'. \
format(num_images, cfg.TRAIN.BATCH_SIZE)
# Get the input image blob, formatted for caffe
im_blob, im_scales = _get_image_blob(roidb, random_scale_inds)
blobs = {'data': im_blob}
assert len(im_scales) == 1, "Single batch only"
assert len(roidb) == 1, "Single batch only"
# todo: get cls_filter blobs['clp_filter']
# wn modified
# 1. get file path
sep = '/'
clp_file_format = '.npy'
clp_file_store = 'CloudPoints'
img_path = roidb[0]['image']
img_path_arr = img_path.split(sep)
prefix = img_path_arr[:-2]
file_name = img_path_arr[-1].split('.')[0] + clp_file_format
clp_path = os.path.join(sep.join(prefix), clp_file_store, file_name)
# 2. get cls data [?, 2]
valid_points= np.load(clp_path) # [?, 2]
# todo: width & height is not fixed
width_ori = roidb[0]['height'] # 322
height_ori = roidb[0]['width'] # 500
  clp_ori = np.zeros([width_ori, height_ori], dtype=np.float32)  # initialize
  clp_ori[tuple((valid_points.T[1, :], valid_points.T[0, :]))] = 1  # set grid cells containing cloud points to 1 [322,500]
  # 3. resize cls [322,500] => [600,932] (same operation as on the image)
clp_reshape = np.empty([width_ori, height_ori, 3], dtype=np.float32)
for i in range(3):
clp_reshape[0:width_ori, 0:height_ori, i] = clp_ori
clp_res = cv2.resize(clp_reshape, None, None, fx=im_scales[0], fy=im_scales[0], interpolation=cv2.INTER_LINEAR)
clp_res = clp_res[:, :, 0] # [600,932]
  clp_res[clp_res > 0] = 1 # set all values greater than 0 to 1
width = clp_res.shape[0]
height = clp_res.shape[1]
clp_res = clp_res.reshape([1, width, height, 1])
blobs['clp_info'] = clp_res # [1,600,932,1]
# 4. Max pooling
# width = clp_res.shape[0] # 600
# height = clp_res.shape[1] # 932
# clp_res = clp_res.reshape([1, width, height, 1])
# clp_filter = tf.constant(clp_res)
# clp_filter_reshape = tf.reshape(clp_filter, [1, width, height, 1])
#
# clp_pooling = tf.nn.max_pool(clp_filter_reshape, [1, 16, 16, 1], [1, 16, 16, 1], padding='SAME') # self._feat_stride[0] = 16
# clp_pooling = clp_pooling[0, :, :, 0]
# print("pooling: " + str(clp_pooling.shape))
  # blobs['clp_filter'] = clp_pooling # [38, 59] (same size as the net_conv feature map)
# gt boxes: (x1, y1, x2, y2, cls)
if cfg.TRAIN.USE_ALL_GT:
# Include all ground truth boxes
gt_inds = np.where(roidb[0]['gt_classes'] != 0)[0]
else:
# For the COCO ground truth boxes, exclude the ones that are ''iscrowd''
gt_inds = np.where(roidb[0]['gt_classes'] != 0 & np.all(roidb[0]['gt_overlaps'].toarray() > -1.0, axis=1))[0]
gt_boxes = np.empty((len(gt_inds), 5), dtype=np.float32)
gt_boxes[:, 0:4] = roidb[0]['boxes'][gt_inds, :] * im_scales[0]
gt_boxes[:, 4] = roidb[0]['gt_classes'][gt_inds]
blobs['gt_boxes'] = gt_boxes
blobs['im_info'] = np.array(
[im_blob.shape[1], im_blob.shape[2], im_scales[0]],
dtype=np.float32)
return blobs
def _get_image_blob(roidb, scale_inds):
"""Builds an input blob from the images in the roidb at the specified
scales.
"""
num_images = len(roidb)
processed_ims = []
im_scales = []
for i in range(num_images):
im = cv2.imread(roidb[i]['image'])
if roidb[i]['flipped']:
im = im[:, ::-1, :]
target_size = cfg.TRAIN.SCALES[scale_inds[i]]
im, im_scale = prep_im_for_blob(im, cfg.PIXEL_MEANS, target_size,
cfg.TRAIN.MAX_SIZE)
im_scales.append(im_scale)
processed_ims.append(im)
# Create a blob to hold the input images
blob = im_list_to_blob(processed_ims)
return blob, im_scales
| 35.23622 | 128 | 0.654972 | [
"MIT"
] | wennieWN/endernewton_tf-faster-rcnn | lib/roi_data_layer/minibatch.py | 4,553 | Python |
# This Python file uses the following encoding: utf-8
"""autogenerated by genpy from drive_ros_msgs/mav_cc16_IMU.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
import geometry_msgs.msg
import std_msgs.msg
class mav_cc16_IMU(genpy.Message):
_md5sum = "ea5bbf17106eb0f69246a582d49f7ab1"
_type = "drive_ros_msgs/mav_cc16_IMU"
_has_header = True #flag to mark the presence of a Header object
_full_text = """# Automatically Generated in 2017-06-12 22:33:47.452851
# MESSAGE: IMU
# Description: Measurement of 9DOF Inertial Measurement Unit (IMU)
Header header
uint8 ID = 128
uint8 sysid
uint8 compid
geometry_msgs/Vector3 acc # Linear acceleration [g]
geometry_msgs/Vector3 gyro # Angular velocity [rad/s]
geometry_msgs/Vector3 mag # Magnetic field strength [T]
================================================================================
MSG: std_msgs/Header
# Standard metadata for higher-level stamped data types.
# This is generally used to communicate timestamped data
# in a particular coordinate frame.
#
# sequence ID: consecutively increasing ID
uint32 seq
#Two-integer timestamp that is expressed as:
# * stamp.sec: seconds (stamp_secs) since epoch (in Python the variable is called 'secs')
# * stamp.nsec: nanoseconds since stamp_secs (in Python the variable is called 'nsecs')
# time-handling sugar is provided by the client library
time stamp
#Frame this data is associated with
# 0: no frame
# 1: global frame
string frame_id
================================================================================
MSG: geometry_msgs/Vector3
# This represents a vector in free space.
# It is only meant to represent a direction. Therefore, it does not
# make sense to apply a translation to it (e.g., when applying a
# generic rigid transformation to a Vector3, tf2 will only apply the
# rotation). If you want your data to be translatable too, use the
# geometry_msgs/Point message instead.
float64 x
float64 y
float64 z"""
# Pseudo-constants
ID = 128
__slots__ = ['header','sysid','compid','acc','gyro','mag']
_slot_types = ['std_msgs/Header','uint8','uint8','geometry_msgs/Vector3','geometry_msgs/Vector3','geometry_msgs/Vector3']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommend
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
header,sysid,compid,acc,gyro,mag
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(mav_cc16_IMU, self).__init__(*args, **kwds)
#message fields cannot be None, assign default values for those that are
if self.header is None:
self.header = std_msgs.msg.Header()
if self.sysid is None:
self.sysid = 0
if self.compid is None:
self.compid = 0
if self.acc is None:
self.acc = geometry_msgs.msg.Vector3()
if self.gyro is None:
self.gyro = geometry_msgs.msg.Vector3()
if self.mag is None:
self.mag = geometry_msgs.msg.Vector3()
else:
self.header = std_msgs.msg.Header()
self.sysid = 0
self.compid = 0
self.acc = geometry_msgs.msg.Vector3()
self.gyro = geometry_msgs.msg.Vector3()
self.mag = geometry_msgs.msg.Vector3()
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
_x = self
buff.write(_get_struct_3I().pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))
_x = self.header.frame_id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = self
buff.write(_get_struct_2B9d().pack(_x.sysid, _x.compid, _x.acc.x, _x.acc.y, _x.acc.z, _x.gyro.x, _x.gyro.y, _x.gyro.z, _x.mag.x, _x.mag.y, _x.mag.z))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
try:
if self.header is None:
self.header = std_msgs.msg.Header()
if self.acc is None:
self.acc = geometry_msgs.msg.Vector3()
if self.gyro is None:
self.gyro = geometry_msgs.msg.Vector3()
if self.mag is None:
self.mag = geometry_msgs.msg.Vector3()
end = 0
_x = self
start = end
end += 12
(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.header.frame_id = str[start:end].decode('utf-8')
else:
self.header.frame_id = str[start:end]
_x = self
start = end
end += 74
(_x.sysid, _x.compid, _x.acc.x, _x.acc.y, _x.acc.z, _x.gyro.x, _x.gyro.y, _x.gyro.z, _x.mag.x, _x.mag.y, _x.mag.z,) = _get_struct_2B9d().unpack(str[start:end])
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
_x = self
buff.write(_get_struct_3I().pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))
_x = self.header.frame_id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = self
buff.write(_get_struct_2B9d().pack(_x.sysid, _x.compid, _x.acc.x, _x.acc.y, _x.acc.z, _x.gyro.x, _x.gyro.y, _x.gyro.z, _x.mag.x, _x.mag.y, _x.mag.z))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
try:
if self.header is None:
self.header = std_msgs.msg.Header()
if self.acc is None:
self.acc = geometry_msgs.msg.Vector3()
if self.gyro is None:
self.gyro = geometry_msgs.msg.Vector3()
if self.mag is None:
self.mag = geometry_msgs.msg.Vector3()
end = 0
_x = self
start = end
end += 12
(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.header.frame_id = str[start:end].decode('utf-8')
else:
self.header.frame_id = str[start:end]
_x = self
start = end
end += 74
(_x.sysid, _x.compid, _x.acc.x, _x.acc.y, _x.acc.z, _x.gyro.x, _x.gyro.y, _x.gyro.z, _x.mag.x, _x.mag.y, _x.mag.z,) = _get_struct_2B9d().unpack(str[start:end])
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
_struct_I = genpy.struct_I
def _get_struct_I():
global _struct_I
return _struct_I
_struct_3I = None
def _get_struct_3I():
global _struct_3I
if _struct_3I is None:
_struct_3I = struct.Struct("<3I")
return _struct_3I
_struct_2B9d = None
def _get_struct_2B9d():
global _struct_2B9d
if _struct_2B9d is None:
_struct_2B9d = struct.Struct("<2B9d")
return _struct_2B9d
| 36.377119 | 165 | 0.646942 | [
"MIT"
] | jessecha/OPCAS | Catkin_PKG_Car/devel/lib/python2.7/dist-packages/drive_ros_msgs/msg/_mav_cc16_IMU.py | 8,585 | Python |
# Copyright 2018-2019 The glTF-Blender-IO authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import bpy
from io_scene_gltf2.io.com import gltf2_io
from io_scene_gltf2.blender.exp import gltf2_blender_gather_texture_info, gltf2_blender_search_node_tree
from io_scene_gltf2.blender.exp import gltf2_blender_get
from io_scene_gltf2.blender.exp.gltf2_blender_gather_cache import cached
from io_scene_gltf2.io.com.gltf2_io_debug import print_console
from io_scene_gltf2.io.exp.gltf2_io_user_extensions import export_user_extensions
@cached
def gather_material_pbr_metallic_roughness(blender_material, orm_texture, export_settings):
if not __filter_pbr_material(blender_material, export_settings):
return None
material = gltf2_io.MaterialPBRMetallicRoughness(
base_color_factor=__gather_base_color_factor(blender_material, export_settings),
base_color_texture=__gather_base_color_texture(blender_material, export_settings),
extensions=__gather_extensions(blender_material, export_settings),
extras=__gather_extras(blender_material, export_settings),
metallic_factor=__gather_metallic_factor(blender_material, export_settings),
metallic_roughness_texture=__gather_metallic_roughness_texture(blender_material, orm_texture, export_settings),
roughness_factor=__gather_roughness_factor(blender_material, export_settings)
)
export_user_extensions('gather_material_pbr_metallic_roughness_hook', export_settings, material, blender_material, orm_texture)
return material
def __filter_pbr_material(blender_material, export_settings):
return True
def __gather_base_color_factor(blender_material, export_settings):
alpha_socket = gltf2_blender_get.get_socket(blender_material, "Alpha")
alpha = alpha_socket.default_value if alpha_socket is not None and not alpha_socket.is_linked else 1.0
base_color_socket = gltf2_blender_get.get_socket(blender_material, "Base Color")
if base_color_socket is None:
base_color_socket = gltf2_blender_get.get_socket(blender_material, "BaseColor")
if base_color_socket is None:
base_color_socket = gltf2_blender_get.get_socket_old(blender_material, "BaseColorFactor")
if base_color_socket is None:
base_color_socket = gltf2_blender_get.get_socket(blender_material, "Background")
if not isinstance(base_color_socket, bpy.types.NodeSocket):
return None
if not base_color_socket.is_linked:
return list(base_color_socket.default_value)[:3] + [alpha]
texture_node = __get_tex_from_socket(base_color_socket)
if texture_node is None:
return None
def is_valid_multiply_node(node):
return isinstance(node, bpy.types.ShaderNodeMixRGB) and \
node.blend_type == "MULTIPLY" and \
len(node.inputs) == 3
multiply_node = next((link.from_node for link in texture_node.path if is_valid_multiply_node(link.from_node)), None)
if multiply_node is None:
return None
def is_factor_socket(socket):
return isinstance(socket, bpy.types.NodeSocketColor) and \
(not socket.is_linked or socket.links[0] not in texture_node.path)
factor_socket = next((socket for socket in multiply_node.inputs if is_factor_socket(socket)), None)
if factor_socket is None:
return None
if factor_socket.is_linked:
print_console("WARNING", "BaseColorFactor only supports sockets without links (in Node '{}')."
.format(multiply_node.name))
return None
return list(factor_socket.default_value)[:3] + [alpha]
def __gather_base_color_texture(blender_material, export_settings):
base_color_socket = gltf2_blender_get.get_socket(blender_material, "Base Color")
if base_color_socket is None:
base_color_socket = gltf2_blender_get.get_socket(blender_material, "BaseColor")
if base_color_socket is None:
base_color_socket = gltf2_blender_get.get_socket_old(blender_material, "BaseColor")
if base_color_socket is None:
base_color_socket = gltf2_blender_get.get_socket(blender_material, "Background")
alpha_socket = gltf2_blender_get.get_socket(blender_material, "Alpha")
if alpha_socket is not None and alpha_socket.is_linked:
inputs = (base_color_socket, alpha_socket, )
else:
inputs = (base_color_socket,)
return gltf2_blender_gather_texture_info.gather_texture_info(inputs, export_settings)
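# Search upstream of the socket for an Image Texture node; return the first match or None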
def __get_tex_from_socket(blender_shader_socket: bpy.types.NodeSocket):
result = gltf2_blender_search_node_tree.from_socket(
blender_shader_socket,
gltf2_blender_search_node_tree.FilterByType(bpy.types.ShaderNodeTexImage))
if not result:
return None
return result[0]
def __gather_extensions(blender_material, export_settings):
return None
def __gather_extras(blender_material, export_settings):
return None
def __gather_metallic_factor(blender_material, export_settings):
metallic_socket = gltf2_blender_get.get_socket(blender_material, "Metallic")
if metallic_socket is None:
metallic_socket = gltf2_blender_get.get_socket_old(blender_material, "MetallicFactor")
if isinstance(metallic_socket, bpy.types.NodeSocket) and not metallic_socket.is_linked:
return metallic_socket.default_value
return None
def __gather_metallic_roughness_texture(blender_material, orm_texture, export_settings):
if orm_texture is not None:
texture_input = orm_texture
else:
metallic_socket = gltf2_blender_get.get_socket(blender_material, "Metallic")
roughness_socket = gltf2_blender_get.get_socket(blender_material, "Roughness")
hasMetal = metallic_socket is not None and __has_image_node_from_socket(metallic_socket)
hasRough = roughness_socket is not None and __has_image_node_from_socket(roughness_socket)
if not hasMetal and not hasRough:
metallic_roughness = gltf2_blender_get.get_socket_old(blender_material, "MetallicRoughness")
if metallic_roughness is None or not __has_image_node_from_socket(metallic_roughness):
return None
texture_input = (metallic_roughness,)
elif not hasMetal:
texture_input = (roughness_socket,)
elif not hasRough:
texture_input = (metallic_socket,)
else:
texture_input = (metallic_socket, roughness_socket)
return gltf2_blender_gather_texture_info.gather_texture_info(texture_input, export_settings)
def __gather_roughness_factor(blender_material, export_settings):
roughness_socket = gltf2_blender_get.get_socket(blender_material, "Roughness")
if roughness_socket is None:
roughness_socket = gltf2_blender_get.get_socket_old(blender_material, "RoughnessFactor")
if isinstance(roughness_socket, bpy.types.NodeSocket) and not roughness_socket.is_linked:
return roughness_socket.default_value
return None
def __has_image_node_from_socket(socket):
result = gltf2_blender_search_node_tree.from_socket(
socket,
gltf2_blender_search_node_tree.FilterByType(bpy.types.ShaderNodeTexImage))
if not result:
return False
return True
| 43.466292 | 131 | 0.769937 | [
"Apache-2.0"
] | VitusW42/glTF-Blender-IO | addons/io_scene_gltf2/blender/exp/gltf2_blender_gather_materials_pbr_metallic_roughness.py | 7,737 | Python |
"""A setuptools based setup module.
See:
https://packaging.python.org/guides/distributing-packages-using-setuptools/
https://github.com/pypa/sampleproject
"""
# Always prefer setuptools over distutils
from setuptools import setup, find_packages, Extension
from os import path
# io.open is needed for projects that support Python 2.7
# It ensures open() defaults to text mode with universal newlines,
# and accepts an argument to specify the text encoding
# Python 3 only projects can skip this import
from io import open
import glob
import sys
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
# Precompile the cppyy module
#import subprocess
#if not "--help" in sys.argv and ("install" in sys.argv or "build" in sys.argv):
# subprocess.check_call(["make", "debuglib"], cwd="src/cpp_code")
# Arguments marked as "Required" below must be included for upload to PyPI.
# Fields marked as "Optional" may be commented out.
setup(
# This is the name of your project. The first time you publish this
# package, this name will be registered for you. It will determine how
# users can install this project, e.g.:
#
# $ pip install sampleproject
#
# And where it will live on PyPI: https://pypi.org/project/sampleproject/
#
# There are some restrictions on what makes a valid project name
# specification here:
# https://packaging.python.org/specifications/core-metadata/#name
name='mocos_helper', # Required
# Versions should comply with PEP 440:
# https://www.python.org/dev/peps/pep-0440/
#
# For a discussion on single-sourcing the version across setup.py and the
# project code, see
# https://packaging.python.org/en/latest/single_source_version.html
version='0.0.18', # Required
# This is a one-line description or tagline of what your project does. This
# corresponds to the "Summary" metadata field:
# https://packaging.python.org/specifications/core-metadata/#summary
description='Helper code for MOCOS-COVID19/modelling-ncov2019', # Optional
# This is an optional longer description of your project that represents
# the body of text which users will see when they visit PyPI.
#
# Often, this is the same as your README, so you can just read it in from
# that file directly (as we have already done above)
#
# This field corresponds to the "Description" metadata field:
# https://packaging.python.org/specifications/core-metadata/#description-optional
long_description=long_description, # Optional
# Denotes that our long_description is in Markdown; valid values are
# text/plain, text/x-rst, and text/markdown
#
# Optional if long_description is written in reStructuredText (rst) but
# required for plain-text or Markdown; if unspecified, "applications should
# attempt to render [the long_description] as text/x-rst; charset=UTF-8 and
# fall back to text/plain if it is not valid rst" (see link below)
#
# This field corresponds to the "Description-Content-Type" metadata field:
# https://packaging.python.org/specifications/core-metadata/#description-content-type-optional
long_description_content_type='text/markdown', # Optional (see note above)
# This should be a valid link to your project's main homepage.
#
# This field corresponds to the "Home-Page" metadata field:
# https://packaging.python.org/specifications/core-metadata/#home-page-optional
url='https://github.com/michalsta/mocos_helper', # Optional
# This should be your name or the name of the organization which owns the
# project.
author='Michał Startek, Mateusz Łącki', # Optional
# This should be a valid email address corresponding to the author listed
# above.
#author_email='', # Optional
# Classifiers help users find your project by categorizing it.
#
# For a list of valid classifiers, see https://pypi.org/classifiers/
classifiers=[ # Optional
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 2 - Pre-Alpha',
# Indicate who your project is intended for
"Topic :: Scientific/Engineering :: Bio-Informatics",
# Pick your license as you wish
'License :: OSI Approved :: MIT License',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
# These classifiers are *not* checked by 'pip install'. See instead
# 'python_requires' below.
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
],
# This field adds keywords for your project which will appear on the
# project page. What does your project relate to?
#
# Note that this is a string of words separated by whitespace, not a list.
keywords='covid-19 modelling', # Optional
# When your source code is in a subdirectory under the project root, e.g.
# `src/`, it is necessary to specify the `package_dir` argument.
package_dir={'mocos_helper': 'src'}, # Optional
# You can just specify package directories manually here if your project is
# simple. Or you can use find_packages().
#
# Alternatively, if you just want to distribute a single Python file, use
# the `py_modules` argument instead as follows, which will expect a file
# called `my_module.py` to exist:
#
# py_modules=["my_module"],
#
packages=['mocos_helper'], #find_packages(where='src'), # Required
# Specify which Python versions you support. In contrast to the
# 'Programming Language' classifiers above, 'pip install' will check this
# and refuse to install the project if the version does not match. If you
# do not support Python 2, you can simplify this to '>=3.5' or similar, see
# https://packaging.python.org/guides/distributing-packages-using-setuptools/#python-requires
python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, <4',
# This field lists other packages that your project depends on to run.
# Any package you put here will be installed by pip when your project is
# installed, so they must be valid existing projects.
#
# For an analysis of "install_requires" vs pip's requirements files see:
# https://packaging.python.org/en/latest/requirements.html
install_requires=['cppyy'], # Optional
# List additional groups of dependencies here (e.g. development
# dependencies). Users will be able to install these using the "extras"
# syntax, for example:
#
# $ pip install sampleproject[dev]
#
# Similar to `install_requires` above, these must be valid existing
# projects.
#extras_require={ # Optional
# 'dev': ['check-manifest'],
# 'test': ['coverage'],
#},
# If there are data files included in your packages that need to be
# installed, specify them here.
#
# If using Python 2.6 or earlier, then these have to be included in
# MANIFEST.in as well.
package_data={ # Optional
'mocos_helper': ["cpp_code/*"],
},
# Although 'package_data' is the preferred approach, in some case you may
# need to place data files outside of your packages. See:
# http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files
#
# In this case, 'data_file' will be installed into '<sys.prefix>/my_data'
#data_files=[('my_data', ['data/data_file'])], # Optional
# To provide executable scripts, use entry points in preference to the
# "scripts" keyword. Entry points provide cross-platform support and allow
# `pip` to create the appropriate form of executable for the target
# platform.
#
# For example, the following would provide a command called `sample` which
# executes the function `main` from this package when invoked:
#entry_points={ # Optional
# 'console_scripts': [
# 'sample=sample:main',
# ],
#},
# List additional URLs that are relevant to your project as a dict.
#
# This field corresponds to the "Project-URL" metadata fields:
# https://packaging.python.org/specifications/core-metadata/#project-url-multiple-use
#
# Examples listed include a pattern for specifying where the package tracks
# issues, where the source is hosted, where to say thanks to the package
# maintainers, and where to support the project financially. The key is
# what's used to render the link text on PyPI.
#project_urls={ # Optional
# 'Bug Reports': 'https://github.com/pypa/sampleproject/issues',
# 'Funding': 'https://donate.pypi.org',
# 'Say Thanks!': 'http://saythanks.io/to/example',
# 'Source': 'https://github.com/pypa/sampleproject/',
#},
zip_safe = False
)
| 42.616438 | 98 | 0.68531 | [
"MIT"
] | michalsta/mocos_helper | setup.py | 9,336 | Python |
import os
import pdb
import sys
import tempfile
sys.path.append("/opt/tosca")
from translator.toscalib.tosca_template import ToscaTemplate
from core.models import Instance,User,Network,NetworkTemplate,Port
from xosresource import XOSResource
class XOSPort(XOSResource):
provides = ["tosca.nodes.network.Port"]
xos_model = Port
def get_existing_objs(self):
# Port objects have no name, their unique key is (instance, network)
args = self.get_xos_args(throw_exception=False)
instance = args.get('instance',None)
network = args.get('network',None)
if (not instance) or (not network):
return []
return self.xos_model.objects.filter(**{'instance': instance, 'network': network})
def get_xos_args(self, throw_exception=True):
args = {}
instance_name = self.get_requirement("tosca.relationships.network.BindsTo")
if instance_name:
args["instance"] = self.get_xos_object(Instance, throw_exception, name=instance_name)
net_name = self.get_requirement("tosca.relationships.network.LinksTo")
if net_name:
args["network"] = self.get_xos_object(Network, throw_exception, name=net_name)
return args
def postprocess(self, obj):
pass
def create(self):
xos_args = self.get_xos_args()
if not xos_args.get("instance", None):
            raise Exception("Must specify instance when creating port")
if not xos_args.get("network", None):
raise Exception("Must specify network when creating port")
port = Port(**xos_args)
port.caller = self.user
port.save()
self.postprocess(port)
self.info("Created Port '%s' connect instance '%s' to network %s" % (str(port), str(port.instance), str(port.network)))
def delete(self, obj):
super(XOSPort, self).delete(obj)
| 30.66129 | 127 | 0.662809 | [
"Apache-2.0"
] | xmaruto/mcord | xos/tosca/resources/port.py | 1,901 | Python |
"""User defaults
Revision ID: 30b25dd39af0
Revises: 46b85e11f48f
Create Date: 2015-02-12 15:34:59.515740
"""
# revision identifiers, used by Alembic.
revision = '30b25dd39af0'
down_revision = '46b85e11f48f'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.alter_column('user', 'email',
existing_type=sa.VARCHAR(length=255),
nullable=False)
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.alter_column('user', 'email',
existing_type=sa.VARCHAR(length=255),
nullable=True)
### end Alembic commands ###
| 23.272727 | 63 | 0.657552 | [
"Apache-2.0"
] | Lunga001/pmg-cms-2 | migrations/versions/30b25dd39af0_user_defaults.py | 768 | Python |
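# Read comma-separated integers and print the indices of the even values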
numbers = input().split(', ')
numbers_list = list(map(int, numbers))
even_list = []
for i in range(len(numbers_list)):
if numbers_list[i] % 2 == 0:
even_list.append(i)
print(even_list)
# found_indices = map(lambda x: x if numbers_list[x] % 2 == 0 else 'no', range(len(numbers_list)))
# even_indices = list(filter(lambda a: a != 'no', found_indices))
# print(even_indices)
| 29.769231 | 98 | 0.664083 | [
"MIT"
] | GYosifov88/Python-Fundamentals | even_numbers_list_advanced.py | 387 | Python |
import os
from django.conf import settings
DOCUMENT_SIGNATURE_DETACHED_ACTION_CLASS_PATH = 'mayan.apps.document_signatures.workflow_actions.DocumentSignatureDetachedAction'
DOCUMENT_SIGNATURE_EMBEDDED_ACTION_CLASS_PATH = 'mayan.apps.document_signatures.workflow_actions.DocumentSignatureEmbeddedAction'
TEST_SIGNED_DOCUMENT_PATH = os.path.join(
settings.BASE_DIR, 'apps', 'document_signatures', 'tests', 'contrib',
'sample_documents', 'mayan_11_1.pdf.gpg'
)
TEST_SIGNATURE_FILE_PATH = os.path.join(
settings.BASE_DIR, 'apps', 'document_signatures', 'tests', 'contrib',
'sample_documents', 'mayan_11_1.pdf.sig'
)
TEST_SIGNATURE_ID = 'XVkoGKw35yU1iq11dZPiv7uAY7k'
| 42.5 | 129 | 0.816176 | [
"Apache-2.0"
] | DRCIT/Mayan-EDMS | mayan/apps/document_signatures/tests/literals.py | 680 | Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
from alipay.aop.api.domain.KbdishMaterialInfo import KbdishMaterialInfo
class KoubeiCateringDishMaterialDeleteResponse(AlipayResponse):
def __init__(self):
super(KoubeiCateringDishMaterialDeleteResponse, self).__init__()
self._kb_dish_material_info = None
@property
def kb_dish_material_info(self):
return self._kb_dish_material_info
@kb_dish_material_info.setter
def kb_dish_material_info(self, value):
if isinstance(value, KbdishMaterialInfo):
self._kb_dish_material_info = value
else:
self._kb_dish_material_info = KbdishMaterialInfo.from_alipay_dict(value)
def parse_response_content(self, response_content):
response = super(KoubeiCateringDishMaterialDeleteResponse, self).parse_response_content(response_content)
if 'kb_dish_material_info' in response:
self.kb_dish_material_info = response['kb_dish_material_info']
| 35.633333 | 113 | 0.755847 | [
"Apache-2.0"
] | Anning01/alipay-sdk-python-all | alipay/aop/api/response/KoubeiCateringDishMaterialDeleteResponse.py | 1,069 | Python |
import unittest
def suite():
return unittest.TestLoader().discover("garbageday.tests", pattern="*.py")
| 17.666667 | 74 | 0.735849 | [
"MIT"
] | josylad/RoomScout | garbageday/tests/__init__.py | 106 | Python |
from __future__ import print_function
import sys
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import os
import time
# Options for mode 'lower_level'
MODE = 'S-4mu_WigD'
label_size = 28
################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################
mpl.rc('font', family='serif', size=34, serif="Times New Roman")
#mpl.rcParams['text.usetex'] = True
#mpl.rcParams['text.latex.preamble'] = [r'\boldmath']
mpl.rcParams['legend.fontsize'] = "medium"
mpl.rc('savefig', format ="pdf")
mpl.rcParams['xtick.labelsize'] = label_size
mpl.rcParams['ytick.labelsize'] = label_size
mpl.rcParams['figure.figsize'] = 8, 6
mpl.rcParams['lines.linewidth'] = 3
def binomial_error(l1):
err_list = []
for item in l1:
if item==1. or item==0.: err_list.append(np.sqrt(100./101.*(1.-100./101.)/101.))
else: err_list.append(np.sqrt(item*(1.-item)/100.))
return err_list
################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################
# S - 4 mu WigD
################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################
if MODE == 'S-4mu_WigD':
#param_list = [0.1,0.08,0.06,0.04,0.02,0.0]
param_list = [0.1,0.08,0.06,0.04]
param_list = [0.0,0.01,0.02,0.03,0.04,0.05,0.06,0.07,0.08,0.09,0.1]
param_list = [0.05,0.1,0.3,0.5,0.7,1.]
ml_classifiers = ['nn','bdt']
ml_classifiers_colors = ['green','magenta','cyan']
ml_classifiers_bin = 5
chi2_color = 'red'
chi2_splits = [1,2,3,4,5,6,7,8,9,10]
#chi2_splits = [8]
ml_folder_name = "S-4mu_WigD/evaluation_S-VV-4mu_WigD_updated10"
chi2_folder_name = "S-4mu_WigD"
#chi2_folder_name = "event_shapes_lower_level_without_Mult"
ml_file_name = "{1}_S-VV-4mu_WigD_updated10_{0}_syst_0_01__chi2scoring_5_p_values"
chi2_file_name = "S-4mu_WigD_updated10_{0}D_chi2_{1}_splits_p_values"
#chi2_file_name = "event_shapes_lower_level_syst_0_01_attempt4_without_Mult__{0}D_chi2_{1}_splits_p_values"
chi2_1D_file_name = "S-4mu_WigD_updated10_1D_{0}D_chi2_{1}_splits_p_values"
chi2_m1D_file_name = "S-4mu_WigD_updated10_m1D_{0}D_chi2_{1}_splits_p_values"
title = "S-4mu"
name = "S-4mu"
CL = 0.95
ml_classifiers_dict={}
chi2_splits_dict={}
chi2_1D_splits_dict={}
chi2_m1D_splits_dict={}
#xwidth = [0.5]*len(param_list)
xwidth = np.subtract(param_list[1:],param_list[:-1])/2.
xwidth_left = np.append(xwidth[0] , xwidth)
xwidth_right = np.append(xwidth,xwidth[-1])
print("xwidth : ", xwidth)
fig = plt.figure()
ax = fig.add_axes([0.2,0.15,0.75,0.8])
if False:
for ml_classifier_index, ml_classifier in enumerate(ml_classifiers):
ml_classifiers_dict[ml_classifier]= []
for param in param_list:
p_values = np.loadtxt(os.environ['learningml']+"/GoF/optimisation_and_evaluation/"+ml_folder_name+"/"+ml_classifier+"/"+ml_file_name.format(param,ml_classifier,ml_classifiers_bin)).tolist()
p_values_in_CL = sum(i < (1-CL) for i in p_values)
ml_classifiers_dict[ml_classifier].append(p_values_in_CL)
ml_classifiers_dict[ml_classifier]= np.divide(ml_classifiers_dict[ml_classifier],100.)
ax.errorbar(param_list,ml_classifiers_dict['nn'], yerr=binomial_error(ml_classifiers_dict['nn']), linestyle='-', marker='s', markeredgewidth=0.0, markersize=12, color=ml_classifiers_colors[0], label=r'$ANN$',clip_on=False)
print("bdt : ", ml_classifiers_dict['bdt'])
ax.errorbar(param_list,ml_classifiers_dict['bdt'], yerr=binomial_error(ml_classifiers_dict['bdt']), linestyle='-', marker='o', markeredgewidth=0.0, markersize=12, color=ml_classifiers_colors[1], label=r'$BDT$', clip_on=False)
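    # For each signal fraction take the best rejection rate over all chi2 bin splits,
    # for the with-mass, mass-only and without-mass variants below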
for chi2_split_index, chi2_split in enumerate(chi2_splits):
chi2_splits_dict[str(chi2_split)]=[]
chi2_best = []
for param in param_list:
chi2_best_dim = []
for chi2_split_index, chi2_split in enumerate(chi2_splits):
p_values = np.loadtxt(os.environ['learningml']+"/GoF/chi2/"+chi2_folder_name+"/"+chi2_file_name.format(param,chi2_split)).tolist()
p_values_in_CL = sum(i < (1-CL) for i in p_values)
temp = float(p_values_in_CL) /100.
chi2_splits_dict[str(chi2_split)].append(temp)
chi2_best_dim.append(temp)
temp_best = np.max(chi2_best_dim)
#print(str(dim)+"D chi2_best_dim : ", chi2_best_dim)
#print(str(dim)+"D temp_best : ",np.max(temp_best))
chi2_best.append(temp_best)
#print("chi2_best : ",chi2_best)
for chi2_split_index, chi2_split in enumerate(chi2_splits):
chi2_1D_splits_dict[str(chi2_split)]=[]
chi2_1D_best = []
for param in param_list:
chi2_1D_best_dim = []
for chi2_split_index, chi2_split in enumerate(chi2_splits):
p_values = np.loadtxt(os.environ['learningml']+"/GoF/chi2/"+chi2_folder_name+"/"+chi2_1D_file_name.format(param,chi2_split)).tolist()
p_values_in_CL = sum(i < (1-CL) for i in p_values)
temp = float(p_values_in_CL) /100.
chi2_1D_splits_dict[str(chi2_split)].append(temp)
chi2_1D_best_dim.append(temp)
temp_best = np.max(chi2_1D_best_dim)
#print(str(dim)+"D chi2_best_dim : ", chi2_best_dim)
#print(str(dim)+"D temp_best : ",np.max(temp_best))
chi2_1D_best.append(temp_best)
#print("chi2_best : ",chi2_best)
for chi2_split_index, chi2_split in enumerate(chi2_splits):
chi2_m1D_splits_dict[str(chi2_split)]=[]
chi2_m1D_best = []
for param in param_list:
chi2_m1D_best_dim = []
for chi2_split_index, chi2_split in enumerate(chi2_splits):
p_values = np.loadtxt(os.environ['learningml']+"/GoF/chi2/"+chi2_folder_name+"/"+chi2_m1D_file_name.format(param,chi2_split)).tolist()
p_values_in_CL = sum(i < (1-CL) for i in p_values)
temp = float(p_values_in_CL) /100.
chi2_m1D_splits_dict[str(chi2_split)].append(temp)
chi2_m1D_best_dim.append(temp)
temp_best = np.max(chi2_m1D_best_dim)
#print(str(dim)+"D chi2_best_dim : ", chi2_best_dim)
#print(str(dim)+"D temp_best : ",np.max(temp_best))
chi2_m1D_best.append(temp_best)
#print("chi2_best : ",chi2_best)
print("param_list : ",param_list)
print("chi2_best : ", chi2_best)
print("chi2_splits_dict : ", chi2_splits_dict)
ax.errorbar(param_list,chi2_best, yerr=binomial_error(chi2_best), linestyle='--', marker='$\chi$', markeredgecolor='none', markersize=18, color='black', label=r'$\chi^2 w/\_mass$', clip_on=False)
ax.errorbar(param_list,chi2_1D_best, yerr=binomial_error(chi2_1D_best), linestyle='--', marker='$\chi$', markeredgecolor='none', markersize=18, color='blue', label=r'$\chi^2 only\_mass$', clip_on=False)
ax.errorbar(param_list,chi2_m1D_best, yerr=binomial_error(chi2_m1D_best), linestyle='--', marker='$\chi$', markeredgecolor='none', markersize=18, color='red', label=r'$\chi^2 w/o\_mass$', clip_on=False)
print("ml_classifiers_dict : ",ml_classifiers_dict)
print("chi2_best : ", chi2_best)
#ax.plot((0.1365,0.1365),(0.,1.),c="grey",linestyle="--")
ax.set_xlim([0.,1.])
#ax.set_xlim([0.129,0.1405])
ax.set_ylim([0.,1.])
ax.set_xlabel(r"$p_{signal}$")
ax.set_ylabel("Fraction rejected")
plt.legend(frameon=False, numpoints=1)
#a, b, c = [0.130,0.133], [0.1365],[0.14]
#ax.set_xticks(a+b+c)
#xx, locs = plt.xticks()
#ll = ['%.3f' % y for y in a] + ['%.4f' % y for y in b] + ['%.3f' % y for y in c]
#plt.xticks(xx, ll)
#ax.legend(loc='lower left', frameon=False, numpoints=1)
fig_leg = plt.figure(figsize=(8,2.7))
ax_leg = fig_leg.add_axes([0.0,0.0,1.0,1.0])
plt.tick_params(axis='x',which='both',bottom='off', top='off', labelbottom='off')
plt.tick_params(axis='y',which='both',bottom='off', top='off', labelbottom='off')
ax_leg.yaxis.set_ticks_position('none')
ax_leg.set_frame_on(False)
plt.figlegend(*ax.get_legend_handles_labels(), loc = 'upper left',frameon=False, numpoints=1,ncol=2)
fig_leg.savefig("S-4mu_WigD_updated10_analysis_legend.pdf")
#fig_name=name+"_alphaSvalue_analysis"
fig_name="S-4mu_WigD_updated10_analysis"
fig.savefig(fig_name+".pdf")
fig.savefig(fig_name+"_"+time.strftime("%b_%d_%Y")+".pdf")
print("Saved the figure as" , fig_name+".pdf")
| 53.980583 | 816 | 0.495594 | [
"MIT"
] | weissercn/learningml | learningml/GoF/analysis/S-4mu/plot_S-4mu_updated10_alphaSvalue_analysis.py | 11,120 | Python |
try:
from os import system
from os.path import isdir,isfile
from time import sleep
from npc import NPC
from tutorial import text
import pack
import sys
from requests import get
if not isfile('./config'):
open('config','w').write('firmware: https://raw.githubusercontent.com/miko1112/comp9/main/firmware.c9s')
config={l.split(': ')[0]:l.split(': ',1)[1] for l in open('config').read().split('\n')}
def connected():
try:
get('https://example.com',2)
return True
except:
return False
hasinternet=connected()
cls=lambda: system('cls')
elseflag=False
variables={}
help=''' help Display this screen
tut Read tutorial
cls Clear screen
echo Display text in the console
exec Execute a script file
bind Bind a set of commands to one command
var Set a variable
list Shows all active variables
wait Stop for a given amount of seconds
input Take input into variable
run Execute a command
while Repeat some code while condition is not zero
if Run some code if condition is not zero
else Run some code if previous if statement failed
program Run an installed program
install Installs a package from the internet
exit Shut down COMP-9
Append ~h to a command (<command> ~h) to get some in
detail help on said command.'''
def setvar(*a):
global variables
variables[a[0]]=' '.join(a[1:])
def readtutorial(*a):
page=text[int(a[0])-1]
cls()
print(page)
def loadscript(*a):
sn=a[0]
code=open(sn).read()
execscript(code)
def binding(*a):
commands[a[0]]=('User defined command "'+a[0]+'"','seq',' '.join(a[1:]))
def wait(*a):
sleep(float(a[0]))
def takein(*a):
global variables
variables[a[0]]=input(' '.join(a[1:])+' ')
def run(*a):
if a[0]=='exit':
cls()
sys.exit()
execscript(' '.join(a))
def whileloop(*a):
og_condition=a[0]
while True:
calcondition=og_condition
for vn in variables.keys():
calcondition=calcondition.replace('$'+vn,variables[vn])
if calcondition!='0':
execscript(' '.join(a[1:]))
else:
break
def ifstate(*a):
global elseflag
if a[0]!='0':
execscript(' '.join(a[1:]))
elseflag=False
else:
elseflag=True
def elsestate(*a):
global elseflag
if elseflag:
execscript(' '.join(a))
elseflag=False
def program(*a):
ogscript=open('./packages/'+' '.join(a)+'/main.c9s').read()
execscript(ogscript.replace('exec .','exec ./packages/'+' '.join(a)))
def install(*a):
if pack.installpackage(get(' '.join(a)).content.decode('utf8')):
print(' Failed to install package')
else:
print(' Successfully installed package at '+' '.join(a))
def uninstall(*a):
pack.uninstallpackage(' '.join(a))
def listvar(*_):
for k in variables.keys():
print(' '+k)
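    # Command table: name -> (help text, handler type, payload).
    # 'disp' prints text, 'func' calls functions, 'comp' calls a handler with
    # variable-substituted arguments, 'seq' runs a bound command sequence.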
commands={
'help':('Displays all the commands and what they do','disp',help),
'exit':('Exits COMP-9','func',cls,sys.exit),
'cls':('Clears the screen','func',cls),
'echo':('Displays text\n echo <text>','comp',print),
'exec':('Executes a script\n exec <script location>','comp',loadscript),
'bind':('Binds a sequence of commands to one command\n bind <name> <command1;command2;...>','comp',binding),
'tut':('Shows a page of the tutorial\n tut <page>','comp',readtutorial),
'var':('Sets a variable\n var <name> <value>','comp',setvar),
'wait':('Waits a given amount of seconds\n wait <time>','comp',wait),
'input':('Takes input and puts it into a variable\n input <variable name> <query>','comp',takein),
'run':('Executes one command\n run <command>','comp',run),
'while':('While the given condition is not 0, execute a command\n while <condition> <command>','comp',whileloop),
'if':('If the given condition is not 0, execute a command\n if <condition> <command>','comp',ifstate),
'else':('If a previous condition proves false, execute a command\n else <command>','comp',elsestate),
'program':('Runs an installed program\n program <name>','comp',program),
'install':('Installs a package from the internet\n install <link to raw text>','comp',install),
'uninstall':('Uninstalls a package\n uninstall <name>','comp',uninstall),
'list':('Lists all active variables','comp',listvar),
}
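    # Run a script: each non-empty, non-comment line is looked up in the
    # command table and executed; execution stops on the first error.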
def execscript(t):
for l in t.split('\n'):
if l.strip()==''or l.strip().startswith('//'):continue
if l.strip().split(' ')[0]in commands.keys():
if execomm(commands[l.strip().split(' ')[0]],*l.strip().split(' ')[1:]):
print(' Bad syntax "'+l+'"')
break
else:
print(' Unknown command "'+l+'"')
break
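    # Execute a single command tuple c with raw arguments i; '~h' prints the
    # command's help text, and True is returned when the arguments are invalid.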
def execomm(c,*i):
ct=c[1]
if len(i)>0:
if i[0]=='~h':
print(' '+c[0])
return False
if ct=='disp':
print(c[2])
return False
elif ct=='func':
for f in c[2:]:
f()
return False
elif ct=='comp':
try:
proparg=list(i)
for ind in range(len(proparg)):
for vn in variables.keys():
proparg[ind]=proparg[ind].replace('$'+vn,variables[vn])
c[2](*proparg)
return False
except SystemExit:
sys.exit()
except Exception as e:
return True
elif ct=='seq':
execscript(c[2].replace(';','\n'))
return False
system('title COMP-9')
execscript('install '+config['firmware']+'\nprogram __SYSTEM')
#execscript('exec ./system_scripts/system.c9s')
#cls()
except KeyboardInterrupt:pass
| 32.24 | 118 | 0.602269 | [
"MIT"
] | miko1112/comp9 | core.py | 5,642 | Python |
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import init
from torchvision import models
import numpy as np
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
def weights_init_normal(m):
classname = m.__class__.__name__
if classname.find('Conv') != -1:
init.normal_(m.weight.data, 0.0, 0.02)
elif classname.find('Linear') != -1:
        init.normal_(m.weight.data, 0.0, 0.02)
elif classname.find('BatchNorm2d') != -1:
init.normal_(m.weight.data, 1.0, 0.02)
init.constant_(m.bias.data, 0.0)
def weights_init_xavier(m):
classname = m.__class__.__name__
if classname.find('Conv') != -1:
init.xavier_normal_(m.weight.data, gain=0.02)
elif classname.find('Linear') != -1:
init.xavier_normal_(m.weight.data, gain=0.02)
elif classname.find('BatchNorm2d') != -1:
init.normal_(m.weight.data, 1.0, 0.02)
init.constant_(m.bias.data, 0.0)
def weights_init_kaiming(m):
classname = m.__class__.__name__
if classname.find('Conv') != -1:
init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
elif classname.find('Linear') != -1:
init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
elif classname.find('BatchNorm2d') != -1:
init.normal_(m.weight.data, 1.0, 0.02)
init.constant_(m.bias.data, 0.0)
def init_weights(net, init_type='normal'):
print('initialization method [%s]' % init_type)
if init_type == 'normal':
net.apply(weights_init_normal)
elif init_type == 'xavier':
net.apply(weights_init_xavier)
elif init_type == 'kaiming':
net.apply(weights_init_kaiming)
else:
raise NotImplementedError('initialization method [%s] is not implemented' % init_type)
class FeatureExtraction(nn.Module):
def __init__(self, input_nc, ngf=64, n_layers=3, norm_layer=nn.BatchNorm2d, use_dropout=False):
super(FeatureExtraction, self).__init__()
downconv = nn.Conv2d(input_nc, ngf, kernel_size=4, stride=2, padding=1)
model = [downconv, nn.ReLU(True), norm_layer(ngf)]
for i in range(n_layers):
in_ngf = 2**i * ngf if 2**i * ngf < 512 else 512
out_ngf = 2**(i+1) * ngf if 2**i * ngf < 512 else 512
downconv = nn.Conv2d(in_ngf, out_ngf, kernel_size=4, stride=2, padding=1)
model += [downconv, nn.ReLU(True)]
model += [norm_layer(out_ngf)]
model += [nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1), nn.ReLU(True)]
model += [norm_layer(512)]
model += [nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1), nn.ReLU(True)]
self.model = nn.Sequential(*model)
init_weights(self.model, init_type='normal')
def forward(self, x):
return self.model(x)
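# Normalize feature maps to unit L2 norm along the channel dimension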
class FeatureL2Norm(torch.nn.Module):
def __init__(self):
super(FeatureL2Norm, self).__init__()
def forward(self, feature):
epsilon = 1e-6
norm = torch.pow(torch.sum(torch.pow(feature,2),1)+epsilon,0.5).unsqueeze(1).expand_as(feature)
return torch.div(feature,norm)
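# Dense correlation between two feature maps; output shape is (B, H*W, H, W)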
class FeatureCorrelation(nn.Module):
def __init__(self):
super(FeatureCorrelation, self).__init__()
def forward(self, feature_A, feature_B):
b,c,h,w = feature_A.size()
# reshape features for matrix multiplication
feature_A = feature_A.transpose(2,3).contiguous().view(b,c,h*w)
feature_B = feature_B.view(b,c,h*w).transpose(1,2)
# perform matrix mult.
feature_mul = torch.bmm(feature_B,feature_A)
correlation_tensor = feature_mul.view(b,h,w,h*w).transpose(2,3).transpose(1,2)
return correlation_tensor
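# Regress transformation parameters (e.g. TPS control-point offsets) from the correlation volume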
class FeatureRegression(nn.Module):
def __init__(self, input_nc=512,output_dim=6):
super(FeatureRegression, self).__init__()
self.conv = nn.Sequential(
nn.Conv2d(input_nc, 512, kernel_size=4, stride=2, padding=1),
nn.BatchNorm2d(512),
nn.ReLU(inplace=True),
nn.Conv2d(512, 256, kernel_size=4, stride=2, padding=1),
nn.BatchNorm2d(256),
nn.ReLU(inplace=True),
nn.Conv2d(256, 128, kernel_size=3, padding=1),
nn.BatchNorm2d(128),
nn.ReLU(inplace=True),
nn.Conv2d(128, 64, kernel_size=3, padding=1),
nn.BatchNorm2d(64),
nn.ReLU(inplace=True),
)
self.linear = nn.Linear(64 * 4 * 3, output_dim)
self.tanh = nn.Tanh()
'''self.conv.to(device)
self.linear.to(device)
self.tanh.to(device)'''
def forward(self, x):
x = self.conv(x)
x = x.reshape(x.size(0), -1)
x = self.linear(x)
x = self.tanh(x)
return x
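# Build a thin-plate-spline sampling grid from control-point offsets (theta) for use with F.grid_sample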
class TpsGridGen(nn.Module):
def __init__(self, out_h=256, out_w=192, use_regular_grid=True, grid_size=3, reg_factor=0):
super(TpsGridGen, self).__init__()
self.out_h, self.out_w = out_h, out_w
self.reg_factor = reg_factor
# create grid in numpy
self.grid = np.zeros( [self.out_h, self.out_w, 3], dtype=np.float32)
# sampling grid with dim-0 coords (Y)
self.grid_X,self.grid_Y = np.meshgrid(np.linspace(-1,1,out_w),np.linspace(-1,1,out_h))
# grid_X,grid_Y: size [1,H,W,1,1]
self.grid_X = torch.FloatTensor(self.grid_X).unsqueeze(0).unsqueeze(3)
self.grid_Y = torch.FloatTensor(self.grid_Y).unsqueeze(0).unsqueeze(3)
self.grid_X = self.grid_X.to(device)
self.grid_Y = self.grid_Y.to(device)
# initialize regular grid for control points P_i
if use_regular_grid:
axis_coords = np.linspace(-1,1,grid_size)
self.N = grid_size*grid_size
P_Y,P_X = np.meshgrid(axis_coords,axis_coords)
P_X = np.reshape(P_X,(-1,1)) # size (N,1)
P_Y = np.reshape(P_Y,(-1,1)) # size (N,1)
P_X = torch.FloatTensor(P_X)
P_X = P_X.to(device)
P_Y = torch.FloatTensor(P_Y)
P_Y = P_Y.to(device)
self.P_X_base = P_X.clone()
self.P_X_base = self.P_X_base.to(device)
self.P_Y_base = P_Y.clone()
self.P_Y_base = self.P_Y_base.to(device)
self.Li = self.compute_L_inverse(P_X,P_Y).unsqueeze(0)
self.P_X = P_X.unsqueeze(2).unsqueeze(3).unsqueeze(4).transpose(0,4)
self.P_Y = P_Y.unsqueeze(2).unsqueeze(3).unsqueeze(4).transpose(0,4)
def forward(self, theta):
warped_grid = self.apply_transformation(theta,torch.cat((self.grid_X,self.grid_Y),3))
return warped_grid
def compute_L_inverse(self,X,Y):
N = X.size()[0] # num of points (along dim 0)
# construct matrix K
Xmat = X.expand(N,N)
Ymat = Y.expand(N,N)
P_dist_squared = torch.pow(Xmat-Xmat.transpose(0,1),2)+torch.pow(Ymat-Ymat.transpose(0,1),2)
P_dist_squared[P_dist_squared==0]=1 # make diagonal 1 to avoid NaN in log computation
K = torch.mul(P_dist_squared,torch.log(P_dist_squared))
# construct matrix L
O = torch.FloatTensor(N,1).fill_(1)
O = O.to(device)
Z = torch.FloatTensor(3,3).fill_(0)
Z = Z.to(device)
P = torch.cat((O,X,Y),1)
L = torch.cat((torch.cat((K,P),1),torch.cat((P.transpose(0,1),Z),1)),0)
Li = torch.inverse(L)
Li = Li.to(device)
return Li
def apply_transformation(self,theta,points):
if theta.dim()==2:
theta = theta.unsqueeze(2).unsqueeze(3)
# points should be in the [B,H,W,2] format,
# where points[:,:,:,0] are the X coords
# and points[:,:,:,1] are the Y coords
# input are the corresponding control points P_i
batch_size = theta.size()[0]
# split theta into point coordinates
Q_X=theta[:,:self.N,:,:].squeeze(3)
Q_Y=theta[:,self.N:,:,:].squeeze(3)
Q_X = Q_X + self.P_X_base.expand_as(Q_X)
Q_Y = Q_Y + self.P_Y_base.expand_as(Q_Y)
# get spatial dimensions of points
points_b = points.size()[0]
points_h = points.size()[1]
points_w = points.size()[2]
# repeat pre-defined control points along spatial dimensions of points to be transformed
P_X = self.P_X.expand((1,points_h,points_w,1,self.N))
P_Y = self.P_Y.expand((1,points_h,points_w,1,self.N))
# compute weigths for non-linear part
W_X = torch.bmm(self.Li[:,:self.N,:self.N].expand((batch_size,self.N,self.N)),Q_X)
W_Y = torch.bmm(self.Li[:,:self.N,:self.N].expand((batch_size,self.N,self.N)),Q_Y)
# reshape
        # W_X, W_Y: size [B,H,W,1,N]
W_X = W_X.unsqueeze(3).unsqueeze(4).transpose(1,4).repeat(1,points_h,points_w,1,1)
W_Y = W_Y.unsqueeze(3).unsqueeze(4).transpose(1,4).repeat(1,points_h,points_w,1,1)
# compute weights for affine part
A_X = torch.bmm(self.Li[:,self.N:,:self.N].expand((batch_size,3,self.N)),Q_X)
A_Y = torch.bmm(self.Li[:,self.N:,:self.N].expand((batch_size,3,self.N)),Q_Y)
# reshape
        # A_X, A_Y: size [B,H,W,1,3]
A_X = A_X.unsqueeze(3).unsqueeze(4).transpose(1,4).repeat(1,points_h,points_w,1,1)
A_Y = A_Y.unsqueeze(3).unsqueeze(4).transpose(1,4).repeat(1,points_h,points_w,1,1)
# compute distance P_i - (grid_X,grid_Y)
# grid is expanded in point dim 4, but not in batch dim 0, as points P_X,P_Y are fixed for all batch
points_X_for_summation = points[:,:,:,0].unsqueeze(3).unsqueeze(4).expand(points[:,:,:,0].size()+(1,self.N))
points_Y_for_summation = points[:,:,:,1].unsqueeze(3).unsqueeze(4).expand(points[:,:,:,1].size()+(1,self.N))
if points_b==1:
delta_X = points_X_for_summation-P_X
delta_Y = points_Y_for_summation-P_Y
else:
# use expanded P_X,P_Y in batch dimension
delta_X = points_X_for_summation-P_X.expand_as(points_X_for_summation)
delta_Y = points_Y_for_summation-P_Y.expand_as(points_Y_for_summation)
dist_squared = torch.pow(delta_X,2)+torch.pow(delta_Y,2)
# U: size [1,H,W,1,N]
dist_squared[dist_squared==0]=1 # avoid NaN in log computation
U = torch.mul(dist_squared,torch.log(dist_squared))
# expand grid in batch dimension if necessary
points_X_batch = points[:,:,:,0].unsqueeze(3)
points_Y_batch = points[:,:,:,1].unsqueeze(3)
if points_b==1:
points_X_batch = points_X_batch.expand((batch_size,)+points_X_batch.size()[1:])
points_Y_batch = points_Y_batch.expand((batch_size,)+points_Y_batch.size()[1:])
points_X_prime = A_X[:,:,:,:,0]+ \
torch.mul(A_X[:,:,:,:,1],points_X_batch) + \
torch.mul(A_X[:,:,:,:,2],points_Y_batch) + \
torch.sum(torch.mul(W_X,U.expand_as(W_X)),4)
points_Y_prime = A_Y[:,:,:,:,0]+ \
torch.mul(A_Y[:,:,:,:,1],points_X_batch) + \
torch.mul(A_Y[:,:,:,:,2],points_Y_batch) + \
torch.sum(torch.mul(W_Y,U.expand_as(W_Y)),4)
return torch.cat((points_X_prime,points_Y_prime),3)
class GMM(nn.Module):
'''Geometric matching module
'''
def __init__(self, opt):
super(GMM, self).__init__()
self.extraction_agnostic = FeatureExtraction(22, ngf=64, n_layers=3, norm_layer=nn.BatchNorm2d)#.to(device)
self.extraction_cloth = FeatureExtraction(3, ngf=64, n_layers=3, norm_layer=nn.BatchNorm2d)#.to(device)
self.l2norm = FeatureL2Norm()#.to(device)
self.correlation = FeatureCorrelation()#.to(device)
self.regression_zero = FeatureRegression(input_nc=192, output_dim=2*opt.grid_size**2)#.to(device)
self.gridGen = TpsGridGen(opt.fine_height, opt.fine_width, grid_size=opt.grid_size)#.to(device)
self.extraction_warped_cloth = FeatureExtraction(3, ngf=64, n_layers=3, norm_layer=nn.BatchNorm2d)#.to(device)
self.regression_one = FeatureRegression(input_nc=192, output_dim=2*opt.grid_size**2)#.to(device)
def forward(self, agn, clt):
feature_agn = self.extraction_agnostic(agn)
feature_clt = self.extraction_cloth(clt)
feature_agn = self.l2norm(feature_agn)
feature_clt = self.l2norm(feature_clt)
        correlation_0 = self.correlation(feature_agn, feature_clt)
        theta = self.regression_zero(correlation_0)
grid_zero = self.gridGen(theta)
warped_coarse_cloth = F.grid_sample(clt, grid_zero, padding_mode='border')
feature_wc = self.extraction_warped_cloth(warped_coarse_cloth)
feature_wc = self.l2norm(feature_wc)
        correlation_1 = self.correlation(feature_agn, feature_wc)
        delta_theta = self.regression_one(correlation_1)
        # The original paper gives few details on how theta and delta_theta are combined,
        # so element-wise addition is used here.
grid_one = self.gridGen(theta.add(delta_theta))
return grid_zero, theta, grid_one, delta_theta
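# Minimal smoke-test sketch (not part of the original file). It assumes the module-level
# `device` defined earlier in this file and an `opt` namespace carrying `grid_size`,
# `fine_height` and `fine_width` (the values below are assumptions); the 22-channel
# agnostic input matches FeatureExtraction(22, ...) above.
if __name__ == '__main__':
    from types import SimpleNamespace
    opt = SimpleNamespace(grid_size=5, fine_height=256, fine_width=192)
    gmm = GMM(opt).to(device)
    gmm.eval()
    agnostic = torch.randn(1, 22, opt.fine_height, opt.fine_width, device=device)
    cloth = torch.randn(1, 3, opt.fine_height, opt.fine_width, device=device)
    with torch.no_grad():
        grid_zero, theta, grid_one, delta_theta = gmm(agnostic, cloth)
    print(grid_zero.shape, grid_one.shape)  # both grids: [1, 256, 192, 2]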
| 42.122642 | 118 | 0.608585 | [
"MIT"
] | bhavyashahh/SieveNet | gmm.py | 13,395 | Python |
from django.shortcuts import render
from django.urls import reverse_lazy
from django.views import generic
from django.contrib.auth.forms import UserCreationForm
from .models import Hall
def home(request):
return render(request, 'halls/home.html')
class SignUp(generic.CreateView):
form_class = UserCreationForm
success_url = reverse_lazy('home')
template_name = 'registration/signup.html'
class createHallsView(generic.CreateView):
model = Hall
fields = ['title']
template_name = 'halls/create_hall.html'
success_url = reverse_lazy('home')
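# Wiring sketch (illustrative, not part of the original file): one hypothetical way these
# class-based views could be routed in the project's urls.py; paths and route names are
# assumptions, only the view names come from this module.
#
#   from django.urls import path
#   from .views import home, SignUp, createHallsView
#
#   urlpatterns = [
#       path('', home, name='home'),
#       path('signup/', SignUp.as_view(), name='signup'),
#       path('halls/create/', createHallsView.as_view(), name='create_hall'),
#   ]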
| 29.7 | 55 | 0.737374 | [
"MIT"
] | MsNahid/Youtube-Hall | halls/views.py | 594 | Python |
'''
Created on Sep 3, 2012
@author: Daniel J. Rivers
'''
from DataAccess.TableData import TableData
from DataAccess.TableHandler import TableHandler
class EpisodeHandler( TableHandler ):
pass
class Episode( TableData ):
def __init__( self ):
self.columnNames = [ ( "SEASON_ID", "INTEGER" ), ( "EPISODE_NUM", "INTEGER" ), ( "FILE", "TEXT" ), ( "TOD", "TEXT" ) ]
self.tableName = "EPISODE"
self.where = 1
| 25.388889 | 127 | 0.619256 | [
"MIT"
] | ErebusMaligan/python | FileInventory/DataAccess/Tables/EpisodeHandler.py | 457 | Python |
import mmdet2trt.ops.util_ops as mm2trt_util
import torch
from mmdet2trt.models.builder import register_wraper
from mmdet2trt.models.dense_heads.anchor_free_head import AnchorFreeHeadWraper
@register_wraper('mmdet.models.FoveaHead')
class FoveaHeadWraper(AnchorFreeHeadWraper):
def __init__(self, module):
super(FoveaHeadWraper, self).__init__(module)
def forward(self, feat, x):
img_shape = x.shape[2:]
module = self.module
cfg = self.test_cfg
cls_scores, bbox_preds = module(feat)
mlvl_points = self.get_points(cls_scores, flatten=True)
mlvl_bboxes = []
mlvl_scores = []
for cls_score, bbox_pred, stride, base_len, (y, x) in zip(
cls_scores, bbox_preds, module.strides, module.base_edge_list,
mlvl_points):
scores = cls_score.permute(0, 2, 3, 1).reshape(
cls_score.shape[0], -1, module.cls_out_channels).sigmoid()
bbox_pred = bbox_pred.permute(0, 2, 3,
1).reshape(bbox_pred.shape[0], -1,
4).exp()
x = x.unsqueeze(0) + 0.5
y = y.unsqueeze(0) + 0.5
x = x.expand_as(bbox_pred[:, :, 0])
y = y.expand_as(bbox_pred[:, :, 0])
nms_pre = cfg.get('nms_pre', -1)
if nms_pre > 0:
                # concatenate zero padding so that topk below is always valid;
                # a workaround that may be improved in the future
scores = mm2trt_util.pad_with_value(scores, 1, nms_pre, 0.)
bbox_pred = mm2trt_util.pad_with_value(bbox_pred, 1, nms_pre)
y = mm2trt_util.pad_with_value(y, 1, nms_pre)
x = mm2trt_util.pad_with_value(x, 1, nms_pre)
# do topk
max_scores, _ = (scores).max(dim=2)
_, topk_inds = max_scores.topk(nms_pre, dim=1)
bbox_pred = mm2trt_util.gather_topk(bbox_pred, 1, topk_inds)
scores = mm2trt_util.gather_topk(scores, 1, topk_inds)
y = mm2trt_util.gather_topk(y, 1, topk_inds)
x = mm2trt_util.gather_topk(x, 1, topk_inds)
x1 = (stride * x - base_len * bbox_pred[:, :, 0]).\
clamp(min=0, max=img_shape[1] - 1)
y1 = (stride * y - base_len * bbox_pred[:, :, 1]).\
clamp(min=0, max=img_shape[0] - 1)
x2 = (stride * x + base_len * bbox_pred[:, :, 2]).\
clamp(min=0, max=img_shape[1] - 1)
y2 = (stride * y + base_len * bbox_pred[:, :, 3]).\
clamp(min=0, max=img_shape[0] - 1)
bboxes = torch.stack([x1, y1, x2, y2], -1)
mlvl_bboxes.append(bboxes)
mlvl_scores.append(scores)
mlvl_bboxes = torch.cat(mlvl_bboxes, dim=1)
mlvl_scores = torch.cat(mlvl_scores, dim=1)
mlvl_proposals = mlvl_bboxes.unsqueeze(2)
max_scores, _ = mlvl_scores.max(dim=2)
topk_pre = max(1000, nms_pre)
_, topk_inds = max_scores.topk(
min(topk_pre, mlvl_scores.shape[1]), dim=1)
mlvl_proposals = mm2trt_util.gather_topk(mlvl_proposals, 1, topk_inds)
mlvl_scores = mm2trt_util.gather_topk(mlvl_scores, 1, topk_inds)
num_bboxes = mlvl_proposals.shape[1]
num_detected, proposals, scores, cls_id = self.rcnn_nms(
mlvl_scores, mlvl_proposals, num_bboxes, self.test_cfg.max_per_img)
return num_detected, proposals, scores, cls_id
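# Sketch of the pad-then-topk idea used above, in plain torch (illustrative, not part of
# the original module): padding the candidate dimension with a sentinel score keeps
# topk(k) valid for a fixed k, which avoids data-dependent shapes during TensorRT conversion.
#
#   scores = torch.rand(1, 80, 3)                 # [batch, candidates, classes]
#   k = 1000
#   pad = scores.new_full((1, k, 3), 0.)          # sentinel scores
#   scores = torch.cat([scores, pad], dim=1)
#   max_scores, _ = scores.max(dim=2)
#   _, topk_inds = max_scores.topk(k, dim=1)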
| 44.05 | 79 | 0.574915 | [
"Apache-2.0"
] | daavoo/mmdetection-to-tensorrt | mmdet2trt/models/dense_heads/fovea_head.py | 3,524 | Python |
from ._version import version_info, __version__ | 47 | 47 | 0.87234 | [
"Apache-2.0"
] | IKNL/VANTAGE6 | vantage6/cli/__init__.py | 47 | Python |
from django.conf.urls import url
from django.conf.urls import patterns
from events import views
urlpatterns = patterns('',
# Events
url(r'^microcosms/(?P<microcosm_id>\d+)/create/event/$', views.create, name='create-event'),
url(r'^events/(?P<event_id>\d+)/$', views.single, name='single-event'),
url(r'^events/(?P<event_id>\d+)/csv/$', views.csv, name='csv-event'),
url(r'^events/(?P<event_id>\d+)/edit/$', views.edit, name='edit-event'),
url(r'^events/(?P<event_id>\d+)/delete/$', views.delete, name='delete-event'),
url(r'^events/(?P<event_id>\d+)/newest/$', views.newest, name='newest-event'),
# RSVP to an event
url(r'^events/(?P<event_id>\d+)/rsvp/$', views.rsvp, name='rsvp-event'),
# Proxy geocoding requests to the backend
url(r'^geocode/$', views.geocode, name='geocode'),
) | 43.736842 | 96 | 0.645006 | [
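# Note (editorial, not part of the original file): django.conf.urls.patterns() was removed
# in Django 1.10, and url() itself was removed in Django 4.0. On modern Django the same
# routes would be declared as a plain list with re_path(), e.g.
#
#   from django.urls import re_path
#   urlpatterns = [
#       re_path(r'^events/(?P<event_id>\d+)/$', views.single, name='single-event'),
#       # ... remaining routes unchanged ...
#   ]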
"MIT"
] | microcosm-cc/microweb | events/urls.py | 831 | Python |
# py-motmetrics - Metrics for multiple object tracker (MOT) benchmarking.
# https://github.com/cheind/py-motmetrics/
#
# MIT License
# Copyright (c) 2017-2020 Christoph Heindl, Jack Valmadre and others.
# See LICENSE file for terms.
"""Tests behavior of MOTAccumulator."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import pandas as pd
import pytest
import motmetrics as mm
def test_events():
"""Tests that expected events are created by MOTAccumulator.update()."""
acc = mm.MOTAccumulator()
# All FP
acc.update([], [1, 2], [], frameid=0)
# All miss
acc.update([1, 2], [], [], frameid=1)
# Match
acc.update([1, 2], [1, 2], [[1, 0.5], [0.3, 1]], frameid=2)
# Switch
acc.update([1, 2], [1, 2], [[0.2, np.nan], [np.nan, 0.1]], frameid=3)
# Match. Better new match is available but should prefer history
acc.update([1, 2], [1, 2], [[5, 1], [1, 5]], frameid=4)
# No data
acc.update([], [], [], frameid=5)
expect = mm.MOTAccumulator.new_event_dataframe()
expect.loc[(0, 0), :] = ['RAW', np.nan, np.nan, np.nan]
expect.loc[(0, 1), :] = ['RAW', np.nan, 1, np.nan]
expect.loc[(0, 2), :] = ['RAW', np.nan, 2, np.nan]
expect.loc[(0, 3), :] = ['FP', np.nan, 1, np.nan]
expect.loc[(0, 4), :] = ['FP', np.nan, 2, np.nan]
expect.loc[(1, 0), :] = ['RAW', np.nan, np.nan, np.nan]
expect.loc[(1, 1), :] = ['RAW', 1, np.nan, np.nan]
expect.loc[(1, 2), :] = ['RAW', 2, np.nan, np.nan]
expect.loc[(1, 3), :] = ['MISS', 1, np.nan, np.nan]
expect.loc[(1, 4), :] = ['MISS', 2, np.nan, np.nan]
expect.loc[(2, 0), :] = ['RAW', np.nan, np.nan, np.nan]
expect.loc[(2, 1), :] = ['RAW', 1, 1, 1.0]
expect.loc[(2, 2), :] = ['RAW', 1, 2, 0.5]
expect.loc[(2, 3), :] = ['RAW', 2, 1, 0.3]
expect.loc[(2, 4), :] = ['RAW', 2, 2, 1.0]
expect.loc[(2, 5), :] = ['MATCH', 1, 2, 0.5]
expect.loc[(2, 6), :] = ['MATCH', 2, 1, 0.3]
expect.loc[(3, 0), :] = ['RAW', np.nan, np.nan, np.nan]
expect.loc[(3, 1), :] = ['RAW', 1, 1, 0.2]
expect.loc[(3, 2), :] = ['RAW', 2, 2, 0.1]
expect.loc[(3, 3), :] = ['TRANSFER', 1, 1, 0.2]
expect.loc[(3, 4), :] = ['SWITCH', 1, 1, 0.2]
expect.loc[(3, 5), :] = ['TRANSFER', 2, 2, 0.1]
expect.loc[(3, 6), :] = ['SWITCH', 2, 2, 0.1]
expect.loc[(4, 0), :] = ['RAW', np.nan, np.nan, np.nan]
expect.loc[(4, 1), :] = ['RAW', 1, 1, 5.]
expect.loc[(4, 2), :] = ['RAW', 1, 2, 1.]
expect.loc[(4, 3), :] = ['RAW', 2, 1, 1.]
expect.loc[(4, 4), :] = ['RAW', 2, 2, 5.]
expect.loc[(4, 5), :] = ['MATCH', 1, 1, 5.]
expect.loc[(4, 6), :] = ['MATCH', 2, 2, 5.]
expect.loc[(5, 0), :] = ['RAW', np.nan, np.nan, np.nan]
pd.util.testing.assert_frame_equal(acc.events, expect)
def test_max_switch_time():
"""Tests max_switch_time option."""
acc = mm.MOTAccumulator(max_switch_time=1)
acc.update([1, 2], [1, 2], [[1, 0.5], [0.3, 1]], frameid=1) # 1->a, 2->b
frameid = acc.update([1, 2], [1, 2], [[0.5, np.nan], [np.nan, 0.5]], frameid=2) # 1->b, 2->a
df = acc.events.loc[frameid]
assert ((df.Type == 'SWITCH') | (df.Type == 'RAW') | (df.Type == 'TRANSFER')).all()
acc = mm.MOTAccumulator(max_switch_time=1)
acc.update([1, 2], [1, 2], [[1, 0.5], [0.3, 1]], frameid=1) # 1->a, 2->b
frameid = acc.update([1, 2], [1, 2], [[0.5, np.nan], [np.nan, 0.5]], frameid=5) # Later frame 1->b, 2->a
df = acc.events.loc[frameid]
assert ((df.Type == 'MATCH') | (df.Type == 'RAW') | (df.Type == 'TRANSFER')).all()
def test_auto_id():
"""Tests auto_id option."""
acc = mm.MOTAccumulator(auto_id=True)
acc.update([1, 2, 3, 4], [], [])
acc.update([1, 2, 3, 4], [], [])
assert acc.events.index.levels[0][-1] == 1
acc.update([1, 2, 3, 4], [], [])
assert acc.events.index.levels[0][-1] == 2
with pytest.raises(AssertionError):
acc.update([1, 2, 3, 4], [], [], frameid=5)
acc = mm.MOTAccumulator(auto_id=False)
with pytest.raises(AssertionError):
acc.update([1, 2, 3, 4], [], [])
def test_merge_dataframes():
"""Tests merge_event_dataframes()."""
# pylint: disable=too-many-statements
acc = mm.MOTAccumulator()
acc.update([], [1, 2], [], frameid=0)
acc.update([1, 2], [], [], frameid=1)
acc.update([1, 2], [1, 2], [[1, 0.5], [0.3, 1]], frameid=2)
acc.update([1, 2], [1, 2], [[0.2, np.nan], [np.nan, 0.1]], frameid=3)
r, mappings = mm.MOTAccumulator.merge_event_dataframes([acc.events, acc.events], return_mappings=True)
expect = mm.MOTAccumulator.new_event_dataframe()
expect.loc[(0, 0), :] = ['RAW', np.nan, np.nan, np.nan]
expect.loc[(0, 1), :] = ['RAW', np.nan, mappings[0]['hid_map'][1], np.nan]
expect.loc[(0, 2), :] = ['RAW', np.nan, mappings[0]['hid_map'][2], np.nan]
expect.loc[(0, 3), :] = ['FP', np.nan, mappings[0]['hid_map'][1], np.nan]
expect.loc[(0, 4), :] = ['FP', np.nan, mappings[0]['hid_map'][2], np.nan]
expect.loc[(1, 0), :] = ['RAW', np.nan, np.nan, np.nan]
expect.loc[(1, 1), :] = ['RAW', mappings[0]['oid_map'][1], np.nan, np.nan]
expect.loc[(1, 2), :] = ['RAW', mappings[0]['oid_map'][2], np.nan, np.nan]
expect.loc[(1, 3), :] = ['MISS', mappings[0]['oid_map'][1], np.nan, np.nan]
expect.loc[(1, 4), :] = ['MISS', mappings[0]['oid_map'][2], np.nan, np.nan]
expect.loc[(2, 0), :] = ['RAW', np.nan, np.nan, np.nan]
expect.loc[(2, 1), :] = ['RAW', mappings[0]['oid_map'][1], mappings[0]['hid_map'][1], 1]
expect.loc[(2, 2), :] = ['RAW', mappings[0]['oid_map'][1], mappings[0]['hid_map'][2], 0.5]
expect.loc[(2, 3), :] = ['RAW', mappings[0]['oid_map'][2], mappings[0]['hid_map'][1], 0.3]
expect.loc[(2, 4), :] = ['RAW', mappings[0]['oid_map'][2], mappings[0]['hid_map'][2], 1.0]
expect.loc[(2, 5), :] = ['MATCH', mappings[0]['oid_map'][1], mappings[0]['hid_map'][2], 0.5]
expect.loc[(2, 6), :] = ['MATCH', mappings[0]['oid_map'][2], mappings[0]['hid_map'][1], 0.3]
expect.loc[(3, 0), :] = ['RAW', np.nan, np.nan, np.nan]
expect.loc[(3, 1), :] = ['RAW', mappings[0]['oid_map'][1], mappings[0]['hid_map'][1], 0.2]
expect.loc[(3, 2), :] = ['RAW', mappings[0]['oid_map'][2], mappings[0]['hid_map'][2], 0.1]
expect.loc[(3, 3), :] = ['TRANSFER', mappings[0]['oid_map'][1], mappings[0]['hid_map'][1], 0.2]
expect.loc[(3, 4), :] = ['SWITCH', mappings[0]['oid_map'][1], mappings[0]['hid_map'][1], 0.2]
expect.loc[(3, 5), :] = ['TRANSFER', mappings[0]['oid_map'][2], mappings[0]['hid_map'][2], 0.1]
expect.loc[(3, 6), :] = ['SWITCH', mappings[0]['oid_map'][2], mappings[0]['hid_map'][2], 0.1]
# Merge duplication
expect.loc[(4, 0), :] = ['RAW', np.nan, np.nan, np.nan]
expect.loc[(4, 1), :] = ['RAW', np.nan, mappings[1]['hid_map'][1], np.nan]
expect.loc[(4, 2), :] = ['RAW', np.nan, mappings[1]['hid_map'][2], np.nan]
expect.loc[(4, 3), :] = ['FP', np.nan, mappings[1]['hid_map'][1], np.nan]
expect.loc[(4, 4), :] = ['FP', np.nan, mappings[1]['hid_map'][2], np.nan]
expect.loc[(5, 0), :] = ['RAW', np.nan, np.nan, np.nan]
expect.loc[(5, 1), :] = ['RAW', mappings[1]['oid_map'][1], np.nan, np.nan]
expect.loc[(5, 2), :] = ['RAW', mappings[1]['oid_map'][2], np.nan, np.nan]
expect.loc[(5, 3), :] = ['MISS', mappings[1]['oid_map'][1], np.nan, np.nan]
expect.loc[(5, 4), :] = ['MISS', mappings[1]['oid_map'][2], np.nan, np.nan]
expect.loc[(6, 0), :] = ['RAW', np.nan, np.nan, np.nan]
expect.loc[(6, 1), :] = ['RAW', mappings[1]['oid_map'][1], mappings[1]['hid_map'][1], 1]
expect.loc[(6, 2), :] = ['RAW', mappings[1]['oid_map'][1], mappings[1]['hid_map'][2], 0.5]
expect.loc[(6, 3), :] = ['RAW', mappings[1]['oid_map'][2], mappings[1]['hid_map'][1], 0.3]
expect.loc[(6, 4), :] = ['RAW', mappings[1]['oid_map'][2], mappings[1]['hid_map'][2], 1.0]
expect.loc[(6, 5), :] = ['MATCH', mappings[1]['oid_map'][1], mappings[1]['hid_map'][2], 0.5]
expect.loc[(6, 6), :] = ['MATCH', mappings[1]['oid_map'][2], mappings[1]['hid_map'][1], 0.3]
expect.loc[(7, 0), :] = ['RAW', np.nan, np.nan, np.nan]
expect.loc[(7, 1), :] = ['RAW', mappings[1]['oid_map'][1], mappings[1]['hid_map'][1], 0.2]
expect.loc[(7, 2), :] = ['RAW', mappings[1]['oid_map'][2], mappings[1]['hid_map'][2], 0.1]
expect.loc[(7, 3), :] = ['TRANSFER', mappings[1]['oid_map'][1], mappings[1]['hid_map'][1], 0.2]
expect.loc[(7, 4), :] = ['SWITCH', mappings[1]['oid_map'][1], mappings[1]['hid_map'][1], 0.2]
expect.loc[(7, 5), :] = ['TRANSFER', mappings[1]['oid_map'][2], mappings[1]['hid_map'][2], 0.1]
expect.loc[(7, 6), :] = ['SWITCH', mappings[1]['oid_map'][2], mappings[1]['hid_map'][2], 0.1]
pd.util.testing.assert_frame_equal(r, expect)
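# Typical downstream usage sketch (illustrative, not part of the test suite): once an
# accumulator has been filled as in the tests above, summary metrics are computed with a
# metrics host.
#
#   mh = mm.metrics.create()
#   summary = mh.compute(acc, metrics=['num_frames', 'mota', 'motp'], name='acc')
#   print(mm.io.render_summary(summary, formatters=mh.formatters))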
| 47.284946 | 109 | 0.532234 | [
"MIT"
] | Borda/py-motmetrics | motmetrics/tests/test_mot.py | 8,795 | Python |
from django.db import models
from django.utils import timezone
from django.db.models import Sum
#from cycle_2018.models import ScheduleA
import datetime
class BaseModel(models.Model):
active = models.BooleanField(default=True)
created = models.DateTimeField(default=timezone.now)
updated = models.DateTimeField(auto_now=True)
class Meta:
abstract = True
def __unicode__(self):
return self.__str__()
class Donor(BaseModel):
cnn_name = models.CharField(max_length=255, null=True, blank=True)
cnn_employer = models.CharField(max_length=255, null=True, blank=True)
cnn_occupation = models.CharField(max_length=255, null=True, blank=True)
cnn_note = models.TextField(null=True, blank=True)
city = models.CharField(max_length=255, null=True, blank=True)
state = models.CharField(max_length=255, null=True, blank=True)
contribution_total_2018 = models.DecimalField(max_digits=12,decimal_places=2, default=0)
contribution_total_2020 = models.DecimalField(max_digits=12,decimal_places=2, default=0)
def save(self, *args, **kwargs):
self.contribution_total_2018 = self.contributions_2018.filter(active=True).aggregate(Sum('contribution_amount'))['contribution_amount__sum']
if not self.contribution_total_2018:
self.contribution_total_2018 = 0
super().save(*args, **kwargs)
def __str__(self):
return self.cnn_name
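# Usage sketch (assumption, not part of the original file): `contributions_2018` is the
# reverse relation from the 2018 contribution model (e.g. a ForeignKey to Donor declared
# with related_name='contributions_2018', likely on the commented-out ScheduleA model).
# Saving a donor re-aggregates its active contributions:
#
#   donor = Donor.objects.get(pk=1)
#   donor.save()  # refreshes contribution_total_2018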
| 38.675676 | 148 | 0.739343 | [
"Apache-2.0"
] | capitolmuckrakr/cnn-fec | donor/models.py | 1,431 | Python |
# exported from PySB model 'model'
from pysb import Model, Monomer, Parameter, Expression, Compartment, Rule, Observable, Initial, MatchOnce, Annotation, ANY, WILD
Model()
Monomer('Ligand', ['Receptor'])
Monomer('ParpU', ['C3A'])
Monomer('C8A', ['BidU', 'C3pro'])
Monomer('SmacM', ['BaxA'])
Monomer('BaxM', ['BidM', 'BaxA'])
Monomer('Apop', ['C3pro', 'Xiap'])
Monomer('Fadd', ['Receptor', 'C8pro'])
Monomer('SmacC', ['Xiap'])
Monomer('ParpC')
Monomer('Xiap', ['SmacC', 'Apop', 'C3A'])
Monomer('C9')
Monomer('C3ub')
Monomer('C8pro', ['Fadd', 'C6A'])
Monomer('Bcl2', ['BidM', 'BaxA'])
Monomer('C3pro', ['Apop', 'C8A'])
Monomer('CytoCM', ['BaxA'])
Monomer('CytoCC')
Monomer('BaxA', ['BaxM', 'Bcl2', 'BaxA_1', 'BaxA_2', 'SmacM', 'CytoCM'])
Monomer('ApafI')
Monomer('BidU', ['C8A'])
Monomer('BidT')
Monomer('C3A', ['Xiap', 'ParpU', 'C6pro'])
Monomer('ApafA')
Monomer('BidM', ['BaxM', 'Bcl2'])
Monomer('Receptor', ['Ligand', 'Fadd'])
Monomer('C6A', ['C8pro'])
Monomer('C6pro', ['C3A'])
Parameter('bind_0_Ligand_binder_Receptor_binder_target_2kf', 1.0)
Parameter('bind_0_Ligand_binder_Receptor_binder_target_1kr', 1.0)
Parameter('bind_0_Receptor_binder_Fadd_binder_target_2kf', 1.0)
Parameter('bind_0_Receptor_binder_Fadd_binder_target_1kr', 1.0)
Parameter('substrate_binding_0_Fadd_catalyzer_C8pro_substrate_2kf', 1.0)
Parameter('substrate_binding_0_Fadd_catalyzer_C8pro_substrate_1kr', 1.0)
Parameter('catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product_1kc', 1.0)
Parameter('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_2kf', 1.0)
Parameter('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_1kr', 1.0)
Parameter('catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product_1kc', 1.0)
Parameter('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_2kf', 1.0)
Parameter('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_1kr', 1.0)
Parameter('inhibition_0_SmacC_inhibitor_Xiap_inh_target_2kf', 1.0)
Parameter('inhibition_0_SmacC_inhibitor_Xiap_inh_target_1kr', 1.0)
Parameter('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_2kf', 1.0)
Parameter('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_1kr', 1.0)
Parameter('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_2kf', 1.0)
Parameter('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_1kr', 1.0)
Parameter('catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product_1kc', 1.0)
Parameter('inhibition_0_Xiap_inhibitor_Apop_inh_target_2kf', 1.0)
Parameter('inhibition_0_Xiap_inhibitor_Apop_inh_target_1kr', 1.0)
Parameter('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_2kf', 1.0)
Parameter('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_1kr', 1.0)
Parameter('catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product_1kc', 1.0)
Parameter('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_2kf', 1.0)
Parameter('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_1kr', 1.0)
Parameter('catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product_1kc', 1.0)
Parameter('equilibration_0_BidT_equil_a_BidM_equil_b_1kf', 1.0)
Parameter('equilibration_0_BidT_equil_a_BidM_equil_b_1kr', 1.0)
Parameter('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_2kf', 1.0)
Parameter('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_1kr', 1.0)
Parameter('catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product_1kc', 1.0)
Parameter('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_2kf', 1.0)
Parameter('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_1kr', 1.0)
Parameter('self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate_1kc', 1.0)
Parameter('inhibition_0_Bcl2_inhibitor_BidM_inh_target_2df', 1.0)
Parameter('inhibition_0_Bcl2_inhibitor_BidM_inh_target_1dr', 1.0)
Parameter('inhibition_0_Bcl2_inhibitor_BaxA_inh_target_2xf', 1.0)
Parameter('inhibition_0_Bcl2_inhibitor_BaxA_inh_target_1xr', 1.0)
Parameter('pore_formation_0_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_0_BaxA_pore_1kr', 1.0)
Parameter('pore_formation_1_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_1_BaxA_pore_1kr', 1.0)
Parameter('pore_formation_2_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_2_BaxA_pore_1kr', 1.0)
Parameter('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_2kf', 1.0)
Parameter('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kr', 1.0)
Parameter('transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kc', 1.0)
Parameter('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_2kf', 1.0)
Parameter('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kr', 1.0)
Parameter('transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kc', 1.0)
Parameter('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_2kf', 1.0)
Parameter('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_1kr', 1.0)
Parameter('catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product_1kc', 1.0)
Parameter('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_2kf', 1.0)
Parameter('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_1kr', 1.0)
Parameter('catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product_1kc', 1.0)
Parameter('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_2kf', 1.0)
Parameter('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_1kr', 1.0)
Parameter('catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product_1kc', 1.0)
Parameter('Ligand_0', 1000.0)
Parameter('ParpU_0', 1000000.0)
Parameter('C8A_0', 0.0)
Parameter('SmacM_0', 100000.0)
Parameter('BaxM_0', 40000.0)
Parameter('Apop_0', 0.0)
Parameter('Fadd_0', 130000.0)
Parameter('SmacC_0', 0.0)
Parameter('ParpC_0', 0.0)
Parameter('Xiap_0', 92500.0)
Parameter('C9_0', 100000.0)
Parameter('C3ub_0', 0.0)
Parameter('C8pro_0', 130000.0)
Parameter('Bcl2_0', 150000.0)
Parameter('C3pro_0', 21000.0)
Parameter('CytoCM_0', 500000.0)
Parameter('CytoCC_0', 0.0)
Parameter('BaxA_0', 0.0)
Parameter('ApafI_0', 100000.0)
Parameter('BidU_0', 171000.0)
Parameter('BidT_0', 0.0)
Parameter('C3A_0', 0.0)
Parameter('ApafA_0', 0.0)
Parameter('BidM_0', 0.0)
Parameter('Receptor_0', 100.0)
Parameter('C6A_0', 0.0)
Parameter('C6pro_0', 100.0)
Observable('Ligand_obs', Ligand())
Observable('ParpU_obs', ParpU())
Observable('C8A_obs', C8A())
Observable('SmacM_obs', SmacM())
Observable('BaxM_obs', BaxM())
Observable('Apop_obs', Apop())
Observable('Fadd_obs', Fadd())
Observable('SmacC_obs', SmacC())
Observable('ParpC_obs', ParpC())
Observable('Xiap_obs', Xiap())
Observable('C9_obs', C9())
Observable('C3ub_obs', C3ub())
Observable('C8pro_obs', C8pro())
Observable('Bcl2_obs', Bcl2())
Observable('C3pro_obs', C3pro())
Observable('CytoCM_obs', CytoCM())
Observable('CytoCC_obs', CytoCC())
Observable('BaxA_obs', BaxA())
Observable('ApafI_obs', ApafI())
Observable('BidU_obs', BidU())
Observable('BidT_obs', BidT())
Observable('C3A_obs', C3A())
Observable('ApafA_obs', ApafA())
Observable('BidM_obs', BidM())
Observable('Receptor_obs', Receptor())
Observable('C6A_obs', C6A())
Observable('C6pro_obs', C6pro())
Rule('bind_0_Ligand_binder_Receptor_binder_target', Ligand(Receptor=None) + Receptor(Ligand=None, Fadd=None) | Ligand(Receptor=1) % Receptor(Ligand=1, Fadd=None), bind_0_Ligand_binder_Receptor_binder_target_2kf, bind_0_Ligand_binder_Receptor_binder_target_1kr)
Rule('bind_0_Receptor_binder_Fadd_binder_target', Receptor(Ligand=ANY, Fadd=None) + Fadd(Receptor=None, C8pro=None) | Receptor(Ligand=ANY, Fadd=1) % Fadd(Receptor=1, C8pro=None), bind_0_Receptor_binder_Fadd_binder_target_2kf, bind_0_Receptor_binder_Fadd_binder_target_1kr)
Rule('substrate_binding_0_Fadd_catalyzer_C8pro_substrate', Fadd(Receptor=ANY, C8pro=None) + C8pro(Fadd=None, C6A=None) | Fadd(Receptor=ANY, C8pro=1) % C8pro(Fadd=1, C6A=None), substrate_binding_0_Fadd_catalyzer_C8pro_substrate_2kf, substrate_binding_0_Fadd_catalyzer_C8pro_substrate_1kr)
Rule('catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product', Fadd(Receptor=ANY, C8pro=1) % C8pro(Fadd=1, C6A=None) >> Fadd(Receptor=ANY, C8pro=None) + C8A(BidU=None, C3pro=None), catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product_1kc)
Rule('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product', C8A(BidU=None, C3pro=None) + BidU(C8A=None) | C8A(BidU=1, C3pro=None) % BidU(C8A=1), catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_2kf, catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_1kr)
Rule('catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product', C8A(BidU=1, C3pro=None) % BidU(C8A=1) >> C8A(BidU=None, C3pro=None) + BidT(), catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product_1kc)
Rule('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex', ApafI() + CytoCC() | ApafA(), conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_2kf, conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_1kr)
Rule('inhibition_0_SmacC_inhibitor_Xiap_inh_target', SmacC(Xiap=None) + Xiap(SmacC=None, Apop=None, C3A=None) | SmacC(Xiap=1) % Xiap(SmacC=1, Apop=None, C3A=None), inhibition_0_SmacC_inhibitor_Xiap_inh_target_2kf, inhibition_0_SmacC_inhibitor_Xiap_inh_target_1kr)
Rule('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex', ApafA() + C9() | Apop(C3pro=None, Xiap=None), conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_2kf, conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_1kr)
Rule('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product', Apop(C3pro=None, Xiap=None) + C3pro(Apop=None, C8A=None) | Apop(C3pro=1, Xiap=None) % C3pro(Apop=1, C8A=None), catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_2kf, catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_1kr)
Rule('catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product', Apop(C3pro=1, Xiap=None) % C3pro(Apop=1, C8A=None) >> Apop(C3pro=None, Xiap=None) + C3A(Xiap=None, ParpU=None, C6pro=None), catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product_1kc)
Rule('inhibition_0_Xiap_inhibitor_Apop_inh_target', Xiap(SmacC=None, Apop=None, C3A=None) + Apop(C3pro=None, Xiap=None) | Xiap(SmacC=None, Apop=1, C3A=None) % Apop(C3pro=None, Xiap=1), inhibition_0_Xiap_inhibitor_Apop_inh_target_2kf, inhibition_0_Xiap_inhibitor_Apop_inh_target_1kr)
Rule('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product', Xiap(SmacC=None, Apop=None, C3A=None) + C3A(Xiap=None, ParpU=None, C6pro=None) | Xiap(SmacC=None, Apop=None, C3A=1) % C3A(Xiap=1, ParpU=None, C6pro=None), catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_2kf, catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_1kr)
Rule('catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product', Xiap(SmacC=None, Apop=None, C3A=1) % C3A(Xiap=1, ParpU=None, C6pro=None) >> Xiap(SmacC=None, Apop=None, C3A=None) + C3ub(), catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product_1kc)
Rule('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product', C3A(Xiap=None, ParpU=None, C6pro=None) + ParpU(C3A=None) | C3A(Xiap=None, ParpU=1, C6pro=None) % ParpU(C3A=1), catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_2kf, catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_1kr)
Rule('catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product', C3A(Xiap=None, ParpU=1, C6pro=None) % ParpU(C3A=1) >> C3A(Xiap=None, ParpU=None, C6pro=None) + ParpC(), catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product_1kc)
Rule('equilibration_0_BidT_equil_a_BidM_equil_b', BidT() | BidM(BaxM=None, Bcl2=None), equilibration_0_BidT_equil_a_BidM_equil_b_1kf, equilibration_0_BidT_equil_a_BidM_equil_b_1kr)
Rule('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product', BidM(BaxM=None, Bcl2=None) + BaxM(BidM=None, BaxA=None) | BidM(BaxM=1, Bcl2=None) % BaxM(BidM=1, BaxA=None), catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_2kf, catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_1kr)
Rule('catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product', BidM(BaxM=1, Bcl2=None) % BaxM(BidM=1, BaxA=None) >> BidM(BaxM=None, Bcl2=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product_1kc)
Rule('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate', BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxM(BidM=None, BaxA=None) | BaxA(BaxM=1, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) % BaxM(BidM=None, BaxA=1), self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_2kf, self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_1kr)
Rule('self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate', BaxA(BaxM=1, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) % BaxM(BidM=None, BaxA=1) >> BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate_1kc)
Rule('inhibition_0_Bcl2_inhibitor_BidM_inh_target', Bcl2(BidM=None, BaxA=None) + BidM(BaxM=None, Bcl2=None) | Bcl2(BidM=1, BaxA=None) % BidM(BaxM=None, Bcl2=1), inhibition_0_Bcl2_inhibitor_BidM_inh_target_2df, inhibition_0_Bcl2_inhibitor_BidM_inh_target_1dr)
Rule('inhibition_0_Bcl2_inhibitor_BaxA_inh_target', Bcl2(BidM=None, BaxA=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) | Bcl2(BidM=None, BaxA=1) % BaxA(BaxM=None, Bcl2=1, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), inhibition_0_Bcl2_inhibitor_BaxA_inh_target_2xf, inhibition_0_Bcl2_inhibitor_BaxA_inh_target_1xr)
Rule('pore_formation_0_BaxA_pore', BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=None, SmacM=None, CytoCM=None), pore_formation_0_BaxA_pore_2kf, pore_formation_0_BaxA_pore_1kr)
Rule('pore_formation_1_BaxA_pore', BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=None, SmacM=None, CytoCM=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None), pore_formation_1_BaxA_pore_2kf, pore_formation_1_BaxA_pore_1kr)
Rule('pore_formation_2_BaxA_pore', BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None), pore_formation_2_BaxA_pore_2kf, pore_formation_2_BaxA_pore_1kr)
Rule('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C', BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + SmacM(BaxA=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=5, CytoCM=None) % SmacM(BaxA=5), transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_2kf, transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kr)
Rule('transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C', BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=5, CytoCM=None) % SmacM(BaxA=5) >> BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + SmacC(Xiap=None), transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kc)
Rule('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C', BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + CytoCM(BaxA=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=5) % CytoCM(BaxA=5), transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_2kf, transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kr)
Rule('transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C', BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=5) % CytoCM(BaxA=5) >> BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + CytoCC(), transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kc)
Rule('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product', C8A(BidU=None, C3pro=None) + C3pro(Apop=None, C8A=None) | C8A(BidU=None, C3pro=1) % C3pro(Apop=None, C8A=1), catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_2kf, catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_1kr)
Rule('catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product', C8A(BidU=None, C3pro=1) % C3pro(Apop=None, C8A=1) >> C8A(BidU=None, C3pro=None) + C3A(Xiap=None, ParpU=None, C6pro=None), catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product_1kc)
Rule('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product', C3A(Xiap=None, ParpU=None, C6pro=None) + C6pro(C3A=None) | C3A(Xiap=None, ParpU=None, C6pro=1) % C6pro(C3A=1), catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_2kf, catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_1kr)
Rule('catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product', C3A(Xiap=None, ParpU=None, C6pro=1) % C6pro(C3A=1) >> C3A(Xiap=None, ParpU=None, C6pro=None) + C6A(C8pro=None), catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product_1kc)
Rule('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product', C6A(C8pro=None) + C8pro(Fadd=None, C6A=None) | C6A(C8pro=1) % C8pro(Fadd=None, C6A=1), catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_2kf, catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_1kr)
Rule('catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product', C6A(C8pro=1) % C8pro(Fadd=None, C6A=1) >> C6A(C8pro=None) + C8A(BidU=None, C3pro=None), catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product_1kc)
Initial(Ligand(Receptor=None), Ligand_0)
Initial(ParpU(C3A=None), ParpU_0)
Initial(C8A(BidU=None, C3pro=None), C8A_0)
Initial(SmacM(BaxA=None), SmacM_0)
Initial(BaxM(BidM=None, BaxA=None), BaxM_0)
Initial(Apop(C3pro=None, Xiap=None), Apop_0)
Initial(Fadd(Receptor=None, C8pro=None), Fadd_0)
Initial(SmacC(Xiap=None), SmacC_0)
Initial(ParpC(), ParpC_0)
Initial(Xiap(SmacC=None, Apop=None, C3A=None), Xiap_0)
Initial(C9(), C9_0)
Initial(C3ub(), C3ub_0)
Initial(C8pro(Fadd=None, C6A=None), C8pro_0)
Initial(Bcl2(BidM=None, BaxA=None), Bcl2_0)
Initial(C3pro(Apop=None, C8A=None), C3pro_0)
Initial(CytoCM(BaxA=None), CytoCM_0)
Initial(CytoCC(), CytoCC_0)
Initial(BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), BaxA_0)
Initial(ApafI(), ApafI_0)
Initial(BidU(C8A=None), BidU_0)
Initial(BidT(), BidT_0)
Initial(C3A(Xiap=None, ParpU=None, C6pro=None), C3A_0)
Initial(ApafA(), ApafA_0)
Initial(BidM(BaxM=None, Bcl2=None), BidM_0)
Initial(Receptor(Ligand=None, Fadd=None), Receptor_0)
Initial(C6A(C8pro=None), C6A_0)
Initial(C6pro(C3A=None), C6pro_0)
| 95.138889 | 798 | 0.804136 | [
"MIT"
] | LoLab-VU/Bayesian_Inference_of_Network_Dynamics | flux_combined_high_binding/model_792.py | 20,550 | Python |
from project.customer import Customer
from project.equipment import Equipment
from project.exercise_plan import ExercisePlan
from project.subscription import Subscription
from project.trainer import Trainer
class Gym:
def __init__(self):
self.customers = []
self.trainers = []
self.equipment = []
self.plans = []
self.subscriptions = []
def add_customer(self, customer: Customer):
for objects in self.customers:
if objects.name == customer.name:
return
self.customers.append(customer)
def add_trainer(self, trainer: Trainer):
for objects in self.trainers:
if objects.name == trainer.name:
return
self.trainers.append(trainer)
def add_equipment(self, equipment: Equipment):
for objects in self.equipment:
if objects.name == equipment.name:
return
self.equipment.append(equipment)
def add_plan(self, plan: ExercisePlan):
for objects in self.plans:
if objects.id == plan.id:
return
self.plans.append(plan)
def add_subscription(self, subscription: Subscription):
for objects in self.subscriptions:
if objects.id == subscription.id:
return
self.subscriptions.append(subscription)
def subscription_info(self, subscription_id: int):
result = []
for s in self.subscriptions:
if s.id == subscription_id:
result.append(repr(s))
for c in self.customers:
if c.id == subscription_id:
result.append(repr(c))
for t in self.trainers:
if t.id == subscription_id:
result.append(repr(t))
for e in self.equipment:
if e.id == subscription_id:
result.append(repr(e))
for p in self.plans:
if p.id == subscription_id:
result.append(repr(p))
return '\n'.join(result)
| 31.78125 | 59 | 0.590462 | [
"MIT"
] | bvoytash/Software-University | Python OOP/Class and Static Methods/Gym/gym.py | 2,034 | Python |
import os
import sys
DIET_IN="""
option display_precision 0;
model diet.mod;
param dname symbolic := "amplxl";
param fname symbolic := "#1#";
table Amounts IN (dname) (fname):
[NUTR,FOOD], amt;
table Foods IN (dname) (fname):
FOOD <- [FOOD], cost, f_min, f_max;
table Nutrients IN (dname) (fname):
NUTR <- [NUTR], n_min, n_max;
read table Foods;
read table Nutrients;
read table Amounts;
display NUTR;
display FOOD;
display amt;
solve;
display {j in FOOD} (Buy[j], Buy.rc[j], Buy[j]/f_max[j]);
"""
DIET_INOUT="""
option display_precision 0;
model diet.mod;
param dname symbolic := "amplxl";
param fname_in symbolic := "#1#";
param fname_out symbolic := "#2#";
table Amounts IN (dname) (fname_in):
[NUTR,FOOD], amt;
table Foods IN (dname) (fname_in):
FOOD <- [FOOD], cost, f_min, f_max;
table Nutrients IN (dname) (fname_in):
NUTR <- [NUTR], n_min, n_max;
read table Foods;
read table Nutrients;
read table Amounts;
display NUTR;
display FOOD;
display amt;
solve;
display {j in FOOD} (Buy[j], Buy.rc[j], Buy[j]/f_max[j]);
table ExportFoods (dname) (fname_out) "Foods":
[FOOD] IN, Buy OUT, Buy.rc ~ BuyRC OUT, {j in FOOD} Buy[j]/f_max[j] ~ BuyFrac OUT;
write table ExportFoods;
# read exported table to validate results
reset;
option display_precision 0;
param dname symbolic := "amplxl";
param fname_out symbolic := "#2#";
set FOOD;
param cost{FOOD};
param f_min{FOOD};
param f_max{FOOD};
param Buy{FOOD};
param BuyFrac{FOOD};
param BuyRC{FOOD};
table Foods IN (dname) (fname_out):
FOOD <- [FOOD], cost, f_min, f_max, Buy, BuyFrac, BuyRC;
read table Foods;
display FOOD;
display cost;
display f_min;
display f_max;
display Buy;
display BuyFrac;
display BuyRC;
"""
DIET_OUT="""
option display_precision 0;
model diet.mod;
param dname symbolic := "amplxl";
param fname_in symbolic := "#1#";
param fname_out symbolic := "#2#";
table Amounts IN (dname) (fname_in):
[NUTR,FOOD], amt;
table Foods IN (dname) (fname_in):
FOOD <- [FOOD], cost, f_min, f_max;
table Nutrients IN (dname) (fname_in):
NUTR <- [NUTR], n_min, n_max;
read table Foods;
read table Nutrients;
read table Amounts;
display NUTR;
display FOOD;
display amt;
solve;
display {j in FOOD} (Buy[j], Buy.rc[j], Buy[j]/f_max[j]);
table ExportFoods OUT (dname) (fname_out) "Foods":
[FOOD], cost, f_min, f_max, Buy, Buy.rc ~ BuyRC, {j in FOOD} Buy[j]/f_max[j] ~ BuyFrac;
write table ExportFoods;
# read exported table to validate results
reset;
option display_precision 0;
param dname symbolic := "amplxl";
param fname_out symbolic := "#2#";
set FOOD;
param cost{FOOD};
param f_min{FOOD};
param f_max{FOOD};
param Buy{FOOD};
param BuyFrac{FOOD};
param BuyRC{FOOD};
table Foods IN (dname) (fname_out):
FOOD <- [FOOD], cost, f_min, f_max, Buy, BuyFrac, BuyRC;
read table Foods;
display FOOD;
display cost;
display f_min;
display f_max;
display Buy;
display BuyFrac;
display BuyRC;
"""
DIET_INOUT_SINGLE="""
option display_precision 0;
model diet.mod;
param dname symbolic := "amplxl";
param fname_out symbolic := "#1#";
table Amounts IN (dname) (fname_out):
[NUTR,FOOD], amt;
table Nutrients IN (dname) (fname_out):
NUTR <- [NUTR], n_min, n_max;
table Foods (dname) (fname_out):
[FOOD] IN, cost IN, f_min IN, f_max IN,
Buy OUT, Buy.rc ~ BuyRC OUT,
{j in FOOD} Buy[j]/f_max[j] ~ BuyFrac OUT;
read table Foods;
read table Nutrients;
read table Amounts;
display NUTR;
display FOOD;
display amt;
solve;
display {j in FOOD} (Buy[j], Buy.rc[j], Buy[j]/f_max[j]);
write table Foods;
# read exported table to validate results
reset;
option display_precision 0;
param dname symbolic := "amplxl";
param fname_out symbolic := "#1#";
set FOOD;
param cost{FOOD};
param f_min{FOOD};
param f_max{FOOD};
param Buy{FOOD};
param BuyFrac{FOOD};
param BuyRC{FOOD};
table Foods IN (dname) (fname_out):
FOOD <- [FOOD], cost, f_min, f_max, Buy, BuyFrac, BuyRC;
read table Foods;
display FOOD;
display cost;
display f_min;
display f_max;
display Buy;
display BuyFrac;
display BuyRC;
"""
if __name__ == '__main__':
NTYPES = 3
RUN_DIR = 'run/'
XLSX_DIR = ''
generated = []
for i in range(NTYPES):
name_in = os.path.join(XLSX_DIR, 'diet_in_{}.xlsx'.format(i+1))
runfile = os.path.join(RUN_DIR, 'diet_in_{}.run'.format(i+1))
with open(runfile, 'w') as f:
f.write(
DIET_IN.replace('#1#', name_in)
)
generated.append(runfile)
for i in range(NTYPES):
name_in = os.path.join(XLSX_DIR, 'diet_in_{}.xlsx'.format(i+1))
name_out = os.path.join(XLSX_DIR, 'diet_inout_{}.xlsx'.format(i+1))
runfile = os.path.join(RUN_DIR, 'diet_inout_{}.run'.format(i+1))
with open(runfile, 'w') as f:
f.write(
DIET_INOUT.replace('#1#', name_in).replace('#2#', name_out)
)
generated.append(runfile)
for i in range(NTYPES):
name_in = os.path.join(XLSX_DIR, 'diet_in_{}.xlsx'.format(i+1))
name_out = os.path.join(XLSX_DIR, 'diet_out_{}.xlsx'.format(i+1))
runfile = os.path.join(RUN_DIR, 'diet_out_{}.run'.format(i+1))
with open(runfile, 'w') as f:
f.write(
DIET_OUT.replace('#1#', name_in).replace('#2#', name_out)
)
generated.append(runfile)
for i in range(NTYPES):
name = os.path.join(XLSX_DIR, 'diet_inout_single_{}.xlsx'.format(i+1))
runfile = os.path.join(RUN_DIR, 'diet_inout_single_{}.run'.format(i+1))
with open(runfile, 'w') as f:
f.write(
DIET_INOUT_SINGLE.replace('#1#', name)
)
generated.append(runfile)
for script in generated:
os.system(
'ampl -i amplxl.dll {} > {}'.format(
script, script.replace('.run', '.out')
)
)
| 22.358779 | 91 | 0.643906 | [
"BSD-3-Clause"
] | Seanpm2001-AMPL-lang/plugins | src/amplxl/tests/generate.py | 5,858 | Python |
import json
import re
import logging as log
from .helpers import path_leaf
regexes = {
"bp_processed": "Total basepairs processed:\s*([\d,]+) bp",
"bp_written": "Total written \(filtered\):\s*([\d,]+) bp",
"quality_trimmed": "Quality-trimmed:\s*([\d,]+) bp",
"r_processed": "Total reads processed:\s*([\d,]+)",
"r_with_adapters": "Reads with adapters:\s*([\d,]+)",
}
def cutadapt_to_json(filepath, savetofile=None):
"""Convert cutadapt/trim_galore output to json
Parameters
----------
filepath: string
Path to trim_galore/cutadapt output.txt
Returns
-------
json_data: dict
"""
fh = open(filepath, "r")
trim_info = {}
length_counts = {}
length_exp = {}
length_obsexp = {}
adapters = {}
    sample = None
    log_section = None  # set when an "===" section header is seen; guards the checks below
for l in fh:
if "cutadapt" in l:
sample = None
if l.startswith("Used user"):
# Used user provided input and hence no second pass
adapters = "User provided"
break
if l.startswith("No adapter"):
adapters = "None found (second pass)"
break
if l.startswith("Command line parameters"):
sample = l.split()[-1]
sample = path_leaf(sample).replace(".fq.gz", "").replace(".fastq.gz", "")
if sample in trim_info:
log.debug("Duplicate sample name found! Overwriting: {}".format(sample))
trim_info[sample] = dict()
if sample is not None:
for k, r in list(regexes.items()):
match = re.search(r, l)
if match:
trim_info[sample][k] = int(match.group(1).replace(",", ""))
if "===" in l:
log_section = l.strip().strip("=").strip()
if l.startswith("Sequence:"):
plot_sname = "{} - {}".format(sample, log_section)
adapters[plot_sname] = l.split(";")[0].strip("Sequence: ")
if "length" in l and "count" in l and "expect" in l:
plot_sname = sample
if log_section is not None:
plot_sname = "{} - {}".format(sample, log_section)
length_counts[plot_sname] = dict()
length_exp[plot_sname] = dict()
length_obsexp[plot_sname] = dict()
for l in fh:
r_seqs = re.search("^(\d+)\s+(\d+)\s+([\d\.]+)", l)
if r_seqs:
a_len = int(r_seqs.group(1))
length_counts[plot_sname][a_len] = int(r_seqs.group(2))
length_exp[plot_sname][a_len] = float(r_seqs.group(3))
if float(r_seqs.group(3)) > 0:
length_obsexp[plot_sname][a_len] = float(
r_seqs.group(2)
) / float(r_seqs.group(3))
else:
length_obsexp[plot_sname][a_len] = float(r_seqs.group(2))
else:
break
fh.close()
json_data = {
"adapters": adapters,
"trim_info": trim_info,
"length_exp": length_exp,
"length_obsexp": length_obsexp,
"length_counts": length_counts,
}
if savetofile:
json.dump(json_data, savetofile)
return json_data
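# Usage sketch (illustrative; the report path is an assumption):
#
#   from riboraptor.cutadapt_to_json import cutadapt_to_json
#   data = cutadapt_to_json("sample_R1_trimming_report.txt")
#   with open("report.json", "w") as out:
#       json.dump(data, out, indent=2)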
| 35.904255 | 88 | 0.502815 | [
"BSD-3-Clause"
] | saketkc/riboraptor | riboraptor/cutadapt_to_json.py | 3,375 | Python |
"""
The ``mlflow.keras`` module provides an API for logging and loading Keras models. This module
exports Keras models with the following flavors:
Keras (native) format
This is the main flavor that can be loaded back into Keras.
:py:mod:`mlflow.pyfunc`
Produced for use by generic pyfunc-based deployment tools and batch inference.
"""
import importlib
import os
import yaml
import gorilla
import tempfile
import shutil
import pandas as pd
from distutils.version import LooseVersion
from mlflow import pyfunc
from mlflow.models import Model
import mlflow.tracking
from mlflow.exceptions import MlflowException
from mlflow.models.signature import ModelSignature
from mlflow.models.utils import ModelInputExample, _save_example
from mlflow.tracking.artifact_utils import _download_artifact_from_uri
from mlflow.utils.environment import _mlflow_conda_env
from mlflow.utils.model_utils import _get_flavor_configuration
from mlflow.utils.annotations import experimental
from mlflow.utils.autologging_utils import try_mlflow_log, log_fn_args_as_params
FLAVOR_NAME = "keras"
# File name to which custom objects cloudpickle is saved - used during save and load
_CUSTOM_OBJECTS_SAVE_PATH = "custom_objects.cloudpickle"
_KERAS_MODULE_SPEC_PATH = "keras_module.txt"
# File name to which keras model is saved
_MODEL_SAVE_PATH = "model.h5"
# Conda env subpath when saving/loading model
_CONDA_ENV_SUBPATH = "conda.yaml"
def get_default_conda_env(include_cloudpickle=False, keras_module=None):
"""
:return: The default Conda environment for MLflow Models produced by calls to
:func:`save_model()` and :func:`log_model()`.
"""
import tensorflow as tf
conda_deps = [] # if we use tf.keras we only need to declare dependency on tensorflow
pip_deps = []
if keras_module is None:
import keras
keras_module = keras
if keras_module.__name__ == "keras":
# Temporary fix: the created conda environment has issues installing keras >= 2.3.1
if LooseVersion(keras_module.__version__) < LooseVersion('2.3.1'):
conda_deps.append("keras=={}".format(keras_module.__version__))
else:
pip_deps.append("keras=={}".format(keras_module.__version__))
if include_cloudpickle:
import cloudpickle
pip_deps.append("cloudpickle=={}".format(cloudpickle.__version__))
# Temporary fix: conda-forge currently does not have tensorflow > 1.14
# The Keras pyfunc representation requires the TensorFlow
# backend for Keras. Therefore, the conda environment must
# include TensorFlow
if LooseVersion(tf.__version__) <= LooseVersion('1.13.2'):
conda_deps.append("tensorflow=={}".format(tf.__version__))
else:
pip_deps.append("tensorflow=={}".format(tf.__version__))
return _mlflow_conda_env(
additional_conda_deps=conda_deps,
additional_pip_deps=pip_deps,
additional_conda_channels=None)
def save_model(keras_model, path, conda_env=None, mlflow_model=None, custom_objects=None,
keras_module=None,
signature: ModelSignature = None, input_example: ModelInputExample = None,
**kwargs):
"""
Save a Keras model to a path on the local file system.
:param keras_model: Keras model to be saved.
:param path: Local path where the model is to be saved.
:param conda_env: Either a dictionary representation of a Conda environment or the path to a
                      Conda environment yaml file. If provided, this describes the environment
this model should be run in. At minimum, it should specify the
dependencies contained in :func:`get_default_conda_env()`. If
``None``, the default :func:`get_default_conda_env()` environment is
added to the model. The following is an *example* dictionary
representation of a Conda environment::
{
'name': 'mlflow-env',
'channels': ['defaults'],
'dependencies': [
'python=3.7.0',
'keras=2.2.4',
'tensorflow=1.8.0'
]
}
:param mlflow_model: MLflow model config this flavor is being added to.
:param custom_objects: A Keras ``custom_objects`` dictionary mapping names (strings) to
custom classes or functions associated with the Keras model. MLflow saves
these custom layers using CloudPickle and restores them automatically
when the model is loaded with :py:func:`mlflow.keras.load_model` and
:py:func:`mlflow.pyfunc.load_model`.
:param keras_module: Keras module to be used to save / load the model
(``keras`` or ``tf.keras``). If not provided, MLflow will
attempt to infer the Keras module based on the given model.
:param kwargs: kwargs to pass to ``keras_model.save`` method.
:param signature: (Experimental) :py:class:`ModelSignature <mlflow.models.ModelSignature>`
describes model input and output :py:class:`Schema <mlflow.types.Schema>`.
The model signature can be :py:func:`inferred <mlflow.models.infer_signature>`
from datasets with valid model input (e.g. the training dataset with target
column omitted) and valid model output (e.g. model predictions generated on
the training dataset), for example:
.. code-block:: python
from mlflow.models.signature import infer_signature
train = df.drop_column("target_label")
predictions = ... # compute model predictions
signature = infer_signature(train, predictions)
:param input_example: (Experimental) Input example provides one or several instances of valid
model input. The example can be used as a hint of what data to feed the
model. The given example will be converted to a Pandas DataFrame and then
serialized to json using the Pandas split-oriented format. Bytes are
base64-encoded.
.. code-block:: python
:caption: Example
import mlflow
# Build, compile, and train your model
keras_model = ...
keras_model_path = ...
keras_model.compile(optimizer="rmsprop", loss="mse", metrics=["accuracy"])
results = keras_model.fit(
x_train, y_train, epochs=20, batch_size = 128, validation_data=(x_val, y_val))
# Save the model as an MLflow Model
mlflow.keras.save_model(keras_model, keras_model_path)
"""
if keras_module is None:
def _is_plain_keras(model):
try:
# NB: Network is the first parent with save method
import keras.engine.network
return isinstance(model, keras.engine.network.Network)
except ImportError:
return False
def _is_tf_keras(model):
try:
# NB: Network is not exposed in tf.keras, we check for Model instead.
import tensorflow.keras.models
return isinstance(model, tensorflow.keras.models.Model)
except ImportError:
return False
if _is_plain_keras(keras_model):
keras_module = importlib.import_module("keras")
elif _is_tf_keras(keras_model):
keras_module = importlib.import_module("tensorflow.keras")
else:
raise MlflowException("Unable to infer keras module from the model, please specify "
"which keras module ('keras' or 'tensorflow.keras') is to be "
"used to save and load the model.")
elif type(keras_module) == str:
keras_module = importlib.import_module(keras_module)
# check if path exists
path = os.path.abspath(path)
if os.path.exists(path):
raise MlflowException("Path '{}' already exists".format(path))
# construct new data folder in existing path
data_subpath = "data"
data_path = os.path.join(path, data_subpath)
os.makedirs(data_path)
if mlflow_model is None:
mlflow_model = Model()
if signature is not None:
mlflow_model.signature = signature
if input_example is not None:
_save_example(mlflow_model, input_example, path)
# save custom objects if there are custom objects
if custom_objects is not None:
_save_custom_objects(data_path, custom_objects)
# save keras module spec to path/data/keras_module.txt
with open(os.path.join(data_path, _KERAS_MODULE_SPEC_PATH), "w") as f:
f.write(keras_module.__name__)
# save keras model to path/data/model.h5
model_subpath = os.path.join(data_subpath, _MODEL_SAVE_PATH)
model_path = os.path.join(path, model_subpath)
if path.startswith('/dbfs/'):
# The Databricks Filesystem uses a FUSE implementation that does not support
# random writes. It causes an error.
with tempfile.NamedTemporaryFile(suffix='.h5') as f:
keras_model.save(f.name, **kwargs)
f.flush() # force flush the data
shutil.copyfile(src=f.name, dst=model_path)
else:
keras_model.save(model_path, **kwargs)
# update flavor info to mlflow_model
mlflow_model.add_flavor(FLAVOR_NAME,
keras_module=keras_module.__name__,
keras_version=keras_module.__version__,
data=data_subpath)
# save conda.yaml info to path/conda.yml
if conda_env is None:
conda_env = get_default_conda_env(include_cloudpickle=custom_objects is not None,
keras_module=keras_module)
elif not isinstance(conda_env, dict):
with open(conda_env, "r") as f:
conda_env = yaml.safe_load(f)
with open(os.path.join(path, _CONDA_ENV_SUBPATH), "w") as f:
yaml.safe_dump(conda_env, stream=f, default_flow_style=False)
# append loader_module, data and env data to mlflow_model
pyfunc.add_to_model(mlflow_model, loader_module="mlflow.keras",
data=data_subpath, env=_CONDA_ENV_SUBPATH)
# save mlflow_model to path/MLmodel
mlflow_model.save(os.path.join(path, "MLmodel"))
def log_model(keras_model, artifact_path, conda_env=None, custom_objects=None, keras_module=None,
registered_model_name=None, signature: ModelSignature=None,
input_example: ModelInputExample=None, **kwargs):
"""
Log a Keras model as an MLflow artifact for the current run.
:param keras_model: Keras model to be saved.
:param artifact_path: Run-relative artifact path.
:param conda_env: Either a dictionary representation of a Conda environment or
the path to a Conda environment yaml file.
If provided, this describes the environment this model should be
run in. At minimum, it should specify the dependencies
contained in :func:`get_default_conda_env()`. If ``None``, the default
:func:`mlflow.keras.get_default_conda_env()` environment is added to
the model. The following is an *example* dictionary representation of a
Conda environment::
{
'name': 'mlflow-env',
'channels': ['defaults'],
'dependencies': [
'python=3.7.0',
'keras=2.2.4',
'tensorflow=1.8.0'
]
}
:param custom_objects: A Keras ``custom_objects`` dictionary mapping names (strings) to
custom classes or functions associated with the Keras model. MLflow saves
these custom layers using CloudPickle and restores them automatically
when the model is loaded with :py:func:`mlflow.keras.load_model` and
:py:func:`mlflow.pyfunc.load_model`.
:param keras_module: Keras module to be used to save / load the model
(``keras`` or ``tf.keras``). If not provided, MLflow will
attempt to infer the Keras module based on the given model.
:param registered_model_name: (Experimental) If given, create a model version under
``registered_model_name``, also creating a registered model if one
with the given name does not exist.
:param signature: (Experimental) :py:class:`ModelSignature <mlflow.models.ModelSignature>`
describes model input and output :py:class:`Schema <mlflow.types.Schema>`.
The model signature can be :py:func:`inferred <mlflow.models.infer_signature>`
from datasets with valid model input (e.g. the training dataset with target
column omitted) and valid model output (e.g. model predictions generated on
the training dataset), for example:
.. code-block:: python
from mlflow.models.signature import infer_signature
train = df.drop_column("target_label")
predictions = ... # compute model predictions
signature = infer_signature(train, predictions)
:param input_example: (Experimental) Input example provides one or several instances of valid
model input. The example can be used as a hint of what data to feed the
model. The given example will be converted to a Pandas DataFrame and then
serialized to json using the Pandas split-oriented format. Bytes are
base64-encoded.
:param kwargs: kwargs to pass to ``keras_model.save`` method.
.. code-block:: python
:caption: Example
        from keras.layers import Dense
import mlflow
# Build, compile, and train your model
keras_model = ...
keras_model.compile(optimizer="rmsprop", loss="mse", metrics=["accuracy"])
results = keras_model.fit(
x_train, y_train, epochs=20, batch_size = 128, validation_data=(x_val, y_val))
# Log metrics and log the model
with mlflow.start_run() as run:
mlflow.keras.log_model(keras_model, "models")
"""
Model.log(artifact_path=artifact_path, flavor=mlflow.keras,
keras_model=keras_model, conda_env=conda_env, custom_objects=custom_objects,
keras_module=keras_module, registered_model_name=registered_model_name,
signature=signature, input_example=input_example,
**kwargs)
def _save_custom_objects(path, custom_objects):
"""
Save custom objects dictionary to a cloudpickle file so a model can be easily loaded later.
:param path: An absolute path that points to the data directory within /path/to/model.
:param custom_objects: Keras ``custom_objects`` is a dictionary mapping
names (strings) to custom classes or functions to be considered
during deserialization. MLflow saves these custom layers using
CloudPickle and restores them automatically when the model is
loaded with :py:func:`mlflow.keras.load_model` and
:py:func:`mlflow.pyfunc.load_model`.
"""
import cloudpickle
custom_objects_path = os.path.join(path, _CUSTOM_OBJECTS_SAVE_PATH)
with open(custom_objects_path, "wb") as out_f:
cloudpickle.dump(custom_objects, out_f)
def _load_model(model_path, keras_module, **kwargs):
keras_models = importlib.import_module(keras_module.__name__ + ".models")
custom_objects = kwargs.pop("custom_objects", {})
custom_objects_path = None
if os.path.isdir(model_path):
if os.path.isfile(os.path.join(model_path, _CUSTOM_OBJECTS_SAVE_PATH)):
custom_objects_path = os.path.join(model_path, _CUSTOM_OBJECTS_SAVE_PATH)
model_path = os.path.join(model_path, _MODEL_SAVE_PATH)
if custom_objects_path is not None:
import cloudpickle
with open(custom_objects_path, "rb") as in_f:
pickled_custom_objects = cloudpickle.load(in_f)
pickled_custom_objects.update(custom_objects)
custom_objects = pickled_custom_objects
from distutils.version import StrictVersion
if StrictVersion(keras_module.__version__.split('-')[0]) >= StrictVersion("2.2.3"):
# NOTE: Keras 2.2.3 does not work with unicode paths in python2. Pass in h5py.File instead
# of string to avoid issues.
import h5py
with h5py.File(os.path.abspath(model_path), "r") as model_path:
return keras_models.load_model(model_path, custom_objects=custom_objects, **kwargs)
else:
# NOTE: Older versions of Keras only handle filepath.
return keras_models.load_model(model_path, custom_objects=custom_objects, **kwargs)
class _KerasModelWrapper:
def __init__(self, keras_model, graph, sess):
self.keras_model = keras_model
self._graph = graph
self._sess = sess
def predict(self, dataframe):
# In TensorFlow < 2.0, we use a graph and session to predict
if self._graph is not None:
with self._graph.as_default():
with self._sess.as_default():
predicted = pd.DataFrame(self.keras_model.predict(dataframe.values))
# In TensorFlow >= 2.0, we do not use a graph and session to predict
else:
predicted = pd.DataFrame(self.keras_model.predict(dataframe.values))
predicted.index = dataframe.index
return predicted
def _load_pyfunc(path):
"""
Load PyFunc implementation. Called by ``pyfunc.load_pyfunc``.
:param path: Local filesystem path to the MLflow Model with the ``keras`` flavor.
"""
import tensorflow as tf
if os.path.isfile(os.path.join(path, _KERAS_MODULE_SPEC_PATH)):
with open(os.path.join(path, _KERAS_MODULE_SPEC_PATH), "r") as f:
keras_module = importlib.import_module(f.read())
else:
import keras
keras_module = keras
K = importlib.import_module(keras_module.__name__ + ".backend")
if keras_module.__name__ == "tensorflow.keras" or K.backend() == 'tensorflow':
if LooseVersion(tf.__version__) < LooseVersion('2.0.0'):
graph = tf.Graph()
sess = tf.Session(graph=graph)
            # By default, TF-backed models depend on the global graph and session.
            # We create and use a new Graph and Session and store them with the model,
            # so the model is independent of the global state.
with graph.as_default():
with sess.as_default(): # pylint:disable=not-context-manager
K.set_learning_phase(0)
m = _load_model(path, keras_module=keras_module, compile=False)
return _KerasModelWrapper(m, graph, sess)
else:
K.set_learning_phase(0)
m = _load_model(path, keras_module=keras_module, compile=False)
return _KerasModelWrapper(m, None, None)
else:
raise MlflowException("Unsupported backend '%s'" % K._BACKEND)
def load_model(model_uri, **kwargs):
"""
Load a Keras model from a local file or a run.
Extra arguments are passed through to keras.load_model.
:param model_uri: The location, in URI format, of the MLflow model. For example:
- ``/Users/me/path/to/local/model``
- ``relative/path/to/local/model``
- ``s3://my_bucket/path/to/model``
- ``runs:/<mlflow_run_id>/run-relative/path/to/model``
- ``models:/<model_name>/<model_version>``
- ``models:/<model_name>/<stage>``
For more information about supported URI schemes, see
`Referencing Artifacts <https://www.mlflow.org/docs/latest/concepts.html#
artifact-locations>`_.
:return: A Keras model instance.
.. code-block:: python
:caption: Example
# Load persisted model as a Keras model or as a PyFunc, call predict() on a pandas DataFrame
keras_model = mlflow.keras.load_model("runs:/96771d893a5e46159d9f3b49bf9013e2" + "/models")
predictions = keras_model.predict(x_test)
"""
local_model_path = _download_artifact_from_uri(artifact_uri=model_uri)
flavor_conf = _get_flavor_configuration(model_path=local_model_path, flavor_name=FLAVOR_NAME)
keras_module = importlib.import_module(flavor_conf.get("keras_module", "keras"))
keras_model_artifacts_path = os.path.join(
local_model_path,
flavor_conf.get("data", _MODEL_SAVE_PATH))
return _load_model(model_path=keras_model_artifacts_path, keras_module=keras_module, **kwargs)
@experimental
def autolog():
# pylint: disable=E0611
"""
Enables automatic logging from Keras to MLflow. Autologging captures the following information:
**Metrics** and **Parameters**
- Training loss; validation loss; user-specified metrics
- Metrics associated with the ``EarlyStopping`` callbacks: ``stopped_epoch``,
      ``restored_epoch``, ``restore_best_weights``, ``last_epoch``, etc
- ``fit()`` or ``fit_generator()`` parameters; optimizer name; learning rate; epsilon
- ``fit()`` or ``fit_generator()`` parameters associated with ``EarlyStopping``: ``min_delta``,
``patience``, ``baseline``, ``restore_best_weights``, etc
**Artifacts**
- Model summary on training start
- `MLflow Model <https://mlflow.org/docs/latest/models.html>`_ (Keras model) on training end
.. code-block:: python
:caption: Example
import mlflow
import mlflow.keras
# Build, compile, enable autologging, and train your model
keras_model = ...
keras_model.compile(optimizer="rmsprop", loss="mse", metrics=["accuracy"])
# autolog your metrics, parameters, and model
mlflow.keras.autolog()
results = keras_model.fit(
x_train, y_train, epochs=20, batch_size=128, validation_data=(x_val, y_val))
``EarlyStopping Integration with Keras AutoLogging``
MLflow will detect if an ``EarlyStopping`` callback is used in a ``fit()`` or
``fit_generator()`` call, and if the ``restore_best_weights`` parameter is set to be ``True``,
then MLflow will log the metrics associated with the restored model as a final, extra step.
The epoch of the restored model will also be logged as the metric ``restored_epoch``.
This allows for easy comparison between the actual metrics of the restored model and
the metrics of other models.
If ``restore_best_weights`` is set to be ``False``, then MLflow will not log an additional step.
Regardless of ``restore_best_weights``, MLflow will also log ``stopped_epoch``,
which indicates the epoch at which training stopped due to early stopping.
If training does not end due to early stopping, then ``stopped_epoch`` will be logged as ``0``.
MLflow will also log the parameters of the ``EarlyStopping`` callback,
excluding ``mode`` and ``verbose``.
"""
import keras
class __MLflowKerasCallback(keras.callbacks.Callback):
"""
Callback for auto-logging metrics and parameters.
Records available logs after each epoch.
Records model structural information as params when training begins
"""
def on_train_begin(self, logs=None): # pylint: disable=unused-argument
try_mlflow_log(mlflow.log_param, 'num_layers', len(self.model.layers))
try_mlflow_log(mlflow.log_param, 'optimizer_name', type(self.model.optimizer).__name__)
if hasattr(self.model.optimizer, 'lr'):
lr = self.model.optimizer.lr if \
type(self.model.optimizer.lr) is float \
else keras.backend.eval(self.model.optimizer.lr)
try_mlflow_log(mlflow.log_param, 'learning_rate', lr)
if hasattr(self.model.optimizer, 'epsilon'):
epsilon = self.model.optimizer.epsilon if \
type(self.model.optimizer.epsilon) is float \
else keras.backend.eval(self.model.optimizer.epsilon)
try_mlflow_log(mlflow.log_param, 'epsilon', epsilon)
sum_list = []
self.model.summary(print_fn=sum_list.append)
summary = '\n'.join(sum_list)
tempdir = tempfile.mkdtemp()
try:
summary_file = os.path.join(tempdir, "model_summary.txt")
with open(summary_file, 'w') as f:
f.write(summary)
try_mlflow_log(mlflow.log_artifact, local_path=summary_file)
finally:
shutil.rmtree(tempdir)
def on_epoch_end(self, epoch, logs=None):
if not logs:
return
try_mlflow_log(mlflow.log_metrics, logs, step=epoch)
def on_train_end(self, logs=None):
try_mlflow_log(log_model, self.model, artifact_path='model')
# As of Keras 2.4.0, Keras Callback implementations must define the following
# methods indicating whether or not the callback overrides functions for
# batch training/testing/inference
def _implements_train_batch_hooks(self): return False
def _implements_test_batch_hooks(self): return False
def _implements_predict_batch_hooks(self): return False
def _early_stop_check(callbacks):
if LooseVersion(keras.__version__) < LooseVersion('2.3.0'):
es_callback = keras.callbacks.EarlyStopping
else:
es_callback = keras.callbacks.callbacks.EarlyStopping
for callback in callbacks:
if isinstance(callback, es_callback):
return callback
return None
def _log_early_stop_callback_params(callback):
if callback:
try:
earlystopping_params = {'monitor': callback.monitor,
'min_delta': callback.min_delta,
'patience': callback.patience,
'baseline': callback.baseline,
'restore_best_weights': callback.restore_best_weights}
try_mlflow_log(mlflow.log_params, earlystopping_params)
except Exception: # pylint: disable=W0703
return
def _get_early_stop_callback_attrs(callback):
try:
return callback.stopped_epoch, callback.restore_best_weights, callback.patience
except Exception: # pylint: disable=W0703
return None
def _log_early_stop_callback_metrics(callback, history):
if callback:
callback_attrs = _get_early_stop_callback_attrs(callback)
if callback_attrs is None:
return
stopped_epoch, restore_best_weights, patience = callback_attrs
try_mlflow_log(mlflow.log_metric, 'stopped_epoch', stopped_epoch)
# Weights are restored only if early stopping occurs
if stopped_epoch != 0 and restore_best_weights:
restored_epoch = stopped_epoch - max(1, patience)
try_mlflow_log(mlflow.log_metric, 'restored_epoch', restored_epoch)
restored_metrics = {key: history.history[key][restored_epoch]
for key in history.history.keys()}
# Checking that a metric history exists
metric_key = next(iter(history.history), None)
if metric_key is not None:
last_epoch = len(history.history[metric_key])
try_mlflow_log(mlflow.log_metrics, restored_metrics, step=last_epoch)
def _run_and_log_function(self, original, args, kwargs, unlogged_params, callback_arg_index):
if not mlflow.active_run():
try_mlflow_log(mlflow.start_run)
auto_end_run = True
else:
auto_end_run = False
log_fn_args_as_params(original, args, kwargs, unlogged_params)
early_stop_callback = None
# Checking if the 'callback' argument of the function is set
if len(args) > callback_arg_index:
tmp_list = list(args)
early_stop_callback = _early_stop_check(tmp_list[callback_arg_index])
tmp_list[callback_arg_index] += [__MLflowKerasCallback()]
args = tuple(tmp_list)
elif 'callbacks' in kwargs:
early_stop_callback = _early_stop_check(kwargs['callbacks'])
kwargs['callbacks'] += [__MLflowKerasCallback()]
else:
kwargs['callbacks'] = [__MLflowKerasCallback()]
_log_early_stop_callback_params(early_stop_callback)
history = original(self, *args, **kwargs)
_log_early_stop_callback_metrics(early_stop_callback, history)
if auto_end_run:
try_mlflow_log(mlflow.end_run)
return history
@gorilla.patch(keras.Model)
def fit(self, *args, **kwargs):
original = gorilla.get_original_attribute(keras.Model, 'fit')
unlogged_params = ['self', 'x', 'y', 'callbacks', 'validation_data', 'verbose']
return _run_and_log_function(self, original, args, kwargs, unlogged_params, 5)
@gorilla.patch(keras.Model)
def fit_generator(self, *args, **kwargs):
original = gorilla.get_original_attribute(keras.Model, 'fit_generator')
unlogged_params = ['self', 'generator', 'callbacks', 'validation_data', 'verbose']
return _run_and_log_function(self, original, args, kwargs, unlogged_params, 4)
settings = gorilla.Settings(allow_hit=True, store_hit=True)
gorilla.apply(gorilla.Patch(keras.Model, 'fit', fit, settings=settings))
gorilla.apply(gorilla.Patch(keras.Model, 'fit_generator', fit_generator, settings=settings))
| 47.465331 | 100 | 0.636845 | ["Apache-2.0"] | AnesBenmerzoug/mlflow | mlflow/keras.py | 30,805 | Python |
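The flavor functions in mlflow/keras.py above cover the full save/log/load round trip. A minimal usage sketch follows; the model and the training/test arrays (keras_model, x_train, y_train, x_val, y_val, x_test) are hypothetical placeholders, not values defined in this file.

import mlflow
import mlflow.keras

# keras_model and the x_*/y_* arrays are assumed to be built elsewhere.
keras_model.compile(optimizer="rmsprop", loss="mse", metrics=["accuracy"])

with mlflow.start_run() as run:
    keras_model.fit(x_train, y_train, epochs=5, validation_data=(x_val, y_val))
    # Log the trained model as a run artifact under the "model" path.
    mlflow.keras.log_model(keras_model, artifact_path="model")

# Reload the logged model by its run URI and use it for inference.
loaded = mlflow.keras.load_model("runs:/{}/model".format(run.info.run_id))
predictions = loaded.predict(x_test)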
from django import forms
from django.forms import ModelForm
from .models import FoodHomePageModel,FullMenuPageModel
class FoodHomeForm(forms.ModelForm):
class Meta:
model = FoodHomePageModel
exclude = ("company",'status',)
class FoodMenuForm(forms.ModelForm):
class Meta:
model = FullMenuPageModel
exclude = ("company",'status',) | 23.470588 | 55 | 0.66416 | [
"MIT"
] | CPU-sangoma/PlentyPot | FoodStore/forms.py | 399 | Python |
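A brief sketch of how a ModelForm like FoodHomeForm is typically wired into a Django view. The view name, template, redirect target, and the request.user.company relation are assumptions for illustration, not part of this repository.

from django.shortcuts import render, redirect
from .forms import FoodHomeForm

def edit_food_home(request):
    # Bind the form to POST data on submit, otherwise render it empty.
    form = FoodHomeForm(request.POST or None)
    if request.method == "POST" and form.is_valid():
        # 'company' and 'status' are excluded from the form, so set them here.
        page = form.save(commit=False)
        page.company = request.user.company   # assumed relation
        page.status = "draft"                 # assumed default value
        page.save()
        return redirect("food-home")
    return render(request, "food_home_edit.html", {"form": form})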
import cv2
import numpy as np
from sklearn.cluster import DBSCAN as skDBSCAN
def DBSCAN(src, eps, min_samples):
arr = cv2.cvtColor(src, cv2.COLOR_BGR2LAB).reshape(-1, src.shape[2])
clustering = skDBSCAN(eps=eps, min_samples=min_samples).fit(arr)
labels = clustering.labels_ + 1
maps = labels.reshape(src.shape[:2])
return maps, labels
def drawDBSCAN(src, maps, labels):
colors = []
for lb in set(labels):
mask = np.where(maps == lb, 255, 0).astype(np.uint8)
color = list(map(int, list(cv2.mean(src, mask)[:src.shape[2]])))
colors.append(np.array(color, dtype=np.uint8))
colors = np.asarray(colors)
dst = colors[labels].astype(np.uint8).reshape(src.shape)
return dst
| 31.291667 | 72 | 0.653795 | ["Apache-2.0"] | 076923/cv2-utils | cv2u/core/cluster.py | 751 | Python |
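A short usage sketch for the two helpers above; the image path and the import path are assumptions based on this repository layout. Because every pixel is clustered, it is worth downscaling larger images first.

import cv2
from cv2u.core.cluster import DBSCAN, drawDBSCAN    # import path assumed

src = cv2.imread("input.png")                        # placeholder image path
src = cv2.resize(src, (0, 0), fx=0.25, fy=0.25)      # shrink: per-pixel clustering is slow on large images
maps, labels = DBSCAN(src, eps=5.0, min_samples=50)  # cluster pixels in LAB colour space
dst = drawDBSCAN(src, maps, labels)                  # repaint each cluster with its mean colour
cv2.imwrite("clusters.png", dst)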
from setuptools import setup, find_packages
setup(
name='src',
version='0.0.1',
packages=find_packages(),
)
| 17.142857 | 43 | 0.675 | ["Apache-2.0"] | YohannaWANG/CS5242_Project | src/setup.py | 120 | Python |
class MolecularDescriptorsEnum:
HEAVY_ATOM_COUNT = "heavy_atom_count"
MOLECULAR_WEIGHT = "molecular_weight"
CLOGP = "clogp"
HYDROGEN_BOND_DONORS = "hydrogen_bond_donors"
HYDROGEN_BOND_ACCEPTORS = "hydrogen_bond_acceptors"
ROTATABLE_BONDS = "rotatable_bonds"
RING_COUNT = "ring_count"
| 26.25 | 55 | 0.75873 | ["MIT"] | MolecularAI/reinvent-chemistry | reinvent_chemistry/library_design/enums/molecular_descriptors_enum.py | 315 | Python |
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack.package import *
class Armadillo(CMakePackage):
"""Armadillo is a high quality linear algebra library (matrix maths)
for the C++ language, aiming towards a good balance between speed and
ease of use.
"""
homepage = "http://arma.sourceforge.net/"
url = "http://sourceforge.net/projects/arma/files/armadillo-7.200.1.tar.xz"
version('8.100.1', 'd9762d6f097e0451d0cfadfbda295e7c')
version('7.950.1', 'c06eb38b12cae49cab0ce05f96147147')
version('7.900.1', '5ef71763bd429a3d481499878351f3be')
version('7.500.0', '7d316fdf3c3c7ea92b64704180ae315d')
version('7.200.2', 'b21585372d67a8876117fd515d8cf0a2')
version('7.200.1', 'ed86d6df0058979e107502e1fe3e469e')
variant('hdf5', default=False, description='Include HDF5 support')
depends_on('[email protected]:', type='build')
depends_on('arpack-ng') # old arpack causes undefined symbols
depends_on('blas')
depends_on('lapack')
depends_on('[email protected]:')
depends_on('hdf5', when='+hdf5')
patch('undef_linux.patch', when='platform=linux')
def cmake_args(self):
spec = self.spec
# TUTORIAL: fix the lines below by adding the appropriate query to
# the right dependency. To ask a dependency, e.g. `blas`, for the
# list of libraries it provides it suffices to access its `libs`
# attribute:
#
# blas_libs = spec['blas'].libs
#
# The CMake variables below require a semicolon separated list:
#
# blas_libs.joined(';')
return [
# ARPACK support
'-DARPACK_LIBRARY={0}'.format('FIXME: arpack-ng'),
# BLAS support
'-DBLAS_LIBRARY={0}'.format('FIXME: blas'),
# LAPACK support
'-DLAPACK_LIBRARY={0}'.format('FIXME: lapack'),
# SuperLU support
'-DSuperLU_INCLUDE_DIR={0}'.format(spec['superlu'].prefix.include),
'-DSuperLU_LIBRARY={0}'.format('FIXME: superlu'),
# HDF5 support
'-DDETECT_HDF5={0}'.format('ON' if '+hdf5' in spec else 'OFF')
]
| 36.698413 | 79 | 0.637976 | ["ECL-2.0", "Apache-2.0", "MIT-0", "MIT"] | PDoakORNL/spack | var/spack/repos/tutorial/packages/armadillo/package.py | 2,312 | Python |
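For reference, one way the FIXME lines in the tutorial package above could be completed, following the hint given in its own comment (spec['<dep>'].libs.joined(';')). This is a sketch of the exercise's intended answer, not the shipped package.

    def cmake_args(self):
        spec = self.spec
        return [
            # ARPACK support
            '-DARPACK_LIBRARY={0}'.format(spec['arpack-ng'].libs.joined(';')),
            # BLAS support
            '-DBLAS_LIBRARY={0}'.format(spec['blas'].libs.joined(';')),
            # LAPACK support
            '-DLAPACK_LIBRARY={0}'.format(spec['lapack'].libs.joined(';')),
            # SuperLU support
            '-DSuperLU_INCLUDE_DIR={0}'.format(spec['superlu'].prefix.include),
            '-DSuperLU_LIBRARY={0}'.format(spec['superlu'].libs.joined(';')),
            # HDF5 support
            '-DDETECT_HDF5={0}'.format('ON' if '+hdf5' in spec else 'OFF')
        ]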
import base64
def display_skills(skills):
result = []
for skill in skills:
base = f'''<img width ='22px' align='left' src ='{'https://raw.githubusercontent.com/rahulbanerjee26/githubAboutMeGenerator/main/icons/'+skill+'.svg'}'>'''
result.append(base)
return '\n'.join(result)
def display_socials(linkedin,twitter,medium,portfolio,github):
result = ''
if linkedin != '':
linkedin = 'https://www.linkedin.com/in/'+linkedin
result += f'''<a href = '{linkedin}'> <img width = '22px' align= 'left' src="https://raw.githubusercontent.com/rahulbanerjee26/githubAboutMeGenerator/main/icons/linked-in-alt.svg"/></a> \n'''
if twitter != '':
twitter = 'https://www.twitter.com/'+twitter
result += f'''<a href = '{twitter}'> <img width = '22px' align= 'left' src="https://raw.githubusercontent.com/rahulbanerjee26/githubAboutMeGenerator/main/icons/twitter.svg"/></a> \n'''
if medium != '':
result += f'''<a href = '{medium}'> <img width = '22px' align= 'left' src="https://raw.githubusercontent.com/rahulbanerjee26/githubAboutMeGenerator/main/icons/medium.svg"/></a> \n'''
if portfolio != '':
result += f'''<a href = '{portfolio}'> <img width = '22px' align= 'left' src="https://raw.githubusercontent.com/rahulbanerjee26/githubAboutMeGenerator/main/icons/portfolio.png"/></a> \n'''
if github != '':
github = 'https://www.github.com/'+github
result += f'''<a href = '{github}'> <img width = '22px' align= 'left' src="https://raw.githubusercontent.com/rahulbanerjee26/githubAboutMeGenerator/main/icons/github.svg"/></a> \n'''
return result
def default_html(name = 'Rahul', linkedin_url = '',twitter_url = '',medium_url='',portfolio_url='',waka_userName = 'rahulbanerjee26',github_username = 'rahulbanerjee26',p1='......',p2='.......',p3='.........',p4='.........',skills=[]):
return f'''
# Hello World <img src = "https://raw.githubusercontent.com/MartinHeinz/MartinHeinz/master/wave.gif" width = 50px>

<div size='20px'> Hi! My name is {name}. Thank You for taking the time to view my GitHub Profile :smile:
<h2> Connect with me <img src='https://raw.githubusercontent.com/ShahriarShafin/ShahriarShafin/main/Assets/handshake.gif' width="64px"> </h2>
{display_socials(linkedin_url,twitter_url,medium_url,portfolio_url,github_username)}
</div>
<h2> Skills </h2>
{display_skills(skills)}
<h2> About Me</h2>
- 🔭 I’m currently working on {p1}
- 🌱 I’m currently learning {p2}
- 👯 I’m looking to collaborate on {p3}
- 💬 Talk to me about {p4}
## Stuff I worked on last week⌚
<a href="https://github.com/anuraghazra/github-readme-stats">
<img align="center" src="https://github-readme-stats.vercel.app/api/wakatime?username=@{waka_userName}&compact=True"/>
</a>
## My GitHub Stats 📊
<a href="https://github.com/anuraghazra/github-readme-stats">
<img align="left" src="https://github-readme-stats.vercel.app/api?username={github_username}&count_private=true&show_icons=true&theme=radical" />
</a>
<a href="https://github.com/anuraghazra/convoychat">
<img align="center" src="https://github-readme-stats.vercel.app/api/top-langs/?username={github_username}&layout=compact" />
</a>
<!-- BLOG-POST-LIST:START -->
<!-- BLOG-POST-LIST:END -->
'''
def get_yml(feed_url):
yml_file = f'''
name: Latest blog post workflow
on:
schedule: # Run workflow automatically
- cron: '0 * * * *' # Runs every hour, on the hour
workflow_dispatch: # Run workflow manually (without waiting for the cron to be called), through the Github Actions Workflow page directly
jobs:
update-readme-with-blog:
name: Update this repo's README with latest blog posts
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- uses: gautamkrishnar/blog-post-workflow@master
with:
feed_list: "{feed_url}"
'''
b64 = base64.b64encode(yml_file.encode()).decode()
href = f'<a href="data:file/csv;base64,{b64}" download="blog-post-workflow.yml">Download yml file</a>'
return href
def download_readme(code):
b64 = base64.b64encode(code.encode()).decode()
    href = f'<h4><a href="data:file/csv;base64,{b64}" download="README.md">Download README</a></h4>'
return href
| 43.12 | 235 | 0.678108 | ["MIT"] | saminul/githubProfileReadmeGenerator | helpers.py | 4,335 | Python |
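These helpers appear to be meant for a Streamlit front end that previews and downloads the generated README. A minimal sketch of that wiring follows; the widget labels, skill choices, and the helpers module name are assumptions rather than code from this repository.

import streamlit as st
from helpers import default_html, download_readme   # module name assumed

name = st.text_input("Name", "Rahul")
github = st.text_input("GitHub username", "rahulbanerjee26")
skills = st.multiselect("Skills", ["python", "javascript", "react"])

readme = default_html(name=name, github_username=github, skills=skills)
st.markdown(readme, unsafe_allow_html=True)                    # live preview
st.markdown(download_readme(readme), unsafe_allow_html=True)   # base64 download link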
# coding: utf-8
import pprint
import re
import six
from huaweicloudsdkcore.sdk_response import SdkResponse
class UpdateLoadBalancerResponse(SdkResponse):
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'request_id': 'str',
'loadbalancer': 'LoadBalancer'
}
attribute_map = {
'request_id': 'request_id',
'loadbalancer': 'loadbalancer'
}
def __init__(self, request_id=None, loadbalancer=None):
"""UpdateLoadBalancerResponse - a model defined in huaweicloud sdk"""
super(UpdateLoadBalancerResponse, self).__init__()
self._request_id = None
self._loadbalancer = None
self.discriminator = None
if request_id is not None:
self.request_id = request_id
if loadbalancer is not None:
self.loadbalancer = loadbalancer
@property
def request_id(self):
"""Gets the request_id of this UpdateLoadBalancerResponse.
        Request ID. Note: automatically generated.
:return: The request_id of this UpdateLoadBalancerResponse.
:rtype: str
"""
return self._request_id
@request_id.setter
def request_id(self, request_id):
"""Sets the request_id of this UpdateLoadBalancerResponse.
        Request ID. Note: automatically generated.
:param request_id: The request_id of this UpdateLoadBalancerResponse.
:type: str
"""
self._request_id = request_id
@property
def loadbalancer(self):
"""Gets the loadbalancer of this UpdateLoadBalancerResponse.
:return: The loadbalancer of this UpdateLoadBalancerResponse.
:rtype: LoadBalancer
"""
return self._loadbalancer
@loadbalancer.setter
def loadbalancer(self, loadbalancer):
"""Sets the loadbalancer of this UpdateLoadBalancerResponse.
:param loadbalancer: The loadbalancer of this UpdateLoadBalancerResponse.
:type: LoadBalancer
"""
self._loadbalancer = loadbalancer
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, UpdateLoadBalancerResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 27.632353 | 81 | 0.581692 | ["Apache-2.0"] | JeffreyDin/huaweicloud-sdk-python-v3 | huaweicloud-sdk-elb/huaweicloudsdkelb/v3/model/update_load_balancer_response.py | 3,798 | Python |
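A small illustration of the generated model's behaviour, using only what is defined above; the request id value is a placeholder, and the loadbalancer payload is omitted because LoadBalancer is defined elsewhere in the SDK.

resp = UpdateLoadBalancerResponse(request_id="req-0123")
print(resp.request_id)   # -> req-0123
print(resp.to_dict())    # -> {'request_id': 'req-0123', 'loadbalancer': None}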
#!/usr/bin/env python3
#
# Extract a CSV of findings for a particular bucket
#
import boto3
from botocore.exceptions import ClientError
import json
import os
import time
import csv
from time import sleep
from datetime import datetime
import logging
logger = logging.getLogger()
logger.setLevel(logging.INFO)
logging.getLogger('botocore').setLevel(logging.WARNING)
logging.getLogger('boto3').setLevel(logging.WARNING)
logging.getLogger('urllib3').setLevel(logging.WARNING)
CSV_HEADER = ['AccountId', 'BucketName', 'Region', 'FileExtension', 'Severity', 'FindingType',
'FindingCount', 'Details', 'ObjectKey', 'S3Path', 'URLPath', 'FindingConsoleURL', 'Finding Creation Date', 'Object-level Public ACL']
def main(args, logger):
# Macie is regional even though buckets aren't. So we need to iterate across regions to find out bucket
# Unless you know already
if args.region:
regions = [args.region]
else:
regions = get_regions()
# Store bucket results
results = {
"Low": 0,
"Medium": 0,
"High": 0
}
with open(args.filename, 'w') as csvoutfile:
writer = csv.writer(csvoutfile, delimiter=',', quotechar='"', quoting=csv.QUOTE_ALL)
writer.writerow(CSV_HEADER)
for r in regions:
macie_client = boto3.client('macie2', region_name=r)
# Build a Findings criteria dictionary to pass to Macie2
findingCriteria = {'criterion': {'category': {'eq': ['CLASSIFICATION']}}}
if args.bucket:
findingCriteria['criterion']['resourcesAffected.s3Bucket.name'] = {'eq': [args.bucket]}
if args.severity:
if args.severity == "High":
findingCriteria['criterion']['severity.description'] = {'eq': ["High"]}
elif args.severity == "Medium":
findingCriteria['criterion']['severity.description'] = {'eq': ["High", "Medium"]}
else:
# No need to add a severity filter
pass
if args.since:
end_time = datetime.now()
start_time = datetime.strptime(args.since, "%Y-%m-%d")
findingCriteria['criterion']['createdAt'] = {
'gte': int(start_time.timestamp())*1000,
'lte': int(end_time.timestamp())*1000
}
logger.debug(f"findingCriteria: {json.dumps(findingCriteria, indent=2)}")
            # Macie is annoying in that we have to list the finding ids first, then pass
            # that list to the get_findings() API to get any useful details.
list_response = macie_client.list_findings(
findingCriteria=findingCriteria,
maxResults=40
)
findings = list_response['findingIds']
logger.debug(f"Found {len(findings)} findings in {r}")
if len(findings) == 0:
# No findings in this region, move along
continue
# Now get the meat of these findings
get_response = macie_client.get_findings(findingIds=findings)
for f in get_response['findings']:
bucket_name = f['resourcesAffected']['s3Bucket']['name']
key = f['resourcesAffected']['s3Object']['key']
summary, count = get_summary(f)
obj_publicAccess = "Unknown"
if 'publicAccess' in f['resourcesAffected']['s3Object']:
obj_publicAccess = f['resourcesAffected']['s3Object']['publicAccess']
writer.writerow([f['accountId'], bucket_name, r,
f['resourcesAffected']['s3Object']['extension'],
f['severity']['description'], f['type'],
count, summary, key,
f"s3://{bucket_name}/{key}",
f"https://{bucket_name}.s3.amazonaws.com/{key}",
f"https://{r}.console.aws.amazon.com/macie/home?region={r}#findings?search=resourcesAffected.s3Bucket.name%3D{bucket_name}¯os=current&itemId={f['id']}",
f['createdAt'], obj_publicAccess
])
results[f['severity']['description']] += 1
# pagination is a pita. Here we continue to the List pagination
while 'nextToken' in list_response:
sleep(0.5)
list_response = macie_client.list_findings(
findingCriteria=findingCriteria,
maxResults=40,
nextToken=list_response['nextToken']
)
findings = list_response['findingIds']
logger.debug(f"Found {len(findings)} more findings in {r}")
get_response = macie_client.get_findings(findingIds=findings)
for f in get_response['findings']:
bucket_name = f['resourcesAffected']['s3Bucket']['name']
key = f['resourcesAffected']['s3Object']['key']
summary, count = get_summary(f)
obj_publicAccess = "Unknown"
if 'publicAccess' in f['resourcesAffected']['s3Object']:
obj_publicAccess = f['resourcesAffected']['s3Object']['publicAccess']
writer.writerow([f['accountId'], bucket_name, r,
f['resourcesAffected']['s3Object']['extension'],
f['severity']['description'], f['type'],
count, summary, key,
f"s3://{bucket_name}/{key}",
f"https://{bucket_name}.s3.amazonaws.com/{key}",
f"https://{r}.console.aws.amazon.com/macie/home?region={r}#findings?search=resourcesAffected.s3Bucket.name%3D{bucket_name}¯os=current&itemId={f['id']}",
f['createdAt'], obj_publicAccess
])
results[f['severity']['description']] += 1
print(f"Exported High: {results['High']} Medium: {results['Medium']} Low: {results['Low']} ")
csvoutfile.close()
def get_summary(finding):
summary = []
count = 0
for data_type in finding['classificationDetails']['result']['sensitiveData']:
summary.append(f"{data_type['category']}: {data_type['totalCount']}")
count += data_type['totalCount']
return("\n".join(summary), count)
def get_regions():
"""Return an array of the regions this account is active in. Ordered with us-east-1 in the front."""
ec2 = boto3.client('ec2')
response = ec2.describe_regions()
output = ['us-east-1']
for r in response['Regions']:
if r['RegionName'] == "us-east-1":
continue
output.append(r['RegionName'])
return(output)
def do_args():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--debug", help="print debugging info", action='store_true')
parser.add_argument("--error", help="print error info only", action='store_true')
parser.add_argument("--region", help="Only Process this region")
parser.add_argument("--bucket", help="Only price out this bucket")
parser.add_argument("--filename", help="Save to filename", required=True)
parser.add_argument("--since", help="Only output findings after this date - specified as YYYY-MM-DD")
parser.add_argument("--severity", help="Filter on this severity and higher",
choices=['High', 'Medium', 'Low'], default='Medium')
args = parser.parse_args()
return(args)
if __name__ == '__main__':
args = do_args()
# Logging idea stolen from: https://docs.python.org/3/howto/logging.html#configuring-logging
# create console handler and set level to debug
ch = logging.StreamHandler()
if args.error:
logger.setLevel(logging.ERROR)
elif args.debug:
logger.setLevel(logging.DEBUG)
else:
logger.setLevel(logging.INFO)
# create formatter
# formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
formatter = logging.Formatter('%(name)s - %(levelname)s - %(message)s')
# add formatter to ch
ch.setFormatter(formatter)
# add ch to logger
logger.addHandler(ch)
# # Sanity check region
# if args.region:
# os.environ['AWS_DEFAULT_REGION'] = args.region
# if 'AWS_DEFAULT_REGION' not in os.environ:
# logger.error("AWS_DEFAULT_REGION Not set. Aborting...")
# exit(1)
try:
main(args, logger)
except KeyboardInterrupt:
exit(1)
| 41.446009 | 192 | 0.57295 | ["Apache-2.0"] | jchrisfarris/aws-macie-automations | scripts/extract_findings_to_csv.py | 8,828 | Python |
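For a typical invocation such as python extract_findings_to_csv.py --filename findings.csv --bucket my-bucket --severity High --since 2024-01-01 (all values hypothetical), the script assembles a Macie criteria document shaped like the sketch below before calling list_findings().

from datetime import datetime

start = datetime.strptime("2024-01-01", "%Y-%m-%d")
finding_criteria = {
    "criterion": {
        "category": {"eq": ["CLASSIFICATION"]},
        "resourcesAffected.s3Bucket.name": {"eq": ["my-bucket"]},
        "severity.description": {"eq": ["High"]},
        "createdAt": {
            "gte": int(start.timestamp()) * 1000,             # epoch milliseconds
            "lte": int(datetime.now().timestamp()) * 1000,
        },
    }
}
# main() passes this dict as findingCriteria= to the macie2 list_findings() call.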
import pytest
from uyaml import Yaml
from report.workflow.settings import (
ConfluenceSettings,
Settings,
_Credentials,
_Page,
_UnifiedSettings,
)
from tests.fake import SECTION
@pytest.fixture()
def credentials() -> _Credentials:
return _Credentials('foo', 'bar')
@pytest.fixture()
def page() -> _Page:
return _Page('Home', 'Salary')
@pytest.fixture()
def unified_settings() -> Settings:
return _UnifiedSettings(SECTION)
@pytest.fixture()
def settings(fake_yaml: Yaml) -> Settings:
return ConfluenceSettings(fake_yaml)
def test_credentials_username(credentials: _Credentials) -> None:
assert credentials.username == 'foo'
def test_credentials_api_key(credentials: _Credentials) -> None:
assert credentials.api_key == 'bar'
def test_credentials_from_dict() -> None:
assert isinstance(
_Credentials.from_dict({'username': 'foo', 'api-key': 'bar'}),
_Credentials,
)
def test_credentials_as_str(credentials: _Credentials) -> None:
assert str(credentials) == '[_Credentials: user = foo]'
def test_page_parent(page: _Page) -> None:
assert page.parent == 'Home'
def test_page_target(page: _Page) -> None:
assert page.target == 'Salary'
def test_page_from_dict() -> None:
assert _Page.from_dict({'parent': 'Home', 'target': 'Salary'})
def test_unified_settings_url(unified_settings: _UnifiedSettings) -> None:
assert unified_settings.url == ''
def test_unified_settings_page(unified_settings: _UnifiedSettings) -> None:
assert isinstance(unified_settings.page, _Page)
def test_unified_settings_credentials(
unified_settings: _UnifiedSettings,
) -> None:
assert isinstance(unified_settings.credentials, _Credentials)
def test_unified_settings_content(unified_settings: _UnifiedSettings) -> None:
assert unified_settings._UnifiedSettings__content('page') == {
'parent': 'Home',
'target': 'Salary',
}
def test_settings_url(settings: ConfluenceSettings) -> None:
assert settings.url == ''
def test_settings_page(settings: ConfluenceSettings) -> None:
assert isinstance(settings.page, _Page)
def test_settings_credentials(settings: ConfluenceSettings) -> None:
assert isinstance(settings.credentials, _Credentials)
| 23.947368 | 78 | 0.727473 | ["MIT"] | vyahello/pytest-confluence-report | tests/test_settings.py | 2,275 | Python |
def sub1(a,b):
    # Return the difference of a and b
    c = a-b
    return c

def divide1(a,b):
    # Return the quotient of a and b
    c = a/b
    return c
| 11.142857 | 18 | 0.525641 | ["MIT"] | JaysreeBora/Learn-Python-in-7-Days | Chapter08/module2.py | 78 | Python |