code (string, 20–1.05M) | apis (sequence) | extract_api (string, 75–5.24M)
---|---|---
import sys
import socket
import time
import signal
from timeit import default_timer as timer

try:
    hostrange = sys.argv[1]
    portrange = sys.argv[2]
except IndexError:
    print("Usage: main.py hostrange portrange")
    sys.exit(1)

stats = {
    'Errors': [],
    'Successes': [],
    'Hosts': 0
}

def parse_range (range):
    if '-' in range:
        limits = range.split('-')
        return int(limits[0]), int(limits[1])
    else:
        return int(range), int(range)

def parse_hosts (range):
    hosts = []
    segments = range.split('.')
    for segment in segments:
        hosts.append(parse_range(segment))
    return hosts

def ping (host, port):
    port = int(port)
    success = False
    # New Socket
    s = socket.socket(
        socket.AF_INET, socket.SOCK_STREAM)
    # 1sec Timeout
    s.settimeout(1)
    # Start a timer
    s_start = timer()
    # Try to Connect
    try:
        s.connect((host, int(port)))
        s.shutdown(socket.SHUT_RD)
        success = True
    # Connection Timed Out
    except socket.timeout:
        stats['Errors'].append(f"Failed to connect to {host}[{port}]: timed out")
    except OSError as e:
        stats['Errors'].append(f"Failed to connect to {host}[{port}]: " + str(e))
    # Stop Timer
    s_stop = timer()
    s_runtime = "%.2f" % (1000 * (s_stop - s_start))
    if success:
        stats['Successes'].append(f"Connected to {host}[{port}]: tcp_seq=1 time={s_runtime} ms")

def exit (signal, frame):
    get_results()
    sys.exit(0)

def get_results ():
    for error in stats['Errors']:
        print(error)
    for succ in stats['Successes']:
        print(succ)
    print(f"Hosts scanned: {stats['Hosts']}")

def generate_range (_range):
    lo, hi = _range
    return range(lo, hi+1)

signal.signal(signal.SIGINT, exit)

s1, s2, s3, s4 = parse_hosts(hostrange)
p = parse_range(portrange)

for i in generate_range(s1):
    for j in generate_range(s2):
        for k in generate_range(s3):
            for l in generate_range(s4):
                for port in generate_range(p):
                    ping(f"{i}.{j}.{k}.{l}", port)
                stats['Hosts'] += 1

get_results()
| [
"timeit.default_timer",
"signal.signal",
"socket.socket",
"sys.exit"
] | [((1776, 1810), 'signal.signal', 'signal.signal', (['signal.SIGINT', 'exit'], {}), '(signal.SIGINT, exit)\n', (1789, 1810), False, 'import signal\n'), ((744, 793), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_STREAM'], {}), '(socket.AF_INET, socket.SOCK_STREAM)\n', (757, 793), False, 'import socket\n'), ((874, 881), 'timeit.default_timer', 'timer', ([], {}), '()\n', (879, 881), True, 'from timeit import default_timer as timer\n'), ((1283, 1290), 'timeit.default_timer', 'timer', ([], {}), '()\n', (1288, 1290), True, 'from timeit import default_timer as timer\n'), ((1507, 1518), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (1515, 1518), False, 'import sys\n'), ((228, 239), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (236, 239), False, 'import sys\n')] |
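
The scanner above times a bare TCP connect per host/port pair. Below is a minimal, self-contained sketch of that probe pattern; the host, port, and timeout values are illustrative assumptions, not taken from the snippet.

```python
# Hedged sketch of a timed TCP-connect probe (illustrative host/port, 1 s timeout).
import socket
from timeit import default_timer as timer

def tcp_probe(host, port, timeout=1.0):
    """Return the connect time in milliseconds, or None if the connection failed."""
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.settimeout(timeout)
    start = timer()
    try:
        s.connect((host, port))
        s.shutdown(socket.SHUT_RD)
        return 1000 * (timer() - start)
    except OSError:  # covers timeouts as well as refused/unreachable connections
        return None
    finally:
        s.close()

if __name__ == "__main__":
    print(tcp_probe("127.0.0.1", 22))
```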
"""
Given an array of size n, find the majority element. The majority element is the element that appears more than
floor(n/2) times.
You may assume that the array is non-empty and the majority element always exists in the array.
Example :
Input : [2, 1, 2]
Return : 2 which occurs 2 times which is greater than 3/2.
"""
class Solution:
    def majority_element(self, arr):
        n = len(arr)
        f = n//2
        from collections import Counter
        d = Counter(arr)
        p = d.most_common(1)
        if p[0][1] > f:
            return p[0][0]
        else:
            return 0

    def method_02(self, arr):
        n = len(arr)
        for i in arr:
            if arr.count(i) > n//2:
                return i

s = Solution()
a = [1, 1, 1, 2, 2]
print(s.majority_element(a))
print(s.method_02(a))
| [
"collections.Counter"
] | [((470, 482), 'collections.Counter', 'Counter', (['arr'], {}), '(arr)\n', (477, 482), False, 'from collections import Counter\n')] |
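
The first method above needs an O(n) auxiliary Counter and the second rescans the list for every element (O(n²) time). For contrast, here is a sketch of the Boyer-Moore majority-vote algorithm, which is not used in the snippet but solves the same problem in one pass with O(1) extra space.

```python
# Boyer-Moore majority vote: keeps a single candidate and a counter.
def majority_element_bm(arr):
    candidate, count = None, 0
    for x in arr:
        if count == 0:
            candidate = x
        count += 1 if x == candidate else -1
    # The problem statement guarantees a majority element exists, so the
    # surviving candidate is the answer; otherwise a second pass would verify it.
    return candidate

print(majority_element_bm([1, 1, 1, 2, 2]))  # prints 1
```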
from constants import my_path
from parser import extract_all_occurrences
from utils import find_all_path_in_folder_recur, filter_list_only_filename, filter_list_lowercase
STATIC_FILES_EXTENSIONS = ['js', 'css']
static_files_in_filesystem = []
for extension in STATIC_FILES_EXTENSIONS:
    static_files_in_filesystem += find_all_path_in_folder_recur(my_path, extension)
static_files_in_filesystem_filename = filter_list_only_filename(static_files_in_filesystem)
static_files_in_filesystem_filename_lower = filter_list_lowercase(static_files_in_filesystem_filename)
references_path = extract_all_occurrences(my_path)
references_files = filter_list_only_filename(references_path)
print('Suspects list:')
for i in references_files:
    if i not in static_files_in_filesystem_filename and i.lower() in static_files_in_filesystem_filename_lower:
        print(i)
        # res = subprocess.check_output('find . -name "*{}*"'.format(i), stderr=subprocess.STDOUT)
"utils.filter_list_lowercase",
"parser.extract_all_occurrences",
"utils.find_all_path_in_folder_recur",
"utils.filter_list_only_filename"
] | [((411, 464), 'utils.filter_list_only_filename', 'filter_list_only_filename', (['static_files_in_filesystem'], {}), '(static_files_in_filesystem)\n', (436, 464), False, 'from utils import find_all_path_in_folder_recur, filter_list_only_filename, filter_list_lowercase\n'), ((509, 567), 'utils.filter_list_lowercase', 'filter_list_lowercase', (['static_files_in_filesystem_filename'], {}), '(static_files_in_filesystem_filename)\n', (530, 567), False, 'from utils import find_all_path_in_folder_recur, filter_list_only_filename, filter_list_lowercase\n'), ((587, 619), 'parser.extract_all_occurrences', 'extract_all_occurrences', (['my_path'], {}), '(my_path)\n', (610, 619), False, 'from parser import extract_all_occurrences\n'), ((639, 681), 'utils.filter_list_only_filename', 'filter_list_only_filename', (['references_path'], {}), '(references_path)\n', (664, 681), False, 'from utils import find_all_path_in_folder_recur, filter_list_only_filename, filter_list_lowercase\n'), ((322, 371), 'utils.find_all_path_in_folder_recur', 'find_all_path_in_folder_recur', (['my_path', 'extension'], {}), '(my_path, extension)\n', (351, 371), False, 'from utils import find_all_path_in_folder_recur, filter_list_only_filename, filter_list_lowercase\n')] |
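
The script flags a reference as a suspect when its exact name is missing from disk but a lowercase match exists, which usually indicates a case mismatch. A dependency-free sketch of that comparison; the helper name and the sample filenames below are invented for illustration, while the real lookups come from the local utils and parser modules.

```python
# Case-mismatch detection: exact-name miss plus lowercase hit -> suspect.
def case_mismatch_suspects(referenced_names, filesystem_names):
    exact = set(filesystem_names)
    lowered = {name.lower() for name in filesystem_names}
    return [n for n in referenced_names if n not in exact and n.lower() in lowered]

# "App.JS" is referenced but only "app.js" exists on disk, so it is reported.
print(case_mismatch_suspects(["App.JS", "style.css"], ["app.js", "style.css"]))
```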
import bpy
import os
import sys
'''
Simplifies mesh to target number of faces
Requires Blender 2.8
Author: <NAME>
@input:
<obj_file>
<target_faces> number of target faces
<outfile> name of simplified .obj file
@output:
simplified mesh .obj
to run it from cmd line:
/opt/blender/blender --background --python blender_process.py /home/rana/koala.obj 1000 /home/rana/koala_1000.obj
'''
class Process:
    def __init__(self, obj_file, target_faces, export_name):
        assert (bpy.context.selected_objects != []), 'ERROR: no file present in blender context'
        mesh = self.load_obj(obj_file)
        self.hadLooseGeometry_initialRun = False
        self.delete_looseGeometry(mesh, "initial_run")
        self.hadDouble_vertices = False
        #self.clean_doubles(mesh)
        self.hadZeroArea_edges_or_faces = False
        self.repair_zeroArea_faces(mesh)
        self.simplify(mesh, target_faces)
        self.export_obj(mesh, export_name)
        # If loose geometry shows up again after the whole process, export a duplicate without it
        self.hadLooseGeometry_postRun = False
        self.delete_looseGeometry(mesh, "post_run")
        if self.hadLooseGeometry_postRun:
            fileName_parts = export_name.split(".")
            new_export_name = fileName_parts[0] + "_no_loose_geometry." + fileName_parts[1]
            self.export_obj(mesh, new_export_name)

    def load_obj(self, obj_file):
        bpy.ops.import_scene.obj(filepath=obj_file, axis_forward='-Z', axis_up='Y', filter_glob="*.obj;*.mtl",
                                 use_edges=True,
                                 use_smooth_groups=True, use_split_objects=False, use_split_groups=False,
                                 use_groups_as_vgroups=False, use_image_search=True, split_mode='ON')
        ob = bpy.context.selected_objects[0]
        assert (ob.type == 'MESH'), 'ERROR: object type does not match MESH type'
        return ob
    def subsurf(self, mesh):
        # subdivide mesh
        bpy.context.view_layer.objects.active = mesh
        mod = mesh.modifiers.new(name='Subsurf', type='SUBSURF')
        mod.subdivision_type = 'SIMPLE'
        bpy.ops.object.modifier_apply(modifier=mod.name)
        # now triangulate
        mod = mesh.modifiers.new(name='Triangluate', type='TRIANGULATE')
        bpy.ops.object.modifier_apply(modifier=mod.name)

    def simplify(self, mesh, target_faces):
        bpy.context.view_layer.objects.active = mesh
        mod = mesh.modifiers.new(name='Decimate', type='DECIMATE')
        bpy.context.object.modifiers['Decimate'].use_collapse_triangulate = True
        # upsample mesh if too low poly
        nfaces = len(mesh.data.polygons)
        while nfaces < target_faces:
            self.subsurf(mesh)
            nfaces = len(mesh.data.polygons)
        ratio = target_faces / float(nfaces)
        mod.ratio = float('%s' % ('%.6g' % (ratio)))
        print('faces: ', mod.face_count, mod.ratio)
        bpy.ops.object.modifier_apply(modifier=mod.name)
    # Are the bpy.context.selected_objects[0] calls possibly to blame for poor reduction, since not the whole mesh gets selected?
    # difference between context.scene.objects and context.view_layer.objects
    def clean_doubles(self, mesh):
        bpy.context.view_layer.objects.active = mesh
        vertexCount_before = len(mesh.data.vertices)
        bpy.ops.object.editmode_toggle()
        bpy.ops.mesh.select_all(action='SELECT')
        #bpy.ops.gpencil.stroke_merge_by_distance(threshold=0.001)
        bpy.ops.mesh.remove_doubles(threshold=0.0001, use_unselected=False)
        bpy.ops.object.editmode_toggle()
        vertexCount_after = len(mesh.data.vertices)
        if vertexCount_after < vertexCount_before:
            print('Remove double vertices for: %s' % mesh.name)
            self.hadDouble_vertices = True

    def repair_zeroArea_faces(self, mesh):
        # For now simply apply retriangulation
        bpy.context.view_layer.objects.active = mesh
        mod = mesh.modifiers.new(name='Triangulate', type='TRIANGULATE')
        bpy.ops.object.modifier_apply(modifier=mod.name)
        # Dissolve zero area faces and zero length edges
        vertexCount_before = len(mesh.data.vertices)
        bpy.ops.object.editmode_toggle()
        bpy.ops.mesh.select_all(action='SELECT')
        bpy.ops.mesh.dissolve_degenerate(threshold=0.0001)
        bpy.ops.object.editmode_toggle()
        vertexCount_after = len(mesh.data.vertices)
        if vertexCount_after < vertexCount_before:
            print('Remove zeroArea faces or edges for: %s' % mesh.name)
            self.hadZeroArea_edges_or_faces = True

    def delete_looseGeometry(self, mesh, indicator):
        bpy.context.view_layer.objects.active = mesh
        vertexCount_before = len(mesh.data.vertices)
        bpy.ops.object.editmode_toggle()
        bpy.ops.mesh.select_all(action='SELECT')
        bpy.ops.mesh.delete_loose(use_verts=True, use_edges=True, use_faces=True)
        bpy.ops.object.editmode_toggle()
        vertexCount_after = len(mesh.data.vertices)
        if vertexCount_after < vertexCount_before:
            print('Remove loose geometry at ' + indicator + ' for: %s' % mesh.name)
            if indicator == "initial_run":
                self.hadLooseGeometry_initialRun = True
            if indicator == "post_run":
                self.hadLooseGeometry_postRun = True

    def export_obj(self, mesh, export_name):
        outpath = os.path.dirname(export_name)
        if not os.path.isdir(outpath): os.makedirs(outpath)
        print('EXPORTING', export_name)
        bpy.ops.object.select_all(action='DESELECT')
        mesh.select_set(state=True)
        bpy.ops.export_scene.obj(filepath=export_name, check_existing=False, filter_glob="*.obj;*.mtl",
                                 use_selection=True, use_animation=False, use_mesh_modifiers=True, use_edges=True,
                                 use_smooth_groups=False, use_smooth_groups_bitflags=False, use_normals=True,
                                 use_uvs=False, use_materials=False, use_triangles=True, use_nurbs=False,
                                 use_vertex_groups=False, use_blen_objects=True, group_by_object=False,
                                 group_by_material=False, keep_vertex_order=True, global_scale=1, path_mode='AUTO',
                                 axis_forward='-Z', axis_up='Y')
"""
obj_file = sys.argv[-3]
target_faces = int(sys.argv[-2])
export_name = sys.argv[-1]
print('args: ', obj_file, target_faces, export_name)
blender = Process(obj_file, target_faces, export_name)
"""
| [
"bpy.ops.object.editmode_toggle",
"bpy.ops.mesh.select_all",
"bpy.ops.export_scene.obj",
"os.makedirs",
"bpy.ops.object.select_all",
"bpy.ops.mesh.dissolve_degenerate",
"os.path.dirname",
"bpy.ops.mesh.remove_doubles",
"os.path.isdir",
"bpy.ops.import_scene.obj",
"bpy.ops.mesh.delete_loose",
"bpy.ops.object.modifier_apply"
] | [((1470, 1743), 'bpy.ops.import_scene.obj', 'bpy.ops.import_scene.obj', ([], {'filepath': 'obj_file', 'axis_forward': '"""-Z"""', 'axis_up': '"""Y"""', 'filter_glob': '"""*.obj;*.mtl"""', 'use_edges': '(True)', 'use_smooth_groups': '(True)', 'use_split_objects': '(False)', 'use_split_groups': '(False)', 'use_groups_as_vgroups': '(False)', 'use_image_search': '(True)', 'split_mode': '"""ON"""'}), "(filepath=obj_file, axis_forward='-Z', axis_up='Y',\n filter_glob='*.obj;*.mtl', use_edges=True, use_smooth_groups=True,\n use_split_objects=False, use_split_groups=False, use_groups_as_vgroups=\n False, use_image_search=True, split_mode='ON')\n", (1494, 1743), False, 'import bpy\n'), ((2196, 2244), 'bpy.ops.object.modifier_apply', 'bpy.ops.object.modifier_apply', ([], {'modifier': 'mod.name'}), '(modifier=mod.name)\n', (2225, 2244), False, 'import bpy\n'), ((2352, 2400), 'bpy.ops.object.modifier_apply', 'bpy.ops.object.modifier_apply', ([], {'modifier': 'mod.name'}), '(modifier=mod.name)\n', (2381, 2400), False, 'import bpy\n'), ((2999, 3047), 'bpy.ops.object.modifier_apply', 'bpy.ops.object.modifier_apply', ([], {'modifier': 'mod.name'}), '(modifier=mod.name)\n', (3028, 3047), False, 'import bpy\n'), ((3403, 3435), 'bpy.ops.object.editmode_toggle', 'bpy.ops.object.editmode_toggle', ([], {}), '()\n', (3433, 3435), False, 'import bpy\n'), ((3444, 3484), 'bpy.ops.mesh.select_all', 'bpy.ops.mesh.select_all', ([], {'action': '"""SELECT"""'}), "(action='SELECT')\n", (3467, 3484), False, 'import bpy\n'), ((3560, 3627), 'bpy.ops.mesh.remove_doubles', 'bpy.ops.mesh.remove_doubles', ([], {'threshold': '(0.0001)', 'use_unselected': '(False)'}), '(threshold=0.0001, use_unselected=False)\n', (3587, 3627), False, 'import bpy\n'), ((3636, 3668), 'bpy.ops.object.editmode_toggle', 'bpy.ops.object.editmode_toggle', ([], {}), '()\n', (3666, 3668), False, 'import bpy\n'), ((4105, 4153), 'bpy.ops.object.modifier_apply', 'bpy.ops.object.modifier_apply', ([], {'modifier': 'mod.name'}), '(modifier=mod.name)\n', (4134, 4153), False, 'import bpy\n'), ((4273, 4305), 'bpy.ops.object.editmode_toggle', 'bpy.ops.object.editmode_toggle', ([], {}), '()\n', (4303, 4305), False, 'import bpy\n'), ((4314, 4354), 'bpy.ops.mesh.select_all', 'bpy.ops.mesh.select_all', ([], {'action': '"""SELECT"""'}), "(action='SELECT')\n", (4337, 4354), False, 'import bpy\n'), ((4363, 4413), 'bpy.ops.mesh.dissolve_degenerate', 'bpy.ops.mesh.dissolve_degenerate', ([], {'threshold': '(0.0001)'}), '(threshold=0.0001)\n', (4395, 4413), False, 'import bpy\n'), ((4422, 4454), 'bpy.ops.object.editmode_toggle', 'bpy.ops.object.editmode_toggle', ([], {}), '()\n', (4452, 4454), False, 'import bpy\n'), ((4850, 4882), 'bpy.ops.object.editmode_toggle', 'bpy.ops.object.editmode_toggle', ([], {}), '()\n', (4880, 4882), False, 'import bpy\n'), ((4891, 4931), 'bpy.ops.mesh.select_all', 'bpy.ops.mesh.select_all', ([], {'action': '"""SELECT"""'}), "(action='SELECT')\n", (4914, 4931), False, 'import bpy\n'), ((4940, 5013), 'bpy.ops.mesh.delete_loose', 'bpy.ops.mesh.delete_loose', ([], {'use_verts': '(True)', 'use_edges': '(True)', 'use_faces': '(True)'}), '(use_verts=True, use_edges=True, use_faces=True)\n', (4965, 5013), False, 'import bpy\n'), ((5022, 5054), 'bpy.ops.object.editmode_toggle', 'bpy.ops.object.editmode_toggle', ([], {}), '()\n', (5052, 5054), False, 'import bpy\n'), ((5499, 5527), 'os.path.dirname', 'os.path.dirname', (['export_name'], {}), '(export_name)\n', (5514, 5527), False, 'import os\n'), ((5636, 5680), 'bpy.ops.object.select_all', 
'bpy.ops.object.select_all', ([], {'action': '"""DESELECT"""'}), "(action='DESELECT')\n", (5661, 5680), False, 'import bpy\n'), ((5725, 6266), 'bpy.ops.export_scene.obj', 'bpy.ops.export_scene.obj', ([], {'filepath': 'export_name', 'check_existing': '(False)', 'filter_glob': '"""*.obj;*.mtl"""', 'use_selection': '(True)', 'use_animation': '(False)', 'use_mesh_modifiers': '(True)', 'use_edges': '(True)', 'use_smooth_groups': '(False)', 'use_smooth_groups_bitflags': '(False)', 'use_normals': '(True)', 'use_uvs': '(False)', 'use_materials': '(False)', 'use_triangles': '(True)', 'use_nurbs': '(False)', 'use_vertex_groups': '(False)', 'use_blen_objects': '(True)', 'group_by_object': '(False)', 'group_by_material': '(False)', 'keep_vertex_order': '(True)', 'global_scale': '(1)', 'path_mode': '"""AUTO"""', 'axis_forward': '"""-Z"""', 'axis_up': '"""Y"""'}), "(filepath=export_name, check_existing=False,\n filter_glob='*.obj;*.mtl', use_selection=True, use_animation=False,\n use_mesh_modifiers=True, use_edges=True, use_smooth_groups=False,\n use_smooth_groups_bitflags=False, use_normals=True, use_uvs=False,\n use_materials=False, use_triangles=True, use_nurbs=False,\n use_vertex_groups=False, use_blen_objects=True, group_by_object=False,\n group_by_material=False, keep_vertex_order=True, global_scale=1,\n path_mode='AUTO', axis_forward='-Z', axis_up='Y')\n", (5749, 6266), False, 'import bpy\n'), ((5543, 5565), 'os.path.isdir', 'os.path.isdir', (['outpath'], {}), '(outpath)\n', (5556, 5565), False, 'import os\n'), ((5567, 5587), 'os.makedirs', 'os.makedirs', (['outpath'], {}), '(outpath)\n', (5578, 5587), False, 'import os\n')] |
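
Process.simplify upsamples a mesh that is below the target face count before decimating, then sets the decimate ratio from the final count. Below is a rough standalone sketch of that arithmetic; the assumption that one SIMPLE subsurf pass roughly quadruples the face count is mine, whereas the real script re-reads len(mesh.data.polygons) after every pass.

```python
# Approximate the upsample-then-decimate ratio computed in Process.simplify.
def decimate_ratio(nfaces, target_faces):
    while nfaces < target_faces:
        nfaces *= 4  # assumed growth per SIMPLE subdivision pass (illustrative only)
    return float('%.6g' % (target_faces / float(nfaces)))

print(decimate_ratio(300, 1000))  # 300 -> 1200 faces, ratio ~0.833333
```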
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: GetUserInfo.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
import Common_pb2 as Common__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='GetUserInfo.proto',
package='GetUserInfo',
syntax='proto2',
serialized_pb=_b('\n\x11GetUserInfo.proto\x12\x0bGetUserInfo\x1a\x0c\x43ommon.proto\"\x15\n\x03\x43\x32S\x12\x0e\n\x06userID\x18\x01 \x02(\x04\"\xa7\x01\n\x03S2C\x12\x10\n\x08nickName\x18\x01 \x02(\t\x12\x11\n\tavatarUrl\x18\x02 \x02(\t\x12\x10\n\x08\x61piLevel\x18\x03 \x02(\t\x12\x12\n\nhkQotRight\x18\x04 \x02(\x05\x12\x12\n\nusQotRight\x18\x05 \x02(\x05\x12\x12\n\ncnQotRight\x18\x06 \x02(\x05\x12\x1d\n\x15isNeedAgreeDisclaimer\x18\x07 \x02(\x08\x12\x0e\n\x06userID\x18\x08 \x02(\x03\"(\n\x07Request\x12\x1d\n\x03\x63\x32s\x18\x01 \x02(\x0b\x32\x10.GetUserInfo.C2S\"a\n\x08Response\x12\x15\n\x07retType\x18\x01 \x02(\x05:\x04-400\x12\x0e\n\x06retMsg\x18\x02 \x01(\t\x12\x0f\n\x07\x65rrCode\x18\x03 \x01(\x05\x12\x1d\n\x03s2c\x18\x04 \x01(\x0b\x32\x10.GetUserInfo.S2C*[\n\x08QotRight\x12\x13\n\x0fQotRight_Unknow\x10\x00\x12\x10\n\x0cQotRight_Bmp\x10\x01\x12\x13\n\x0fQotRight_Level1\x10\x02\x12\x13\n\x0fQotRight_Level2\x10\x03')
,
dependencies=[Common__pb2.DESCRIPTOR,])
_QOTRIGHT = _descriptor.EnumDescriptor(
name='QotRight',
full_name='GetUserInfo.QotRight',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='QotRight_Unknow', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='QotRight_Bmp', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='QotRight_Level1', index=2, number=2,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='QotRight_Level2', index=3, number=3,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=382,
serialized_end=473,
)
_sym_db.RegisterEnumDescriptor(_QOTRIGHT)
QotRight = enum_type_wrapper.EnumTypeWrapper(_QOTRIGHT)
QotRight_Unknow = 0
QotRight_Bmp = 1
QotRight_Level1 = 2
QotRight_Level2 = 3
_C2S = _descriptor.Descriptor(
name='C2S',
full_name='GetUserInfo.C2S',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='userID', full_name='GetUserInfo.C2S.userID', index=0,
number=1, type=4, cpp_type=4, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=48,
serialized_end=69,
)
_S2C = _descriptor.Descriptor(
name='S2C',
full_name='GetUserInfo.S2C',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='nickName', full_name='GetUserInfo.S2C.nickName', index=0,
number=1, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='avatarUrl', full_name='GetUserInfo.S2C.avatarUrl', index=1,
number=2, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='apiLevel', full_name='GetUserInfo.S2C.apiLevel', index=2,
number=3, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='hkQotRight', full_name='GetUserInfo.S2C.hkQotRight', index=3,
number=4, type=5, cpp_type=1, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='usQotRight', full_name='GetUserInfo.S2C.usQotRight', index=4,
number=5, type=5, cpp_type=1, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='cnQotRight', full_name='GetUserInfo.S2C.cnQotRight', index=5,
number=6, type=5, cpp_type=1, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='isNeedAgreeDisclaimer', full_name='GetUserInfo.S2C.isNeedAgreeDisclaimer', index=6,
number=7, type=8, cpp_type=7, label=2,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='userID', full_name='GetUserInfo.S2C.userID', index=7,
number=8, type=3, cpp_type=2, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=72,
serialized_end=239,
)
_REQUEST = _descriptor.Descriptor(
name='Request',
full_name='GetUserInfo.Request',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='c2s', full_name='GetUserInfo.Request.c2s', index=0,
number=1, type=11, cpp_type=10, label=2,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=241,
serialized_end=281,
)
_RESPONSE = _descriptor.Descriptor(
name='Response',
full_name='GetUserInfo.Response',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='retType', full_name='GetUserInfo.Response.retType', index=0,
number=1, type=5, cpp_type=1, label=2,
has_default_value=True, default_value=-400,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='retMsg', full_name='GetUserInfo.Response.retMsg', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='errCode', full_name='GetUserInfo.Response.errCode', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='s2c', full_name='GetUserInfo.Response.s2c', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=283,
serialized_end=380,
)
_REQUEST.fields_by_name['c2s'].message_type = _C2S
_RESPONSE.fields_by_name['s2c'].message_type = _S2C
DESCRIPTOR.message_types_by_name['C2S'] = _C2S
DESCRIPTOR.message_types_by_name['S2C'] = _S2C
DESCRIPTOR.message_types_by_name['Request'] = _REQUEST
DESCRIPTOR.message_types_by_name['Response'] = _RESPONSE
DESCRIPTOR.enum_types_by_name['QotRight'] = _QOTRIGHT
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
C2S = _reflection.GeneratedProtocolMessageType('C2S', (_message.Message,), dict(
DESCRIPTOR = _C2S,
__module__ = 'GetUserInfo_pb2'
# @@protoc_insertion_point(class_scope:GetUserInfo.C2S)
))
_sym_db.RegisterMessage(C2S)
S2C = _reflection.GeneratedProtocolMessageType('S2C', (_message.Message,), dict(
DESCRIPTOR = _S2C,
__module__ = 'GetUserInfo_pb2'
# @@protoc_insertion_point(class_scope:GetUserInfo.S2C)
))
_sym_db.RegisterMessage(S2C)
Request = _reflection.GeneratedProtocolMessageType('Request', (_message.Message,), dict(
DESCRIPTOR = _REQUEST,
__module__ = 'GetUserInfo_pb2'
# @@protoc_insertion_point(class_scope:GetUserInfo.Request)
))
_sym_db.RegisterMessage(Request)
Response = _reflection.GeneratedProtocolMessageType('Response', (_message.Message,), dict(
DESCRIPTOR = _RESPONSE,
__module__ = 'GetUserInfo_pb2'
# @@protoc_insertion_point(class_scope:GetUserInfo.Response)
))
_sym_db.RegisterMessage(Response)
# @@protoc_insertion_point(module_scope)
| [
"google.protobuf.descriptor.EnumValueDescriptor",
"google.protobuf.symbol_database.Default",
"google.protobuf.descriptor.FieldDescriptor",
"google.protobuf.internal.enum_type_wrapper.EnumTypeWrapper"
] | [((539, 565), 'google.protobuf.symbol_database.Default', '_symbol_database.Default', ([], {}), '()\n', (563, 565), True, 'from google.protobuf import symbol_database as _symbol_database\n'), ((2482, 2526), 'google.protobuf.internal.enum_type_wrapper.EnumTypeWrapper', 'enum_type_wrapper.EnumTypeWrapper', (['_QOTRIGHT'], {}), '(_QOTRIGHT)\n', (2515, 2526), False, 'from google.protobuf.internal import enum_type_wrapper\n'), ((1846, 1949), 'google.protobuf.descriptor.EnumValueDescriptor', '_descriptor.EnumValueDescriptor', ([], {'name': '"""QotRight_Unknow"""', 'index': '(0)', 'number': '(0)', 'options': 'None', 'type': 'None'}), "(name='QotRight_Unknow', index=0, number=0,\n options=None, type=None)\n", (1877, 1949), True, 'from google.protobuf import descriptor as _descriptor\n'), ((1970, 2070), 'google.protobuf.descriptor.EnumValueDescriptor', '_descriptor.EnumValueDescriptor', ([], {'name': '"""QotRight_Bmp"""', 'index': '(1)', 'number': '(1)', 'options': 'None', 'type': 'None'}), "(name='QotRight_Bmp', index=1, number=1,\n options=None, type=None)\n", (2001, 2070), True, 'from google.protobuf import descriptor as _descriptor\n'), ((2091, 2194), 'google.protobuf.descriptor.EnumValueDescriptor', '_descriptor.EnumValueDescriptor', ([], {'name': '"""QotRight_Level1"""', 'index': '(2)', 'number': '(2)', 'options': 'None', 'type': 'None'}), "(name='QotRight_Level1', index=2, number=2,\n options=None, type=None)\n", (2122, 2194), True, 'from google.protobuf import descriptor as _descriptor\n'), ((2215, 2318), 'google.protobuf.descriptor.EnumValueDescriptor', '_descriptor.EnumValueDescriptor', ([], {'name': '"""QotRight_Level2"""', 'index': '(3)', 'number': '(3)', 'options': 'None', 'type': 'None'}), "(name='QotRight_Level2', index=3, number=3,\n options=None, type=None)\n", (2246, 2318), True, 'from google.protobuf import descriptor as _descriptor\n'), ((2758, 3074), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', ([], {'name': '"""userID"""', 'full_name': '"""GetUserInfo.C2S.userID"""', 'index': '(0)', 'number': '(1)', 'type': '(4)', 'cpp_type': '(4)', 'label': '(2)', 'has_default_value': '(False)', 'default_value': '(0)', 'message_type': 'None', 'enum_type': 'None', 'containing_type': 'None', 'is_extension': '(False)', 'extension_scope': 'None', 'options': 'None', 'file': 'DESCRIPTOR'}), "(name='userID', full_name=\n 'GetUserInfo.C2S.userID', index=0, number=1, type=4, cpp_type=4, label=\n 2, has_default_value=False, default_value=0, message_type=None,\n enum_type=None, containing_type=None, is_extension=False,\n extension_scope=None, options=None, file=DESCRIPTOR)\n", (2785, 3074), True, 'from google.protobuf import descriptor as _descriptor\n'), ((4555, 4878), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', ([], {'name': '"""hkQotRight"""', 'full_name': '"""GetUserInfo.S2C.hkQotRight"""', 'index': '(3)', 'number': '(4)', 'type': '(5)', 'cpp_type': '(1)', 'label': '(2)', 'has_default_value': '(False)', 'default_value': '(0)', 'message_type': 'None', 'enum_type': 'None', 'containing_type': 'None', 'is_extension': '(False)', 'extension_scope': 'None', 'options': 'None', 'file': 'DESCRIPTOR'}), "(name='hkQotRight', full_name=\n 'GetUserInfo.S2C.hkQotRight', index=3, number=4, type=5, cpp_type=1,\n label=2, has_default_value=False, default_value=0, message_type=None,\n enum_type=None, containing_type=None, is_extension=False,\n extension_scope=None, options=None, file=DESCRIPTOR)\n", (4582, 4878), True, 'from google.protobuf import 
descriptor as _descriptor\n'), ((4904, 5227), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', ([], {'name': '"""usQotRight"""', 'full_name': '"""GetUserInfo.S2C.usQotRight"""', 'index': '(4)', 'number': '(5)', 'type': '(5)', 'cpp_type': '(1)', 'label': '(2)', 'has_default_value': '(False)', 'default_value': '(0)', 'message_type': 'None', 'enum_type': 'None', 'containing_type': 'None', 'is_extension': '(False)', 'extension_scope': 'None', 'options': 'None', 'file': 'DESCRIPTOR'}), "(name='usQotRight', full_name=\n 'GetUserInfo.S2C.usQotRight', index=4, number=5, type=5, cpp_type=1,\n label=2, has_default_value=False, default_value=0, message_type=None,\n enum_type=None, containing_type=None, is_extension=False,\n extension_scope=None, options=None, file=DESCRIPTOR)\n", (4931, 5227), True, 'from google.protobuf import descriptor as _descriptor\n'), ((5253, 5576), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', ([], {'name': '"""cnQotRight"""', 'full_name': '"""GetUserInfo.S2C.cnQotRight"""', 'index': '(5)', 'number': '(6)', 'type': '(5)', 'cpp_type': '(1)', 'label': '(2)', 'has_default_value': '(False)', 'default_value': '(0)', 'message_type': 'None', 'enum_type': 'None', 'containing_type': 'None', 'is_extension': '(False)', 'extension_scope': 'None', 'options': 'None', 'file': 'DESCRIPTOR'}), "(name='cnQotRight', full_name=\n 'GetUserInfo.S2C.cnQotRight', index=5, number=6, type=5, cpp_type=1,\n label=2, has_default_value=False, default_value=0, message_type=None,\n enum_type=None, containing_type=None, is_extension=False,\n extension_scope=None, options=None, file=DESCRIPTOR)\n", (5280, 5576), True, 'from google.protobuf import descriptor as _descriptor\n'), ((5602, 5952), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', ([], {'name': '"""isNeedAgreeDisclaimer"""', 'full_name': '"""GetUserInfo.S2C.isNeedAgreeDisclaimer"""', 'index': '(6)', 'number': '(7)', 'type': '(8)', 'cpp_type': '(7)', 'label': '(2)', 'has_default_value': '(False)', 'default_value': '(False)', 'message_type': 'None', 'enum_type': 'None', 'containing_type': 'None', 'is_extension': '(False)', 'extension_scope': 'None', 'options': 'None', 'file': 'DESCRIPTOR'}), "(name='isNeedAgreeDisclaimer', full_name=\n 'GetUserInfo.S2C.isNeedAgreeDisclaimer', index=6, number=7, type=8,\n cpp_type=7, label=2, has_default_value=False, default_value=False,\n message_type=None, enum_type=None, containing_type=None, is_extension=\n False, extension_scope=None, options=None, file=DESCRIPTOR)\n", (5629, 5952), True, 'from google.protobuf import descriptor as _descriptor\n'), ((5977, 6293), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', ([], {'name': '"""userID"""', 'full_name': '"""GetUserInfo.S2C.userID"""', 'index': '(7)', 'number': '(8)', 'type': '(3)', 'cpp_type': '(2)', 'label': '(2)', 'has_default_value': '(False)', 'default_value': '(0)', 'message_type': 'None', 'enum_type': 'None', 'containing_type': 'None', 'is_extension': '(False)', 'extension_scope': 'None', 'options': 'None', 'file': 'DESCRIPTOR'}), "(name='userID', full_name=\n 'GetUserInfo.S2C.userID', index=7, number=8, type=3, cpp_type=2, label=\n 2, has_default_value=False, default_value=0, message_type=None,\n enum_type=None, containing_type=None, is_extension=False,\n extension_scope=None, options=None, file=DESCRIPTOR)\n", (6004, 6293), True, 'from google.protobuf import descriptor as _descriptor\n'), ((6687, 7006), 
'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', ([], {'name': '"""c2s"""', 'full_name': '"""GetUserInfo.Request.c2s"""', 'index': '(0)', 'number': '(1)', 'type': '(11)', 'cpp_type': '(10)', 'label': '(2)', 'has_default_value': '(False)', 'default_value': 'None', 'message_type': 'None', 'enum_type': 'None', 'containing_type': 'None', 'is_extension': '(False)', 'extension_scope': 'None', 'options': 'None', 'file': 'DESCRIPTOR'}), "(name='c2s', full_name='GetUserInfo.Request.c2s',\n index=0, number=1, type=11, cpp_type=10, label=2, has_default_value=\n False, default_value=None, message_type=None, enum_type=None,\n containing_type=None, is_extension=False, extension_scope=None, options\n =None, file=DESCRIPTOR)\n", (6714, 7006), True, 'from google.protobuf import descriptor as _descriptor\n'), ((7404, 7728), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', ([], {'name': '"""retType"""', 'full_name': '"""GetUserInfo.Response.retType"""', 'index': '(0)', 'number': '(1)', 'type': '(5)', 'cpp_type': '(1)', 'label': '(2)', 'has_default_value': '(True)', 'default_value': '(-400)', 'message_type': 'None', 'enum_type': 'None', 'containing_type': 'None', 'is_extension': '(False)', 'extension_scope': 'None', 'options': 'None', 'file': 'DESCRIPTOR'}), "(name='retType', full_name=\n 'GetUserInfo.Response.retType', index=0, number=1, type=5, cpp_type=1,\n label=2, has_default_value=True, default_value=-400, message_type=None,\n enum_type=None, containing_type=None, is_extension=False,\n extension_scope=None, options=None, file=DESCRIPTOR)\n", (7431, 7728), True, 'from google.protobuf import descriptor as _descriptor\n'), ((8121, 8443), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', ([], {'name': '"""errCode"""', 'full_name': '"""GetUserInfo.Response.errCode"""', 'index': '(2)', 'number': '(3)', 'type': '(5)', 'cpp_type': '(1)', 'label': '(1)', 'has_default_value': '(False)', 'default_value': '(0)', 'message_type': 'None', 'enum_type': 'None', 'containing_type': 'None', 'is_extension': '(False)', 'extension_scope': 'None', 'options': 'None', 'file': 'DESCRIPTOR'}), "(name='errCode', full_name=\n 'GetUserInfo.Response.errCode', index=2, number=3, type=5, cpp_type=1,\n label=1, has_default_value=False, default_value=0, message_type=None,\n enum_type=None, containing_type=None, is_extension=False,\n extension_scope=None, options=None, file=DESCRIPTOR)\n", (8148, 8443), True, 'from google.protobuf import descriptor as _descriptor\n'), ((8469, 8788), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', ([], {'name': '"""s2c"""', 'full_name': '"""GetUserInfo.Response.s2c"""', 'index': '(3)', 'number': '(4)', 'type': '(11)', 'cpp_type': '(10)', 'label': '(1)', 'has_default_value': '(False)', 'default_value': 'None', 'message_type': 'None', 'enum_type': 'None', 'containing_type': 'None', 'is_extension': '(False)', 'extension_scope': 'None', 'options': 'None', 'file': 'DESCRIPTOR'}), "(name='s2c', full_name=\n 'GetUserInfo.Response.s2c', index=3, number=4, type=11, cpp_type=10,\n label=1, has_default_value=False, default_value=None, message_type=None,\n enum_type=None, containing_type=None, is_extension=False,\n extension_scope=None, options=None, file=DESCRIPTOR)\n", (8496, 8788), True, 'from google.protobuf import descriptor as _descriptor\n')] |
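
The generated module above is consumed like any protobuf-python binding. A sketch of building and round-tripping the Request message, assuming the file is importable as GetUserInfo_pb2 with Common_pb2 on the path; the user ID is an arbitrary example value.

```python
# Round-trip the Request message defined by the generated descriptors above.
import GetUserInfo_pb2

req = GetUserInfo_pb2.Request()
req.c2s.userID = 12345            # required uint64 field of the nested C2S message
payload = req.SerializeToString()

decoded = GetUserInfo_pb2.Request()
decoded.ParseFromString(payload)
print(decoded.c2s.userID)         # 12345
```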
"""
Reads data in log files.
Sample invocation: python read_log_files.py --directory=../1igd/MultiUmbrella17 --output_filename='Test.dat'
"""
import numpy as np
import os
import pickle
#import os.path
import joblib
import glob
import argparse
##############################################################################
# First, read the file
##############################################################################
parser = argparse.ArgumentParser(description='Hi')
parser.add_argument("--directory", help = 'This is the path to the directory containing the PDB files we want to analyze...' )
parser.add_argument("--temperatures", default = "*.***", type = str, help = "Temperatures at which you want to run analysis, as a comma-separated string with no spaces. For instnace you can type --temperatures='0.800, 0.900' or --temperatures = '0.8**' or --temperatures='*.***', the latter being the default ")
parser.add_argument("--variables", default = "natives,energy,rmsd", type = str, help = "Variables you want to read, as a comma-separated string with no spaces. Default is 'natives,energy,rmsd'")
parser.add_argument("--step_multiples", default = "1", type = str, help = "Read only MC steps that are a multiple of this")
parser.add_argument("--min_step", default = '0', help = 'minimum MC step to analyze. Defaults to 0.')
parser.add_argument("--max_step", default = 'inf', help = 'maximum MC step to analyze. Defaults to infinity.')
parser.add_argument("--output_filename", default = "Equilibrium_log_data.dat", help = "A file with this name, which contains the log file data, will be saved in directory. Defaults to Equilibrium_log_data.dat ")
args = parser.parse_args()
directory = args.directory
variables = [item for item in args.variables.split(',')]
step_multiples_to_read = int(args.step_multiples)
min_step=float(args.min_step)
max_step=float(args.max_step)
filename = args.output_filename
def get_temp(filename):
    splitline=filename.split(sep='/')
    split2=splitline[2].split('_')
    #print(split2)
    while split2[1][0] not in '0123456789':
        del split2[1]
    temp=float(split2[1][0:5])
    print(temp)
    return temp
def read_file(PDB_files, variables):
    """
    PDB_files actually means log_files (the variable name is a holdover).
    variables is a list of variables that you want to read from the log files, but they need to be called
    exactly what they are called in the first line of the log file
    for instance, 'energy', 'natives', etc...
    Returns a 3D array data where data[i,j,k] corresponds to log_file i, time j within that log file, and variable k (in case you care about multiple variables like energies, natives, etc)
    """
    data=[]
    lens=[]
    variable_indices=[]
    times=[]
    temperatures=[]
    setpoints=[]
    for filecounter, filename in enumerate(PDB_files):
        step_index=0
        print("Reading file {}".format(filename))
        openfile=open(filename)
        data.append([])
        #energies.append([])
        #contacts.append([])
        #rmsd.append([])
        #temperatures.append(float(temp))
        #        file 1              variable 1 times     variable 2 times
        #data=[ [ [ x1, x2, x3 ... ], [y1, y2, y3... ],... ] ]
        for line in openfile.readlines():
            line=line.rstrip('\n')
            if len(line)>0:
                entries=line.split()
                if 'step #' in line:
                    fields = ['step'] + line.split()[2:]
                    #print(fields)
                    temperature_index=fields.index('temp')
                    if 'setpoint' in fields:
                        setpoint_index=fields.index('setpoint')
                    else:
                        setpoint_index=np.nan
                    for variable in variables:
                        variable_indices.append(fields.index(variable))
                        data[filecounter].append([])
                    #print(variable_indices)
                if entries[0]=='STEP':
                    if np.mod(int(entries[1]), step_multiples_to_read)==0 and int(entries[1])>=min_step and int(entries[1])<max_step:
                        step_index+=1
                        if filecounter==0:
                            times.append(int(entries[1]))
                        #print(entries[variable_indices[1]+1])
                        if step_index==1: #learn what reporter values we currently have...only need to do this once per log file
                            temperatures.append(float(entries[temperature_index+1][0:5]))
                            if 'setpoint' in fields:
                                setpoints.append(float(entries[setpoint_index+1]))
                            else:
                                setpoints.append(0)
                        for v, variable in enumerate(variables):
                            data[filecounter][v].append(float(entries[variable_indices[v]+1]))
        lens.append(len(data[filecounter][0]))
        data[filecounter]=np.array(data[filecounter])
        #if filecounter==0:
        #    print(data[0][1,:])
        x=np.zeros((1, len(data[filecounter][0]), len(data[filecounter])))
        for v in range(len(variables)):
            x[0,:,v]=data[filecounter][v,:]
        data[filecounter]=x
        #if filecounter==0:
        #    print(data[filecounter][0,:,1])
    nonzero_lengths = [i for i in range(len(lens)) if lens[i]>0]
    data = [x for i, x in enumerate(data) if i in nonzero_lengths]
    lens = [l for i, l in enumerate(lens) if i in nonzero_lengths]
    data=np.vstack([x[:, 0:min(lens), :] for x in data])
    #print(data[0,:,1])
    #We have created an array data that is files (i.e. conditions) by timepoints by variables
    return data, temperatures, setpoints, np.array(times)
All_files=glob.glob('{}/*.log'.format(directory))
log_files=[]
if args.temperatures!='*.***':
    temperatures = [float(item) for item in args.temperatures.split(',')]
    for file in All_files:
        #print(file)
        if get_temp(file) in temperatures:
            print(file)
            log_files.append(file)
else:
    log_files=All_files
#print (PDB_files)
print('Reading files')
data, temperatures, setpoints, times=read_file(log_files, variables)
#print(data[0,:,1])
print("Saving data")
joblib.dump([data, variables, log_files, temperatures, setpoints, times],"{}/{}".format(directory, filename))
| [
"numpy.array",
"argparse.ArgumentParser"
] | [((514, 555), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Hi"""'}), "(description='Hi')\n", (537, 555), False, 'import argparse\n'), ((4472, 4499), 'numpy.array', 'np.array', (['data[filecounter]'], {}), '(data[filecounter])\n', (4480, 4499), True, 'import numpy as np\n'), ((5168, 5183), 'numpy.array', 'np.array', (['times'], {}), '(times)\n', (5176, 5183), True, 'import numpy as np\n')] |
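
The script ends with a joblib.dump of six objects, so reading the results back is a single joblib.load. A short sketch; the path is illustrative and must match the --directory and --output_filename actually used.

```python
# Load the bundle written by read_log_files.py (path shown is an example).
import joblib

data, variables, log_files, temperatures, setpoints, times = joblib.load(
    "../1igd/MultiUmbrella17/Equilibrium_log_data.dat")

# data[i, j, k] -> log file i, timepoint j, variable k (e.g. natives, energy, rmsd)
print(data.shape, variables, temperatures[:3])
```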
# -*- Encoding: utf-8 -*-
""" CellProfiler.CellProfilerGUI.CPFrame - Cell Profiler's main window
"""
import codecs
import logging
import os
import pdb
import sys
import wx
import wx.adv
import wx.html
import wx.lib.inspection
import wx.lib.scrolledpanel
from cellprofiler_core.preferences import EXT_PIPELINE
from cellprofiler_core.preferences import EXT_PROJECT
from cellprofiler_core.preferences import get_show_sampling
from cellprofiler_core.preferences import get_startup_blurb
from cellprofiler_core.utilities.core.modules import instantiate_module
import cellprofiler
import cellprofiler.gui
import cellprofiler.gui.utilities.icon
from ._welcome_frame import WelcomeFrame
from ._workspace_model import Workspace
from .utilities.figure import close_all
from .help.content import read_content
from .help.menu import Menu
from .html.htmlwindow import HtmlClickableWindow
from .html.utils import rst_to_html_fragment
from .imagesetctrl import ImageSetCtrl
from .module_view import ModuleView
from .pathlist import PathListCtrl
from .pipeline import Pipeline
from .pipelinecontroller import PipelineController
from .pipelinelistview import PipelineListView
from .preferences_dialog._preferences_dialog import PreferencesDialog
from .preferences_view import PreferencesView
from .utilities.module_view import stop_validation_queue_thread
HELP_ON_FILE_LIST = """\
The *File List* panel displays the image files that are managed by the
**Images**, **Metadata**, **NamesAndTypes** and **Groups** modules.
You can drop files and directories into this window or use the
*Browse…* button to add files to the list. The context menu for the
window lets you display or remove files and lets you remove folders.
The buttons and checkbox along the bottom have the following
functions:
- *Browse…*: Browse for files and folders to add.
- *Clear*: Clear all entries from the File list
- *Show files excluded by filters*: *(Only shown if filtered based on
rules is selected)* Check this to see all files in the list. Uncheck
it to see only the files that pass the rules criteria in the
**Images** module.
- *Expand tree*: Expand all of the folders in the tree
- *Collapse tree*: Collapse the folders in the tree
"""
HELP_ON_MODULE_BUT_NONE_SELECTED = """\
The help button can be used to obtain help for the currently selected
module in the pipeline panel on the left side of the CellProfiler
interface.
You do not have any modules in the pipeline, yet. Add a
module to the pipeline using the “+” button or by using File > Load
Pipeline.\
"""
ID_FILE_NEW_WORKSPACE = wx.ID_NEW
ID_FILE_LOAD = wx.ID_OPEN
ID_FILE_LOAD_PIPELINE = wx.NewId()
ID_FILE_URL_LOAD_PIPELINE = wx.NewId()
ID_FILE_OPEN_IMAGE = wx.NewId()
ID_FILE_EXIT = wx.NewId()
ID_FILE_WIDGET_INSPECTOR = wx.NewId()
ID_FILE_SAVE_PIPELINE = wx.NewId()
ID_FILE_SAVE = wx.ID_SAVE
ID_FILE_SAVE_AS = wx.ID_SAVEAS
ID_FILE_REVERT_TO_SAVED = wx.NewId()
ID_FILE_CLEAR_PIPELINE = wx.NewId()
ID_FILE_EXPORT_IMAGE_SETS = wx.NewId()
ID_FILE_EXPORT_PIPELINE_NOTES = wx.NewId()
ID_FILE_IMPORT_FILE_LIST = wx.NewId()
ID_FILE_ANALYZE_IMAGES = wx.NewId()
ID_FILE_STOP_ANALYSIS = wx.NewId()
ID_FILE_PRINT = wx.NewId()
ID_FILE_PLATEVIEWER = wx.NewId()
ID_FILE_NEW_CP = wx.NewId()
ID_EDIT_SELECT_ALL = wx.NewId()
ID_EDIT_COPY = wx.NewId()
ID_EDIT_DUPLICATE = wx.NewId()
ID_EDIT_UNDO = wx.ID_UNDO
ID_EDIT_MOVE_UP = wx.NewId()
ID_EDIT_MOVE_DOWN = wx.NewId()
ID_EDIT_DELETE = wx.NewId()
ID_EDIT_EXPAND_ALL = wx.NewId()
ID_EDIT_COLLAPSE_ALL = wx.NewId()
ID_EDIT_BROWSE_FOR_FILES = wx.NewId()
ID_EDIT_BROWSE_FOR_FOLDER = wx.NewId()
ID_EDIT_CLEAR_FILE_LIST = wx.NewId()
ID_EDIT_REMOVE_FROM_FILE_LIST = wx.NewId()
ID_EDIT_SHOW_FILE_LIST_IMAGE = wx.NewId()
ID_EDIT_ENABLE_MODULE = wx.NewId()
ID_EDIT_GO_TO_MODULE = wx.NewId()
ID_FIND_USAGES = wx.NewId()
ID_OPTIONS_PREFERENCES = wx.ID_PREFERENCES
ID_CHECK_NEW_VERSION = wx.NewId()
ID_DEBUG_TOGGLE = wx.NewId()
ID_DEBUG_STEP = wx.NewId()
ID_DEBUG_NEXT_IMAGE_SET = wx.NewId()
ID_DEBUG_NEXT_GROUP = wx.NewId()
ID_DEBUG_CHOOSE_GROUP = wx.NewId()
ID_DEBUG_CHOOSE_IMAGE_SET = wx.NewId()
ID_DEBUG_CHOOSE_RANDOM_IMAGE_SET = wx.NewId()
ID_DEBUG_CHOOSE_RANDOM_IMAGE_GROUP = wx.NewId()
ID_DEBUG_RELOAD = wx.NewId()
ID_DEBUG_PDB = wx.NewId()
ID_DEBUG_RUN_FROM_THIS_MODULE = wx.NewId()
ID_DEBUG_STEP_FROM_THIS_MODULE = wx.NewId()
ID_DEBUG_HELP = wx.NewId()
ID_DEBUG_VIEW_WORKSPACE = wx.NewId()
# ~*~
ID_SAMPLE_INIT = wx.NewId()
# ~^~
ID_WINDOW = wx.NewId()
ID_WINDOW_CLOSE_ALL = wx.NewId()
ID_WINDOW_SHOW_ALL_WINDOWS = wx.NewId()
ID_WINDOW_HIDE_ALL_WINDOWS = wx.NewId()
ID_WINDOW_ALL = (
ID_WINDOW_CLOSE_ALL,
ID_WINDOW_SHOW_ALL_WINDOWS,
ID_WINDOW_HIDE_ALL_WINDOWS,
)
WINDOW_IDS = []
ID_HELP_MODULE = wx.NewId()
ID_HELP_SOURCE_CODE = wx.NewId()
class CPFrame(wx.Frame):
def __init__(self, *args, **kwds):
"""Initialize the frame and its layout
"""
kwds["style"] = wx.DEFAULT_FRAME_STYLE
self.__pipeline = Pipeline()
self.__workspace = Workspace(self.__pipeline, None, None, None, None, None)
super(CPFrame, self).__init__(*args, **kwds)
# background_color = cellprofiler_core.preferences.get_background_color()
self.__splitter = wx.SplitterWindow(self, -1, style=wx.SP_BORDER)
#
# Screen size metrics might be used below
#
screen_width = wx.SystemSettings.GetMetric(wx.SYS_SCREEN_X)
screen_height = wx.SystemSettings.GetMetric(wx.SYS_SCREEN_Y)
# Crappy splitters leave crud on the screen because they want custom
# background painting but fail to do it. Here, we have a fight with
# them and beat them.
self.__splitter.SetBackgroundStyle(0)
self.__right_win = wx.Panel(self.__splitter, style=wx.BORDER_NONE)
self.__right_win.SetAutoLayout(True)
self.__left_win = wx.Panel(self.__splitter, style=wx.BORDER_NONE)
# bottom left will be the file browser
self.__module_list_panel = wx.Panel(self.__left_win)
self.__module_list_panel.SetToolTip(
"The pipeline panel contains the modules in the pipeline. Click on the '+' button below or right-click in the panel to begin adding modules."
)
self.__pipeline_test_panel = wx.Panel(self.__left_win, -1)
self.__pipeline_test_panel.SetToolTip(
"The test mode panel is used for previewing the module settings prior to an analysis run. Click the buttons or use the 'Test' menu item to begin testing your module settings."
)
self.__module_controls_panel = wx.Panel(
self.__left_win, -1, style=wx.BORDER_NONE
)
self.__module_controls_panel.SetToolTip(
"The module controls add, remove, move and get help for modules. Click on the '+' button to begin adding modules."
)
#
# The right window has the following structure:
#
# right_win
# Notes window
# path_module_imageset_panel
# path_list_sash
# group_box
# path_list_ctrl
# path_list_filter_checkbox
# path_list_help_button
#
# module_panel
# image_set_list_sash
# image_set_list_ctrl
#
self.__right_win.SetSizer(wx.BoxSizer(wx.VERTICAL))
self.__notes_panel = wx.Panel(self.__right_win)
self.__right_win.GetSizer().Add(self.__notes_panel, 0, wx.EXPAND | wx.ALL)
self.__right_win.GetSizer().AddSpacer(4)
self.__path_module_imageset_panel = wx.Panel(self.__right_win)
self.__right_win.GetSizer().Add(
self.__path_module_imageset_panel, 1, wx.EXPAND | wx.ALL
)
self.__pmi_layout_in_progress = False
self.__path_module_imageset_panel.Bind(
wx.EVT_SIZE, self.__on_path_module_imageset_panel_size
)
########################################################################
#
# The path list control that holds all of the files being dealt with
# by the pipeline
#
########################################################################
#
# Path list sash controls path list sizing
#
self.__path_list_sash = wx.adv.SashLayoutWindow(
self.__path_module_imageset_panel, style=wx.NO_BORDER
)
self.__path_list_sash.Bind(wx.adv.EVT_SASH_DRAGGED, self.__on_sash_drag)
self.__path_list_sash.SetOrientation(wx.adv.LAYOUT_HORIZONTAL)
self.__path_list_sash.SetAlignment(wx.adv.LAYOUT_TOP)
self.__path_list_sash.SetDefaultSize((screen_width, screen_height / 4))
self.__path_list_sash.SetDefaultBorderSize(4)
self.__path_list_sash.SetSashVisible(wx.adv.SASH_BOTTOM, True)
self.__path_list_sash.SetAutoLayout(True)
self.__path_list_sash.Hide()
sizer = wx.BoxSizer(wx.VERTICAL)
self.__path_list_sash.SetSizer(wx.BoxSizer(wx.VERTICAL))
self.__path_list_sash.GetSizer().Add(sizer, 1, wx.EXPAND)
# Add spacer so that group box doesn't cover sash's handle
self.__path_list_sash.GetSizer().AddSpacer(6)
#
# Path list control
#
self.__path_list_ctrl = PathListCtrl(self.__path_list_sash)
self.__path_list_ctrl.SetBackgroundColour(wx.WHITE)
sizer.Add(self.__path_list_ctrl, 1, wx.EXPAND | wx.ALL)
#
# Path list tools horizontal sizer
#
sizer.AddSpacer(2)
hsizer = wx.BoxSizer(wx.HORIZONTAL)
sizer.Add(hsizer, 0, wx.EXPAND)
#
# Path list show/hide filtered files checkbox
#
hsizer.AddSpacer(5)
self.__path_list_filter_checkbox = wx.CheckBox(
self.__path_list_sash, label="Show files excluded by filters"
)
hsizer.Add(self.__path_list_filter_checkbox, 0, wx.EXPAND)
def show_disabled(event):
self.__path_list_ctrl.set_show_disabled(
self.__path_list_filter_checkbox.GetValue()
)
self.__path_list_filter_checkbox.Bind(wx.EVT_CHECKBOX, show_disabled)
hsizer.AddStretchSpacer()
#
# Help
#
hsizer.AddSpacer(5)
self.__path_list_help_button = wx.Button(
self.__path_list_sash, label="?", style=wx.BU_EXACTFIT
)
self.__path_list_help_button.Bind(wx.EVT_BUTTON, self.__on_help_path_list)
hsizer.Add(self.__path_list_help_button, 0, wx.EXPAND)
######################################################################
#
# Module view panel
#
######################################################################
self.__module_panel = wx.Panel(self.__path_module_imageset_panel)
######################################################################
#
# The imageset panel
#
######################################################################
self.__imageset_sash = wx.adv.SashLayoutWindow(
self.__path_module_imageset_panel, style=wx.NO_BORDER
)
self.__imageset_sash.SetOrientation(wx.adv.LAYOUT_HORIZONTAL)
self.__imageset_sash.SetAlignment(wx.adv.LAYOUT_BOTTOM)
self.__imageset_sash.SetDefaultSize((screen_width, screen_height / 4))
self.__imageset_sash.SetDefaultBorderSize(4)
self.__imageset_sash.SetExtraBorderSize(2)
self.__imageset_sash.SetSashVisible(wx.adv.SASH_TOP, True)
self.__imageset_sash.Bind(wx.adv.EVT_SASH_DRAGGED, self.__on_sash_drag)
self.__imageset_sash.Hide()
self.__imageset_panel = wx.Panel(self.__imageset_sash)
self.__imageset_panel.SetSizer(wx.BoxSizer())
self.__imageset_panel.SetAutoLayout(True)
self.__imageset_ctrl = ImageSetCtrl(
self.__workspace, self.__imageset_panel, read_only=True
)
self.__imageset_panel.GetSizer().Add(self.__imageset_ctrl, 1, wx.EXPAND)
self.__grid_ctrl = ModuleView.CornerButtonGrid(self.__imageset_panel)
self.__imageset_panel.GetSizer().Add(self.__grid_ctrl, 1, wx.EXPAND)
self.__right_win.GetSizer().AddSpacer(4)
#
# Preferences panel
#
self.__preferences_panel = wx.Panel(self.__right_win, -1)
self.__right_win.GetSizer().Add(self.__preferences_panel, 1, wx.EXPAND)
self.__preferences_panel.SetToolTip(
"The folder panel sets/creates the input and output folders and output filename. Once your pipeline is ready and your folders set, click 'Analyze Images' to begin the analysis run."
)
#
# Progress and status panels
#
self.__progress_panel = wx.Panel(self.__right_win)
self.__progress_panel.SetAutoLayout(True)
self.__right_win.GetSizer().Add(self.__progress_panel, 0, wx.EXPAND)
self.__status_panel = wx.Panel(self.__right_win)
self.__status_panel.SetAutoLayout(True)
self.__right_win.GetSizer().Add(self.__status_panel, 0, wx.EXPAND)
self.__add_menu()
self.__attach_views()
self.__set_properties()
self.__set_icon()
self.__do_layout()
self.startup_blurb_frame = WelcomeFrame(self)
self.__error_listeners = []
self.Bind(wx.EVT_CLOSE, self.OnClose)
self.SetAutoLayout(True)
if get_startup_blurb():
self.show_welcome_screen(True)
self.show_module_ui(True)
def start(self, workspace_path, pipeline_path):
"""Handle resource loading after the GUI has been constructed
workspace_path - one of the following: a pathname to the workspace
to load, False to ask the user for a new workspace
or None to leave the decision to the user's
preference.
pipeline_path - the pipeline to load after the workspace has been
loaded or None for the workspace's pipeline.
"""
self.__pipeline_controller.start(workspace_path, pipeline_path)
self.__module_view.start()
#
# Do a little placement after the UI has been constructed
#
# Put the welcome screen over the module settings.
#
r = self.__right_win.GetScreenRect()
self.startup_blurb_frame.SetRect(r)
def show_path_list_ctrl(self, show):
"""Show or hide the path list control
show - true to show, false to hide
"""
if bool(show) == bool(self.__path_list_sash.IsShown()):
return
self.__path_list_sash.Show(show)
self.layout_pmi_panel()
self.__path_list_sash.Layout()
def show_imageset_sash(self, show):
"""Show or hide the imageset control
show - true to show, false to hide
"""
if bool(show) == bool(self.__imageset_sash.IsShown()):
return
self.__imageset_sash.Show(show)
self.layout_pmi_panel()
self.__imageset_sash.Layout()
def show_imageset_ctrl(self):
sizer = self.__imageset_panel.GetSizer()
assert isinstance(sizer, wx.Sizer)
if (
sizer.IsShown(self.__imageset_ctrl) is False
or self.__imageset_sash.IsShown() is False
):
sizer.Show(self.__imageset_ctrl, True)
sizer.Show(self.__grid_ctrl, False)
self.show_imageset_sash(True)
self.__imageset_panel.Layout()
def show_grid_ctrl(self, table=None):
if table is not None:
self.__grid_ctrl.SetTable(table)
sizer = self.__imageset_panel.GetSizer()
if (
sizer.IsShown(self.__imageset_ctrl)
or self.__imageset_sash.IsShown() is False
):
sizer.Show(self.__imageset_ctrl, False)
sizer.Show(self.__grid_ctrl, True)
self.show_imageset_sash(True)
self.__imageset_sash.Layout()
self.__imageset_panel.Layout()
self.__grid_ctrl.Layout()
def get_grid_ctrl(self):
return self.__grid_ctrl
def reset_imageset_ctrl(self, refresh_image_set=True):
if refresh_image_set:
self.__workspace.refresh_image_set()
self.__imageset_ctrl.recompute()
def show_module_ui(self, show):
"""Show or hide the module and notes panel"""
if (
show == self.__path_module_imageset_panel.IsShownOnScreen()
and show == self.__notes_panel.IsShownOnScreen()
):
return
right_sizer = self.__right_win.GetSizer()
assert isinstance(right_sizer, wx.Sizer)
right_sizer.Show(self.__notes_panel, show)
right_sizer.Show(self.__path_module_imageset_panel, show)
self.__right_win.Layout()
if show:
self.show_preferences(False)
self.layout_pmi_panel()
self.__path_list_sash.Layout()
self.__module_panel.Layout()
self.__module_view.module_panel.SetupScrolling(
scroll_x=True, scroll_y=True, scrollToTop=False
)
self.__imageset_sash.Layout()
def show_welcome_screen(self, show):
"""Show or hide the welcome screen
show - If True, show the welcome screen and hide the preferences
and module UI, otherwise hide the welcome screen.
"""
self.startup_blurb_frame.Show(show)
if show:
self.startup_blurb_frame.Raise()
def show_preferences(self, show):
"""Show or hide the preferences panel
show - if True, show the preferences panel and hide the welcome
and module UI. If false, just hide the preferences.
"""
self.__preferences_panel.Show(show)
if show:
self.show_module_ui(False)
self.show_welcome_screen(False)
self.__preferences_panel.Layout()
self.__preferences_panel.GetParent().Layout()
def __on_sash_drag(self, event):
sash = event.GetEventObject()
width, _ = sash.GetSize()
sash.SetDefaultSize((width, event.GetDragRect().height))
self.layout_pmi_panel()
sash.Layout()
def __on_path_module_imageset_panel_size(self, event):
if not self.__pmi_layout_in_progress:
self.layout_pmi_panel()
if self.__path_list_sash.IsShown():
self.__path_list_sash.Layout()
if self.__imageset_sash.IsShown():
self.__imageset_sash.Layout()
def layout_pmi_panel(self):
"""Run the sash layout algorithm on the path/module/imageset panel"""
self.__pmi_layout_in_progress = True
try:
wx.adv.LayoutAlgorithm().LayoutWindow(
self.__path_module_imageset_panel, self.__module_panel
)
self.__right_win.Layout()
finally:
self.__pmi_layout_in_progress = False
def OnClose(self, event):
if event.CanVeto() and not self.pipeline_controller.check_close():
event.Veto()
return
try:
self.__workspace.measurements.flush()
except:
logging.warning(
"Failed to flush temporary measurements file during close",
exc_info=True,
)
try:
from bioformats.formatreader import clear_image_reader_cache
clear_image_reader_cache()
except:
logging.warning(
"Failed to clear bioformats reader cache during close", exc_info=True,
)
try:
self.__preferences_view.close()
except:
logging.warning("Failed during close", exc_info=True)
try:
self.pipeline_controller.on_close()
except:
logging.warning("Failed to close the pipeline controller", exc_info=True)
try:
stop_validation_queue_thread()
except:
logging.warning("Failed to stop pipeline validation thread", exc_info=True)
wx.GetApp().ExitMainLoop()
def __set_properties(self):
self.SetTitle("CellProfiler %s" % cellprofiler.__version__)
self.SetSize((1024, 600))
def enable_edit_commands(self, ids):
"""Enable the edit commands that are supported by the focused window
ids - a list of the IDs supported by the window that has the focus.
This should be called when a window receives an EVT_SET_FOCUS or
when its state has changed to the point where it needs to enable
different sets of commands.
Commands that can be passed through here:
wx.ID_COPY
wx.ID_CUT
wx.ID_PASTE
wx.ID_DELETE
wx.ID_SELECTALL
"""
d = dict(
[(x, False) for x in (wx.ID_COPY, wx.ID_CUT, wx.ID_PASTE, wx.ID_SELECTALL)]
)
for eyedee in ids:
d[eyedee] = True
for k, v in list(d.items()):
self.menu_edit.Enable(k, v)
def __add_menu(self):
"""Add the menu to the frame
"""
self.__menu_file = wx.Menu()
self.__menu_file.Append(
wx.ID_NEW, "New Project", helpString="Create an empty project"
)
self.__menu_file.Append(
wx.ID_OPEN,
"Open Project...\tctrl+O",
helpString="Open a project from a .{} project file".format(EXT_PROJECT),
)
self.recent_workspace_files = wx.Menu()
self.__menu_file.AppendSubMenu(self.recent_workspace_files, "Open Recent")
self.__menu_file.Append(
wx.ID_SAVE,
"Save Project\tctrl+S",
helpString="Save the project to the current project file",
)
self.__menu_file.Append(
wx.ID_SAVEAS,
"Save Project As...",
helpString="Save the project to a file of your choice",
)
self.__menu_file.Append(
ID_FILE_REVERT_TO_SAVED,
"Revert to Saved",
helpString="Reload the project file, discarding changes",
)
submenu = wx.Menu()
submenu.Append(
ID_FILE_LOAD_PIPELINE,
"Pipeline from File...",
"Import a pipeline into the project from a .%s file" % EXT_PIPELINE,
)
submenu.Append(
ID_FILE_URL_LOAD_PIPELINE,
"Pipeline from URL...",
"Load a pipeline from the web",
)
submenu.Append(
ID_FILE_IMPORT_FILE_LIST,
"File List...",
"Add files or URLs to the Images module file list",
)
self.__menu_file.AppendSubMenu(submenu, "Import")
submenu = wx.Menu()
submenu.Append(
ID_FILE_SAVE_PIPELINE,
"Pipeline...\tctrl+P",
"Save the project's pipeline to a .%s file" % EXT_PIPELINE,
)
submenu.Append(
ID_FILE_EXPORT_IMAGE_SETS,
"Image Set Listing...",
"Export the project's image sets as a CSV file suitable for LoadData",
)
submenu.Append(
ID_FILE_EXPORT_PIPELINE_NOTES,
"Pipeline notes...",
"Save a text file outlining the pipeline's modules and module notes",
)
self.__menu_file.AppendSubMenu(submenu, "Export")
self.__menu_file.Append(
ID_FILE_CLEAR_PIPELINE,
"Clear Pipeline",
"Remove all modules from the current pipeline",
)
self.__menu_file.AppendSeparator()
self.__menu_file.Append(
ID_FILE_OPEN_IMAGE, "View Image", "Open an image file for viewing"
)
self.__menu_file.AppendSeparator()
self.__menu_file.Append(
ID_FILE_ANALYZE_IMAGES,
"Analyze Images\tctrl+N",
"Run the pipeline on the images in the image directory",
)
self.__menu_file.Append(
ID_FILE_STOP_ANALYSIS, "Stop Analysis", "Stop running the pipeline"
)
self.__menu_file.AppendSeparator()
if sys.platform == "darwin":
self.__menu_file.Append(ID_FILE_NEW_CP, "Open A New CP Window")
self.__menu_file.AppendSeparator()
self.__menu_file.Append(
ID_OPTIONS_PREFERENCES,
"&Preferences...",
"Set global application preferences",
)
self.recent_files = wx.Menu()
self.recent_pipeline_files = wx.Menu()
self.__menu_file.Append(ID_FILE_EXIT, "Q&uit\tctrl+Q", "Quit the application")
self.menu_edit = wx.Menu()
self.menu_edit.Append(wx.ID_UNDO, helpString="Undo last action")
self.menu_edit.AppendSeparator()
self.menu_edit.Append(wx.ID_CUT)
self.menu_edit.Append(wx.ID_COPY)
self.menu_edit.Append(wx.ID_PASTE)
self.menu_edit.Append(wx.ID_SELECTALL)
self.menu_edit.AppendSeparator()
self.menu_edit.Append(
ID_EDIT_MOVE_UP,
"Move Selected Modules &Up",
"Move selected modules toward the start of the pipeline",
)
self.menu_edit.Append(
ID_EDIT_MOVE_DOWN,
"Move Selected Modules &Down",
"Move selected modules toward the end of the pipeline",
)
self.menu_edit.Append(
ID_EDIT_DELETE, "&Delete Selected Modules", "Delete selected modules"
)
self.menu_edit.Append(
ID_EDIT_DUPLICATE,
"Duplicate Selected Modules",
"Duplicate selected modules",
)
self.menu_edit.Append(
ID_EDIT_ENABLE_MODULE,
"Disable Selected Modules",
"Disable a module to skip it when running the pipeline",
)
self.menu_edit_add_module = wx.Menu()
self.menu_edit.AppendSubMenu(self.menu_edit_add_module, "&Add Module")
self.menu_edit_goto_module = wx.Menu()
self.menu_edit.AppendSubMenu(self.menu_edit_goto_module, "&Go to Module")
self.menu_edit.AppendSeparator()
self.menu_edit.Append(
ID_EDIT_SHOW_FILE_LIST_IMAGE,
"Show Selected Image",
"Display the first selected image in the file list",
)
self.menu_edit.Append(
ID_EDIT_REMOVE_FROM_FILE_LIST,
"Remove From File List",
"Remove the selected files from the file list",
)
self.menu_edit.Append(
ID_EDIT_BROWSE_FOR_FILES,
"Browse for Images",
"Select images to add to the file list using a file browser",
)
self.menu_edit.Append(
ID_EDIT_BROWSE_FOR_FOLDER,
"Browse for Image Folder",
"Select a folder of images to add to the file list using a file browser",
)
self.menu_edit.Append(
ID_EDIT_CLEAR_FILE_LIST,
"Clear File List",
"Remove all files from the file list",
)
self.menu_edit.Append(
ID_EDIT_EXPAND_ALL,
"Expand All Folders",
"Expand all folders in the file list and show all file names",
)
self.menu_edit.Append(
ID_EDIT_COLLAPSE_ALL,
"Collapse All Folders",
"Collapse all folders in the file list, hiding all file names",
)
self.__menu_debug = wx.Menu()
self.__menu_debug.Append(
ID_DEBUG_TOGGLE, "&Start Test Mode\tF5", "Start the pipeline debugger"
)
self.__menu_debug.Append(
ID_DEBUG_STEP,
"Ste&p to Next Module\tF6",
"Execute the currently selected module",
)
self.__menu_debug.Append(
ID_DEBUG_NEXT_IMAGE_SET,
"&Next Image Set\tF7",
"Advance to the next image set",
)
self.__menu_debug.Append(
ID_DEBUG_NEXT_GROUP,
"Next Image &Group\tF8",
"Advance to the next group in the image set list",
)
self.__menu_debug.Append(
ID_DEBUG_CHOOSE_RANDOM_IMAGE_SET,
"Random Image Set",
"Advance to a random image set",
)
self.__menu_debug.Append(
ID_DEBUG_CHOOSE_RANDOM_IMAGE_GROUP,
"Random Image Group",
"Advance to a random image group",
)
self.__menu_debug.Append(
ID_DEBUG_CHOOSE_IMAGE_SET,
"Choose Image Set",
"Choose any of the available image sets",
)
self.__menu_debug.Append(
ID_DEBUG_CHOOSE_GROUP,
"Choose Image Group",
"Choose which image set group to process in test-mode",
)
self.__menu_debug.Append(
ID_DEBUG_VIEW_WORKSPACE, "View workspace", "View the current workspace",
)
if not hasattr(sys, "frozen") or os.getenv("CELLPROFILER_DEBUG"):
self.__menu_debug.Append(ID_DEBUG_RELOAD, "Reload Modules' Source")
self.__menu_debug.Append(ID_DEBUG_PDB, "Break Into Debugger")
#
# Lee wants the wx debugger
#
if os.environ.get("USERNAME", "").lower() == "leek":
self.__menu_debug.Append(ID_FILE_WIDGET_INSPECTOR, "Widget inspector")
self.__menu_debug.Append(ID_DEBUG_HELP, "Pipeline Testing Help")
self.__menu_debug.Enable(ID_DEBUG_STEP, False)
self.__menu_debug.Enable(ID_DEBUG_NEXT_IMAGE_SET, False)
self.__menu_debug.Enable(ID_DEBUG_NEXT_GROUP, False)
self.__menu_debug.Enable(ID_DEBUG_CHOOSE_GROUP, False)
self.__menu_debug.Enable(ID_DEBUG_CHOOSE_IMAGE_SET, False)
self.__menu_debug.Enable(ID_DEBUG_CHOOSE_RANDOM_IMAGE_SET, False)
self.__menu_debug.Enable(ID_DEBUG_CHOOSE_RANDOM_IMAGE_GROUP, False)
self.__menu_window = wx.Menu()
self.__menu_window.Append(
ID_WINDOW_CLOSE_ALL,
"Close &All Open Windows\tctrl+L",
"Close all open module display windows",
)
self.__menu_window.Append(
ID_WINDOW_SHOW_ALL_WINDOWS,
"Show All Windows On Run",
"Show all module display windows for all modules during analysis",
)
self.__menu_window.Append(
ID_WINDOW_HIDE_ALL_WINDOWS,
"Hide All Windows On Run",
"Hide all module display windows for all modules during analysis",
)
self.__menu_window.AppendSeparator()
self.__menu_window.Append(
ID_FILE_PLATEVIEWER,
"Show Plate Viewer",
"Open the plate viewer to inspect the images in the current workspace",
)
if sys.platform == "win32":
self.__menu_window.AppendSeparator()
self.__menu_help = Menu(self)
self.__menu_bar = wx.MenuBar()
self.__menu_bar.Append(self.__menu_file, "&File")
self.__menu_bar.Append(self.menu_edit, "&Edit")
self.__menu_bar.Append(self.__menu_debug, "&Test")
if get_show_sampling():
self.__menu_sample = wx.Menu()
self.__menu_sample.Append(
ID_SAMPLE_INIT,
"Initialize Sampling",
"Initialize sampling up to current module",
)
self.__menu_bar.Append(self.__menu_sample, "&Sample")
self.__menu_bar.Append(self.__menu_window, "&Windows")
if wx.VERSION <= (2, 8, 10, 1, "") and wx.Platform == "__WXMAC__":
self.__menu_bar.Append(self.__menu_help, "CellProfiler Help")
else:
self.__menu_bar.Append(self.__menu_help, "&Help")
self.SetMenuBar(self.__menu_bar)
self.enable_edit_commands([])
self.Bind(wx.EVT_MENU, self.on_open_image, id=ID_FILE_OPEN_IMAGE)
self.Bind(wx.EVT_MENU, lambda event: self.Close(), id=ID_FILE_EXIT)
self.Bind(wx.EVT_MENU, self.__on_widget_inspector, id=ID_FILE_WIDGET_INSPECTOR)
self.Bind(wx.EVT_MENU, self.__on_new_cp, id=ID_FILE_NEW_CP)
self.Bind(wx.EVT_MENU, self.on_cut, id=wx.ID_CUT)
self.Bind(wx.EVT_UPDATE_UI, self.on_update_cut_ui, id=wx.ID_CUT)
self.Bind(wx.EVT_MENU, self.on_copy, id=wx.ID_COPY)
self.Bind(wx.EVT_UPDATE_UI, self.on_update_copy_ui, id=wx.ID_COPY)
self.Bind(wx.EVT_MENU, self.on_paste, id=wx.ID_PASTE)
self.Bind(wx.EVT_UPDATE_UI, self.on_update_paste_ui, id=wx.ID_PASTE)
self.Bind(wx.EVT_MENU, self.on_select_all, id=wx.ID_SELECTALL)
self.Bind(wx.EVT_UPDATE_UI, self.on_update_select_all_ui, id=wx.ID_SELECTALL)
# ID_HELP_MODULE is used in _both_ button contexts and menu contexts,
# so it needs event bindings for either type
self.Bind(wx.EVT_MENU, self.__on_help_module, id=ID_HELP_MODULE)
self.Bind(wx.EVT_BUTTON, self.__on_help_module, id=ID_HELP_MODULE)
self.Bind(wx.EVT_MENU, self.__on_preferences, id=ID_OPTIONS_PREFERENCES)
self.Bind(wx.EVT_MENU, self.__on_close_all, id=ID_WINDOW_CLOSE_ALL)
self.Bind(wx.EVT_MENU, self.__debug_pdb, id=ID_DEBUG_PDB)
self.Bind(wx.EVT_MENU, self.__on_debug_help, id=ID_DEBUG_HELP)
accelerator_table = wx.AcceleratorTable(
[
(wx.ACCEL_CMD, ord("N"), ID_FILE_ANALYZE_IMAGES),
(wx.ACCEL_CMD, ord("O"), ID_FILE_LOAD),
(wx.ACCEL_CMD, ord("P"), ID_FILE_SAVE_PIPELINE),
(wx.ACCEL_CMD | wx.ACCEL_SHIFT, ord("S"), ID_FILE_SAVE),
(wx.ACCEL_CMD, ord("L"), ID_WINDOW_CLOSE_ALL),
(wx.ACCEL_CMD, ord("Q"), ID_FILE_EXIT),
(wx.ACCEL_CMD, ord("W"), ID_FILE_EXIT),
(wx.ACCEL_CMD, ord("A"), wx.ID_SELECTALL),
(wx.ACCEL_CMD, ord("C"), wx.ID_COPY),
(wx.ACCEL_CMD, ord("V"), wx.ID_PASTE),
(wx.ACCEL_NORMAL, wx.WXK_F5, ID_DEBUG_TOGGLE),
(wx.ACCEL_NORMAL, wx.WXK_F6, ID_DEBUG_STEP),
(wx.ACCEL_NORMAL, wx.WXK_F7, ID_DEBUG_NEXT_IMAGE_SET),
(wx.ACCEL_NORMAL, wx.WXK_F8, ID_DEBUG_NEXT_GROUP),
(wx.ACCEL_CMD, ord("Z"), ID_EDIT_UNDO),
]
)
self.SetAcceleratorTable(accelerator_table)
self.enable_launch_commands()
#########################################################
#
# Handlers for ID_CUT / ID_COPY / ID_DELETE / ID_PASTE
#
# Adapted from a post reply by <NAME>:
# http://wxpython-users.1045709.n5.nabble.com/how-to-implement-copy-paste-with-accelerators-td3337472.html
#########################################################
@staticmethod
def on_cut(event):
"""Handle ID_CUT"""
focus = wx.Window.FindFocus()
if (
focus is not None
and hasattr(focus, "Cut")
and hasattr(focus, "CanCut")
and focus.CanCut()
):
focus.Cut()
@staticmethod
def on_update_cut_ui(event):
focus = wx.Window.FindFocus()
event.Enable(bool(focus and hasattr(focus, "CanCut") and focus.CanCut()))
@staticmethod
def on_copy(event):
"""Handle ID_COPY"""
focus = wx.Window.FindFocus()
if (
focus is not None
and hasattr(focus, "Copy")
and hasattr(focus, "CanCopy")
and focus.CanCopy()
):
focus.Copy()
@staticmethod
def on_update_copy_ui(event):
focus = wx.Window.FindFocus()
event.Enable(bool(focus and hasattr(focus, "CanCopy") and focus.CanCopy()))
@staticmethod
def on_paste(event):
"""Handle ID_PASTE"""
focus = wx.Window.FindFocus()
if (
focus is not None
and hasattr(focus, "Paste")
and hasattr(focus, "CanPaste")
and focus.CanPaste()
):
focus.Paste()
@staticmethod
def on_update_paste_ui(event):
focus = wx.Window.FindFocus()
event.Enable(bool(focus and hasattr(focus, "CanPaste") and focus.CanPaste()))
@staticmethod
def on_select_all(event):
focus = wx.Window.FindFocus()
if focus and hasattr(focus, "SelectAll"):
focus.SelectAll()
@staticmethod
def on_update_select_all_ui(event):
focus = wx.Window.FindFocus()
if hasattr(focus, "CanSelect") and not focus.CanSelect():
event.Enable(False)
return
event.Enable(bool(focus and hasattr(focus, "SelectAll")))
debug_commands = (
ID_DEBUG_STEP,
ID_DEBUG_NEXT_IMAGE_SET,
ID_DEBUG_NEXT_GROUP,
ID_DEBUG_CHOOSE_GROUP,
ID_DEBUG_CHOOSE_IMAGE_SET,
ID_DEBUG_CHOOSE_RANDOM_IMAGE_SET,
ID_DEBUG_CHOOSE_RANDOM_IMAGE_GROUP,
ID_DEBUG_VIEW_WORKSPACE,
)
def enable_debug_commands(self):
"""Enable or disable the debug commands (like ID_DEBUG_STEP)"""
startstop = self.__menu_debug.FindItemById(ID_DEBUG_TOGGLE)
self.__menu_file.Enable(ID_FILE_ANALYZE_IMAGES, False)
assert isinstance(startstop, wx.MenuItem)
startstop.SetItemLabel("&Exit Test Mode\tF5")
startstop.SetHelp("Stop testing your pipeline")
for cmd in self.debug_commands:
self.__menu_debug.Enable(cmd, True)
def enable_launch_commands(self):
"""Enable commands to start analysis or test mode"""
startstop = self.__menu_debug.FindItemById(ID_DEBUG_TOGGLE)
startstop.SetItemLabel("&Start Test Mode\tF5")
startstop.SetHelp("Start testing your pipeline")
for cmd in self.debug_commands:
self.__menu_debug.Enable(cmd, False)
self.__menu_file.Enable(ID_FILE_ANALYZE_IMAGES, True)
self.__menu_debug.Enable(ID_DEBUG_TOGGLE, True)
self.__menu_file.Enable(ID_FILE_STOP_ANALYSIS, False)
def enable_analysis_commands(self):
"""Enable commands to pause or stop analysis"""
self.__menu_file.Enable(ID_FILE_ANALYZE_IMAGES, False)
self.__menu_debug.Enable(ID_DEBUG_TOGGLE, False)
self.__menu_file.Enable(ID_FILE_STOP_ANALYSIS, True)
@staticmethod
def __on_widget_inspector(evt):
try:
wx.lib.inspection.InspectionTool().Show()
except:
wx.MessageBox("Inspection tool is not available on this platform")
@staticmethod
def __on_preferences(event):
dlg = PreferencesDialog()
dlg.Show()
def __on_close_all(self, event):
close_all(self)
@staticmethod
def __on_new_cp(event):
if hasattr(sys, "frozen"):
os.system(
"open -na /Applications/CellProfiler-{}.app".format(
cellprofiler.__version__
)
)
else:
os.system("python3 -m cellprofiler")
def __on_help_path_list(self, event):
import cellprofiler.gui.htmldialog
dlg = cellprofiler.gui.htmldialog.HTMLDialog(
self, "Help on file list", rst_to_html_fragment(HELP_ON_FILE_LIST),
)
dlg.Show()
def __on_debug_help(self, event):
import cellprofiler.gui.htmldialog
contents = read_content("navigation_test_menu.rst")
help_dialog = cellprofiler.gui.htmldialog.HTMLDialog(
self, "Test Mode Help", rst_to_html_fragment(contents),
)
help_dialog.Show()
def __on_help_welcome(self, event):
self.show_welcome_screen(True)
def __on_help_module(self, event):
modules = self.__pipeline_list_view.get_selected_modules()
active_module = self.__pipeline_list_view.get_active_module()
if len(modules) > 0:
self.do_help_modules(modules)
elif active_module is not None:
self.do_help_module(active_module.module_name, active_module.get_help())
else:
wx.MessageBox(
HELP_ON_MODULE_BUT_NONE_SELECTED,
"No module selected",
style=wx.OK | wx.ICON_INFORMATION,
)
@staticmethod
def __debug_pdb(event):
pdb.set_trace()
def do_help_modules(self, modules):
for module in modules:
# An attempt to place images inline with the help. However, the
# images will not scale properly in size (yet)
# result = module.get_help()
# root = os.path.split(__file__)[0]
# if len(root) == 0:
# root = os.curdir
# root = os.path.split(os.path.abspath(root))[0] # Back up one level
# absolute_image_path = os.path.join(root, 'icons','%s.png'%(module.module_name,))
# Check if the file that goes with this module exists on this computer
# if os.path.exists(absolute_image_path) and os.path.isfile(absolute_image_path):
# If so, strip out end html tags so I can add more stuff
# result = result.replace('</body>','').replace('</html>','')
# Include images specific to the module
# result += '\n\n<div><p><img src="%s", width="50%%"></p></div>\n'%absolute_image_path
# Now end the help text
# result += '</body></html>'
# self.do_help_module(module.module_name, result)
self.do_help_module(module.module_name, module.get_help())
def do_help_module(self, module_name, help_text):
helpframe = wx.Frame(
self, -1, 'Help for module, "%s"' % module_name, size=(640, 480)
)
helpframe.SetMenuBar(wx.MenuBar())
####################################################
#
# Add the HTML window
#
####################################################
sizer = wx.BoxSizer()
helpframe.SetSizer(sizer)
window = HtmlClickableWindow(helpframe)
sizer.Add(window, 1, wx.EXPAND)
window.AppendToPage(help_text)
################################################
#
# Add a file menu for the frame
#
################################################
menu = wx.Menu()
menu.Append(ID_FILE_SAVE_PIPELINE, "&Save...")
menu.Append(ID_FILE_PRINT, "&Print...")
menu.Append(ID_FILE_EXIT, "E&xit")
def on_save(event):
self.save_help(event, module_name, help_text)
def on_print(event):
self.print_help(event, module_name, help_text)
def on_exit(event):
helpframe.Close()
helpframe.GetMenuBar().Append(menu, "&File")
helpframe.Bind(wx.EVT_MENU, on_save, id=ID_FILE_SAVE_PIPELINE)
helpframe.Bind(wx.EVT_MENU, on_print, id=ID_FILE_PRINT)
helpframe.Bind(wx.EVT_MENU, on_exit, id=ID_FILE_EXIT)
####################################################
#
# Add an edit menu
#
####################################################
menu = wx.Menu()
copy_menu_item = menu.Append(ID_EDIT_COPY, "Copy")
copy_menu_item.Enable(False)
menu.Append(ID_EDIT_SELECT_ALL, "Select All")
def on_idle(event):
copy_menu_item.Enable(len(window.SelectionToText()) > 0)
def on_edit_select_all(event):
window.SelectAll()
def on_copy(event):
data_object = wx.TextDataObject(window.SelectionToText())
if wx.TheClipboard.Open():
try:
wx.TheClipboard.SetData(data_object)
wx.TheClipboard.Flush()
finally:
wx.TheClipboard.Close()
else:
wx.MessageBox(
"Failed to copy to the clipboard", "Error", wx.OK | wx.ICON_ERROR
)
helpframe.GetMenuBar().Append(menu, "&Edit")
helpframe.Bind(wx.EVT_MENU, on_copy, id=ID_EDIT_COPY)
helpframe.Bind(wx.EVT_MENU, on_edit_select_all, id=ID_EDIT_SELECT_ALL)
helpframe.Bind(wx.EVT_IDLE, on_idle)
####################################################
#
# Build an accelerator table for some of the commands
#
####################################################
accelerator_table = wx.AcceleratorTable(
[
(wx.ACCEL_CMD, ord("Q"), ID_FILE_EXIT),
(wx.ACCEL_CMD, ord("P"), ID_FILE_PRINT),
(wx.ACCEL_CMD, ord("C"), ID_EDIT_COPY),
]
)
helpframe.SetAcceleratorTable(accelerator_table)
helpframe.SetIcon(cellprofiler.gui.utilities.icon.get_cp_icon())
helpframe.Layout()
helpframe.Show()
@staticmethod
def print_help(event, module_name, help_text):
"""Print the help text for a module"""
printer = wx.html.HtmlEasyPrinting("Printing %s" % module_name)
printer.GetPrintData().SetPaperId(wx.PAPER_LETTER)
printer.PrintText(help_text)
@staticmethod
def save_help(event, module_name, help_text):
"""Save the help text for a module"""
save_dlg = wx.FileDialog(
event.GetEventObject().GetWindow(),
message="Save help for %s to file" % module_name,
defaultFile="%s.html" % module_name,
wildcard="*.html",
style=wx.FD_SAVE | wx.FD_OVERWRITE_PROMPT,
)
result = save_dlg.ShowModal()
if result == wx.ID_OK:
with codecs.open(save_dlg.GetPath(), "w", encoding="utf-8") as fd:
fd.write(
'<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />'
)
fd.write(help_text)
def on_open_image(self, event):
dlg = wx.FileDialog(
self,
message="Open an image file",
wildcard="Image file (*.tif,*.tiff,*.jpg,*.jpeg,*.png,*.gif,*.bmp)|*.tif;*.tiff;*.jpg;*.jpeg;*.png;*.gif;*.bmp|*.* (all files)|*.*",
style=wx.FD_OPEN,
)
if dlg.ShowModal() == wx.ID_OK:
from cellprofiler_core.image import FileImage
from .figure import Figure
lip = FileImage("dummy", "", dlg.GetPath())
image = lip.provide_image(None).pixel_data
frame = Figure(self, title=dlg.GetPath(), subplots=(1, 1))
if image.ndim == 3:
frame.subplot_imshow_color(0, 0, image, title=dlg.GetPath())
else:
frame.subplot_imshow_grayscale(0, 0, image, title=dlg.GetPath())
frame.panel.draw()
def __attach_views(self):
self.__pipeline_list_view = PipelineListView(self.__module_list_panel, self)
self.__pipeline_controller = PipelineController(self.__workspace, self)
self.__pipeline_list_view.attach_to_pipeline(
self.__pipeline, self.__pipeline_controller
)
self.__pipeline_controller.attach_to_test_controls_panel(
self.__pipeline_test_panel
)
self.__pipeline_controller.attach_to_module_controls_panel(
self.__module_controls_panel
)
self.__pipeline_controller.attach_to_path_list_ctrl(
self.__path_list_ctrl, self.__path_list_filter_checkbox
)
self.__module_view = ModuleView(
self.__module_panel,
self.__workspace,
frame=self,
notes_panel=self.__notes_panel,
)
self.__pipeline_controller.attach_to_module_view(self.__module_view)
self.__pipeline_list_view.attach_to_module_view(self.__module_view)
self.__preferences_view = PreferencesView(
self.__right_win.GetSizer(),
self.__preferences_panel,
self.__progress_panel,
self.__status_panel,
)
self.__preferences_view.attach_to_pipeline_list_view(self.__pipeline_list_view)
def __do_layout(self):
width = wx.SystemSettings.GetMetric(wx.SYS_SCREEN_X)
height = wx.SystemSettings.GetMetric(wx.SYS_SCREEN_Y)
self.SetSize((int(width * 2 / 3), int(height * 2 / 3)))
splitter = self.__splitter
right_win = self.__right_win
top_left_win = self.__left_win
self.__splitter.SetMinimumPaneSize(120)
self.__splitter.SplitVertically(self.__left_win, self.__right_win, 300)
self.__splitter.BorderSize = 0
self.__splitter.SetMinimumPaneSize(5)
top_left_sizer = wx.BoxSizer(wx.VERTICAL)
top_left_sizer.Add(self.__module_list_panel, 1, wx.EXPAND | wx.ALL, 1)
top_left_sizer.Add(self.__module_controls_panel, 0, wx.EXPAND | wx.ALL, 2)
top_left_sizer.Add(self.__pipeline_test_panel, 0, wx.EXPAND | wx.ALL, 2)
top_left_win.SetSizer(top_left_sizer)
border = wx.BoxSizer()
border.Add(splitter, 1, wx.EXPAND | wx.ALL, 1)
self.SetSizer(border)
self.Layout()
right_win.Layout()
top_left_win.Layout()
def __set_icon(self):
self.SetIcon(cellprofiler.gui.utilities.icon.get_cp_icon())
def __on_data_tool_help(self, event, tool_name):
module = instantiate_module(tool_name)
self.do_help_module(tool_name, module.get_help())
def add_error_listener(self, listener):
"""Add a listener for display errors"""
self.__error_listeners.append(listener)
def remove_error_listener(self, listener):
"""Remove a listener for display errors"""
self.__error_listeners.remove(listener)
def get_preferences_view(self):
return self.__preferences_view
preferences_view = property(get_preferences_view)
def get_pipeline_controller(self):
"""Get the pipeline controller to drive testing"""
return self.__pipeline_controller
pipeline_controller = property(get_pipeline_controller)
def get_pipeline(self):
"""Get the pipeline - mostly to drive testing"""
return self.__pipeline
pipeline = property(get_pipeline)
def get_module_view(self):
"""Return the module view window"""
return self.__module_view
module_view = property(get_module_view)
def get_pipeline_list_view(self):
return self.__pipeline_list_view
pipeline_list_view = property(get_pipeline_list_view)
| [
"wx.html.HtmlEasyPrinting",
"bioformats.formatreader.clear_image_reader_cache",
"wx.TheClipboard.Close",
"wx.GetApp",
"wx.MessageBox",
"cellprofiler_core.preferences.get_show_sampling",
"wx.adv.SashLayoutWindow",
"wx.NewId",
"wx.SystemSettings.GetMetric",
"wx.Window.FindFocus",
"wx.CheckBox",
"cellprofiler.gui.utilities.icon.get_cp_icon",
"cellprofiler_core.preferences.get_startup_blurb",
"wx.TheClipboard.SetData",
"wx.Panel",
"wx.Frame",
"logging.warning",
"cellprofiler_core.utilities.core.modules.instantiate_module",
"wx.SplitterWindow",
"wx.adv.LayoutAlgorithm",
"wx.lib.inspection.InspectionTool",
"wx.TheClipboard.Flush",
"wx.Button",
"wx.MenuBar",
"os.getenv",
"wx.BoxSizer",
"os.environ.get",
"wx.FileDialog",
"pdb.set_trace",
"wx.Menu",
"os.system",
"wx.TheClipboard.Open"
] | [((2636, 2646), 'wx.NewId', 'wx.NewId', ([], {}), '()\n', (2644, 2646), False, 'import wx\n'), ((2675, 2685), 'wx.NewId', 'wx.NewId', ([], {}), '()\n', (2683, 2685), False, 'import wx\n'), ((2707, 2717), 'wx.NewId', 'wx.NewId', ([], {}), '()\n', (2715, 2717), False, 'import wx\n'), ((2733, 2743), 'wx.NewId', 'wx.NewId', ([], {}), '()\n', (2741, 2743), False, 'import wx\n'), ((2771, 2781), 'wx.NewId', 'wx.NewId', ([], {}), '()\n', (2779, 2781), False, 'import wx\n'), ((2806, 2816), 'wx.NewId', 'wx.NewId', ([], {}), '()\n', (2814, 2816), False, 'import wx\n'), ((2900, 2910), 'wx.NewId', 'wx.NewId', ([], {}), '()\n', (2908, 2910), False, 'import wx\n'), ((2936, 2946), 'wx.NewId', 'wx.NewId', ([], {}), '()\n', (2944, 2946), False, 'import wx\n'), ((2975, 2985), 'wx.NewId', 'wx.NewId', ([], {}), '()\n', (2983, 2985), False, 'import wx\n'), ((3018, 3028), 'wx.NewId', 'wx.NewId', ([], {}), '()\n', (3026, 3028), False, 'import wx\n'), ((3056, 3066), 'wx.NewId', 'wx.NewId', ([], {}), '()\n', (3064, 3066), False, 'import wx\n'), ((3092, 3102), 'wx.NewId', 'wx.NewId', ([], {}), '()\n', (3100, 3102), False, 'import wx\n'), ((3127, 3137), 'wx.NewId', 'wx.NewId', ([], {}), '()\n', (3135, 3137), False, 'import wx\n'), ((3154, 3164), 'wx.NewId', 'wx.NewId', ([], {}), '()\n', (3162, 3164), False, 'import wx\n'), ((3187, 3197), 'wx.NewId', 'wx.NewId', ([], {}), '()\n', (3195, 3197), False, 'import wx\n'), ((3215, 3225), 'wx.NewId', 'wx.NewId', ([], {}), '()\n', (3223, 3225), False, 'import wx\n'), ((3248, 3258), 'wx.NewId', 'wx.NewId', ([], {}), '()\n', (3256, 3258), False, 'import wx\n'), ((3274, 3284), 'wx.NewId', 'wx.NewId', ([], {}), '()\n', (3282, 3284), False, 'import wx\n'), ((3305, 3315), 'wx.NewId', 'wx.NewId', ([], {}), '()\n', (3313, 3315), False, 'import wx\n'), ((3360, 3370), 'wx.NewId', 'wx.NewId', ([], {}), '()\n', (3368, 3370), False, 'import wx\n'), ((3391, 3401), 'wx.NewId', 'wx.NewId', ([], {}), '()\n', (3399, 3401), False, 'import wx\n'), ((3419, 3429), 'wx.NewId', 'wx.NewId', ([], {}), '()\n', (3427, 3429), False, 'import wx\n'), ((3452, 3462), 'wx.NewId', 'wx.NewId', ([], {}), '()\n', (3460, 3462), False, 'import wx\n'), ((3486, 3496), 'wx.NewId', 'wx.NewId', ([], {}), '()\n', (3494, 3496), False, 'import wx\n'), ((3524, 3534), 'wx.NewId', 'wx.NewId', ([], {}), '()\n', (3532, 3534), False, 'import wx\n'), ((3563, 3573), 'wx.NewId', 'wx.NewId', ([], {}), '()\n', (3571, 3573), False, 'import wx\n'), ((3600, 3610), 'wx.NewId', 'wx.NewId', ([], {}), '()\n', (3608, 3610), False, 'import wx\n'), ((3643, 3653), 'wx.NewId', 'wx.NewId', ([], {}), '()\n', (3651, 3653), False, 'import wx\n'), ((3685, 3695), 'wx.NewId', 'wx.NewId', ([], {}), '()\n', (3693, 3695), False, 'import wx\n'), ((3720, 3730), 'wx.NewId', 'wx.NewId', ([], {}), '()\n', (3728, 3730), False, 'import wx\n'), ((3754, 3764), 'wx.NewId', 'wx.NewId', ([], {}), '()\n', (3762, 3764), False, 'import wx\n'), ((3782, 3792), 'wx.NewId', 'wx.NewId', ([], {}), '()\n', (3790, 3792), False, 'import wx\n'), ((3860, 3870), 'wx.NewId', 'wx.NewId', ([], {}), '()\n', (3868, 3870), False, 'import wx\n'), ((3890, 3900), 'wx.NewId', 'wx.NewId', ([], {}), '()\n', (3898, 3900), False, 'import wx\n'), ((3917, 3927), 'wx.NewId', 'wx.NewId', ([], {}), '()\n', (3925, 3927), False, 'import wx\n'), ((3954, 3964), 'wx.NewId', 'wx.NewId', ([], {}), '()\n', (3962, 3964), False, 'import wx\n'), ((3987, 3997), 'wx.NewId', 'wx.NewId', ([], {}), '()\n', (3995, 3997), False, 'import wx\n'), ((4022, 4032), 'wx.NewId', 'wx.NewId', ([], {}), '()\n', (4030, 4032), 
False, 'import wx\n'), ((4061, 4071), 'wx.NewId', 'wx.NewId', ([], {}), '()\n', (4069, 4071), False, 'import wx\n'), ((4107, 4117), 'wx.NewId', 'wx.NewId', ([], {}), '()\n', (4115, 4117), False, 'import wx\n'), ((4155, 4165), 'wx.NewId', 'wx.NewId', ([], {}), '()\n', (4163, 4165), False, 'import wx\n'), ((4184, 4194), 'wx.NewId', 'wx.NewId', ([], {}), '()\n', (4192, 4194), False, 'import wx\n'), ((4210, 4220), 'wx.NewId', 'wx.NewId', ([], {}), '()\n', (4218, 4220), False, 'import wx\n'), ((4253, 4263), 'wx.NewId', 'wx.NewId', ([], {}), '()\n', (4261, 4263), False, 'import wx\n'), ((4297, 4307), 'wx.NewId', 'wx.NewId', ([], {}), '()\n', (4305, 4307), False, 'import wx\n'), ((4324, 4334), 'wx.NewId', 'wx.NewId', ([], {}), '()\n', (4332, 4334), False, 'import wx\n'), ((4361, 4371), 'wx.NewId', 'wx.NewId', ([], {}), '()\n', (4369, 4371), False, 'import wx\n'), ((4396, 4406), 'wx.NewId', 'wx.NewId', ([], {}), '()\n', (4404, 4406), False, 'import wx\n'), ((4426, 4436), 'wx.NewId', 'wx.NewId', ([], {}), '()\n', (4434, 4436), False, 'import wx\n'), ((4459, 4469), 'wx.NewId', 'wx.NewId', ([], {}), '()\n', (4467, 4469), False, 'import wx\n'), ((4499, 4509), 'wx.NewId', 'wx.NewId', ([], {}), '()\n', (4507, 4509), False, 'import wx\n'), ((4539, 4549), 'wx.NewId', 'wx.NewId', ([], {}), '()\n', (4547, 4549), False, 'import wx\n'), ((4694, 4704), 'wx.NewId', 'wx.NewId', ([], {}), '()\n', (4702, 4704), False, 'import wx\n'), ((4727, 4737), 'wx.NewId', 'wx.NewId', ([], {}), '()\n', (4735, 4737), False, 'import wx\n'), ((5196, 5243), 'wx.SplitterWindow', 'wx.SplitterWindow', (['self', '(-1)'], {'style': 'wx.SP_BORDER'}), '(self, -1, style=wx.SP_BORDER)\n', (5213, 5243), False, 'import wx\n'), ((5337, 5381), 'wx.SystemSettings.GetMetric', 'wx.SystemSettings.GetMetric', (['wx.SYS_SCREEN_X'], {}), '(wx.SYS_SCREEN_X)\n', (5364, 5381), False, 'import wx\n'), ((5406, 5450), 'wx.SystemSettings.GetMetric', 'wx.SystemSettings.GetMetric', (['wx.SYS_SCREEN_Y'], {}), '(wx.SYS_SCREEN_Y)\n', (5433, 5450), False, 'import wx\n'), ((5709, 5756), 'wx.Panel', 'wx.Panel', (['self.__splitter'], {'style': 'wx.BORDER_NONE'}), '(self.__splitter, style=wx.BORDER_NONE)\n', (5717, 5756), False, 'import wx\n'), ((5829, 5876), 'wx.Panel', 'wx.Panel', (['self.__splitter'], {'style': 'wx.BORDER_NONE'}), '(self.__splitter, style=wx.BORDER_NONE)\n', (5837, 5876), False, 'import wx\n'), ((5960, 5985), 'wx.Panel', 'wx.Panel', (['self.__left_win'], {}), '(self.__left_win)\n', (5968, 5985), False, 'import wx\n'), ((6232, 6261), 'wx.Panel', 'wx.Panel', (['self.__left_win', '(-1)'], {}), '(self.__left_win, -1)\n', (6240, 6261), False, 'import wx\n'), ((6546, 6597), 'wx.Panel', 'wx.Panel', (['self.__left_win', '(-1)'], {'style': 'wx.BORDER_NONE'}), '(self.__left_win, -1, style=wx.BORDER_NONE)\n', (6554, 6597), False, 'import wx\n'), ((7375, 7401), 'wx.Panel', 'wx.Panel', (['self.__right_win'], {}), '(self.__right_win)\n', (7383, 7401), False, 'import wx\n'), ((7578, 7604), 'wx.Panel', 'wx.Panel', (['self.__right_win'], {}), '(self.__right_win)\n', (7586, 7604), False, 'import wx\n'), ((8286, 8364), 'wx.adv.SashLayoutWindow', 'wx.adv.SashLayoutWindow', (['self.__path_module_imageset_panel'], {'style': 'wx.NO_BORDER'}), '(self.__path_module_imageset_panel, style=wx.NO_BORDER)\n', (8309, 8364), False, 'import wx\n'), ((8909, 8933), 'wx.BoxSizer', 'wx.BoxSizer', (['wx.VERTICAL'], {}), '(wx.VERTICAL)\n', (8920, 8933), False, 'import wx\n'), ((9533, 9559), 'wx.BoxSizer', 'wx.BoxSizer', (['wx.HORIZONTAL'], {}), '(wx.HORIZONTAL)\n', (9544, 9559), False, 
'import wx\n'), ((9745, 9819), 'wx.CheckBox', 'wx.CheckBox', (['self.__path_list_sash'], {'label': '"""Show files excluded by filters"""'}), "(self.__path_list_sash, label='Show files excluded by filters')\n", (9756, 9819), False, 'import wx\n'), ((10286, 10351), 'wx.Button', 'wx.Button', (['self.__path_list_sash'], {'label': '"""?"""', 'style': 'wx.BU_EXACTFIT'}), "(self.__path_list_sash, label='?', style=wx.BU_EXACTFIT)\n", (10295, 10351), False, 'import wx\n'), ((10758, 10801), 'wx.Panel', 'wx.Panel', (['self.__path_module_imageset_panel'], {}), '(self.__path_module_imageset_panel)\n', (10766, 10801), False, 'import wx\n'), ((11042, 11120), 'wx.adv.SashLayoutWindow', 'wx.adv.SashLayoutWindow', (['self.__path_module_imageset_panel'], {'style': 'wx.NO_BORDER'}), '(self.__path_module_imageset_panel, style=wx.NO_BORDER)\n', (11065, 11120), False, 'import wx\n'), ((11675, 11705), 'wx.Panel', 'wx.Panel', (['self.__imageset_sash'], {}), '(self.__imageset_sash)\n', (11683, 11705), False, 'import wx\n'), ((12304, 12334), 'wx.Panel', 'wx.Panel', (['self.__right_win', '(-1)'], {}), '(self.__right_win, -1)\n', (12312, 12334), False, 'import wx\n'), ((12754, 12780), 'wx.Panel', 'wx.Panel', (['self.__right_win'], {}), '(self.__right_win)\n', (12762, 12780), False, 'import wx\n'), ((12938, 12964), 'wx.Panel', 'wx.Panel', (['self.__right_win'], {}), '(self.__right_win)\n', (12946, 12964), False, 'import wx\n'), ((13409, 13428), 'cellprofiler_core.preferences.get_startup_blurb', 'get_startup_blurb', ([], {}), '()\n', (13426, 13428), False, 'from cellprofiler_core.preferences import get_startup_blurb\n'), ((21152, 21161), 'wx.Menu', 'wx.Menu', ([], {}), '()\n', (21159, 21161), False, 'import wx\n'), ((21509, 21518), 'wx.Menu', 'wx.Menu', ([], {}), '()\n', (21516, 21518), False, 'import wx\n'), ((22146, 22155), 'wx.Menu', 'wx.Menu', ([], {}), '()\n', (22153, 22155), False, 'import wx\n'), ((22737, 22746), 'wx.Menu', 'wx.Menu', ([], {}), '()\n', (22744, 22746), False, 'import wx\n'), ((24443, 24452), 'wx.Menu', 'wx.Menu', ([], {}), '()\n', (24450, 24452), False, 'import wx\n'), ((24490, 24499), 'wx.Menu', 'wx.Menu', ([], {}), '()\n', (24497, 24499), False, 'import wx\n'), ((24613, 24622), 'wx.Menu', 'wx.Menu', ([], {}), '()\n', (24620, 24622), False, 'import wx\n'), ((25817, 25826), 'wx.Menu', 'wx.Menu', ([], {}), '()\n', (25824, 25826), False, 'import wx\n'), ((25943, 25952), 'wx.Menu', 'wx.Menu', ([], {}), '()\n', (25950, 25952), False, 'import wx\n'), ((27390, 27399), 'wx.Menu', 'wx.Menu', ([], {}), '()\n', (27397, 27399), False, 'import wx\n'), ((29863, 29872), 'wx.Menu', 'wx.Menu', ([], {}), '()\n', (29870, 29872), False, 'import wx\n'), ((30850, 30862), 'wx.MenuBar', 'wx.MenuBar', ([], {}), '()\n', (30860, 30862), False, 'import wx\n'), ((31047, 31066), 'cellprofiler_core.preferences.get_show_sampling', 'get_show_sampling', ([], {}), '()\n', (31064, 31066), False, 'from cellprofiler_core.preferences import get_show_sampling\n'), ((34710, 34731), 'wx.Window.FindFocus', 'wx.Window.FindFocus', ([], {}), '()\n', (34729, 34731), False, 'import wx\n'), ((34988, 35009), 'wx.Window.FindFocus', 'wx.Window.FindFocus', ([], {}), '()\n', (35007, 35009), False, 'import wx\n'), ((35180, 35201), 'wx.Window.FindFocus', 'wx.Window.FindFocus', ([], {}), '()\n', (35199, 35201), False, 'import wx\n'), ((35463, 35484), 'wx.Window.FindFocus', 'wx.Window.FindFocus', ([], {}), '()\n', (35482, 35484), False, 'import wx\n'), ((35659, 35680), 'wx.Window.FindFocus', 'wx.Window.FindFocus', ([], {}), '()\n', (35678, 35680), 
False, 'import wx\n'), ((35947, 35968), 'wx.Window.FindFocus', 'wx.Window.FindFocus', ([], {}), '()\n', (35966, 35968), False, 'import wx\n'), ((36120, 36141), 'wx.Window.FindFocus', 'wx.Window.FindFocus', ([], {}), '()\n', (36139, 36141), False, 'import wx\n'), ((36297, 36318), 'wx.Window.FindFocus', 'wx.Window.FindFocus', ([], {}), '()\n', (36316, 36318), False, 'import wx\n'), ((40081, 40096), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (40094, 40096), False, 'import pdb\n'), ((41389, 41463), 'wx.Frame', 'wx.Frame', (['self', '(-1)', '(\'Help for module, "%s"\' % module_name)'], {'size': '(640, 480)'}), '(self, -1, \'Help for module, "%s"\' % module_name, size=(640, 480))\n', (41397, 41463), False, 'import wx\n'), ((41718, 41731), 'wx.BoxSizer', 'wx.BoxSizer', ([], {}), '()\n', (41729, 41731), False, 'import wx\n'), ((42083, 42092), 'wx.Menu', 'wx.Menu', ([], {}), '()\n', (42090, 42092), False, 'import wx\n'), ((42910, 42919), 'wx.Menu', 'wx.Menu', ([], {}), '()\n', (42917, 42919), False, 'import wx\n'), ((44739, 44792), 'wx.html.HtmlEasyPrinting', 'wx.html.HtmlEasyPrinting', (["('Printing %s' % module_name)"], {}), "('Printing %s' % module_name)\n", (44763, 44792), False, 'import wx\n'), ((45666, 45876), 'wx.FileDialog', 'wx.FileDialog', (['self'], {'message': '"""Open an image file"""', 'wildcard': '"""Image file (*.tif,*.tiff,*.jpg,*.jpeg,*.png,*.gif,*.bmp)|*.tif;*.tiff;*.jpg;*.jpeg;*.png;*.gif;*.bmp|*.* (all files)|*.*"""', 'style': 'wx.FD_OPEN'}), "(self, message='Open an image file', wildcard=\n 'Image file (*.tif,*.tiff,*.jpg,*.jpeg,*.png,*.gif,*.bmp)|*.tif;*.tiff;*.jpg;*.jpeg;*.png;*.gif;*.bmp|*.* (all files)|*.*'\n , style=wx.FD_OPEN)\n", (45679, 45876), False, 'import wx\n'), ((47849, 47893), 'wx.SystemSettings.GetMetric', 'wx.SystemSettings.GetMetric', (['wx.SYS_SCREEN_X'], {}), '(wx.SYS_SCREEN_X)\n', (47876, 47893), False, 'import wx\n'), ((47911, 47955), 'wx.SystemSettings.GetMetric', 'wx.SystemSettings.GetMetric', (['wx.SYS_SCREEN_Y'], {}), '(wx.SYS_SCREEN_Y)\n', (47938, 47955), False, 'import wx\n'), ((48371, 48395), 'wx.BoxSizer', 'wx.BoxSizer', (['wx.VERTICAL'], {}), '(wx.VERTICAL)\n', (48382, 48395), False, 'import wx\n'), ((48703, 48716), 'wx.BoxSizer', 'wx.BoxSizer', ([], {}), '()\n', (48714, 48716), False, 'import wx\n'), ((49047, 49076), 'cellprofiler_core.utilities.core.modules.instantiate_module', 'instantiate_module', (['tool_name'], {}), '(tool_name)\n', (49065, 49076), False, 'from cellprofiler_core.utilities.core.modules import instantiate_module\n'), ((7320, 7344), 'wx.BoxSizer', 'wx.BoxSizer', (['wx.VERTICAL'], {}), '(wx.VERTICAL)\n', (7331, 7344), False, 'import wx\n'), ((8973, 8997), 'wx.BoxSizer', 'wx.BoxSizer', (['wx.VERTICAL'], {}), '(wx.VERTICAL)\n', (8984, 8997), False, 'import wx\n'), ((11745, 11758), 'wx.BoxSizer', 'wx.BoxSizer', ([], {}), '()\n', (11756, 11758), False, 'import wx\n'), ((19448, 19474), 'bioformats.formatreader.clear_image_reader_cache', 'clear_image_reader_cache', ([], {}), '()\n', (19472, 19474), False, 'from bioformats.formatreader import clear_image_reader_cache\n'), ((28891, 28922), 'os.getenv', 'os.getenv', (['"""CELLPROFILER_DEBUG"""'], {}), "('CELLPROFILER_DEBUG')\n", (28900, 28922), False, 'import os\n'), ((31101, 31110), 'wx.Menu', 'wx.Menu', ([], {}), '()\n', (31108, 31110), False, 'import wx\n'), ((38782, 38818), 'os.system', 'os.system', (['"""python3 -m cellprofiler"""'], {}), "('python3 -m cellprofiler')\n", (38791, 38818), False, 'import os\n'), ((41515, 41527), 'wx.MenuBar', 'wx.MenuBar', ([], {}), '()\n', 
(41525, 41527), False, 'import wx\n'), ((43353, 43375), 'wx.TheClipboard.Open', 'wx.TheClipboard.Open', ([], {}), '()\n', (43373, 43375), False, 'import wx\n'), ((44505, 44550), 'cellprofiler.gui.utilities.icon.get_cp_icon', 'cellprofiler.gui.utilities.icon.get_cp_icon', ([], {}), '()\n', (44548, 44550), False, 'import cellprofiler\n'), ((48929, 48974), 'cellprofiler.gui.utilities.icon.get_cp_icon', 'cellprofiler.gui.utilities.icon.get_cp_icon', ([], {}), '()\n', (48972, 48974), False, 'import cellprofiler\n'), ((19211, 19305), 'logging.warning', 'logging.warning', (['"""Failed to flush temporary measurements file during close"""'], {'exc_info': '(True)'}), "('Failed to flush temporary measurements file during close',\n exc_info=True)\n", (19226, 19305), False, 'import logging\n'), ((19503, 19593), 'logging.warning', 'logging.warning', (['"""Failed to clear bioformats reader cache during close"""'], {'exc_info': '(True)'}), "('Failed to clear bioformats reader cache during close',\n exc_info=True)\n", (19518, 19593), False, 'import logging\n'), ((19706, 19759), 'logging.warning', 'logging.warning', (['"""Failed during close"""'], {'exc_info': '(True)'}), "('Failed during close', exc_info=True)\n", (19721, 19759), False, 'import logging\n'), ((19850, 19923), 'logging.warning', 'logging.warning', (['"""Failed to close the pipeline controller"""'], {'exc_info': '(True)'}), "('Failed to close the pipeline controller', exc_info=True)\n", (19865, 19923), False, 'import logging\n'), ((20009, 20084), 'logging.warning', 'logging.warning', (['"""Failed to stop pipeline validation thread"""'], {'exc_info': '(True)'}), "('Failed to stop pipeline validation thread', exc_info=True)\n", (20024, 20084), False, 'import logging\n'), ((20093, 20104), 'wx.GetApp', 'wx.GetApp', ([], {}), '()\n', (20102, 20104), False, 'import wx\n'), ((38271, 38337), 'wx.MessageBox', 'wx.MessageBox', (['"""Inspection tool is not available on this platform"""'], {}), "('Inspection tool is not available on this platform')\n", (38284, 38337), False, 'import wx\n'), ((39858, 39967), 'wx.MessageBox', 'wx.MessageBox', (['HELP_ON_MODULE_BUT_NONE_SELECTED', '"""No module selected"""'], {'style': '(wx.OK | wx.ICON_INFORMATION)'}), "(HELP_ON_MODULE_BUT_NONE_SELECTED, 'No module selected', style\n =wx.OK | wx.ICON_INFORMATION)\n", (39871, 39967), False, 'import wx\n'), ((43602, 43687), 'wx.MessageBox', 'wx.MessageBox', (['"""Failed to copy to the clipboard"""', '"""Error"""', '(wx.OK | wx.ICON_ERROR)'], {}), "('Failed to copy to the clipboard', 'Error', wx.OK | wx.ICON_ERROR\n )\n", (43615, 43687), False, 'import wx\n'), ((18741, 18765), 'wx.adv.LayoutAlgorithm', 'wx.adv.LayoutAlgorithm', ([], {}), '()\n', (18763, 18765), False, 'import wx\n'), ((38201, 38235), 'wx.lib.inspection.InspectionTool', 'wx.lib.inspection.InspectionTool', ([], {}), '()\n', (38233, 38235), False, 'import wx\n'), ((43418, 43454), 'wx.TheClipboard.SetData', 'wx.TheClipboard.SetData', (['data_object'], {}), '(data_object)\n', (43441, 43454), False, 'import wx\n'), ((43475, 43498), 'wx.TheClipboard.Flush', 'wx.TheClipboard.Flush', ([], {}), '()\n', (43496, 43498), False, 'import wx\n'), ((43544, 43567), 'wx.TheClipboard.Close', 'wx.TheClipboard.Close', ([], {}), '()\n', (43565, 43567), False, 'import wx\n'), ((29161, 29191), 'os.environ.get', 'os.environ.get', (['"""USERNAME"""', '""""""'], {}), "('USERNAME', '')\n", (29175, 29191), False, 'import os\n')] |
from django.db import models
from django.contrib.auth.models import User
# Create your models here.
class Player(models.Model):
name = models.CharField(max_length=14) #TODO: check if name is taken in the current game
user_profile = models.OneToOneField(User, on_delete=models.SET_NULL, null=True, blank=True)
created = models.DateTimeField(auto_now_add=True)
last_used = models.DateTimeField(auto_now=True) #TODO: upgrade this value
def __str__(self):
if self.user_profile:
return f"{self.user_profile}@{self.name}"
else:
return f"@{self.name}"
class Game(models.Model):
player_numbers_choices = [
        (2, 'Ketto'),  # Hungarian "Kettő" = "Two"
        (3, 'Harom'),  # "Három" = "Three"
        (4, 'Negy'),  # "Négy" = "Four"
        (1, '_Fuggoben_'),  # "_Függőben_" = "Pending"
]
pakli = models.CharField(max_length=40, unique=False, blank=True)
players = models.ManyToManyField(Player)
start_date = models.DateTimeField(auto_now_add=True)
does_ended = models.BooleanField(default=False)
number_of_players = models.SmallIntegerField(choices=player_numbers_choices)
# TODO: settings to be implemented (4: betli, kismars, nagymars pontszam)
def __str__(self):
return f"Game#{self.id} (p{self.number_of_players})"
| [
"django.db.models.OneToOneField",
"django.db.models.ManyToManyField",
"django.db.models.BooleanField",
"django.db.models.SmallIntegerField",
"django.db.models.DateTimeField",
"django.db.models.CharField"
] | [((141, 172), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(14)'}), '(max_length=14)\n', (157, 172), False, 'from django.db import models\n'), ((242, 318), 'django.db.models.OneToOneField', 'models.OneToOneField', (['User'], {'on_delete': 'models.SET_NULL', 'null': '(True)', 'blank': '(True)'}), '(User, on_delete=models.SET_NULL, null=True, blank=True)\n', (262, 318), False, 'from django.db import models\n'), ((333, 372), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (353, 372), False, 'from django.db import models\n'), ((389, 424), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(True)'}), '(auto_now=True)\n', (409, 424), False, 'from django.db import models\n'), ((779, 836), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(40)', 'unique': '(False)', 'blank': '(True)'}), '(max_length=40, unique=False, blank=True)\n', (795, 836), False, 'from django.db import models\n'), ((851, 881), 'django.db.models.ManyToManyField', 'models.ManyToManyField', (['Player'], {}), '(Player)\n', (873, 881), False, 'from django.db import models\n'), ((899, 938), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (919, 938), False, 'from django.db import models\n'), ((956, 990), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (975, 990), False, 'from django.db import models\n'), ((1015, 1071), 'django.db.models.SmallIntegerField', 'models.SmallIntegerField', ([], {'choices': 'player_numbers_choices'}), '(choices=player_numbers_choices)\n', (1039, 1071), False, 'from django.db import models\n')] |
from fastapi import FastAPI
from motor.motor_asyncio import AsyncIOMotorClient
from pytest import fixture
from fastapi_pagination import LimitOffsetPage, Page, add_pagination
from fastapi_pagination.ext.motor import paginate
from ..base import BasePaginationTestCase
@fixture(scope="session")
def database_url(mongodb_url) -> str:
return mongodb_url
@fixture(scope="session")
def db_client(database_url):
return AsyncIOMotorClient(database_url)
@fixture(scope="session")
def app(db_client, model_cls):
app = FastAPI()
@app.on_event("startup")
async def on_startup() -> None:
await db_client.test.users.delete_many({})
await db_client.drop_database("test")
@app.on_event("shutdown")
def on_shutdown() -> None:
db_client.close()
@app.get("/default", response_model=Page[model_cls])
@app.get("/limit-offset", response_model=LimitOffsetPage[model_cls])
async def route():
return await paginate(db_client.test.users)
return add_pagination(app)
class TestMotor(BasePaginationTestCase):
@fixture(scope="class")
async def entities(self, db_client):
cursor = db_client.test.users.find()
return await cursor.to_list(length=None)
| [
"fastapi_pagination.add_pagination",
"fastapi.FastAPI",
"motor.motor_asyncio.AsyncIOMotorClient",
"fastapi_pagination.ext.motor.paginate",
"pytest.fixture"
] | [((272, 296), 'pytest.fixture', 'fixture', ([], {'scope': '"""session"""'}), "(scope='session')\n", (279, 296), False, 'from pytest import fixture\n'), ((361, 385), 'pytest.fixture', 'fixture', ([], {'scope': '"""session"""'}), "(scope='session')\n", (368, 385), False, 'from pytest import fixture\n'), ((462, 486), 'pytest.fixture', 'fixture', ([], {'scope': '"""session"""'}), "(scope='session')\n", (469, 486), False, 'from pytest import fixture\n'), ((426, 458), 'motor.motor_asyncio.AsyncIOMotorClient', 'AsyncIOMotorClient', (['database_url'], {}), '(database_url)\n', (444, 458), False, 'from motor.motor_asyncio import AsyncIOMotorClient\n'), ((528, 537), 'fastapi.FastAPI', 'FastAPI', ([], {}), '()\n', (535, 537), False, 'from fastapi import FastAPI\n'), ((1007, 1026), 'fastapi_pagination.add_pagination', 'add_pagination', (['app'], {}), '(app)\n', (1021, 1026), False, 'from fastapi_pagination import LimitOffsetPage, Page, add_pagination\n'), ((1075, 1097), 'pytest.fixture', 'fixture', ([], {'scope': '"""class"""'}), "(scope='class')\n", (1082, 1097), False, 'from pytest import fixture\n'), ((964, 994), 'fastapi_pagination.ext.motor.paginate', 'paginate', (['db_client.test.users'], {}), '(db_client.test.users)\n', (972, 994), False, 'from fastapi_pagination.ext.motor import paginate\n')] |
import pytest
from bs4 import BeautifulSoup
from srblib import on_travis, Soup
def test_soup():
soup = BeautifulSoup('<p><div class="good">hello</div><div>world</div></p>','html.parser')
a = Soup(soup)
b = a['div']
assert(len(b) == 2)
b = a['div',{'class':'good'}]
assert(len(b) == 1)
b = a['div'][0].parent['div'] # cascading over [],find_all and parent
assert(len(b) == 2)
| [
"bs4.BeautifulSoup",
"srblib.Soup"
] | [((108, 196), 'bs4.BeautifulSoup', 'BeautifulSoup', (['"""<p><div class="good">hello</div><div>world</div></p>"""', '"""html.parser"""'], {}), '(\'<p><div class="good">hello</div><div>world</div></p>\',\n \'html.parser\')\n', (121, 196), False, 'from bs4 import BeautifulSoup\n'), ((200, 210), 'srblib.Soup', 'Soup', (['soup'], {}), '(soup)\n', (204, 210), False, 'from srblib import on_travis, Soup\n')] |
# Author: <NAME> (<EMAIL>)
from PyQt5 import QtWidgets, QtCore
from GUI.ui_channel_settings import Ui_Form
from GUI.set_channel_dialog import SetChannelDialog
class SettingsWindow(QtWidgets.QWidget):
def __init__(self, parent=None):
super(SettingsWindow, self).__init__()
self.parent = parent
self.ui = Ui_Form()
self.ui.setupUi(self)
self.actualChannel = self.parent.openbci_conn.driver.get_channel()
self.ui.getChannelButton.clicked.connect(self.get_channel)
self.ui.setChannelButton.clicked.connect(self.set_channel)
self.ui.setChannelOverrideButton.clicked.connect(self.set_channel_override)
self.ui.getSystemStatusButton.clicked.connect(self.get_system_status)
def get_channel(self):
self.actualChannel = self.parent.openbci_conn.driver.get_channel()
self.ui.channelLabel.setText(f"Channel: {self.actualChannel}")
QtCore.QTimer.singleShot(5000, lambda: self.ui.channelLabel.setText(""))
def get_system_status(self):
if self.parent.openbci_conn.driver.get_system_status():
self.ui.channelLabel.setText("Cython board and dongle paired succesfully")
else:
self.ui.channelLabel.setText("Cython board is not paired")
QtCore.QTimer.singleShot(5000, lambda: self.ui.channelLabel.setText(""))
    def set_channel_general(self, title, callback):
        # use the title and callback passed by the caller; hardcoding set_channel
        # here would make set_channel_override fall through to the wrong driver call
        self.newChannelDialog = SetChannelDialog(self, title, callback)
if self.newChannelDialog.exec_():
self.ui.channelLabel.setText(f"Channel changed succesfully: {self.newChannelDialog.new_channel}")
else:
self.ui.channelLabel.setText("Channel was not changed")
QtCore.QTimer.singleShot(5000, lambda: self.ui.channelLabel.setText(""))
def set_channel(self):
self.set_channel_general("Set channel", self.parent.openbci_conn.driver.set_channel)
def set_channel_override(self):
self.set_channel_general("Set dongle channel only", self.parent.openbci_conn.driver.set_channel_override)
| [
"GUI.set_channel_dialog.SetChannelDialog",
"GUI.ui_channel_settings.Ui_Form"
] | [((333, 342), 'GUI.ui_channel_settings.Ui_Form', 'Ui_Form', ([], {}), '()\n', (340, 342), False, 'from GUI.ui_channel_settings import Ui_Form\n'), ((1436, 1523), 'GUI.set_channel_dialog.SetChannelDialog', 'SetChannelDialog', (['self', '"""Set channel"""', 'self.parent.openbci_conn.driver.set_channel'], {}), "(self, 'Set channel', self.parent.openbci_conn.driver.\n set_channel)\n", (1452, 1523), False, 'from GUI.set_channel_dialog import SetChannelDialog\n')] |
'''
generate the high order SCC IM
'''
import os
from os import path
import sys
import itertools
import numpy as np
from scipy.ndimage.interpolation import rotate
from scipy.signal import welch
import matplotlib.pyplot as plt
import matplotlib as mpl
import multiprocessing as mp
from tqdm.contrib.concurrent import process_map
# from toolz import pipe
from .utils import *
from .par_functions import return_vars, propagate, scc, make_IM, make_cov, make_covinvrefj
from ..utils import joinsimdata
imagepix, pupilpix, beam_ratio, e, no_phase_offset,xy_dh,grid,N_act,wav0,amp,aperture,indpup,ind_mask_dh,loopx,loopy,freq_loop,pa_loop,n,refdir,iter_arr=return_vars()
def make_im_scc_howfs():
covinvcor_path = joinsimdata(f"covinvcor_{refdir}.npy")
if path.isfile(covinvcor_path):
covinvcor = np.load(covinvcor_path)
else:
print('Making sine and cosine references')
try:
os.makedirs(joinsimdata(refdir))
except Exception:
dum = 1
results = process_map(make_IM, iter_arr, max_workers=mp.cpu_count())
fourierarr_path = joinsimdata('fourierarr_pupilpix_'+str(int(pupilpix))+'_N_act_'+str(int(N_act))+'_sin_amp_'+str(int(round(amp/1e-9*wav0/(2.*np.pi))))+'.npy')
if not path.isfile(fourierarr_path):
fourierarr=np.zeros((n,aperture[indpup].shape[0]))
for result in results:
cos,sin,i = result
fourierarr[i] = cos
fourierarr[i+len(freq_loop)] = sin
np.save(fourierarr_path, fourierarr)
		results = False
		fourierarr = False
		# (no pool cleanup needed here: process_map above manages its own workers)
#make covariance matrix:
print('Making covariance matrix')
tpool = mp.Pool(processes=numcores)
i_arr = list(range(n))
results = tpool.map(make_cov, i_arr)
tpool.close()
tpool.join()
cov = np.zeros((2*len(freq_loop),2*len(freq_loop)))
for result in results:
covi, i = result
for j in range(i+1):
cov[i,j] = covi[j]
cov[j,i] = covi[j] #symmetric matrix
np.save(joinsimdata('cov_'+refdir),cov)
#invert covariance matrix:
		rcond = 1e-5 #for this setup, anything below rcond = 1e-2 makes no difference because of the implemented binary masks; this will likely not be true and will need to be optimized in real life
covinv = np.linalg.pinv(cov,rcond=rcond)
np.save(joinsimdata('covinv_'+refdir),covinv)
cov = False
#dot product by reference vector to not have to save reference images
refi = np.load(joinsimdata(refdir+'/'+str(i)+'.npy'))
tpool=mp.Pool(processes=numcores)
results=tpool.map(make_covinvrefj,i_arr)
tpool.close()
tpool.join()
covinvcor=np.zeros((n,refi.shape[0]))
for result in results:
i,covinvcori=result
covinvcor[i]=covinvcori
np.save(joinsimdata('covinvcor_'+refdir), covinvcor)
covinv=False
results=False
return covinvcor
#delete files used to create covariance matrix
# import shutil
# shutil.rmtree(refdir)
| [
"numpy.linalg.pinv",
"multiprocessing.cpu_count",
"os.path.isfile",
"numpy.zeros",
"multiprocessing.Pool",
"numpy.load",
"numpy.save"
] | [((755, 782), 'os.path.isfile', 'path.isfile', (['covinvcor_path'], {}), '(covinvcor_path)\n', (766, 782), False, 'from os import path\n'), ((798, 821), 'numpy.load', 'np.load', (['covinvcor_path'], {}), '(covinvcor_path)\n', (805, 821), True, 'import numpy as np\n'), ((1577, 1604), 'multiprocessing.Pool', 'mp.Pool', ([], {'processes': 'numcores'}), '(processes=numcores)\n', (1584, 1604), True, 'import multiprocessing as mp\n'), ((2161, 2193), 'numpy.linalg.pinv', 'np.linalg.pinv', (['cov'], {'rcond': 'rcond'}), '(cov, rcond=rcond)\n', (2175, 2193), True, 'import numpy as np\n'), ((2395, 2422), 'multiprocessing.Pool', 'mp.Pool', ([], {'processes': 'numcores'}), '(processes=numcores)\n', (2402, 2422), True, 'import multiprocessing as mp\n'), ((2512, 2540), 'numpy.zeros', 'np.zeros', (['(n, refi.shape[0])'], {}), '((n, refi.shape[0]))\n', (2520, 2540), True, 'import numpy as np\n'), ((1193, 1221), 'os.path.isfile', 'path.isfile', (['fourierarr_path'], {}), '(fourierarr_path)\n', (1204, 1221), False, 'from os import path\n'), ((1237, 1277), 'numpy.zeros', 'np.zeros', (['(n, aperture[indpup].shape[0])'], {}), '((n, aperture[indpup].shape[0]))\n', (1245, 1277), True, 'import numpy as np\n'), ((1392, 1428), 'numpy.save', 'np.save', (['fourierarr_path', 'fourierarr'], {}), '(fourierarr_path, fourierarr)\n', (1399, 1428), True, 'import numpy as np\n'), ((1005, 1019), 'multiprocessing.cpu_count', 'mp.cpu_count', ([], {}), '()\n', (1017, 1019), True, 'import multiprocessing as mp\n')] |
import flask
blueprint = flask.Blueprint('home', 'home')
@blueprint.route('/')
def index():
return flask.render_template('home/index.html')
| [
"flask.render_template",
"flask.Blueprint"
] | [((26, 57), 'flask.Blueprint', 'flask.Blueprint', (['"""home"""', '"""home"""'], {}), "('home', 'home')\n", (41, 57), False, 'import flask\n'), ((106, 146), 'flask.render_template', 'flask.render_template', (['"""home/index.html"""'], {}), "('home/index.html')\n", (127, 146), False, 'import flask\n')] |
from flask_wtf import FlaskForm
from wtforms import FloatField, StringField, SubmitField
from wtforms.validators import DataRequired
class WoodcutForm(FlaskForm):
material_size = FloatField('board size', validators=[DataRequired()], default=2050.0)
item_list = StringField('list of pieces you need (comma separated please!)', validators=[DataRequired()], default='450, 444, 436, 430, 389, 389, 386, 375, 362, 362, 261, 261, 261')
submit = SubmitField('Calculate')
# make custom validator that makes sure we have a list of integers or floats in item_list | [
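# One way to do that (a sketch, not part of the original module): a standalone
# WTForms validator that splits the comma-separated string and rejects any piece
# that does not parse as a number. The name `numeric_list` is made up here; to use
# it, add it to the `validators` list of `item_list` above.
from wtforms.validators import ValidationError


def numeric_list(form, field):
    """Fail validation unless every comma-separated piece parses as a number."""
    for piece in field.data.split(','):
        try:
            float(piece.strip())
        except ValueError:
            raise ValidationError('"{0}" is not a number'.format(piece.strip()))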
"wtforms.validators.DataRequired",
"wtforms.SubmitField"
] | [((443, 467), 'wtforms.SubmitField', 'SubmitField', (['"""Calculate"""'], {}), "('Calculate')\n", (454, 467), False, 'from wtforms import FloatField, StringField, SubmitField\n'), ((218, 232), 'wtforms.validators.DataRequired', 'DataRequired', ([], {}), '()\n', (230, 232), False, 'from wtforms.validators import DataRequired\n'), ((341, 355), 'wtforms.validators.DataRequired', 'DataRequired', ([], {}), '()\n', (353, 355), False, 'from wtforms.validators import DataRequired\n')] |
import os
import pygame
import time
import random
import glob
last_point = [0, 360]
images = []
image_index = 0
def setup(screen, etc):
global images
#for filepath in sorted(glob.glob('../patches/scope-image/*.png')):
for filepath in sorted(glob.glob('../*.png')):
filename = os.path.basename(filepath)
# print 'loading image file: ' + filename
img = pygame.image.load(filepath).convert()
images.append(img)
def draw(screen, etc):
global last_point, owen, image_index
#owen = images[0]
image_index += 1
if image_index == len(images) : image_index = 0
#screen.set_alpha(None)
#owen.set_alpha(None)
for i in range(0, 100) :
seg(screen, etc, i)
def seg(screen, etc, i):
global last_point, images, owen
xoffset = 40
y0 = screen.get_height() // 2#random.randrange(0,1920)
y1 = (screen.get_height() // 2) + (etc.audio_in[i] / 90)#random.randrange(0,1920)
x = i * 12#random.randrange(0,1080)
color = etc.color_picker() #on knob4
last_point = [(int(etc.knob1*1280)), (int(etc.knob2*720))]
pygame.draw.circle(screen,color,(x + xoffset, y1),int(etc.knob3 * 20) + 4, 0)
pygame.draw.line(screen, color, last_point, [x + xoffset, y1], int(etc.knob3 * 20))
#screen.blit(owen, (x + xoffset, y1))
| [
"pygame.image.load",
"os.path.basename",
"glob.glob"
] | [((257, 278), 'glob.glob', 'glob.glob', (['"""../*.png"""'], {}), "('../*.png')\n", (266, 278), False, 'import glob\n'), ((300, 326), 'os.path.basename', 'os.path.basename', (['filepath'], {}), '(filepath)\n', (316, 326), False, 'import os\n'), ((391, 418), 'pygame.image.load', 'pygame.image.load', (['filepath'], {}), '(filepath)\n', (408, 418), False, 'import pygame\n')] |
"""Form definitions, allow easy validation of input and rendering of forms
"""
# future imports
from __future__ import absolute_import
# third-party imports
from wtforms import BooleanField
from wtforms import StringField
from wtforms import TextAreaField
from wtforms import validators
# local imports
from app.forms.base import SerialiserForm
from app.forms.utils.serialisers import ModelSerialiser
from app.models.pages import MetaData
class PageForm(SerialiserForm):
"""Stores meta data for each page.
"""
visible = BooleanField('Public')
title = StringField(
'Meta Title',
validators=[validators.Optional()],
)
# Repeated value
tags = StringField(
'Meta Tags',
description='This should be a comma seperated list of tags.',
)
nav = StringField(
'Label for sidebar link',
validators=[validators.Optional()],
)
description = TextAreaField('Meta Description')
page__title = StringField(
'Page Title',
validators=[validators.Optional()],
)
page__sub_title = TextAreaField(
'Page Copy',
validators=[validators.Optional()],
)
class Serializer(ModelSerialiser):
model = MetaData
list_fields = [
('visible', {
'type': 'visible',
}),
('title', {
'label': 'Meta Title'
}),
('page.title', {
'label': 'Page Title',
}),
('order', {
'label': 'Order',
'type': 'ordering',
}),
]
fieldsets = [
{
'fields': (
'visible',
),
},
{
'title': 'Page Content',
'fields': (
'page__title',
'page__sub_title',
),
},
{
'title': 'Sidebar',
'fields': (
'nav',
),
},
{
'title': 'Meta Data',
'fields': (
'title',
'tags',
'description',
),
},
]
| [
"wtforms.BooleanField",
"wtforms.StringField",
"wtforms.validators.Optional",
"wtforms.TextAreaField"
] | [((536, 558), 'wtforms.BooleanField', 'BooleanField', (['"""Public"""'], {}), "('Public')\n", (548, 558), False, 'from wtforms import BooleanField\n'), ((688, 779), 'wtforms.StringField', 'StringField', (['"""Meta Tags"""'], {'description': '"""This should be a comma seperated list of tags."""'}), "('Meta Tags', description=\n 'This should be a comma seperated list of tags.')\n", (699, 779), False, 'from wtforms import StringField\n'), ((923, 956), 'wtforms.TextAreaField', 'TextAreaField', (['"""Meta Description"""'], {}), "('Meta Description')\n", (936, 956), False, 'from wtforms import TextAreaField\n'), ((626, 647), 'wtforms.validators.Optional', 'validators.Optional', ([], {}), '()\n', (645, 647), False, 'from wtforms import validators\n'), ((875, 896), 'wtforms.validators.Optional', 'validators.Optional', ([], {}), '()\n', (894, 896), False, 'from wtforms import validators\n'), ((1030, 1051), 'wtforms.validators.Optional', 'validators.Optional', ([], {}), '()\n', (1049, 1051), False, 'from wtforms import validators\n'), ((1138, 1159), 'wtforms.validators.Optional', 'validators.Optional', ([], {}), '()\n', (1157, 1159), False, 'from wtforms import validators\n')] |
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), ".."))
import unittest
from test_util import ScanPointGeneratorTest
from scanpointgenerator import LineGenerator
class LineGeneratorTest(ScanPointGeneratorTest):
def test_init(self):
g = LineGenerator("x", "mm", 1.0, 9.0, 5, alternate=True)
self.assertEqual(dict(x="mm"), g.axis_units())
self.assertEqual(["x"], g.axes)
self.assertEqual(True, g.alternate)
def test_init_multi_dim(self):
g = LineGenerator(["x", "y"], ["mm", "cm"], [2., -2.], [4., -4.], 3)
self.assertEqual(["x", "y"], g.axes)
self.assertEqual({"x":"mm", "y":"cm"}, g.axis_units())
self.assertEqual(False, g.alternate)
def test_array_positions(self):
g = LineGenerator("x", "mm", 1.0, 9.0, 5, alternate=True)
positions = [1.0, 3.0, 5.0, 7.0, 9.0]
bounds = [0.0, 2.0, 4.0, 6.0, 8.0, 10.0]
indexes = [0, 1, 2, 3, 4]
g.prepare_positions()
g.prepare_bounds()
self.assertEqual(positions, g.positions['x'].tolist())
self.assertEqual(bounds, g.bounds['x'].tolist())
def test_negative_direction(self):
g = LineGenerator("x", "mm", 2, -2, 5)
positions = [2., 1., 0., -1., -2.]
bounds = [2.5, 1.5, 0.5, -0.5, -1.5, -2.5]
g.prepare_positions()
g.prepare_bounds()
self.assertEqual(positions, g.positions['x'].tolist())
self.assertEqual(bounds, g.bounds['x'].tolist())
def test_single_point(self):
g = LineGenerator("x", "mm", 1.0, 4.0, 1)
g.prepare_positions()
g.prepare_bounds()
self.assertEqual([2.5], g.positions["x"].tolist())
self.assertEqual([1.0, 4.0], g.bounds["x"].tolist())
def test_single_point_exact(self):
g = LineGenerator("x", "mm", 1.0, 1.0, 1)
g.prepare_positions()
g.prepare_bounds()
self.assertEqual([1.0], g.positions["x"].tolist())
self.assertEqual([1.0, 1.0], g.bounds["x"].tolist())
def test_duplicate_name_raises(self):
with self.assertRaises(AssertionError):
LineGenerator(["x", "x"], "mm", 0.0, 1.0, 5)
def test_to_dict(self):
g = LineGenerator("x", "mm", 1.0, 9.0, 5, alternate=True)
expected_dict = dict()
expected_dict['typeid'] = "scanpointgenerator:generator/LineGenerator:1.0"
expected_dict['axes'] = ["x"]
expected_dict['units'] = ["mm"]
expected_dict['start'] = [1.0]
expected_dict['stop'] = [9.0]
expected_dict['size'] = 5
expected_dict['alternate'] = True
d = g.to_dict()
self.assertEqual(expected_dict, d)
def test_from_dict(self):
_dict = dict()
_dict['axes'] = ["x"]
_dict['units'] = ["mm"]
_dict['start'] = [1.0]
_dict['stop'] = [9.0]
_dict['size'] = 5
_dict['alternate'] = True
units_dict = dict()
units_dict['x'] = "mm"
gen = LineGenerator.from_dict(_dict)
self.assertEqual(["x"], gen.axes)
self.assertEqual(units_dict, gen.axis_units())
self.assertEqual([1.0], gen.start)
self.assertEqual([9.0], gen.stop)
self.assertEqual(5, gen.size)
self.assertTrue(gen.alternate)
class LineGenerator2DTest(ScanPointGeneratorTest):
def test_init(self):
g = LineGenerator(["x", "y"], ["mm", "mm"], [1.0, 2.0], [5.0, 10.0], 5)
self.assertEqual(dict(x="mm", y="mm"), g.axis_units())
self.assertEqual(["x", "y"], g.axes)
def test_given_inconsistent_dims_then_raise_error(self):
with self.assertRaises(ValueError):
LineGenerator("x", "mm", [1.0], [5.0, 10.0], 5)
def test_give_one_point_then_step_zero(self):
l = LineGenerator(["1", "2", "3", "4", "5"], "mm", [0.0]*5, [10.0]*5, 1)
l.prepare_positions()
assert list(l.positions.values()) == 5*[5.0]
def test_array_positions(self):
g = LineGenerator(["x", "y"], "mm", [1.0, 2.0], [5.0, 10.0], 5)
g.prepare_positions()
g.prepare_bounds()
x_positions = [1.0, 2.0, 3.0, 4.0, 5.0]
y_positions = [2.0, 4.0, 6.0, 8.0, 10.0]
x_bounds = [0.5, 1.5, 2.5, 3.5, 4.5, 5.5]
y_bounds = [1, 3, 5, 7, 9, 11]
self.assertEqual(x_positions, g.positions['x'].tolist())
self.assertEqual(y_positions, g.positions['y'].tolist())
self.assertEqual(x_bounds, g.bounds['x'].tolist())
self.assertEqual(y_bounds, g.bounds['y'].tolist())
def test_array_positions_single_point(self):
g = LineGenerator(["x", "y"], "mm", [1.0, -3.0], [5.0, -4.0], 1)
g.prepare_positions()
g.prepare_bounds()
x_positions = [3.0]
y_positions = [-3.5]
x_bounds = [1.0, 5.0]
y_bounds = [-3.0, -4.0]
self.assertEqual(x_positions, g.positions['x'].tolist())
self.assertEqual(y_positions, g.positions['y'].tolist())
self.assertEqual(x_bounds, g.bounds['x'].tolist())
self.assertEqual(y_bounds, g.bounds['y'].tolist())
if __name__ == "__main__":
unittest.main(verbosity=2)
| [
"unittest.main",
"os.path.dirname",
"scanpointgenerator.LineGenerator",
"scanpointgenerator.LineGenerator.from_dict"
] | [((5120, 5146), 'unittest.main', 'unittest.main', ([], {'verbosity': '(2)'}), '(verbosity=2)\n', (5133, 5146), False, 'import unittest\n'), ((50, 75), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (65, 75), False, 'import os\n'), ((280, 333), 'scanpointgenerator.LineGenerator', 'LineGenerator', (['"""x"""', '"""mm"""', '(1.0)', '(9.0)', '(5)'], {'alternate': '(True)'}), "('x', 'mm', 1.0, 9.0, 5, alternate=True)\n", (293, 333), False, 'from scanpointgenerator import LineGenerator\n'), ((521, 589), 'scanpointgenerator.LineGenerator', 'LineGenerator', (["['x', 'y']", "['mm', 'cm']", '[2.0, -2.0]', '[4.0, -4.0]', '(3)'], {}), "(['x', 'y'], ['mm', 'cm'], [2.0, -2.0], [4.0, -4.0], 3)\n", (534, 589), False, 'from scanpointgenerator import LineGenerator\n'), ((788, 841), 'scanpointgenerator.LineGenerator', 'LineGenerator', (['"""x"""', '"""mm"""', '(1.0)', '(9.0)', '(5)'], {'alternate': '(True)'}), "('x', 'mm', 1.0, 9.0, 5, alternate=True)\n", (801, 841), False, 'from scanpointgenerator import LineGenerator\n'), ((1200, 1234), 'scanpointgenerator.LineGenerator', 'LineGenerator', (['"""x"""', '"""mm"""', '(2)', '(-2)', '(5)'], {}), "('x', 'mm', 2, -2, 5)\n", (1213, 1234), False, 'from scanpointgenerator import LineGenerator\n'), ((1552, 1589), 'scanpointgenerator.LineGenerator', 'LineGenerator', (['"""x"""', '"""mm"""', '(1.0)', '(4.0)', '(1)'], {}), "('x', 'mm', 1.0, 4.0, 1)\n", (1565, 1589), False, 'from scanpointgenerator import LineGenerator\n'), ((1819, 1856), 'scanpointgenerator.LineGenerator', 'LineGenerator', (['"""x"""', '"""mm"""', '(1.0)', '(1.0)', '(1)'], {}), "('x', 'mm', 1.0, 1.0, 1)\n", (1832, 1856), False, 'from scanpointgenerator import LineGenerator\n'), ((2223, 2276), 'scanpointgenerator.LineGenerator', 'LineGenerator', (['"""x"""', '"""mm"""', '(1.0)', '(9.0)', '(5)'], {'alternate': '(True)'}), "('x', 'mm', 1.0, 9.0, 5, alternate=True)\n", (2236, 2276), False, 'from scanpointgenerator import LineGenerator\n'), ((3002, 3032), 'scanpointgenerator.LineGenerator.from_dict', 'LineGenerator.from_dict', (['_dict'], {}), '(_dict)\n', (3025, 3032), False, 'from scanpointgenerator import LineGenerator\n'), ((3384, 3451), 'scanpointgenerator.LineGenerator', 'LineGenerator', (["['x', 'y']", "['mm', 'mm']", '[1.0, 2.0]', '[5.0, 10.0]', '(5)'], {}), "(['x', 'y'], ['mm', 'mm'], [1.0, 2.0], [5.0, 10.0], 5)\n", (3397, 3451), False, 'from scanpointgenerator import LineGenerator\n'), ((3789, 3861), 'scanpointgenerator.LineGenerator', 'LineGenerator', (["['1', '2', '3', '4', '5']", '"""mm"""', '([0.0] * 5)', '([10.0] * 5)', '(1)'], {}), "(['1', '2', '3', '4', '5'], 'mm', [0.0] * 5, [10.0] * 5, 1)\n", (3802, 3861), False, 'from scanpointgenerator import LineGenerator\n'), ((3990, 4049), 'scanpointgenerator.LineGenerator', 'LineGenerator', (["['x', 'y']", '"""mm"""', '[1.0, 2.0]', '[5.0, 10.0]', '(5)'], {}), "(['x', 'y'], 'mm', [1.0, 2.0], [5.0, 10.0], 5)\n", (4003, 4049), False, 'from scanpointgenerator import LineGenerator\n'), ((4603, 4663), 'scanpointgenerator.LineGenerator', 'LineGenerator', (["['x', 'y']", '"""mm"""', '[1.0, -3.0]', '[5.0, -4.0]', '(1)'], {}), "(['x', 'y'], 'mm', [1.0, -3.0], [5.0, -4.0], 1)\n", (4616, 4663), False, 'from scanpointgenerator import LineGenerator\n'), ((2137, 2181), 'scanpointgenerator.LineGenerator', 'LineGenerator', (["['x', 'x']", '"""mm"""', '(0.0)', '(1.0)', '(5)'], {}), "(['x', 'x'], 'mm', 0.0, 1.0, 5)\n", (2150, 2181), False, 'from scanpointgenerator import LineGenerator\n'), ((3678, 3725), 
'scanpointgenerator.LineGenerator', 'LineGenerator', (['"""x"""', '"""mm"""', '[1.0]', '[5.0, 10.0]', '(5)'], {}), "('x', 'mm', [1.0], [5.0, 10.0], 5)\n", (3691, 3725), False, 'from scanpointgenerator import LineGenerator\n')] |
from __future__ import print_function
import os
import sys
def main():
# This part should execute as fast as possible so as not to slow down
# shell startup. For this reason we do not use ArgumentParser here and
# do not import anything on module level except for some standard modules.
if len(sys.argv) == 2 and sys.argv[1] == 'shellsrc':
from luamb._shell import shellsrc
print(shellsrc)
sys.exit()
from luamb._luamb import Luamb, LuambException
def error(msg, exit_status=1):
msg = '\033[0;31m{}\033[0m'.format(msg)
print(msg, file=sys.stderr)
sys.exit(exit_status)
try:
import hererocks
except ImportError:
error("'hererocks' is not installed")
luamb_dir = os.environ.get('LUAMB_DIR')
if not luamb_dir:
error("LUAMB_DIR variable is not set")
luamb_dir = os.path.expandvars(luamb_dir)
if not os.path.isdir(luamb_dir):
error("LUAMB_DIR='{}' is not a directory".format(luamb_dir))
luamb = Luamb(
env_dir=luamb_dir,
active_env=os.environ.get('LUAMB_ACTIVE_ENV'),
lua_default=os.environ.get('LUAMB_LUA_DEFAULT'),
luarocks_default=os.environ.get('LUAMB_LUAROCKS_DEFAULT'),
hererocks=hererocks,
)
try:
luamb.run()
except LuambException as exc:
error(exc)
| [
"os.path.expandvars",
"os.path.isdir",
"os.environ.get",
"sys.exit"
] | [((768, 795), 'os.environ.get', 'os.environ.get', (['"""LUAMB_DIR"""'], {}), "('LUAMB_DIR')\n", (782, 795), False, 'import os\n'), ((882, 911), 'os.path.expandvars', 'os.path.expandvars', (['luamb_dir'], {}), '(luamb_dir)\n', (900, 911), False, 'import os\n'), ((433, 443), 'sys.exit', 'sys.exit', ([], {}), '()\n', (441, 443), False, 'import sys\n'), ((624, 645), 'sys.exit', 'sys.exit', (['exit_status'], {}), '(exit_status)\n', (632, 645), False, 'import sys\n'), ((923, 947), 'os.path.isdir', 'os.path.isdir', (['luamb_dir'], {}), '(luamb_dir)\n', (936, 947), False, 'import os\n'), ((1084, 1118), 'os.environ.get', 'os.environ.get', (['"""LUAMB_ACTIVE_ENV"""'], {}), "('LUAMB_ACTIVE_ENV')\n", (1098, 1118), False, 'import os\n'), ((1140, 1175), 'os.environ.get', 'os.environ.get', (['"""LUAMB_LUA_DEFAULT"""'], {}), "('LUAMB_LUA_DEFAULT')\n", (1154, 1175), False, 'import os\n'), ((1202, 1242), 'os.environ.get', 'os.environ.get', (['"""LUAMB_LUAROCKS_DEFAULT"""'], {}), "('LUAMB_LUAROCKS_DEFAULT')\n", (1216, 1242), False, 'import os\n')] |
# -*- coding: utf-8 -*-
"""
Simple unit test for the MSP class
"""
import unittest
import sys
sys.path.append('../')
from msp430 import MSP, available_ports
from glob import glob
class TestMSP(unittest.TestCase):
"""docstring for TestMSP"""
def shortDescription(self):
return "Test Microcontroler class"
def setUp(self):
""" Sting up """
func = str(self.id).split('=')[-1][:-2]
func = func.split('test_')[-1]
func = func.replace('_', ' ')
out = '\rTest of ' + func + ' '
out = out.ljust(65, '-')
sys.stderr.write(out)
self.shortDescription()
port = available_ports()
if port:
port = port[0]
self.msp430 = MSP(port)
def tearDown(self):
""" Releasing test """
sys.stderr.write('Done\n')
def test_message(self):
""" A test """
self.assertIsNotNone(self.msp430.adc.read())
if __name__ == "__main__":
unittest.main()
| [
"sys.stderr.write",
"msp430.MSP",
"msp430.available_ports",
"unittest.main",
"sys.path.append"
] | [((95, 117), 'sys.path.append', 'sys.path.append', (['"""../"""'], {}), "('../')\n", (110, 117), False, 'import sys\n'), ((969, 984), 'unittest.main', 'unittest.main', ([], {}), '()\n', (982, 984), False, 'import unittest\n'), ((577, 598), 'sys.stderr.write', 'sys.stderr.write', (['out'], {}), '(out)\n', (593, 598), False, 'import sys\n'), ((647, 664), 'msp430.available_ports', 'available_ports', ([], {}), '()\n', (662, 664), False, 'from msp430 import MSP, available_ports\n'), ((731, 740), 'msp430.MSP', 'MSP', (['port'], {}), '(port)\n', (734, 740), False, 'from msp430 import MSP, available_ports\n'), ((805, 831), 'sys.stderr.write', 'sys.stderr.write', (['"""Done\n"""'], {}), "('Done\\n')\n", (821, 831), False, 'import sys\n')] |
# browsers
from lazagne.softwares.browsers.mozilla import Mozilla
from lazagne.softwares.browsers.chrome import Chrome
from lazagne.softwares.browsers.coccoc import CocCoc
from lazagne.softwares.browsers.opera import Opera
from lazagne.softwares.browsers.ie import IE
# windows
from lazagne.softwares.windows.autologon import Autologon
from lazagne.softwares.windows.credman import Credman
from lazagne.softwares.windows.vault import Vault
from lazagne.softwares.windows.cachedump import Cachedump
from lazagne.softwares.windows.hashdump import Hashdump
from lazagne.softwares.windows.lsa_secrets import LSASecrets
# sysadmin
from lazagne.softwares.sysadmin.filezilla import Filezilla
from lazagne.softwares.sysadmin.cyberduck import Cyberduck
from lazagne.softwares.sysadmin.puttycm import Puttycm
from lazagne.softwares.sysadmin.winscp import WinSCP
from lazagne.softwares.sysadmin.coreftp import CoreFTP
from lazagne.softwares.sysadmin.ftpnavigator import FtpNavigator
from lazagne.softwares.sysadmin.apachedirectorystudio import ApacheDirectoryStudio
from lazagne.softwares.sysadmin.opensshforwindows import OpenSSHForWindows
from lazagne.softwares.sysadmin.rdpmanager import RDPManager
from lazagne.softwares.sysadmin.unattended import Unattended
# svn
from lazagne.softwares.svn.tortoise import Tortoise
# git
from lazagne.softwares.git.gitforwindows import GitForWindows
# maven
from lazagne.softwares.maven.mavenrepositories import MavenRepositories
# chats
from lazagne.softwares.chats.skype import Skype
from lazagne.softwares.chats.pidgin import Pidgin
from lazagne.softwares.chats.jitsi import Jitsi
# wifi
from lazagne.softwares.wifi.wifi import Wifi
# mails
from lazagne.softwares.mails.outlook import Outlook
# databases
from lazagne.softwares.databases.sqldeveloper import SQLDeveloper
from lazagne.softwares.databases.squirrel import Squirrel
from lazagne.softwares.databases.dbvis import Dbvisualizer
from lazagne.softwares.databases.robomongo import Robomongo
# games
from lazagne.softwares.games.roguestale import RoguesTale
from lazagne.softwares.games.kalypsomedia import KalypsoMedia
from lazagne.softwares.games.galconfusion import GalconFusion
from lazagne.softwares.games.turba import Turba
# memory
from lazagne.softwares.memory.keepass import Keepass
from lazagne.softwares.memory.memorydump import MemoryDump
# php
from lazagne.softwares.php.composer import Composer
def get_categories():
category = {
'chats': {'help': 'Chat clients supported'},
'sysadmin': {'help': 'SCP/SSH/FTP/FTPS clients supported'},
'database': {'help': 'SQL/NoSQL clients supported'},
'svn': {'help': 'SVN clients supported'},
'git': {'help': 'GIT clients supported'},
'maven': {'help': 'Maven java build tool'},
'php': {'help': 'PHP build tool'},
'mails': {'help': 'Email clients supported'},
'memory': {'help': 'Retrieve passwords from memory'},
'wifi': {'help': 'Wifi'},
'browsers': {'help': 'Web browsers supported'},
'windows': {'help': 'Windows credentials (credential manager, etc.)'},
'games': {'help': 'Games etc.'}
}
return category
def get_modules():
moduleNames = [
ApacheDirectoryStudio(),
Autologon(),
Dbvisualizer(),
Chrome(),
CocCoc(),
CoreFTP(),
Cyberduck(),
Filezilla(),
FtpNavigator(),
GalconFusion(),
GitForWindows(),
IE(),
Jitsi(),
KalypsoMedia(),
MavenRepositories(),
        MemoryDump(), # retrieve browser and keepass passwords
Keepass(), # should be launched after memory dump
Mozilla(),
Composer(),
Credman(),
OpenSSHForWindows(),
Opera(),
Outlook(),
Pidgin(),
Puttycm(),
RDPManager(),
Robomongo(),
RoguesTale(),
Tortoise(),
Skype(),
SQLDeveloper(),
Squirrel(),
Turba(),
Unattended(),
Vault(),
Wifi(),
WinSCP(),
Cachedump(),
Hashdump(),
LSASecrets()
]
return moduleNames
| [
"lazagne.softwares.databases.dbvis.Dbvisualizer",
"lazagne.softwares.chats.jitsi.Jitsi",
"lazagne.softwares.windows.hashdump.Hashdump",
"lazagne.softwares.sysadmin.unattended.Unattended",
"lazagne.softwares.sysadmin.puttycm.Puttycm",
"lazagne.softwares.databases.sqldeveloper.SQLDeveloper",
"lazagne.softwares.sysadmin.cyberduck.Cyberduck",
"lazagne.softwares.chats.pidgin.Pidgin",
"lazagne.softwares.maven.mavenrepositories.MavenRepositories",
"lazagne.softwares.browsers.coccoc.CocCoc",
"lazagne.softwares.sysadmin.ftpnavigator.FtpNavigator",
"lazagne.softwares.games.roguestale.RoguesTale",
"lazagne.softwares.sysadmin.filezilla.Filezilla",
"lazagne.softwares.windows.vault.Vault",
"lazagne.softwares.games.turba.Turba",
"lazagne.softwares.browsers.opera.Opera",
"lazagne.softwares.windows.credman.Credman",
"lazagne.softwares.mails.outlook.Outlook",
"lazagne.softwares.memory.memorydump.MemoryDump",
"lazagne.softwares.browsers.mozilla.Mozilla",
"lazagne.softwares.games.kalypsomedia.KalypsoMedia",
"lazagne.softwares.chats.skype.Skype",
"lazagne.softwares.sysadmin.apachedirectorystudio.ApacheDirectoryStudio",
"lazagne.softwares.games.galconfusion.GalconFusion",
"lazagne.softwares.databases.robomongo.Robomongo",
"lazagne.softwares.git.gitforwindows.GitForWindows",
"lazagne.softwares.memory.keepass.Keepass",
"lazagne.softwares.php.composer.Composer",
"lazagne.softwares.svn.tortoise.Tortoise",
"lazagne.softwares.windows.cachedump.Cachedump",
"lazagne.softwares.sysadmin.coreftp.CoreFTP",
"lazagne.softwares.browsers.chrome.Chrome",
"lazagne.softwares.browsers.ie.IE",
"lazagne.softwares.sysadmin.opensshforwindows.OpenSSHForWindows",
"lazagne.softwares.windows.lsa_secrets.LSASecrets",
"lazagne.softwares.sysadmin.winscp.WinSCP",
"lazagne.softwares.sysadmin.rdpmanager.RDPManager",
"lazagne.softwares.databases.squirrel.Squirrel",
"lazagne.softwares.wifi.wifi.Wifi",
"lazagne.softwares.windows.autologon.Autologon"
] | [((3193, 3216), 'lazagne.softwares.sysadmin.apachedirectorystudio.ApacheDirectoryStudio', 'ApacheDirectoryStudio', ([], {}), '()\n', (3214, 3216), False, 'from lazagne.softwares.sysadmin.apachedirectorystudio import ApacheDirectoryStudio\n'), ((3221, 3232), 'lazagne.softwares.windows.autologon.Autologon', 'Autologon', ([], {}), '()\n', (3230, 3232), False, 'from lazagne.softwares.windows.autologon import Autologon\n'), ((3237, 3251), 'lazagne.softwares.databases.dbvis.Dbvisualizer', 'Dbvisualizer', ([], {}), '()\n', (3249, 3251), False, 'from lazagne.softwares.databases.dbvis import Dbvisualizer\n'), ((3257, 3265), 'lazagne.softwares.browsers.chrome.Chrome', 'Chrome', ([], {}), '()\n', (3263, 3265), False, 'from lazagne.softwares.browsers.chrome import Chrome\n'), ((3271, 3279), 'lazagne.softwares.browsers.coccoc.CocCoc', 'CocCoc', ([], {}), '()\n', (3277, 3279), False, 'from lazagne.softwares.browsers.coccoc import CocCoc\n'), ((3284, 3293), 'lazagne.softwares.sysadmin.coreftp.CoreFTP', 'CoreFTP', ([], {}), '()\n', (3291, 3293), False, 'from lazagne.softwares.sysadmin.coreftp import CoreFTP\n'), ((3299, 3310), 'lazagne.softwares.sysadmin.cyberduck.Cyberduck', 'Cyberduck', ([], {}), '()\n', (3308, 3310), False, 'from lazagne.softwares.sysadmin.cyberduck import Cyberduck\n'), ((3315, 3326), 'lazagne.softwares.sysadmin.filezilla.Filezilla', 'Filezilla', ([], {}), '()\n', (3324, 3326), False, 'from lazagne.softwares.sysadmin.filezilla import Filezilla\n'), ((3332, 3346), 'lazagne.softwares.sysadmin.ftpnavigator.FtpNavigator', 'FtpNavigator', ([], {}), '()\n', (3344, 3346), False, 'from lazagne.softwares.sysadmin.ftpnavigator import FtpNavigator\n'), ((3352, 3366), 'lazagne.softwares.games.galconfusion.GalconFusion', 'GalconFusion', ([], {}), '()\n', (3364, 3366), False, 'from lazagne.softwares.games.galconfusion import GalconFusion\n'), ((3371, 3386), 'lazagne.softwares.git.gitforwindows.GitForWindows', 'GitForWindows', ([], {}), '()\n', (3384, 3386), False, 'from lazagne.softwares.git.gitforwindows import GitForWindows\n'), ((3391, 3395), 'lazagne.softwares.browsers.ie.IE', 'IE', ([], {}), '()\n', (3393, 3395), False, 'from lazagne.softwares.browsers.ie import IE\n'), ((3400, 3407), 'lazagne.softwares.chats.jitsi.Jitsi', 'Jitsi', ([], {}), '()\n', (3405, 3407), False, 'from lazagne.softwares.chats.jitsi import Jitsi\n'), ((3412, 3426), 'lazagne.softwares.games.kalypsomedia.KalypsoMedia', 'KalypsoMedia', ([], {}), '()\n', (3424, 3426), False, 'from lazagne.softwares.games.kalypsomedia import KalypsoMedia\n'), ((3431, 3450), 'lazagne.softwares.maven.mavenrepositories.MavenRepositories', 'MavenRepositories', ([], {}), '()\n', (3448, 3450), False, 'from lazagne.softwares.maven.mavenrepositories import MavenRepositories\n'), ((3455, 3467), 'lazagne.softwares.memory.memorydump.MemoryDump', 'MemoryDump', ([], {}), '()\n', (3465, 3467), False, 'from lazagne.softwares.memory.memorydump import MemoryDump\n'), ((3515, 3524), 'lazagne.softwares.memory.keepass.Keepass', 'Keepass', ([], {}), '()\n', (3522, 3524), False, 'from lazagne.softwares.memory.keepass import Keepass\n'), ((3571, 3580), 'lazagne.softwares.browsers.mozilla.Mozilla', 'Mozilla', ([], {}), '()\n', (3578, 3580), False, 'from lazagne.softwares.browsers.mozilla import Mozilla\n'), ((3585, 3595), 'lazagne.softwares.php.composer.Composer', 'Composer', ([], {}), '()\n', (3593, 3595), False, 'from lazagne.softwares.php.composer import Composer\n'), ((3600, 3609), 'lazagne.softwares.windows.credman.Credman', 'Credman', ([], {}), '()\n', 
(3607, 3609), False, 'from lazagne.softwares.windows.credman import Credman\n'), ((3614, 3633), 'lazagne.softwares.sysadmin.opensshforwindows.OpenSSHForWindows', 'OpenSSHForWindows', ([], {}), '()\n', (3631, 3633), False, 'from lazagne.softwares.sysadmin.opensshforwindows import OpenSSHForWindows\n'), ((3639, 3646), 'lazagne.softwares.browsers.opera.Opera', 'Opera', ([], {}), '()\n', (3644, 3646), False, 'from lazagne.softwares.browsers.opera import Opera\n'), ((3651, 3660), 'lazagne.softwares.mails.outlook.Outlook', 'Outlook', ([], {}), '()\n', (3658, 3660), False, 'from lazagne.softwares.mails.outlook import Outlook\n'), ((3665, 3673), 'lazagne.softwares.chats.pidgin.Pidgin', 'Pidgin', ([], {}), '()\n', (3671, 3673), False, 'from lazagne.softwares.chats.pidgin import Pidgin\n'), ((3678, 3687), 'lazagne.softwares.sysadmin.puttycm.Puttycm', 'Puttycm', ([], {}), '()\n', (3685, 3687), False, 'from lazagne.softwares.sysadmin.puttycm import Puttycm\n'), ((3692, 3704), 'lazagne.softwares.sysadmin.rdpmanager.RDPManager', 'RDPManager', ([], {}), '()\n', (3702, 3704), False, 'from lazagne.softwares.sysadmin.rdpmanager import RDPManager\n'), ((3709, 3720), 'lazagne.softwares.databases.robomongo.Robomongo', 'Robomongo', ([], {}), '()\n', (3718, 3720), False, 'from lazagne.softwares.databases.robomongo import Robomongo\n'), ((3730, 3742), 'lazagne.softwares.games.roguestale.RoguesTale', 'RoguesTale', ([], {}), '()\n', (3740, 3742), False, 'from lazagne.softwares.games.roguestale import RoguesTale\n'), ((3747, 3757), 'lazagne.softwares.svn.tortoise.Tortoise', 'Tortoise', ([], {}), '()\n', (3755, 3757), False, 'from lazagne.softwares.svn.tortoise import Tortoise\n'), ((3763, 3770), 'lazagne.softwares.chats.skype.Skype', 'Skype', ([], {}), '()\n', (3768, 3770), False, 'from lazagne.softwares.chats.skype import Skype\n'), ((3776, 3790), 'lazagne.softwares.databases.sqldeveloper.SQLDeveloper', 'SQLDeveloper', ([], {}), '()\n', (3788, 3790), False, 'from lazagne.softwares.databases.sqldeveloper import SQLDeveloper\n'), ((3796, 3806), 'lazagne.softwares.databases.squirrel.Squirrel', 'Squirrel', ([], {}), '()\n', (3804, 3806), False, 'from lazagne.softwares.databases.squirrel import Squirrel\n'), ((3811, 3818), 'lazagne.softwares.games.turba.Turba', 'Turba', ([], {}), '()\n', (3816, 3818), False, 'from lazagne.softwares.games.turba import Turba\n'), ((3823, 3835), 'lazagne.softwares.sysadmin.unattended.Unattended', 'Unattended', ([], {}), '()\n', (3833, 3835), False, 'from lazagne.softwares.sysadmin.unattended import Unattended\n'), ((3840, 3847), 'lazagne.softwares.windows.vault.Vault', 'Vault', ([], {}), '()\n', (3845, 3847), False, 'from lazagne.softwares.windows.vault import Vault\n'), ((3852, 3858), 'lazagne.softwares.wifi.wifi.Wifi', 'Wifi', ([], {}), '()\n', (3856, 3858), False, 'from lazagne.softwares.wifi.wifi import Wifi\n'), ((3864, 3872), 'lazagne.softwares.sysadmin.winscp.WinSCP', 'WinSCP', ([], {}), '()\n', (3870, 3872), False, 'from lazagne.softwares.sysadmin.winscp import WinSCP\n'), ((3877, 3888), 'lazagne.softwares.windows.cachedump.Cachedump', 'Cachedump', ([], {}), '()\n', (3886, 3888), False, 'from lazagne.softwares.windows.cachedump import Cachedump\n'), ((3893, 3903), 'lazagne.softwares.windows.hashdump.Hashdump', 'Hashdump', ([], {}), '()\n', (3901, 3903), False, 'from lazagne.softwares.windows.hashdump import Hashdump\n'), ((3908, 3920), 'lazagne.softwares.windows.lsa_secrets.LSASecrets', 'LSASecrets', ([], {}), '()\n', (3918, 3920), False, 'from lazagne.softwares.windows.lsa_secrets 
import LSASecrets\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from pathlib import Path
import datetime
import xlwt3
import openpyxl
import tempfile
import logging
logger = logging.getLogger('xxx')
def string_none(x=None):
if x is None:
return ''
else:
return str(x)
def CONV_xlsx2xls(arg=None, cfg=None):
logger.info('CONVERT: xlsx >> xls')
file_dir = Path(str(cfg[str(arg.section)]['source']))
result_dir = Path(str(cfg[str(arg.section)]['destination']))
xlsx_list = list(file_dir.glob('**/*.xlsx'))
if not result_dir.exists():
result_dir.mkdir(parents=True)
if xlsx_list:
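        # the generated .xls file names are written either to the configured
        # 'saved_filenames' file or to a throw-away temporary file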
sr = Path(str(cfg[arg.section]['saved_filenames']))
if arg.save_filenames is True and not sr.is_dir():
ftemp = sr.open('w')
else:
ftemp = tempfile.TemporaryFile('w')
logger.info(' > start - {0}'.format(datetime.datetime.now().strftime('%Y/%m/%d %H:%M:%S')))
for i in xlsx_list:
try:
w_book = xlwt3.Workbook()
sh = 0
w_sheet = w_book.add_sheet('sheet_'+str(sh))
r_book = openpyxl.load_workbook(str(i), read_only=True, use_iterators=True)
r_sheet = r_book.active
tmp_row = []
cnt = 0
count2= 0
logger.info(str(i) + ' /' + str(sh))
for rrr in r_sheet.rows:
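                    # the legacy .xls format holds at most 65536 rows per sheet,
                    # so start a new sheet well before that limit is reached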
if cnt > 60000:
sh = sh + 1
cnt = 0
w_sheet = w_book.add_sheet('sheet_'+str(sh))
logger.info(str(i) + ' /' + str(sh))
tmp_row = [string_none(i.value) for i in rrr]
for g in range(len(tmp_row)):
w_sheet.write(cnt, g, tmp_row[g])
cnt = cnt + 1
count2 = count2 + 1
if arg.write_count:
w_book_name = str(Path(result_dir).joinpath(''.join([Path(Path(i).name).stem, '_C_s', str(sh), '_r', str(count2), '.xls'])))
else:
w_book_name = str(Path(result_dir).joinpath(''.join([Path(Path(i).name).stem, '_C', '.xls'])))
w_book.save(w_book_name)
ftemp.write(w_book_name+'\n')
except Exception as ee:
logger.error("{0} - ERROR: {1}".format(datetime.datetime.now().strftime('%d %b %Y %H:%M:%S'), ee))
continue
ftemp.close()
logger.info(' > stop - {0}'.format(datetime.datetime.now().strftime('%Y/%m/%d %H:%M:%S')))
return True
else:
return False
def Convert(arg=None, cfg=None):
if arg and cfg:
subtypes = {\
'xlsx2xls': CONV_xlsx2xls
}
subtypes[arg.type](arg, cfg)
logger.info('END\n-----------\n\n')
return True
else:
return False
if __name__ == '__main__':
pass
| [
"logging.getLogger",
"pathlib.Path",
"datetime.datetime.now",
"xlwt3.Workbook",
"tempfile.TemporaryFile"
] | [((168, 192), 'logging.getLogger', 'logging.getLogger', (['"""xxx"""'], {}), "('xxx')\n", (185, 192), False, 'import logging\n'), ((837, 864), 'tempfile.TemporaryFile', 'tempfile.TemporaryFile', (['"""w"""'], {}), "('w')\n", (859, 864), False, 'import tempfile\n'), ((1039, 1055), 'xlwt3.Workbook', 'xlwt3.Workbook', ([], {}), '()\n', (1053, 1055), False, 'import xlwt3\n'), ((910, 933), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (931, 933), False, 'import datetime\n'), ((2604, 2627), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (2625, 2627), False, 'import datetime\n'), ((2022, 2038), 'pathlib.Path', 'Path', (['result_dir'], {}), '(result_dir)\n', (2026, 2038), False, 'from pathlib import Path\n'), ((2191, 2207), 'pathlib.Path', 'Path', (['result_dir'], {}), '(result_dir)\n', (2195, 2207), False, 'from pathlib import Path\n'), ((2450, 2473), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (2471, 2473), False, 'import datetime\n'), ((2062, 2069), 'pathlib.Path', 'Path', (['i'], {}), '(i)\n', (2066, 2069), False, 'from pathlib import Path\n'), ((2231, 2238), 'pathlib.Path', 'Path', (['i'], {}), '(i)\n', (2235, 2238), False, 'from pathlib import Path\n')] |
from setuptools import find_packages, setup
setup(
name='src',
packages=find_packages(),
version='0.1.0',
    description='Scripts for preparing data imports to OSM.',
author='<NAME>',
license='MIT',
)
| [
"setuptools.find_packages"
] | [((81, 96), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (94, 96), False, 'from setuptools import find_packages, setup\n')] |
"""Search for zero-probability (invalid) properties
(e.g., impossible differentials or zero-correlation hulls)
by modeling the search as a sequence of SMT problems.
"""
import collections
import enum
import functools
import itertools
import warnings
from pysmt import logics
from cascada.bitvector import core
from cascada.bitvector import operation
from cascada.bitvector import context
from cascada.bitvector import ssa as cascada_ssa
from cascada import abstractproperty
from cascada.primitives import blockcipher
from cascada.smt import pysmttypes
from cascada.smt import chsearch
from cascada.smt.wrappedchmodel import get_wrapped_chmodel, get_wrapped_cipher_chmodel # needed
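# shadow the builtin zip so every zip() below raises if its iterables have
# different lengths (the strict keyword requires Python 3.10+)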
zip = functools.partial(zip, strict=True)
PrintingMode = chsearch.PrintingMode
INCREMENT_NUM_ROUNDS = chsearch.INCREMENT_NUM_ROUNDS
class ActiveBitMode(enum.Enum):
"""Represent the subsets of bit-vectors available depending on
which bits are activated (set to 1) for ``find_next_invalidprop_activebitmode``.
Attributes:
Default: all bit-vectors
SingleBit: bit-vectors with up to one bit activated (zero included)
        MSBit: bit-vectors with up to the most significant bit activated (zero included)
Zero: the zero bit-vector
"""
Default = enum.auto()
SingleBit = enum.auto()
MSBit = enum.auto()
Zero = enum.auto()
def _generate_bitvectors(widths, total_num_active_bits, active_bits_mode):
"""Generate lists of bit-vectors.
Given ``widths`` as a list ``[w_1, ..., w_t]`` of t integers,
    this method generates all lists ``[bv_1, ..., bv_t]`` of t bit-vectors,
where:
* the `ActiveBitMode` of each ``bv_i`` is ``active_bits_mode``
* the sum of active bits of ``[bv_1, ..., bv_t]`` is ``total_num_active_bits``
* ``bv_i`` has width ``w_i``.
::
>>> list(_generate_bitvectors([2, 2], 1, ActiveBitMode.Default))
[[0b01, 0b00], [0b10, 0b00], [0b00, 0b01], [0b00, 0b10]]
>>> list(_generate_bitvectors([2, 2], 1, ActiveBitMode.SingleBit))
[[0b01, 0b00], [0b00, 0b01]]
>>> list(_generate_bitvectors([2, 2], 1, ActiveBitMode.MSBit))
[[0b10, 0b00], [0b00, 0b10]]
>>> list(_generate_bitvectors([2, 2], 0, ActiveBitMode.Zero))
[[0b00, 0b00]]
"""
if active_bits_mode == ActiveBitMode.Zero or total_num_active_bits == 0:
if total_num_active_bits != 0:
raise ValueError("total_num_active_bits != 0 but active_bits_mode=Zero")
yield [core.Constant(0, w) for w in widths]
elif active_bits_mode in [ActiveBitMode.SingleBit, ActiveBitMode.MSBit]:
for combination in itertools.combinations(range(len(widths)), total_num_active_bits):
if active_bits_mode == ActiveBitMode.MSBit:
iterables = [[w_i - 1] for i, w_i in enumerate(widths) if i in combination]
else:
# active_bits_mode == SingleBit
iterables = [range(w_i - 1) for i, w_i in enumerate(widths) if i in combination]
for w_combination in itertools.product(*iterables):
bv_list = []
counter_w_c = 0
for index_w, w in enumerate(widths):
if index_w in combination:
bv_list.append(core.Constant(1 << w_combination[counter_w_c], w))
counter_w_c += 1
else:
bv_list.append(core.Constant(0, w))
yield bv_list
elif active_bits_mode == ActiveBitMode.Default:
# Source: https://stackoverflow.com/a/10838990 and
# https://en.wikipedia.org/wiki/Combinatorial_number_system#Applications.
assert total_num_active_bits > 0
total_width = sum(widths)
n = total_width
k = total_num_active_bits
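        # next_combination is Gosper's hack: u isolates the lowest set bit of x,
        # and the returned value is the next larger integer with the same number
        # of set bits, so starting from the smallest k-bit pattern enumerates all
        # placements of k active bits within n positions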
def next_combination(x):
u = (x & -x)
v = u + x
return v + (((v ^ x) // u) >> 2)
x = (1 << k) - 1 # smallest number with k active bits
while (x >> n) == 0:
bv = core.Constant(x, n)
bv_list = []
sum_w = 0
for w in widths:
bv_list.append(bv[sum_w + w - 1:sum_w])
sum_w += w
yield bv_list
x = next_combination(x)
else:
raise ValueError("invalid active_bits_mode")
class InvalidPropFinder(chsearch.ChFinder):
"""Search for zero-probability (invalid) property pairs by modeling the search as a sequence of SMT problems.
Given a characteristic model
defined for a particular `Property` (e.g., `XorDiff` or `LinearMask`),
this class finds *universally-invalid* characteristics
following the characteristic model by modelling the search as a sequence
of SMT problems in the bit-vector theory.
A *universally-invalid* characteristic is a characteristic
where the characteristic input property :math:`\\alpha`
propagates to the characteristic output property :math:`\\beta`
with probability zero regardless of the intermediate properties
(i.e., for all assignments of the intermediate properties).
In other words, the input-output property pair
:math:`(\\alpha, \\beta)` has zero propagation probability.
.. note::
For the `Difference` (resp. `LinearMask`) property,
a universally-invalid characteristic is actually an impossible
differential (resp. a zero-correlation hull).
Search for universally-invalid algebraic characteristic is not supported.
Consider the SMT problem :math:`\Omega` of whether there exists a
valid characteristic with constant input property :math:`\\alpha`
and constant output property :math:`\\beta`
(and where the intermediate properties are not specified).
The main idea of the SMT-based search is that one can check whether
:math:`\\alpha` propagates to :math:`\\beta` with probability zero
    by checking whether :math:`\Omega` is unsatisfiable (UNSAT).
Note that only the validity constraints are needed to build :math:`\Omega`;
the weight constraints are ignored when searching for universally-invalid characteristics.
The initialization argument ``ch_model`` must be a subclass of
`abstractproperty.chmodel.ChModel` with up to one non-trivial transitions
(`abstractproperty.opmodel.OpModel` excluding `ModelIdentity`),
since a zero-probability characteristic with up to one non-trivial transitions
is a universally-invalid characteristic.
For a characteristic model with more than one non-trivial transitions,
the function `get_wrapped_chmodel` can be used to wrap the characteristic
model into an equivalent characteristic model with one non-trivial transition.
An `InvalidPropFinder` object is also an instance of `ChFinder` where
`assert_type` is `Validity` and with the given initialization arguments
``ch_model``, ``solver_name``, ``printing_mode``, ``filename``, ``solver_seed``
and ``env=env``. See also `ChFinder`.
Similar as `ChFinder`, the methods of `InvalidPropFinder` that search for
universally-invalid characteristics are Python `generator` functions,
returning an `iterator` that yields the universally-invalid characteristics
found in the search.
If initialization argument ``ch_model`` is a `abstractproperty.chmodel.ChModel`
(resp. `abstractproperty.chmodel.EncryptionChModel`),
then these methods yield
`abstractproperty.characteristic.Characteristic`
(resp. `abstractproperty.characteristic.EncryptionCharacteristic`) objects.
If the initialization argument ``check_universally_invalid_ch_found`` is ``True``,
all universally-invalid characteristics found in the search are checked by searching
for a valid characteristic with the same input and output property with
`ChFinder.find_next_ch`.
>>> # example of SMT problem of universally-invalid LinearMask-EncryptionCharacteristic of (wrapped) Speck32
>>> from cascada.linear.mask import LinearMask
>>> from cascada.linear.chmodel import EncryptionChModel
>>> from cascada.smt.invalidpropsearch import InvalidPropFinder
>>> from cascada.primitives import speck
>>> Speck32 = speck.get_Speck_instance(speck.SpeckInstance.speck_32_64)
>>> Speck32.set_num_rounds(2)
>>> wrapped_ch_model = get_wrapped_chmodel(EncryptionChModel(Speck32, LinearMask))
>>> invalid_prop_finder = InvalidPropFinder(wrapped_ch_model, "z3", solver_seed=0)
>>> invalid_prop_finder.formula_size()
133
>>> print(invalid_prop_finder.hrepr(full_repr=True)) # doctest: +NORMALIZE_WHITESPACE
; characteristic model assertions
assert (_mx0 == (mx9_out :: mx7_out)) &
((~((mx1 ^ (_mp0 >>> 7)) | (mx1 ^ mp1__0)) | _tmp20affb7ca27930ce775156bcc0ecaf20) == 0xffff) &
((_tmp20affb7ca27930ce775156bcc0ecaf20 ^ (_tmp20affb7ca27930ce775156bcc0ecaf20 >> 0x0001) ^
((mx1 ^ (_mp0 >>> 7) ^ mp1__0) >> 0x0001)) == 0x0000) &
(mx1 == _mk0) & (mx1 == mx2) & (((_mp1 ^ mp1__0) <<< 2) == mx2__0) & (((_mp1 ^ mp1__0) <<< 2) == mx4) &
((~((mx6 ^ ((mx2 ^ mx2__0) >>> 7)) | (mx6 ^ mx4__0)) | _tmp824d7e7c80d9889507eb4e5d5c7be280) == 0xffff) &
((_tmp824d7e7c80d9889507eb4e5d5c7be280 ^ (_tmp824d7e7c80d9889507eb4e5d5c7be280 >> 0x0001) ^
((mx6 ^ ((mx2 ^ mx2__0) >>> 7) ^ mx4__0) >> 0x0001)) == 0x0000) &
(mx6 == _mk1) & (mx6 == mx7) & (((mx4 ^ mx4__0) <<< 2) == mx7__0) &
(((mx4 ^ mx4__0) <<< 2) == mx9) & ((mx7 ^ mx7__0) == mx7_out) & (mx9 == mx9_out)
assert PropExtract_{·, 15, 0}(_mx0) == _mx1_out
assert PropExtract_{·, 31, 16}(_mx0) == _mx2_out
::
>>> # example of SMT problem of universally-invalid XorDiff-Characteristic of Speck32-KeySchedule
>>> from cascada.differential.difference import XorDiff
>>> from cascada.differential.chmodel import ChModel
>>> from cascada.smt.invalidpropsearch import InvalidPropFinder
>>> from cascada.primitives import speck
>>> Speck32_KS = speck.get_Speck_instance(speck.SpeckInstance.speck_32_64).key_schedule
>>> Speck32_KS.set_num_rounds(1)
>>> ch_model = ChModel(Speck32_KS, XorDiff, ["mk0", "mk1"])
>>> invalid_prop_finder = InvalidPropFinder(ch_model, "z3", solver_seed=0)
>>> invalid_prop_finder.formula_size()
42
>>> print(invalid_prop_finder.hrepr(full_repr=True)) # doctest: +NORMALIZE_WHITESPACE
; characteristic model assertions
assert ((~((mk0 >>> 7) << 0x0001) ^ (mk1 << 0x0001)) & (~((mk0 >>> 7) << 0x0001) ^ (dx1 << 0x0001)) &
((mk0 >>> 7) ^ mk1 ^ dx1 ^ ((mk0 >>> 7) << 0x0001))) == 0x0000
assert mk1 == mk1_out
assert ((mk1 <<< 2) ^ dx1) == dx3_out
"""
def __init__(self, ch_model, solver_name, check_universally_invalid_ch_found=True,
# initial_constraints=None, var_prop2ct_prop=None, exclude_zero_input_prop=None,
printing_mode=PrintingMode.Silent, filename=None,
solver_seed=None, env=None): # weight_prefix="w",
assert isinstance(ch_model, abstractproperty.chmodel.ChModel)
non_id_opmodels = []
for op_model in ch_model.assign_outprop2op_model.values():
# PropConcat/PropExtract don't create OpModel objects
if not isinstance(op_model, abstractproperty.opmodel.ModelIdentity):
non_id_opmodels.append(op_model)
if len(non_id_opmodels) >= 2:
raise ValueError("characteristic model has more than 1 OpModel (excluding Identity-based ones)"
f"\nnon-trivial OpModel ({len(non_id_opmodels)}) = {non_id_opmodels}")
super().__init__(
ch_model, assert_type=chsearch.ChModelAssertType.Validity, solver_name=solver_name,
# initial_constraints=initial_constraints, var_prop2ct_prop=var_prop2ct_prop,
raise_exception_missing_var=True,
printing_mode=printing_mode, filename=filename, solver_seed=solver_seed, env=env
)
self.check_universally_invalid_ch_found = check_universally_invalid_ch_found
# ch_model_* properties abstracted for InvalidCipherProp
@property
def _ch_model_input_prop(self):
return self.ch_model.input_prop
@property
def _ch_model_output_prop(self):
return self.ch_model.output_prop
@property
def _ch_model_assign_outprop2op_model(self):
return self.ch_model.assign_outprop2op_model
@property
def _ch_model_prop_label(self):
return self.ch_model._prop_label
def _get_uni_inv_ch(self, ct_inputs=None, ct_outputs=None, solution_var2ct=None):
"""Get the characteristic object from the constant input and output properties."""
if solution_var2ct is None:
assert ct_inputs is not None and ct_outputs is not None
solution_var2ct = collections.OrderedDict()
else:
assert ct_inputs is None and ct_outputs is None
if ct_inputs is not None:
for var_prop, ct in zip(self._ch_model_input_prop, ct_inputs):
solution_var2ct[var_prop.val] = ct
if ct_outputs is not None:
for var_prop, ct in zip(self._ch_model_output_prop, ct_outputs):
solution_var2ct[var_prop.val] = ct
for var_prop in itertools.chain(self._ch_model_input_prop, self._ch_model_output_prop):
assert var_prop.val in solution_var2ct
# get_properties_for_initialization finds all intermediate properties
# (from ct_inputs and starting from the beginning) up to
# the output property OUTP of the non-Identity transition.
# Since OUTP only depends on the output properties of the ch. model,
# OUTP is obtained through backward propagation using an SMT-solver
# (get_properties_for_initialization only does forward propagation)
constraints = []
for out_prop, op_model in self._ch_model_assign_outprop2op_model.items():
constraints.append(op_model.validity_constraint(out_prop))
extra_constraint = True
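        # conjoin equalities pinning the known (input/output) property variables
        # to their constant values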
for v, c in solution_var2ct.items():
extra_constraint &= operation.BvComp(v, c)
# # debugging
# print("\n_get_uni_inv_ch")
# print("ch model:", self.ch_model)
# if hasattr(self.ch_model, "_unwrapped_ch_model"):
# print("ch model unwrapped:", self.ch_model._unwrapped_ch_model)
# if hasattr(self.ch_model, "_unwrapped_cipher_ch_model"):
# print("ch model unwrapped:", self.ch_model._unwrapped_cipher_ch_model)
# print("ct_inputs:", ct_inputs)
# print("ct_outputs:", ct_outputs)
# print("solution_var2ct:", solution_var2ct)
# print("constraints:")
# for c in constraints:
# print("\t", c)
# print("extra_constraint:", extra_constraint)
# print()
#
chsearch.environment.push_env()
env = chsearch.environment.get_env()
psr = True if self.solver_name == "btor" else False
bv2pysmt = functools.partial(pysmttypes.bv2pysmt, env=env, parse_shifts_rotations=psr)
found_unique_extended_solution = False
for r in range(1, len(constraints) + 1): # r = num constraints to remove
for constraint_indices in itertools.combinations(range(len(constraints)), r):
and_constraint = True
with context.Simplification(False):
for i in range(len(constraints)):
if i not in constraint_indices:
and_constraint &= constraints[i]
and_constraint &= extra_constraint
pysmt_formula = bv2pysmt(and_constraint, boolean=True)
pysmt_model = env.factory.get_model(pysmt_formula, logic=logics.QF_BV)
if pysmt_model is None:
# # debugging
# print(f"_get_uni_inv_ch | no solution found without constraints {constraint_indices}")
#
continue
extended_solution_var2ct = pysmttypes.pysmt_model2bv_model(pysmt_model)
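                # uniqueness check: forbid the solution just found and ask the solver
                # again; if another assignment satisfies the remaining constraints, this
                # subset of removed constraints is not decisive, so try a different one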
exclude_last_solution = False
for model_var, model_val in extended_solution_var2ct.items():
exclude_last_solution |= ~operation.BvComp(model_var, model_val)
pysmt_formula = bv2pysmt(and_constraint & exclude_last_solution, boolean=True)
if env.factory.is_sat(pysmt_formula, logic=logics.QF_BV):
# # debugging
# second_sol = pysmttypes.pysmt_model2bv_model(env.factory.get_model(pysmt_formula, logic=logics.QF_BV))
# print(f"_get_uni_inv_ch | found 2 solutions without constraints {constraint_indices}: ",
# f"{extended_solution_var2ct}, {second_sol}")
#
continue
found_unique_extended_solution = True
break
if found_unique_extended_solution:
break
assert found_unique_extended_solution is True
if self.printing_mode != PrintingMode.Silent:
contradictions = []
for i, (out_prop, op_model) in enumerate(self._ch_model_assign_outprop2op_model.items()):
if i in constraint_indices:
contradictions.append((out_prop, op_model))
smart_print = chsearch._get_smart_print(self.filename)
smart_print(f"Contradiction found in transitions {contradictions}")
chsearch.environment.pop_env()
assert chsearch.environment.get_env() == self._env
for sol_var, sol_val in solution_var2ct.items():
assert extended_solution_var2ct[sol_var] == sol_val
# extra checks done in _pysmt_model2ch
return self._pysmt_model2ch(extended_solution_var2ct, is_pysmt_model=False, is_sat=False)
def _check(self, uni_inv_ch_found, external_var2ct=None):
assert isinstance(self.ch_model, abstractproperty.chmodel.ChModel)
assert self.ch_model == uni_inv_ch_found.ch_model
if hasattr(self.ch_model, "_unwrapped_ch_model"):
list_ch_model = [self.ch_model, self.ch_model._unwrapped_ch_model]
for v1, v2 in zip(
self.ch_model.external_var2prop.values(),
self.ch_model._unwrapped_ch_model.external_var2prop.values()
):
if isinstance(v1.val, core.Constant) or isinstance(v2.val, core.Constant):
assert v1 == v2
else:
list_ch_model = [self.ch_model]
for ch_model in list_ch_model:
var_prop2ct_prop = collections.OrderedDict()
for vp, cp in zip(ch_model.input_prop, uni_inv_ch_found.input_prop):
var_prop2ct_prop[vp] = cp
for vp, cp in zip(ch_model.output_prop, uni_inv_ch_found.output_prop):
var_prop2ct_prop[vp] = cp
if external_var2ct is not None:
for (var, prop), (other_var, other_ct) in zip(
ch_model.external_var2prop.items(), external_var2ct.items()
):
assert var == other_var
if isinstance(prop.val, core.Constant):
assert prop.val == other_ct
var_prop2ct_prop[ch_model.prop_type(var)] = ch_model.prop_type(other_ct)
ch_finder = chsearch.ChFinder(
ch_model, assert_type=self.assert_type, solver_name=self.solver_name,
var_prop2ct_prop=var_prop2ct_prop, raise_exception_missing_var=False,
printing_mode=self.printing_mode, filename=self.filename, solver_seed=self.solver_seed
)
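            # any characteristic found here is a valid one with the same fixed
            # input/output properties, contradicting universal invalidity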
for valid_ch_found in ch_finder.find_next_ch():
raise ValueError(
f"last characteristic found:"
f"\n - {uni_inv_ch_found}, {uni_inv_ch_found.ch_model} "
f"\nin the search is not universally-invalid; found compatible valid characteristic:"
f"\n - {valid_ch_found}"
f"\nChFinder:\n - ch_model: {ch_model}"
f"\n - var_prop2ct_prop: {var_prop2ct_prop}"
f"\n - assertions: {ch_finder.initial_constraints+list(ch_finder.chmodel_asserts)}")
del ch_finder
assert self._env == chsearch.environment.get_env()
def find_next_invalidprop_activebitmode(self, initial_num_active_bits, input_prop_activebitmode, output_prop_activebitmode):
"""Return an iterator that yields the universally-invalid characteristics found in the SMT-based search
with given `ActiveBitMode`.
        This method searches for universally-invalid characteristics using SMT solvers by checking
one-by-one all input and output properties with given `ActiveBitMode`.
        Given a particular pair of input and output properties :math:`(\\alpha, \\beta)`,
        the main subroutine of this method (herein called the *check subroutine*)
checks whether :math:`\\alpha`
propagates to :math:`\\beta` with probability zero by checking
with an SMT solver whether the SMT problem, of whether there exists
a valid characteristic with input property :math:`\\alpha` and output property
        :math:`\\beta`, is unsatisfiable (UNSAT).
If the problem is UNSAT, the universally-invalid
`abstractproperty.characteristic.Characteristic` object with
input and output properties :math:`(\\alpha, \\beta)`
is created and *yielded*.
The check subroutine is repeated for all input and output properties where
the `ActiveBitMode` of each word in the input (resp. output) property is
``input_prop_activebitmode`` (resp. ``output_prop_activebitmode``).
The search starts considering input and output properties where
the total number of active bits is ``initial_num_active_bits``,
and the total number of active bits is incremented when
all the input and output properties are checked.
>>> # example of search for universally-invalid LinearMask-EncryptionCharacteristic of (wrapped) Speck32
>>> from cascada.linear.mask import LinearMask
>>> from cascada.linear.chmodel import EncryptionChModel
>>> from cascada.smt.invalidpropsearch import InvalidPropFinder, ActiveBitMode
>>> from cascada.primitives import speck
>>> Speck32 = speck.get_Speck_instance(speck.SpeckInstance.speck_32_64)
>>> Speck32.set_num_rounds(2)
>>> wrapped_ch_model = get_wrapped_chmodel(EncryptionChModel(Speck32, LinearMask))
>>> invalid_prop_finder = InvalidPropFinder(wrapped_ch_model, "btor", solver_seed=0)
>>> inab, ipabm, opabm = 1, ActiveBitMode.MSBit, ActiveBitMode.MSBit
>>> for ch in invalid_prop_finder.find_next_invalidprop_activebitmode(inab, ipabm, opabm):
... print(ch.srepr())
Ch(w=Infinity, id=8000 0000, od=8000 0000)
Ch(w=Infinity, id=8000 0000, od=0000 8000)
Ch(w=Infinity, id=0000 8000, od=8000 0000)
Ch(w=Infinity, id=0000 8000, od=0000 8000)
Ch(w=Infinity, id=8000 0000, od=8000 8000)
Ch(w=Infinity, id=0000 8000, od=8000 8000)
Ch(w=Infinity, id=8000 8000, od=8000 0000)
Ch(w=Infinity, id=8000 8000, od=0000 8000)
Ch(w=Infinity, id=8000 8000, od=8000 8000)
>>> # example of SMT problem of universally-invalid XorDiff-Characteristic of Speck32-KeySchedule
>>> from cascada.differential.difference import XorDiff
>>> from cascada.differential.chmodel import ChModel
>>> Speck32_KS = speck.get_Speck_instance(speck.SpeckInstance.speck_32_64).key_schedule
>>> Speck32_KS.set_num_rounds(1)
>>> ch_model = ChModel(Speck32_KS, XorDiff, ["mk0", "mk1"])
>>> invalid_prop_finder = InvalidPropFinder(ch_model, "btor", solver_seed=0)
>>> inab, ipabm, opabm = 1, ActiveBitMode.SingleBit, ActiveBitMode.SingleBit
>>> ch = next(invalid_prop_finder.find_next_invalidprop_activebitmode(inab, ipabm, opabm))
>>> print(ch) # doctest: +NORMALIZE_WHITESPACE
Characteristic(ch_weight=inf, assignment_weights=[inf, inf, 0],
input_diff=[0x0001, 0x0000], output_diff=[0x0001, 0x0000],
assign_outdiff_list=[0x0000, 0x0001, 0x0000])
"""
smart_print = chsearch._get_smart_print(self.filename)
# initializing the solver
parse_shifts_rotations = True if self.solver_name == "btor" else False
bv2pysmt = functools.partial(
pysmttypes.bv2pysmt, env=self.env, parse_shifts_rotations=parse_shifts_rotations)
solver_kwargs = {}
if self.solver_seed is not None:
if self.solver_name == "btor":
solver_kwargs = {"solver_options": {"seed": int(self.solver_seed) % 2**32}} # btor seed uint32
else:
solver_kwargs = {"random_seed": self.solver_seed}
solver = self.env.factory.Solver(name=self.solver_name, logic=logics.QF_BV, **solver_kwargs)
assert not self.initial_constraints
for c in itertools.chain(self.initial_constraints, self.chmodel_asserts):
solver.add_assertion(bv2pysmt(c, boolean=True))
# setting max/min in/out/ num/mode active bit
in_abmode = input_prop_activebitmode # all variables using in/out
out_abmode = output_prop_activebitmode
if ActiveBitMode.Zero not in [in_abmode, out_abmode] and initial_num_active_bits == 0:
initial_num_active_bits = 1
in_widths = [p.val.width for p in self._ch_model_input_prop]
out_widths = [p.val.width for p in self._ch_model_output_prop]
def abmode2max_min_num_active_bits(my_abmode, my_list_widths):
my_min_num_active_bits = 1
if my_abmode == ActiveBitMode.Default:
my_max_num_active_bits = sum(my_list_widths)
elif my_abmode in [ActiveBitMode.SingleBit, ActiveBitMode.MSBit]:
my_max_num_active_bits = len(my_list_widths)
elif my_abmode == ActiveBitMode.Zero:
my_max_num_active_bits = my_min_num_active_bits = 0
else:
raise ValueError("invalid mode")
return my_max_num_active_bits, my_min_num_active_bits
in_max_num_ab, in_min_num_ab = abmode2max_min_num_active_bits(in_abmode, in_widths)
out_max_num_ab, out_min_num_ab = abmode2max_min_num_active_bits(out_abmode, out_widths)
# max_in_out_active_bits = max_in_active_bits + max_out_active_bits
prop_label = self._ch_model_prop_label # e.g., diff, mask
#
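        # enumerate the total number of input/output active bits in increasing order;
        # for each split between input and output active bits, fix the input and
        # output properties to constant values and check whether the validity
        # assertions become unsatisfiable (i.e., a universally-invalid characteristic)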
for in_out_num_ab in range(initial_num_active_bits, in_max_num_ab + out_max_num_ab + 1):
for in_num_ab in range(in_min_num_ab, in_max_num_ab + 1):
out_num_ab = in_out_num_ab - in_num_ab
if out_num_ab < out_min_num_ab or out_num_ab > out_max_num_ab:
continue
if self.printing_mode == PrintingMode.Debug:
smart_print(f"Finding input/output {prop_label} with {in_num_ab} input"
f" and {out_num_ab} output active bits", prepend_time=True)
for in_ct_words in _generate_bitvectors(in_widths, in_num_ab, in_abmode):
solver.push()
for var_prop, ct in zip(self._ch_model_input_prop, in_ct_words):
constraint = operation.BvComp(var_prop.val, ct)
solver.add_assertion(bv2pysmt(constraint, boolean=True))
if self.printing_mode == PrintingMode.Debug:
smart_print(f"Fixed input {prop_label} to {in_ct_words}", prepend_time=True)
for out_ct_words in _generate_bitvectors(out_widths, out_num_ab, out_abmode):
solver.push()
for var_prop, ct in zip(self._ch_model_output_prop, out_ct_words):
constraint = operation.BvComp(var_prop.val, ct)
solver.add_assertion(bv2pysmt(constraint, boolean=True))
if self.printing_mode == PrintingMode.Debug:
smart_print(f"Fixed output {prop_label} to {out_ct_words}", prepend_time=True)
satisfiable = solver.solve()
if not satisfiable:
last_ch_found = self._get_uni_inv_ch(in_ct_words, out_ct_words)
if self.check_universally_invalid_ch_found:
self._check(last_ch_found)
yield last_ch_found
solver.pop()
solver.pop()
solver.exit()
def find_next_invalidprop_miss_in_the_middle(
self, ch_model_E0, ch_model_E2,
ch_model_E=None, ch_model_external_E=None,
exclude_zero_input_prop_E0=True,
exclude_zero_input_prop_E2=True,
exclude_zero_input_prop_external_E=None,
):
"""Return an iterator that yields the universally-invalid characteristics found in the SMT+MitM-based search.
This method searches for universally-invalid characteristic using SMT problems
and the miss-in-the-middle approach.
Let :math:`E` be a function split into three functions
:math:`E = E_2 \circ E_1 \circ E_0`.
Let :math:`((p_0, p_1), (p_2, p_3))` denote a *partial* characteristic
over :math:`E`, that is, a characteristic over :math:`E` where:
* :math:`(p_0, p_1)` are the non-zero input and output properties of a
characteristic with probability 1 over :math:`E_0`
* :math:`(p_2, p_3)` are the non-zero input and output properties of a
characteristic with probability 1 over :math:`E_2`
* no relation is imposed between :math:`(p_1, p_2)`, the input and output
properties of :math:`E_1`.
The underlying function of ``self.ch_model`` corresponds to :math:`E_1`,
the underlying function of the `abstractproperty.chmodel.ChModel`
``ch_model_E0`` corresponds to :math:`E_0`,
and the underlying function of the `abstractproperty.chmodel.ChModel`
``ch_model_E2`` corresponds to :math:`E_2`.
The underlying function of the `abstractproperty.chmodel.ChModel`
``ch_model_E`` corresponds to :math:`E`,
but this argument is optional (more on that later).
        By default the input properties of ``ch_model_E0`` and
        ``ch_model_E2`` are excluded from being zero, but this can be
        changed with the optional arguments ``exclude_zero_input_prop_*``.
.. note::
This method requires that for any probability-one characteristic
over :math:`E_0` with input-output property :math:`(p_0, p_1)`,
there is no other probability-one characteristic over :math:`E_0`
with input property :math:`p_0` but output property :math:`\\neq p_1`.
Similarly, for any probability-one characteristic
over :math:`E_2` with input-output property :math:`(p_2, p_3)`,
there is no other probability-one characteristic over :math:`E_2`
            with output property :math:`p_3` but input property :math:`\\neq p_2`.
If :math:`E_0` and :math:`E_2` are permutations, then these two
requirements are satisfied for `Difference` and `LinearMask`
properties.
If the optional argument ``ch_model_external_E`` is given as a
`abstractproperty.chmodel.ChModel` with input and output properties
:math:`(q_0, q_1)`, the definition of a partial characteristic is
        extended to :math:`((p_0, p_1), (p_2, p_3), (q_0, q_1))`
such that :math:`(q_0, q_1)` are the input and output properties of a
characteristic with probability 1 where :math:`q_1` is the list of
external variables of :math:`E` (see `SSA`).
If ``ch_model_external_E`` is given,
the argument ``exclude_zero_input_prop_external_E``
        that determines whether to exclude the zero :math:`q_0`
must also be given.
.. note::
The functions :math:`(E_0, E_1, E_2)` can be easily obtained
from a `RoundBasedFunction` :math:`E` that includes
`add_round_outputs` calls in its ``eval``.
For example, obtaining :math:`E_0` from the round ``ns`` to
``ns+ne0`` (``ns`` denoting the initial number of skipped rounds),
:math:`E_1` as the next ``ne1`` rounds, and :math:`E_2`
as the next ``ne2`` rounds can be done as follows:
.. code:: python
[...]
ns, ne0, ne1, ne2 = ...
MyRoundBasedFunction.set_num_rounds(ns+ne0+ne1+ne2)
ch_model_E = ChModel(MyRoundBasedFunction, ...)
                rs = ch_model_E.get_round_separators()
# the end of the i-th round (i=1,2,...) is rs[i-1]
e0_rs, e1_rs = rs[ns+ne0-1], rs[ns+ne0+ne1-1]
ch_model_E0, ch_model_E1, ch_model_E2 = ch_model_E.split([e0_rs, e1_rs])
                ch_model_E1 = get_wrapped_chmodel(ch_model_E1)  # in case ch_model_E1 has 2+ non-trivial transitions
invalid_prop_finder = InvalidPropFinder(ch_model_E1, ...)
invalid_prop_finder.find_next_invalidprop_miss_in_the_middle(
ch_model_E0=ch_model_E0, ch_model_E2=ch_model_E2, ch_model_E=ch_model_E)
Alternatively, one can use the function `round_based_invalidprop_search`
which automates the generation of :math:`(E_0, E_1, E_2)`
and applies this method iteratively on the number of rounds.
This method finds universally-invalid characteristics by searching for all
partial characteristics over :math:`E` using `ChFinder.find_next_ch`,
and for each partial characteristic we apply the *check subroutine*
to check whether :math:`p_1` propagates to :math:`p_2` with
zero probability over :math:`E_1`.
The check subroutine is explained in `find_next_invalidprop_activebitmode`.
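
        Schematically, the search can be pictured as follows (an illustrative
        sketch, not the actual implementation; ``probability_one_characteristics``
        and ``exists_valid_characteristic`` are placeholder names):

        .. code:: python

            for (p0, p1) in probability_one_characteristics(E0):
                for (p2, p3) in probability_one_characteristics(E2):
                    if not exists_valid_characteristic(E1, input_prop=p1, output_prop=p2):
                        # (p1, p2) has zero propagation probability over E1
                        yield (p0, p1), (p1, p2), (p2, p3)
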
For each partial characteristic :math:`((p_0, p_1), (p_2, p_3))` found,
if the check subroutine finds that :math:`p_1` propagates to :math:`p_2`
with zero probability, a tuple of 3
`abstractproperty.characteristic.Characteristic` is *yielded*:
* the first characteristic corresponds to the characteristic with probability 1
over :math:`E_0` with input and output properties :math:`(p_0, p_1)`
* the second characteristic corresponds to the universally-invalid characteristic over :math:`E_1`
with input and output properties :math:`(p_1, p_2)`
* the third characteristic corresponds to the characteristic with probability 1
over :math:`E_2` with input and output properties :math:`(p_2, p_3)`
Since the first and third characteristics have probability one,
the concatenation of these three characteristics is a universally-invalid
        characteristic over :math:`E` (regardless of the external variables of :math:`E`).
If the optional argument ``ch_model_external_E`` is given,
        instead a tuple of 4 characteristics is yielded; the 4-th
characteristic corresponds to the characteristic with probability 1
with input and output properties :math:`(q_0, q_1)`.
In this case, the concatenation of the first 3 characteristics is a universally-invalid
characteristic over :math:`E` *for* the external properties
given by the outputs of the 4-th characteristic.
If the initialization argument ``check_universally_invalid_ch_found`` is ``True``,
all universally-invalid characteristics found over :math:`E_1` in the search
are checked by searching for a valid characteristic with the same
input and output property with `ChFinder.find_next_ch`.
In addition, if the optional argument ``ch_model_E`` is given,
then the universally-invalid characteristic over :math:`E` (the concatenation
        of the characteristics found over :math:`E_0`, :math:`E_1`
and :math:`E_2`) is also checked in a similar way.
>>> # example of search for universally-invalid LinearMask-EncryptionCharacteristic of (wrapped) Speck32
>>> from cascada.linear.mask import LinearMask
>>> from cascada.linear.chmodel import EncryptionChModel
>>> from cascada.smt.invalidpropsearch import InvalidPropFinder
>>> from cascada.primitives import speck
>>> Speck32 = speck.get_Speck_instance(speck.SpeckInstance.speck_32_64)
>>> Speck32.set_num_rounds(3)
>>> ch_model_E = EncryptionChModel(Speck32, LinearMask)
>>> ch_model_E0, ch_model_E1, ch_model_E2 = ch_model_E.split(ch_model_E.get_round_separators())
>>> ch_model_E1 = get_wrapped_chmodel(ch_model_E1)
>>> invalid_prop_finder = InvalidPropFinder(ch_model_E1, "btor", solver_seed=0)
>>> tuple_iterator = invalid_prop_finder.find_next_invalidprop_miss_in_the_middle(ch_model_E0, ch_model_E2)
>>> for i, (pr1_ch_E0, uni_inv_ch_E1, pr1_ch_E2) in enumerate(tuple_iterator):
... print(pr1_ch_E0.srepr(), uni_inv_ch_E1.srepr(), pr1_ch_E2.srepr())
... if i == 2: break
Ch(w=0, id=0000 0001, od=0004 0004) Ch(w=Infinity, id=0004 0004, od=0000 0001) Ch(w=0, id=0000 0001, od=0004 0004)
Ch(w=0, id=0000 0001, od=0004 0004) Ch(w=Infinity, id=0004 0004, od=0080 e001) Ch(w=0, id=0080 e001, od=8002 8003)
Ch(w=0, id=0000 0001, od=0004 0004) Ch(w=Infinity, id=0004 0004, od=0080 f001) Ch(w=0, id=0080 f001, od=c002 c003)
>>> # example of SMT problem of universally-invalid XorDiff-Characteristic of Speck32-KeySchedule
>>> from cascada.differential.difference import XorDiff
>>> from cascada.differential.chmodel import ChModel
>>> Speck32_KS = speck.get_Speck_instance(speck.SpeckInstance.speck_32_64).key_schedule
>>> Speck32_KS.set_num_rounds(3)
>>> ch_model_E = ChModel(Speck32_KS, XorDiff, ["mk0", "mk1", "mk2", "mk3"])
>>> ch_model_E0, ch_model_E1, ch_model_E2 = ch_model_E.split(ch_model_E.get_round_separators())
>>> invalid_prop_finder = InvalidPropFinder(ch_model_E1, "btor", solver_seed=0)
>>> ti = invalid_prop_finder.find_next_invalidprop_miss_in_the_middle(ch_model_E0, ch_model_E2, ch_model_E=ch_model_E)
>>> pr1_ch_E0, uni_inv_ch_E1, pr1_ch_E2 = next(ti)
>>> print(pr1_ch_E0) # doctest: +NORMALIZE_WHITESPACE
Characteristic(ch_weight=0, assignment_weights=[0, 0, 0, 0, 0],
input_diff=[0x0001, 0x0001, 0x0000, 0x0000], output_diff=[0x0001, 0x0000, 0x0001, 0x0000],
assign_outdiff_list=[0x0000, 0x0001, 0x0000, 0x0001, 0x0000])
>>> print(uni_inv_ch_E1) # doctest: +NORMALIZE_WHITESPACE
Characteristic(ch_weight=inf, assignment_weights=[inf, inf, 0, inf, inf],
input_diff=[0x0001, 0x0000, 0x0001, 0x0000], output_diff=[0x0000, 0x8000, 0x0001, 0x0001],
assign_outdiff_list=[0x8000, 0x0000, 0x8000, 0x0001, 0x0001])
>>> print(pr1_ch_E2) # doctest: +NORMALIZE_WHITESPACE
Characteristic(ch_weight=0, assignment_weights=[0, 0, 0, 0, 0],
input_diff=[0x0000, 0x8000, 0x0001, 0x0001], output_diff=[0x0001, 0x0001, 0x8000, 0x8002],
assign_outdiff_list=[0x8000, 0x0001, 0x0001, 0x8000, 0x8002])
"""
# 0. Preliminary checks
assert not (self.check_universally_invalid_ch_found is False and ch_model_E is not None)
assert not (ch_model_external_E is not None and exclude_zero_input_prop_external_E is None)
# no cipher ch model allowed (no need to use self._ch_model_*)
list_aux_ch_model = [self.ch_model, ch_model_E0, ch_model_E2]
list_aux_prone_ch_model = [ch_model_E0, ch_model_E2]
if ch_model_E is not None:
list_aux_ch_model.append(ch_model_E)
if ch_model_external_E is not None:
list_aux_ch_model.append(ch_model_external_E)
list_aux_prone_ch_model.append(ch_model_external_E)
for aux_ch_model in list_aux_ch_model:
if not isinstance(aux_ch_model, abstractproperty.chmodel.ChModel):
raise ValueError("found non-ChModel input")
for i, aux_prone_ch_model in enumerate(list_aux_prone_ch_model):
if i == 0:
name_prone_ch_model = "ch_model_E0"
elif i == 1:
name_prone_ch_model = "ch_model_E2"
else:
assert i == 2
name_prone_ch_model = "ch_model_external_E"
if aux_prone_ch_model.pr_one_assertions() == (core.Constant(0, 1), ):
raise ValueError(f"{name_prone_ch_model}.pr_one_assertions() == False\n{aux_prone_ch_model}")
if aux_prone_ch_model.max_weight(truncate=True) == 0:
warnings.warn(f"{name_prone_ch_model} might contain too many characteristics with probability 1 "
f"since {name_prone_ch_model}.max_weight() is 0 \n{aux_prone_ch_model}")
if ch_model_E0.func.output_widths != self.ch_model.func.input_widths:
            raise ValueError(f"output widths of ch_model_E0 {ch_model_E0.func.output_widths}"
f" != {self.ch_model.func.input_widths} input widths of self.ch_model")
if ch_model_E2.func.input_widths != self.ch_model.func.output_widths:
raise ValueError(f"input widths of ch_model_E2 {ch_model_E2.func.input_widths}"
f" != {self.ch_model.func.output_widths} output widths of self.ch_model")
# for all Pr.1 ch over E0, there must be a unique output property for each input property
# for all Pr.1 ch over E2, there must be a unique input property for each output property
from cascada.differential.difference import Difference
from cascada.linear.mask import LinearMask
if issubclass(self.ch_model.prop_type, Difference):
# for differentials with Pr. 1, an input property propagates to a unique output property
# E0 automatically valid, E2 needs an inverse
sum_iw, sum_ow = sum(ch_model_E2.func.input_widths), sum(ch_model_E2.func.output_widths)
if sum_iw != sum_ow:
                raise ValueError("with the Difference property, E2 needs to be a permutation,"
                                 f" but input size = {sum_iw} != {sum_ow} = output size")
pass
if issubclass(self.ch_model.prop_type, LinearMask):
# for hulls with Pr. 1, an output property propagates (backwards) to a unique input property
# E2 automatically valid, E0 needs an inverse
sum_iw, sum_ow = sum(ch_model_E0.func.input_widths), sum(ch_model_E0.func.output_widths)
if sum_iw != sum_ow:
                raise ValueError("with the LinearMask property, E0 needs to be a permutation,"
                                 f" but input size = {sum_iw} != {sum_ow} = output size")
if ch_model_external_E is not None:
external_props_E0 = set(ch_model_E0.prop_type(v) for v in ch_model_E0.external_var2prop)
external_props_E1 = set(self.ch_model.prop_type(v) for v in self.ch_model.external_var2prop)
external_props_E2 = set(ch_model_E2.prop_type(v) for v in ch_model_E2.external_var2prop)
output_props_external_E = set(ch_model_external_E.output_prop)
if any(isinstance(p.val, core.Constant) for p in self.ch_model.external_var2prop.values()):
raise ValueError(f"ch_model_external_E contains a constant external property"
f"\nch_model_external_E: {ch_model_external_E}")
if not external_props_E1.issubset(output_props_external_E):
raise ValueError(f"E1 contains an external variable not included in ch_model_external_E outputs"
f"\nch. model of E1: {self.ch_model}\nch_model_external_E: {ch_model_external_E}")
external_props_E0_E1_E2 = external_props_E0 | external_props_E1 | external_props_E2
if not set(output_props_external_E).issubset(external_props_E0_E1_E2):
                raise ValueError(f"ch_model_external_E contains an output that is not an external property of E"
                                 f"\nch_model_external_E: {ch_model_external_E}\nexternal properties of E: {external_props_E0_E1_E2}")
# 1. Initialization of the ChFinder objects
# zero input prop excluded by default in E0 since ch_model_E
# with input/output = (0, non-zero) is always uni-inv for *Diff and LinearMask
# zero input prop also excluded by default in E2 since (non-zero, 0)
# is always uni-inv for permutations with either *Diff or LinearMask
chfinder_E0 = chsearch.ChFinder(
ch_model_E0, chsearch.ChModelAssertType.ProbabilityOne, self.solver_name,
exclude_zero_input_prop=exclude_zero_input_prop_E0,
raise_exception_missing_var=False,
printing_mode=self.printing_mode, filename=self.filename,
solver_seed=self.solver_seed, env=self.env
)
# don't delete chfinder_E2 to avoid destructing shared env
chfinder_E2 = chsearch.ChFinder(
ch_model_E2, chsearch.ChModelAssertType.ProbabilityOne, self.solver_name,
exclude_zero_input_prop=exclude_zero_input_prop_E2,
raise_exception_missing_var=False,
printing_mode=self.printing_mode, filename=self.filename,
solver_seed=self.solver_seed, env=self.env
)
if ch_model_external_E is not None:
chfinder_external_E = chsearch.ChFinder(
ch_model_external_E, chsearch.ChModelAssertType.ProbabilityOne, self.solver_name,
exclude_zero_input_prop=exclude_zero_input_prop_external_E,
raise_exception_missing_var=False,
printing_mode=self.printing_mode, filename=self.filename,
solver_seed=self.solver_seed, env=self.env
)
# 2. Initialization of the solver for the universally-invalid ch E1
bv2pysmt_E1 = functools.partial(
pysmttypes.bv2pysmt, env=self.env,
parse_shifts_rotations=True if self.solver_name == "btor" else False)
solver_E1_kwargs = {}
if self.solver_seed is not None:
if self.solver_name == "btor":
solver_E1_kwargs = {"solver_options": {"seed": int(self.solver_seed) % 2**32}} # btor seed uint32
else:
solver_E1_kwargs = {"random_seed": self.solver_seed}
solver_E1 = self.env.factory.Solver(name=self.solver_name, logic=logics.QF_BV, **solver_E1_kwargs)
assert not self.initial_constraints
for c in itertools.chain(self.initial_constraints, self.chmodel_asserts):
solver_E1.add_assertion(bv2pysmt_E1(c, boolean=True))
# 3. Auxiliary functions
stored_prone_ch_assignment_E0 = []
stored_prone_ch_assignment_E2 = []
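        # the probability-one characteristics over E0 (resp. E2) are enumerated once
        # and cached when no external characteristic model is given; otherwise, the
        # constant values of the external variables are added as temporary initial
        # constraints of the underlying ChFinder objects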
def get_next_prone_ch_assignment_E0(my_var2ct):
if len(stored_prone_ch_assignment_E0) > 0:
for my_prone_ch_assignment in stored_prone_ch_assignment_E0:
yield my_prone_ch_assignment
else:
if my_var2ct is not None:
original_initial_constraints = chfinder_E0.initial_constraints[:]
for ext_v in chfinder_E0.ch_model.external_var2prop:
chfinder_E0.initial_constraints.append(operation.BvComp(ext_v, my_var2ct[ext_v]))
for my_prone_ch_assignment in chfinder_E0.find_next_ch(yield_assignment=True):
if my_var2ct is None:
stored_prone_ch_assignment_E0.append(my_prone_ch_assignment)
yield my_prone_ch_assignment
if my_var2ct is not None:
chfinder_E0.initial_constraints = original_initial_constraints
def get_next_prone_ch_assignment_E2(my_var2ct):
if len(stored_prone_ch_assignment_E2) > 0:
for my_prone_ch_assignment in stored_prone_ch_assignment_E2:
yield my_prone_ch_assignment
else:
if my_var2ct is not None:
original_initial_constraints = chfinder_E2.initial_constraints[:]
for ext_v in chfinder_E2.ch_model.external_var2prop:
chfinder_E2.initial_constraints.append(operation.BvComp(ext_v, my_var2ct[ext_v]))
for my_prone_ch_assignment in chfinder_E2.find_next_ch(yield_assignment=True):
if my_var2ct is None:
stored_prone_ch_assignment_E2.append(my_prone_ch_assignment)
yield my_prone_ch_assignment
if my_var2ct is not None:
chfinder_E2.initial_constraints = original_initial_constraints
def get_next_prone_ch_assignment_external_E():
if ch_model_external_E is None:
yield None
else:
for my_prone_ch_assignment in chfinder_external_E.find_next_ch(yield_assignment=True):
yield my_prone_ch_assignment
def check_concatenated_ch(my_prone_ch_E0, my_uni_inv_ch_E1, my_prone_ch_E2, my_prone_ch_external_E):
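            # fix the input (resp. output) property of E to the input property of the
            # E0 characteristic (resp. the output property of the E2 characteristic),
            # optionally fix the external properties of E to the outputs of the
            # external-E characteristic, and search for a valid characteristic over E;
            # finding one means the concatenation is not universally-invalid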
my_var_prop2ct_prop_E = collections.OrderedDict()
for my_vp_E, my_cp_E0 in zip(ch_model_E.input_prop, my_prone_ch_E0.input_prop):
my_var_prop2ct_prop_E[my_vp_E] = my_cp_E0
for my_vp_E, my_cp_E2 in zip(ch_model_E.output_prop, my_prone_ch_E2.output_prop):
my_var_prop2ct_prop_E[my_vp_E] = my_cp_E2
my_chfinder_E = chsearch.ChFinder(
ch_model_E, assert_type=self.assert_type, solver_name=self.solver_name,
var_prop2ct_prop=my_var_prop2ct_prop_E, raise_exception_missing_var=False,
printing_mode=self.printing_mode, filename=self.filename, solver_seed=self.solver_seed
)
if ch_model_external_E is not None:
for vp_external_E, cp_external_E in zip(ch_model_external_E.output_prop, my_prone_ch_external_E.output_prop):
if vp_external_E.val in my_chfinder_E._vars_in_constraints:
my_chfinder_E.initial_constraints.append(operation.BvComp(vp_external_E.val, cp_external_E.val))
for valid_ch_found_E in my_chfinder_E.find_next_ch():
if ch_model_external_E is not None:
aux_str = f"\n - prone_ch_external_E: {my_prone_ch_external_E}, " \
f"{my_prone_ch_external_E.ch_model}"
else:
aux_str = ""
raise ValueError(
"the concatenation of the last characteristic tuple found,"
f"\n - prone_ch_E0: {my_prone_ch_E0}, {my_prone_ch_E0.ch_model}"
f"\n - uni_inv_ch_E1: {my_uni_inv_ch_E1}, {my_uni_inv_ch_E1.ch_model}"
f"\n - prone_ch_E2: {my_prone_ch_E2}, {my_prone_ch_E2.ch_model}{aux_str}"
f"\n - ch_model E: {ch_model_E}\n - var_prop2ct_prop: {my_var_prop2ct_prop_E}"
f"\n - ch_finder E: {my_chfinder_E.initial_constraints + list(my_chfinder_E.chmodel_asserts)},"
f"\n is not universally-invalid (found compatible valid characteristic over E {valid_ch_found_E})")
del my_chfinder_E
assert self._env == chsearch.environment.get_env()
# 4. Search for probability-one characteristics
smart_print = chsearch._get_smart_print(self.filename)
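        # nested search: for each probability-one characteristic over external E (if
        # given), enumerate the probability-one characteristics over E0 and E2; for
        # each pair, fix the input/output properties of E1 to the corresponding
        # constant values and check whether the validity assertions of E1 become
        # unsatisfiable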
for prone_ch_assignment_external_E in get_next_prone_ch_assignment_external_E():
assert (ch_model_external_E is None) == (prone_ch_assignment_external_E is None)
if ch_model_external_E is None:
aux_str_E = "", ""
else:
aux_str_E = " (and external E)", f", {prone_ch_assignment_external_E}"
output_var2ct_external_E = collections.OrderedDict()
for out_var_eE in ch_model_external_E.ssa.output_vars:
ct_val_eE = ch_model_external_E.var2prop[out_var_eE].val.xreplace(prone_ch_assignment_external_E)
assert isinstance(ct_val_eE, core.Constant)
output_var2ct_external_E[out_var_eE] = ct_val_eE
constraint_for_E1_from_external_E = True
external_var2ct_E1 = collections.OrderedDict()
for ext_var_E1 in self.ch_model.external_var2prop:
constraint_for_E1_from_external_E &= operation.BvComp(ext_var_E1, output_var2ct_external_E[ext_var_E1])
external_var2ct_E1[ext_var_E1] = output_var2ct_external_E[ext_var_E1]
for prone_ch_assignment_E0 in get_next_prone_ch_assignment_E0(
None if ch_model_external_E is None else output_var2ct_external_E):
ct_outputs_E0 = []
for out_var_E0 in ch_model_E0.ssa.output_vars:
ct_val_E0 = ch_model_E0.var2prop[out_var_E0].val.xreplace(prone_ch_assignment_E0)
assert isinstance(ct_val_E0, core.Constant)
ct_outputs_E0.append(ct_val_E0)
for prone_ch_assignment_E2 in get_next_prone_ch_assignment_E2(
None if ch_model_external_E is None else output_var2ct_external_E):
if self.printing_mode == PrintingMode.Debug:
smart_print(f"Found probability-one characteristics over E0 and E2{aux_str_E[0]}: "
f"{prone_ch_assignment_E0}, {prone_ch_assignment_E2}{aux_str_E[1]}", prepend_time=True)
ct_inputs_E2 = []
for in_var_E2 in ch_model_E2.ssa.input_vars:
ct_val_E2 = ch_model_E2.var2prop[in_var_E2].val.xreplace(prone_ch_assignment_E2)
assert isinstance(ct_val_E2, core.Constant)
ct_inputs_E2.append(ct_val_E2)
constraint_for_E1 = True if ch_model_external_E is None else constraint_for_E1_from_external_E
solution_var2ct_E1 = collections.OrderedDict() if ch_model_external_E is None else external_var2ct_E1.copy()
for var_prop_E1, ct_val_E0 in zip(self.ch_model.input_prop, ct_outputs_E0):
constraint_for_E1 &= operation.BvComp(var_prop_E1.val, ct_val_E0)
solution_var2ct_E1[var_prop_E1.val] = ct_val_E0
for var_prop_E1, ct_val_E2 in zip(self.ch_model.output_prop, ct_inputs_E2):
constraint_for_E1 &= operation.BvComp(var_prop_E1.val, ct_val_E2)
solution_var2ct_E1[var_prop_E1.val] = ct_val_E2
# # debugging
# print("\nfind_next_invalidprop_miss_in_the_middle")
# print("ch_model_E0", ch_model_E0)
# print("ch model E1", self.ch_model)
# if hasattr(self.ch_model, "_unwrapped_ch_model"):
# print("unwrapped ch model E1", self.ch_model._unwrapped_ch_model)
# print("ch_model_E2", ch_model_E2)
# if ch_model_external_E:
# print("ch_model_external_E", ch_model_external_E)
# if ch_model_E:
# print("ch_model_E", ch_model_E)
# print("self.chmodel_asserts:", self.chmodel_asserts)
# print("output_var2ct_external_E:", output_var2ct_external_E)
# print("external_var2ct_E1:", external_var2ct_E1)
# print("constraint_for_E1:", constraint_for_E1, "\n")
#
if not solver_E1.solve([bv2pysmt_E1(constraint_for_E1, boolean=True)]):
uni_inv_ch_E1 = self._get_uni_inv_ch(solution_var2ct=solution_var2ct_E1)
prone_ch_E0 = chfinder_E0._pysmt_model2ch(prone_ch_assignment_E0, is_pysmt_model=False)
prone_ch_E2 = chfinder_E2._pysmt_model2ch(prone_ch_assignment_E2, is_pysmt_model=False)
assert prone_ch_E0.ch_weight == 0, f"{prone_ch_E0}"
assert prone_ch_E2.ch_weight == 0, f"{prone_ch_E2}"
if ch_model_external_E is not None:
prone_ch_external_E = chfinder_external_E._pysmt_model2ch(prone_ch_assignment_external_E, is_pysmt_model=False)
assert prone_ch_external_E.ch_weight == 0, f"{prone_ch_external_E}"
if self.check_universally_invalid_ch_found:
self._check(uni_inv_ch_E1, external_var2ct=None if ch_model_external_E is None else external_var2ct_E1)
if ch_model_E is not None:
check_concatenated_ch(prone_ch_E0, uni_inv_ch_E1, prone_ch_E2,
None if ch_model_external_E is None else prone_ch_external_E)
if ch_model_external_E is not None:
yield prone_ch_E0, uni_inv_ch_E1, prone_ch_E2, prone_ch_external_E
else:
yield prone_ch_E0, uni_inv_ch_E1, prone_ch_E2
else:
# no pr-one ch. found for E2, no need to find another E0
break
solver_E1.exit()
def find_next_invalidprop_quantified_logic(self):
"""Return an iterator that yields the universally-invalid characteristics found in the quantified SMT-based search.
This method searches for universally-invalid characteristic using SMT problems
in the quantified bit-vector logic (with the *ForAll* quantifier).
Let :math:`P(\\alpha, \gamma_1, \dots, \gamma_t, \\beta)` be the
underlying bit-vector formula of the decision problem
of whether there exists a characteristic following the
characteristic model ``ch_model`` with non-zero probability,
where :math:`(\\alpha, \\beta)` is the input and output properties
and :math:`(\gamma_1, \dots, \gamma_t)` are the intermediate properties.
First, this method creates the decision problem of whether there exists
an assignment of the input and output properties :math:`(\\alpha, \\beta)`
such that for all intermediate properties :math:`(\gamma_1, \dots, \gamma_t)`
the negation of :math:`P` is True; in other words, the decision problem
given by the underlying quantified formula
:math:`\exists \\alpha, \\beta, \\forall \gamma_1, \dots, \gamma_t : \ \\neg
P(\\alpha, \gamma_1, \dots, \gamma_t, \\beta)`
If the SMT solver finds the first problem satisfiable,
an assignment of the input and output properties :math:`(\\alpha, \\beta)`
that makes :math:`\\neg P(\\alpha, \gamma_1, \dots, \gamma_t, \\beta) = True` is
obtained, and a universally-invalid `abstractproperty.characteristic.Characteristic`
object is created and *yielded*.
Afterwards, an additional constraint is added to the SMT problem
to exclude the characteristic yielded and this procedure is repeated
until all characteristics are found.
This method requires that the SMT solver given in ``solver_name``
supports the bit-vector logic with quantifiers.
        Although recent versions of boolector support the bit-vector logic
        with quantifiers, pySMT does not yet support this feature
        of boolector.
>>> # example of search for universally-invalid XorDiff-EncryptionCharacteristic of (wrapped) Speck32
>>> from cascada.differential.difference import XorDiff
>>> from cascada.differential.chmodel import EncryptionChModel
>>> from cascada.smt.invalidpropsearch import InvalidPropFinder
>>> from cascada.primitives import speck
>>> Speck32 = speck.get_Speck_instance(speck.SpeckInstance.speck_32_64)
>>> Speck32.set_num_rounds(2)
>>> wrapped_ch_model = get_wrapped_chmodel(EncryptionChModel(Speck32, XorDiff))
>>> invalid_prop_finder = InvalidPropFinder(wrapped_ch_model, "z3", solver_seed=0)
>>> for i, ch in enumerate(invalid_prop_finder.find_next_invalidprop_quantified_logic()):
... print(ch.srepr())
... if i == 2: break # doctest: +ELLIPSIS
Ch(w=Infinity, id=..., od=...)
Ch(w=Infinity, id=..., od=...)
Ch(w=Infinity, id=..., od=...)
>>> # example of SMT problem of universally-invalid RXDiff-Characteristic of Speck32-KeySchedule
>>> from cascada.differential.difference import RXDiff
>>> from cascada.differential.chmodel import ChModel
>>> Speck32_KS = speck.get_Speck_instance(speck.SpeckInstance.speck_32_64).key_schedule
>>> Speck32_KS.set_num_rounds(1)
>>> ch_model = ChModel(Speck32_KS, RXDiff, ["mk0", "mk1"])
>>> invalid_prop_finder = InvalidPropFinder(ch_model, "z3", solver_seed=0)
>>> for i, ch in enumerate(invalid_prop_finder.find_next_invalidprop_quantified_logic()):
... print(ch.srepr())
... if i == 2: break # doctest: +ELLIPSIS
Ch(w=Infinity, id=..., od=...)
Ch(w=Infinity, id=..., od=...)
Ch(w=Infinity, id=..., od=...)
"""
smart_print = chsearch._get_smart_print(self.filename)
# InputOutput contains _input_vars_not_used
in_out_sig_type = abstractproperty.chmodel.ChModelSigType.InputOutput
symbolic_sig = self.ch_model.signature(in_out_sig_type)
# initializing the solver
parse_shifts_rotations = True if self.solver_name == "btor" else False
bv2pysmt = functools.partial(
pysmttypes.bv2pysmt, env=self.env, parse_shifts_rotations=parse_shifts_rotations)
solver_kwargs = {}
if self.solver_seed is not None:
if self.solver_name == "btor":
solver_kwargs = {"solver_options": {"seed": int(self.solver_seed) % 2 ** 32}} # btor seed uint32
else:
solver_kwargs = {"random_seed": self.solver_seed}
solver = self.env.factory.Solver(name=self.solver_name, logic=logics.BV, **solver_kwargs)
#
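        # build the conjunction of all validity assertions; the quantified formula
        # below states that, for the chosen input/output properties, the negation of
        # this conjunction holds for all intermediate (non input/output) properties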
compact_constraint = True
assert not self.initial_constraints
for c in itertools.chain(self.initial_constraints, self.chmodel_asserts):
compact_constraint &= c
in_out_vars = [p.val for p in itertools.chain(self._ch_model_input_prop, self._ch_model_output_prop)]
forall_vars = [v for v in self._vars_in_constraints if v not in in_out_vars]
pysmt_formula = self.env.formula_manager.ForAll(
[bv2pysmt(v) for v in forall_vars],
bv2pysmt(operation.BvNot(compact_constraint), boolean=True)
)
# # debugging
# print("\nfind_next_invalidprop_quantified_logic")
# print("ch model:", self.ch_model)
# print("compact_constraint:", compact_constraint)
# print("in_out_vars:", in_out_vars)
# print("forall_vars:", forall_vars)
# print("pysmt_formula:", pysmt_formula.serialize(), "\n")
#
solver.add_assertion(pysmt_formula)
last_ch_found = None
while True:
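            # exclude the last characteristic found (through its signature) so that the
            # next iteration yields a different input/output property pair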
if last_ch_found is not None:
if len(symbolic_sig) == 0:
warnings.warn(f"empty signature of {self.ch_model}")
break
last_ch_sig = last_ch_found.signature(in_out_sig_type)
# disable simplification due to recursion error
with context.Simplification(False):
exclude_last_ch = functools.reduce(
operation.BvOr,
[~operation.BvComp(ss, ls) for ss, ls in zip(symbolic_sig, last_ch_sig)]
)
solver.add_assertion(bv2pysmt(exclude_last_ch, boolean=True))
if self.printing_mode == PrintingMode.Debug:
smart_print(f"exclude_last_ch: {exclude_last_ch}", prepend_time=True)
satisfiable = solver.solve()
if satisfiable:
solution_var2ct = pysmttypes.pysmt_model2bv_model(solver.get_model())
# add missing input-output vars
in_out_vars = [p.val for p in self._ch_model_input_prop + self._ch_model_output_prop]
missing_in_out_vars = []
for v in in_out_vars:
if v not in solution_var2ct:
missing_in_out_vars.append(v)
solution_var2ct[v] = core.Constant(0, v.width)
if missing_in_out_vars and (self.printing_mode != PrintingMode.Silent):
smart_print(f"Found solution of quantified SMT problem for all values of {missing_in_out_vars}; "
f"setting {self.ch_model.prop_type.__name__} of {missing_in_out_vars} "
f"to 0 in yielded universally-invalid characteristic")
last_ch_found = self._get_uni_inv_ch(solution_var2ct=solution_var2ct)
if self.check_universally_invalid_ch_found:
self._check(last_ch_found)
yield last_ch_found
else:
break
solver.exit()
def hrepr(self, full_repr=False):
"""Return a human-readable representation of the base SMT problem.
The base SMT problem is the decision problem of whether there exists
a valid characteristic for an input-output property pair.
In other words, it contains the validity assertions
of the underlying characteristic model.
The methods `InvalidPropFinder.find_next_invalidprop_activebitmode` and
`InvalidPropFinder.find_next_invalidprop_miss_in_the_middle` check
for the unsatisfiability of this base SMT problem
(with some additional constraints),
while `InvalidPropFinder.find_next_invalidprop_quantified_logic`
uses this base SMT problem to create a quantified
bit-vector formula.
If ``full_repr`` is False, the short string representation srepr is used.
"""
return super().hrepr(full_repr=full_repr)
class InvalidCipherPropFinder(InvalidPropFinder):
"""Search for invalid properties of ciphers by modeling the search as a sequence of SMT problems.
Given a characteristic model of a `Cipher`
(`abstractproperty.chmodel.CipherChModel`)
defined for a particular `Property` (e.g., `XorDiff` or `RXDiff`),
this class finds *universally-invalid* cipher characteristics
(`abstractproperty.characteristic.CipherCharacteristic`)
following the characteristic model by modelling the search
as a sequence of SMT problems in the bit-vector theory.
Given a cipher characteristic, let :math:`\\alpha_{KS}` be the input
property of the underlying key-schedule characteristic
and :math:`(\\alpha_{ENC}, \\beta_{ENC})` be the input and output
properties of the underlying encryption characteristic.
A universally-invalid characteristic
over a cipher is a characteristic where
:math:`(\\alpha_{KS}, \\alpha_{ENC})` propagates to :math:`\\beta_{ENC}`
with probability zero regardless of the intermediate properties.
In other words, the input-output property pair
:math:`((\\alpha_{KS}, \\alpha_{ENC}), \\beta_{ENC})`
has zero propagation probability.
.. note::
For the `Difference` property,
a universally-invalid characteristic over a cipher is actually a
related-key impossible differential.
To initialize an `InvalidCipherPropFinder` object, first two auxiliary
instances of `InvalidPropFinder` are created:
- ``ks_finder`` an `InvalidPropFinder` with characteristic model
``ch_model.ks_ch_model``
- ``enc_finder`` an `InvalidPropFinder` with characteristic model
``ch_model.enc_ch_model``
Both ``ks_finder`` and ``enc_finder`` (together with the
`InvalidCipherPropFinder` object) share the arguments `solver_name`,
`printing_mode`, `filename`, `solver_seed` and `env`.
Then, these two auxiliary `InvalidPropFinder` objects are merged into an
`InvalidCipherPropFinder` (which is also an instance of `InvalidPropFinder`)
as follows:
- ``solver_name``, ``printing_mode``, ``filename``, ``solver_seed``
``env`` are the same as the ones from ``ks_finder`` and ``enc_finder``
- ``ch_model`` is set to the characteristic model of the cipher
(a subclass of `abstractproperty.chmodel.CipherChModel`)
- ``chmodel_asserts`` is the union of `chmodel_asserts` of
``ks_finder`` and ``enc_finder``
See also `ChFinder`.
::
>>> from cascada.differential.difference import XorDiff
>>> from cascada.differential.chmodel import CipherChModel
>>> from cascada.smt.invalidpropsearch import InvalidCipherPropFinder, get_wrapped_cipher_chmodel
>>> from cascada.primitives import speck
>>> Speck32 = speck.get_Speck_instance(speck.SpeckInstance.speck_32_64)
>>> Speck32.set_num_rounds(2)
>>> ch_model = get_wrapped_cipher_chmodel(CipherChModel(Speck32, XorDiff))
>>> invalid_prop_finder = InvalidCipherPropFinder(ch_model, "btor", solver_seed=0)
>>> invalid_prop_finder.formula_size()
177
>>> print(invalid_prop_finder.hrepr(full_repr=False)) # doctest: +NORMALIZE_WHITESPACE
; characteristic model assertions
assert (_dk0 == (dk3_out :: dmk1_out)) & ((... & ... & (... ^ ...)) == 0x0000) &
(_dmk1 == dmk1_out) & (((_dmk1 <<< 2) ^ dk1) == dk3_out)
assert PropExtract_{·, 15, 0}(_dk0) == _dk1_out
assert PropExtract_{·, 31, 16}(_dk0) == _dk2_out
assert (_dx0 == (dx9_out :: dx7_out)) & ((... & ...) == 0x0000) & ((... & ...) == 0x0000) &
((dx6 ^ _dk2_out) == dx7_out) & ((((... ^ ...) <<< 2) ^ dx6 ^ _dk2_out) == dx9_out)
assert PropExtract_{·, 15, 0}(_dx0) == _dx1_out
assert PropExtract_{·, 31, 16}(_dx0) == _dx2_out
"""
def __init__(self, ch_model, solver_name, check_universally_invalid_ch_found=True,
printing_mode=PrintingMode.Silent, filename=None,
solver_seed=None, env=None):
assert isinstance(ch_model, abstractproperty.chmodel.CipherChModel)
ks_finder = InvalidPropFinder(
ch_model.ks_ch_model, solver_name=solver_name,
check_universally_invalid_ch_found=check_universally_invalid_ch_found,
printing_mode=PrintingMode.Silent, filename=None, env=env,
)
enc_finder = InvalidPropFinder(
ch_model.enc_ch_model, solver_name=solver_name,
check_universally_invalid_ch_found=check_universally_invalid_ch_found,
printing_mode=PrintingMode.Silent, filename=None, env=ks_finder.env,
)
assert ks_finder.env == enc_finder.env
assert ks_finder.assert_type == chsearch.ChModelAssertType.Validity
assert enc_finder.assert_type == chsearch.ChModelAssertType.Validity
assert ks_finder._ch_weight is None and ks_finder._error == 0
assert enc_finder._ch_weight is None and enc_finder._error == 0
assert ks_finder._exclude_zero_input_prop is False
assert enc_finder._exclude_zero_input_prop is False
ch_weight = None
error = 0
assert_type = chsearch.ChModelAssertType.Validity
exclude_zero_input_prop = False
chmodel_asserts = ks_finder.chmodel_asserts + enc_finder.chmodel_asserts
vars_in_constraints = ks_finder._vars_in_constraints | enc_finder._vars_in_constraints
self.ks_finder = ks_finder
self.enc_finder = enc_finder
self.ch_model = ch_model
self.assert_type = assert_type
self.solver_name = solver_name
self.initial_constraints = []
self.printing_mode = printing_mode
self.filename = filename
self.weight_prefix = None
self.solver_seed = solver_seed
self._env = ks_finder.env
self.chmodel_asserts = chmodel_asserts
# variables not added in docstring (private variables)
self._exclude_zero_input_prop = exclude_zero_input_prop
self._var_prop2ct_prop = {}
self._ch_weight = ch_weight
self._error = error
self._vars_in_constraints = vars_in_constraints
self.check_universally_invalid_ch_found = check_universally_invalid_ch_found
@property
def _ch_model_input_prop(self):
return self.ch_model.ks_ch_model.input_prop + self.ch_model.enc_ch_model.input_prop
@property
def _ch_model_output_prop(self):
return self.ch_model.enc_ch_model.output_prop
@property
def _ch_model_assign_outprop2op_model(self):
return collections.OrderedDict(itertools.chain(
self.ch_model.ks_ch_model.assign_outprop2op_model.items(),
self.ch_model.enc_ch_model.assign_outprop2op_model.items()))
@property
def _ch_model_prop_label(self):
assert self.ch_model.ks_ch_model._prop_label == self.ch_model.enc_ch_model._prop_label
return self.ch_model.ks_ch_model._prop_label
def _pysmt_model2ch(self, solution_var2ct, target_weight=None, is_pysmt_model=True, is_sat=True):
assert target_weight is None
assert is_sat is False
if is_pysmt_model:
solution_var2ct = pysmttypes.pysmt_model2bv_model(solution_var2ct)
else:
solution_var2ct = solution_var2ct.copy()
def _get_needed_vars(my_ch_model):
var_needed = [p.val for p in my_ch_model.input_prop if p.val not in my_ch_model.ssa._input_vars_not_used]
# # ks_ch_model has no external vars and enc_ch_model gets those from ks_ch_model.output
# for ext_var, prop in my_ch_model.external_var2prop.items():
# if not isinstance(prop.val, core.Constant):
# var_needed.append(ext_var)
for outprop, op_model in my_ch_model.assign_outprop2op_model.items():
# if op_model.max_weight() != 0:
if not isinstance(op_model, abstractproperty.opmodel.ModelIdentity):
var_needed.append(outprop.val)
return var_needed
def get_needed_vars(my_cipher_ch_model):
return _get_needed_vars(my_cipher_ch_model.ks_ch_model) + _get_needed_vars(my_cipher_ch_model.enc_ch_model)
missing_signature_vars = []
for v in get_needed_vars(self.ch_model):
if v not in solution_var2ct:
missing_signature_vars.append(v)
solution_var2ct[v] = core.Constant(0, v.width)
# universally-invalid characteristics are invalid regardless of non-input non-output properties
in_out_vars = [p.val for p in self._ch_model_input_prop + self._ch_model_output_prop]
if missing_signature_vars and (
self.printing_mode != PrintingMode.Silent or
any(v in in_out_vars for v in missing_signature_vars)
):
smart_print = chsearch._get_smart_print(self.filename)
smart_print(f"Found {'satisfiable' if is_sat else 'unsatisfiable'} assignment "
f"of SMT problem for all values of {missing_signature_vars}; "
f"setting {self.ch_model.prop_type.__name__} of {missing_signature_vars} "
f"to 0 in yielded characteristic")
# if target_weight is not None and \
# [...]
CipherCharacteristic_cls = self.ch_model.__class__._get_CipherCharacteristic_cls()
init_props = CipherCharacteristic_cls.get_properties_for_initialization(self.ch_model, solution_var2ct)
assert len(init_props) == 6
ks_input_prop, ks_output_prop, ks_assign_outprop_list = init_props[:3]
enc_input_prop, enc_output_prop, enc_assign_outprop_list = init_props[-3:]
# # debugging
# print("InvalidCipherProp._pysmt_model2ch")
# print("ch model:", self.ch_model)
# print("ks ssa:", self.ch_model.ks_ch_model.ssa)
# print("enc ssa:", self.ch_model.enc_ch_model.ssa)
# print("solution_var2ct:", solution_var2ct)
# print("needed vars:", get_needed_vars(self.ch_model))
# print("get_properties_for_initialization():", init_props, "\n")
#
# avoid *_props=*_props (super might not abstract)
last_ch_found = None
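        # try the (ks_is_valid, enc_is_valid) combinations starting from both
        # sub-characteristics invalid; building the characteristic raises an exception
        # when an is_valid flag is inconsistent with the given properties (such
        # combinations are skipped), and a solution where both sub-characteristics
        # end up valid is rejected afterwards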
for ks_is_sat, enc_is_sat in [[False, False], [False, True], [True, False], [True, True]]:
try:
last_ch_found = CipherCharacteristic_cls(
ks_input_prop,
ks_output_prop,
ks_assign_outprop_list,
enc_input_prop,
enc_output_prop,
enc_assign_outprop_list,
self.ch_model,
# ks_free_props,
# enc_free_props,
ks_empirical_ch_weight=None,
ks_empirical_data_list=None,
enc_empirical_ch_weight=None,
enc_empirical_data_list=None,
ks_is_valid=ks_is_sat,
enc_is_valid=enc_is_sat,
)
except (ValueError, abstractproperty.opmodel.InvalidOpModelError) as e:
if "is_valid" in str(e):
continue
else:
raise e
else:
break
if last_ch_found is not None and [ks_is_sat, enc_is_sat] == [True, True]:
raise ValueError(f"SMT solution {solution_var2ct} leads to a valid characteristic"
f"\n{last_ch_found}")
elif last_ch_found is None:
raise ValueError(f"no characteristic can be built from SMT solution {solution_var2ct}")
assert isinstance(last_ch_found, CipherCharacteristic_cls), f"{last_ch_found}"
# assert not (self.ks_finder.assert_type == chsearch.ChModelAssertType.ProbabilityOne
# [...]
# for i, aux_finder in enumerate([self.ks_finder, self.enc_finder]):
# [...] # _exclude_zero_input_prop
# for var_prop, ct_prop in itertools.chain(
# [...]
# if self._var_prop2ct_prop:
# [...]
# # ignored due to new solution_var2ct
# with context.Simplification(False):
# chmodel_asserts = [a.xreplace(solution_var2ct) for a in self.chmodel_asserts]
# if target_weight is not None:
# [...]
return last_ch_found
def _check(self, invalid_cipher_ch_found):
assert self.ch_model == invalid_cipher_ch_found.cipher_ch_model
if hasattr(self.ch_model, "_unwrapped_cipher_ch_model"):
list_cipher_ch_model = [self.ch_model, self.ch_model._unwrapped_cipher_ch_model]
for v1, v2 in zip(
self.ch_model.enc_ch_model.external_var2prop.values(),
self.ch_model._unwrapped_cipher_ch_model.enc_ch_model.external_var2prop.values()
):
if isinstance(v1.val, core.Constant) or isinstance(v2.val, core.Constant):
assert v1 == v2
else:
list_cipher_ch_model = [self.ch_model]
# avoid self._ch_model_input_prop since we also have self.ch_model._unwrapped_cipher_ch_model
def get_input_prop(ch_or_ch_model):
if isinstance(ch_or_ch_model, abstractproperty.characteristic.CipherCharacteristic):
return ch_or_ch_model.ks_characteristic.input_prop + \
ch_or_ch_model.enc_characteristic.input_prop
else:
assert isinstance(ch_or_ch_model, abstractproperty.chmodel.CipherChModel)
return ch_or_ch_model.ks_ch_model.input_prop + \
ch_or_ch_model.enc_ch_model.input_prop
def get_output_prop(ch_or_ch_model):
if isinstance(ch_or_ch_model, abstractproperty.characteristic.CipherCharacteristic):
return ch_or_ch_model.ks_characteristic.output_prop + \
ch_or_ch_model.enc_characteristic.output_prop
else:
assert isinstance(ch_or_ch_model, abstractproperty.chmodel.CipherChModel)
return ch_or_ch_model.ks_ch_model.output_prop + \
ch_or_ch_model.enc_ch_model.output_prop
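        # for each cipher characteristic model (the wrapped one and, if present, its
        # unwrapped counterpart), fix the input/output properties to those of the
        # characteristic found and search for a valid cipher characteristic with
        # CipherChFinder; finding one contradicts the universal invalidity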
for cipher_ch_model in list_cipher_ch_model:
var_prop2ct_prop = collections.OrderedDict()
for vp, cp in zip(get_input_prop(cipher_ch_model), get_input_prop(invalid_cipher_ch_found)):
var_prop2ct_prop[vp] = cp
for vp, cp in zip(get_output_prop(cipher_ch_model), get_output_prop(invalid_cipher_ch_found)):
var_prop2ct_prop[vp] = cp
cipher_ch_finder = chsearch.CipherChFinder(
cipher_ch_model, ks_assert_type=self.assert_type, enc_assert_type=self.assert_type,
solver_name=self.solver_name, var_prop2ct_prop=var_prop2ct_prop, raise_exception_missing_var=False,
printing_mode=self.printing_mode, filename=self.filename, solver_seed=self.solver_seed
)
for valid_cipher_ch_found in cipher_ch_finder.find_next_ch():
raise ValueError(f"last characteristic found {invalid_cipher_ch_found} in the search is not "
f"universally-invalid (found compatible valid characteristic {valid_cipher_ch_found})")
del cipher_ch_finder
assert self._env == chsearch.environment.get_env()
def find_next_invalidprop_activebitmode(self, initial_num_active_bits, input_prop_activebitmode, output_prop_activebitmode):
"""Return an iterator that yields the universally-invalid characteristics found in the SMT-based search
with given `ActiveBitMode`.
This method is similar to `InvalidPropFinder.find_next_invalidprop_activebitmode`;
the only difference is that the input property considered by this method
is the concatenation of the input property of the underlying key-schedule
characteristic and the input property of the underlying encryption
characteristic, and the output property considered by this method
is the output property of the encryption characteristic.
        In other words, ``output_prop_activebitmode`` only affects
        the output property of the encryption characteristic
        and not the output property of the key-schedule characteristic.
>>> from cascada.differential.difference import XorDiff
>>> from cascada.differential.chmodel import CipherChModel
>>> from cascada.smt.invalidpropsearch import InvalidCipherPropFinder, ActiveBitMode, get_wrapped_cipher_chmodel
>>> from cascada.primitives import speck
>>> Speck32 = speck.get_Speck_instance(speck.SpeckInstance.speck_32_64)
>>> Speck32.set_num_rounds(2)
>>> ch_model = get_wrapped_cipher_chmodel(CipherChModel(Speck32, XorDiff))
>>> invalid_prop_finder = InvalidCipherPropFinder(ch_model, "btor", solver_seed=0)
>>> inab, ipabm, opabm = 1, ActiveBitMode.MSBit, ActiveBitMode.Zero
>>> for ch in invalid_prop_finder.find_next_invalidprop_activebitmode(inab, ipabm, opabm):
... print(ch.srepr())
Ch(ks_ch=Ch(w=Infinity, id=8000 0000, od=0000 0000), enc_ch=Ch(w=0, id=0000 0000, od=0000 0000))
Ch(ks_ch=Ch(w=Infinity, id=0000 8000, od=0000 0000), enc_ch=Ch(w=0, id=0000 0000, od=0000 0000))
Ch(ks_ch=Ch(w=0, id=0000 0000, od=0000 0000), enc_ch=Ch(w=Infinity, id=8000 0000, od=0000 0000))
Ch(ks_ch=Ch(w=0, id=0000 0000, od=0000 0000), enc_ch=Ch(w=Infinity, id=0000 8000, od=0000 0000))
Ch(ks_ch=Ch(w=Infinity, id=8000 8000, od=0000 0000), enc_ch=Ch(w=0, id=0000 0000, od=0000 0000))
Ch(ks_ch=Ch(w=Infinity, id=8000 0000, od=0000 0000), enc_ch=Ch(w=Infinity, id=8000 0000, od=0000 0000))
Ch(ks_ch=Ch(w=Infinity, id=8000 0000, od=0000 0000), enc_ch=Ch(w=Infinity, id=0000 8000, od=0000 0000))
Ch(ks_ch=Ch(w=0, id=0000 8000, od=8000 8002), enc_ch=Ch(w=Infinity, id=8000 0000, od=0000 0000))
Ch(ks_ch=Ch(w=0, id=0000 8000, od=8000 8002), enc_ch=Ch(w=Infinity, id=0000 8000, od=0000 0000))
Ch(ks_ch=Ch(w=0, id=0000 0000, od=0000 0000), enc_ch=Ch(w=Infinity, id=8000 8000, od=0000 0000))
Ch(ks_ch=Ch(w=Infinity, id=8000 8000, od=0000 0000), enc_ch=Ch(w=Infinity, id=8000 0000, od=0000 0000))
Ch(ks_ch=Ch(w=Infinity, id=8000 8000, od=0000 0000), enc_ch=Ch(w=Infinity, id=0000 8000, od=0000 0000))
Ch(ks_ch=Ch(w=Infinity, id=8000 0000, od=0000 0000), enc_ch=Ch(w=Infinity, id=8000 8000, od=0000 0000))
Ch(ks_ch=Ch(w=0, id=0000 8000, od=8000 8002), enc_ch=Ch(w=Infinity, id=8000 8000, od=0000 0000))
Ch(ks_ch=Ch(w=Infinity, id=8000 8000, od=0000 0000), enc_ch=Ch(w=Infinity, id=8000 8000, od=0000 0000))
"""
return super().find_next_invalidprop_activebitmode(
initial_num_active_bits, input_prop_activebitmode, output_prop_activebitmode
)
def find_next_invalidprop_miss_in_the_middle(self, *args, **kargs):
"""This method is disabled, see `round_based_invalidcipherprop_search` for an alternative."""
        raise NotImplementedError("find_next_invalidprop_miss_in_the_middle is disabled in InvalidCipherPropFinder, "
                                  "see round_based_invalidcipherprop_search")
def find_next_invalidprop_quantified_logic(self):
"""Return an iterator that yields the universally-invalid characteristics found in the quantified SMT-based search.
See also `InvalidPropFinder.find_next_invalidprop_quantified_logic`.
>>> from cascada.differential.difference import RXDiff
>>> from cascada.differential.chmodel import CipherChModel
>>> from cascada.smt.invalidpropsearch import InvalidCipherPropFinder, get_wrapped_cipher_chmodel
>>> from cascada.primitives import speck
>>> Speck32 = speck.get_Speck_instance(speck.SpeckInstance.speck_32_64)
>>> Speck32.set_num_rounds(2)
>>> ch_model = get_wrapped_cipher_chmodel(CipherChModel(Speck32, RXDiff))
>>> invalid_prop_finder = InvalidCipherPropFinder(ch_model, "z3", solver_seed=0)
>>> uni_inv_ch = next(invalid_prop_finder.find_next_invalidprop_quantified_logic())
>>> print(uni_inv_ch) # doctest: +NORMALIZE_WHITESPACE, +ELLIPSIS
CipherCharacteristic(ks_characteristic=Characteristic(ch_weight=...,
assignment_weights=[..., ..., ...],
input_diff=[..., ...], output_diff=[..., ...],
assign_outdiff_list=[..., ..., ...]),
enc_characteristic=Characteristic(ch_weight=...,
assignment_weights=[..., ..., ...],
input_diff=[..., ...], output_diff=[..., ...], external_diffs=[..., ...],
assign_outdiff_list=[..., ..., ...]))
"""
return super().find_next_invalidprop_quantified_logic()
def round_based_invalidprop_search(
func, initial_num_rounds, final_num_rounds, prop_type, solver_name,
max_num_skipped_rounds=0, min_num_E0_rounds=1, min_num_E2_rounds=1,
extra_chmodel_args=None, # op_model_class2options,
extra_invalidpropfinder_args=None, # pm, fn, env, solver_seed
exclude_zero_input_prop_E0=True,
exclude_zero_input_prop_E2=True,
**kwargs # exclude_zero_input_prop_external_E, find_cipher_invalid_prop
):
"""Search for zero-probability (invalid) property pairs of round-based functions over multiple number of rounds.
This function searches for universally-invalid characteristics
(leading to invalid properties, see `InvalidPropFinder`)
of a `RoundBasedFunction` ``func``
by modelling the search as a sequence of SMT problems
(using `InvalidPropFinder.find_next_invalidprop_miss_in_the_middle`),
    but the search is performed iteratively over the number of rounds of ``func``.
That is, first universally-invalid characteristics covering ``initial_num_rounds`` rounds
are searched, then ``initial_num_rounds + 1``, until ``final_num_rounds``.
See also `round_based_ch_search`.
.. note::
The `RoundBasedFunction` ``func`` must include `add_round_outputs`
calls in its ``eval``.
        While `InvalidPropFinder` requires wrapping the characteristic model
        if it has more than one non-trivial transition, this method requires
        the function ``func`` not to be wrapped.
This method also requires that for all the round functions :math:`f_i`
of ``func`` (generated through `SSA.split` with
`SSA.get_round_separators`), given any probability-one
        characteristic over :math:`f_i` with input-output property
:math:`(\\alpha, \\beta)`, then there is no other probability-one
characteristic with input property :math:`\\alpha`
(resp. output property :math:`\\beta`) but output property
:math:`\\neq \\beta` (resp. input property :math:`\\neq \\alpha`).
If all the round functions are permutations, then this is satisfied
for `Difference` and `LinearMask` properties.
See also `InvalidPropFinder.find_next_invalidprop_miss_in_the_middle`.
    This function proceeds as follows (a schematic sketch is given after the steps):
1. Set the current number of rounds of the universally-invalid characteristics to search
for to ``initial_num_rounds``.
2. Set the current number of initial rounds to skip to ``0``.
3. Set the number of rounds of ``func`` to the sum of the number of rounds
of step 1 and step 2, and split ``func`` into :math:`E \circ S`
(:math:`S` denotes the skipped rounds and :math:`E` the target function
of the universally-invalid characteristics to search for).
4. Create a `abstractproperty.chmodel.ChModel`
(resp. `abstractproperty.chmodel.EncryptionChModel`) object
of :math:`E` using as arguments ``prop_type`` and ``extra_chmodel_args``.
5. Split :math:`E` into :math:`E = E_2 \circ E_1 \circ E_0`
taking into account ``min_num_E0_rounds, min_num_E2_rounds``
and generate the characteristic models of :math:`(E_0, E_1, E_2)`
using `abstractproperty.chmodel.ChModel.get_round_separators`
and `abstractproperty.chmodel.ChModel.split`.
See `InvalidPropFinder.find_next_invalidprop_miss_in_the_middle`
for more details about :math:`(E_0, E_1, E_2)`.
6. Create an `InvalidPropFinder` object with arguments
the characteristic model over :math:`E_1`,
``solver_name`` and ``extra_invalidpropfinder_args``.
7. Loop over the generator `InvalidPropFinder.find_next_invalidprop_miss_in_the_middle`
(with arguments ``exclude_zero_input_prop_E0``
and ``exclude_zero_input_prop_E2``)
and yield all the 3-length tuples of characteristics from the
generator (together with the current number of rounds).
8. After the generator is exhausted, go to step 5 but splitting :math:`E`
    into another partition :math:`(E_0, E_1, E_2)`.
      a. If all partitions have been exhausted,
instead increase the current number of initial rounds to skip
(up to ``max_num_skipped_rounds``) and go to step 3.
b. If the current number of skipped rounds was ``max_num_skipped_rounds``,
instead increase the current number of rounds of
the universally-invalid characteristics to search for and go to step 2.
c. If this number was ``final_num_rounds``, instead the search is finished.
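
    The steps above can be summarized schematically as follows (an illustrative
    sketch of the iteration order, not the actual implementation;
    ``round_partitions`` is a placeholder name):

    .. code:: python

        for num_E_rounds in range(initial_num_rounds, final_num_rounds + 1):
            for num_S_rounds in range(max_num_skipped_rounds + 1):
                # set func to num_S_rounds + num_E_rounds rounds, split it into E \circ S,
                # and build the characteristic model ch_model_E of E
                for ne0, ne1, ne2 in round_partitions(num_E_rounds, min_num_E0_rounds, min_num_E2_rounds):
                    # split ch_model_E into (ch_model_E0, ch_model_E1, ch_model_E2) with
                    # ne0, ne1 and ne2 rounds, and search with
                    # InvalidPropFinder.find_next_invalidprop_miss_in_the_middle,
                    # yielding the characteristic tuples found
                    ...
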
This function is a Python `generator` function
(see `InvalidPropFinder`), returning an `iterator` that yields
2-length tuples:
* The first element in the tuple is a 4-length tuple containing
the number of initial skipped rounds, the number of rounds
of :math:`E_0`, the number of rounds of :math:`E_1`
and the number of rounds of :math:`E_2`.
* The second element in the tuple is a 3-length tuple containing
the characteristics over :math:`E_0`, :math:`E_1` and :math:`E_2`
respectively (i.e., the outputs of
`InvalidPropFinder.find_next_invalidprop_miss_in_the_middle`).
Note that these characteristics are
`abstractproperty.characteristic.Characteristic` objects
if ``func`` is a `RoundBasedFunction` object, or
`abstractproperty.characteristic.EncryptionCharacteristic` objects
if ``func`` is a `RoundBasedFunction`-encryption function of a `Cipher`.
The argument ``prop_type`` is a particular `Property` such as `XorDiff`
or `LinearMask`. For ``solver_name``, see `InvalidPropFinder`.
The optional arguments ``extra_chmodel_args`` and ``extra_invalidpropfinder_args``
can be given as dictionaries (in the form of ``**kwargs``) containing
additional arguments for ``ChModel/EncryptionChModel`` and `InvalidPropFinder`
respectively.
It is possible to abort the current search for the current number of rounds
and start the search with one more round by passing the
value `INCREMENT_NUM_ROUNDS`
to the generator iterator with `generator.send`
(see `round_based_ch_search`).
This function reuses information from previous partitions :math:`(E_0', E_1', E_2')`
to directly avoid some new partitions :math:`(E_0, E_1, E_2)` that don't contain
universally-invalid characteristics.
Assume that no universally-invalid characteristic was found for the partition
:math:`(E_0', E_1', E_2')`,
where :math:`E_0'` covers from the :math:`a'`-th round to the :math:`b'`-th
    round (i.e., ``a'->b'``) and :math:`E_2'` covers ``c'->d'``.
Then it holds that no universally-invalid characteristic can be found
using `InvalidPropFinder.find_next_invalidprop_miss_in_the_middle` from any partition
    :math:`(E_0, E_1, E_2)` where :math:`E_0` covers ``a->a'->b'->b`` and
    :math:`E_2` covers ``c->c'->d'->d``, that is,
    from any partition :math:`(E_0, E_1, E_2)`
    where :math:`E_0` covers ``a->b`` and :math:`E_2` covers ``c->d``
    such that :math:`a \le a', b' \le b, c \le c'` and :math:`d' \le d`.
.. note::
Note that `InvalidPropFinder` contains other methods to search
for universally-invalid characteristics (e.g.,
`InvalidPropFinder.find_next_invalidprop_activebitmode` or
`InvalidPropFinder.find_next_invalidprop_quantified_logic`)
which might find universally-invalid characteristics faster.
::
>>> # example of searching for XorDiff universally-invalid Characteristic over a BvFunction
>>> from cascada.differential.difference import XorDiff
>>> from cascada.smt.invalidpropsearch import round_based_invalidprop_search, INCREMENT_NUM_ROUNDS
>>> from cascada.primitives import speck
>>> Speck32_ks = speck.get_Speck_instance(speck.SpeckInstance.speck_32_64).key_schedule
>>> iterator = round_based_invalidprop_search(Speck32_ks, 3, 3, XorDiff, "btor",
... extra_invalidpropfinder_args={"solver_seed":0})
>>> for i, (tuple_rounds, tuple_chs) in enumerate(iterator):
... print(tuple_rounds, ":", ', '.join([ch.srepr() for ch in tuple_chs]))
... if i == 2: break # doctest: +NORMALIZE_WHITESPACE, +ELLIPSIS
(0, 1, 1, 1) : Ch(w=0, id=..., od=...), Ch(w=Infinity, id=..., od=...), Ch(w=0, id=..., od=...)
(0, 1, 1, 1) : Ch(w=0, id=..., od=...), Ch(w=Infinity, id=..., od=...), Ch(w=0, id=..., od=...)
(0, 1, 1, 1) : Ch(w=0, id=..., od=...), Ch(w=Infinity, id=..., od=...), Ch(w=0, id=..., od=...)
>>> # example of searching for LinearMask universally-invalid EncryptionCharacteristic over a Cipher
>>> from cascada.linear.mask import LinearMask
>>> Speck32 = speck.get_Speck_instance(speck.SpeckInstance.speck_32_64)
>>> iterator = round_based_invalidprop_search(Speck32, 4, 5, LinearMask, "btor",
... min_num_E0_rounds=2, extra_invalidpropfinder_args={"solver_seed":0})
>>> tuple_rounds, tuple_chs = next(iterator)
>>> print(tuple_rounds, ":", ', '.join([ch.srepr() for ch in tuple_chs]))
(0, 2, 1, 1) : Ch(w=0, id=0080 4021, od=0201 0200), Ch(w=Infinity, id=0201 0200, od=0000 0001), Ch(w=0, id=0000 0001, od=0004 0004)
>>> tuple_rounds, tuple_chs = next(iterator)
>>> print(tuple_rounds, ":", ', '.join([ch.srepr() for ch in tuple_chs]))
(0, 2, 1, 1) : Ch(w=0, id=0080 4021, od=0201 0200), Ch(w=Infinity, id=0201 0200, od=0080 e001), Ch(w=0, id=0080 e001, od=8002 8003)
>>> iterator.send(INCREMENT_NUM_ROUNDS) # stop current num_rounds and increment by 1
>>> tuple_rounds, tuple_chs = next(iterator)
>>> print(tuple_rounds, ":", ', '.join([ch.srepr() for ch in tuple_chs]))
(0, 2, 1, 2) : Ch(w=0, id=0080 4021, od=0201 0200), Ch(w=Infinity, id=0201 0200, od=0080 4021), Ch(w=0, id=0080 4021, od=0201 0200)
.. Implementation details:
Example 1.
        If there is no 2->7 ID over E with E0 covering [2->3] and E2 [6->7]
        then there are no bigger *->* ID over E with E0 [*->2->3] and E2 [6->7->*] for any *
This is because in the 2nd case the Pr1-E0 outputs and the Pr-E2 inputs
are a subset of the 1st case.
Example 2.
        If there is no ID over E with E0 covering [2->3] and E2 [6->7]
        then there is no ID over E with E0 covering [2->3->4] and E2 [5->6->7]
This is because in the 2nd case E0 and E2 fully contains
the E0 and E2 of the 1st case, (1st case contains more Pr.1 than 2nd).
"""
if not (issubclass(func, cascada_ssa.RoundBasedFunction) or
(issubclass(func, blockcipher.Cipher) and
issubclass(func.encryption, cascada_ssa.RoundBasedFunction))):
raise ValueError(f"{func} is not a RoundBasedFunction or a Cipher")
if initial_num_rounds <= 2:
raise ValueError(f"initial_num_rounds ({initial_num_rounds}) must be at least 3")
assert all(isinstance(aux_nr, int) for aux_nr in
[initial_num_rounds, final_num_rounds, max_num_skipped_rounds,
min_num_E0_rounds, min_num_E2_rounds])
assert initial_num_rounds <= final_num_rounds
if extra_chmodel_args is None:
extra_chmodel_args = {}
else:
extra_chmodel_args = extra_chmodel_args.copy()
if extra_invalidpropfinder_args is None:
extra_invalidpropfinder_args = {}
else:
extra_invalidpropfinder_args = extra_invalidpropfinder_args.copy()
printing_mode = extra_invalidpropfinder_args.get("printing_mode", PrintingMode.Silent)
filename = extra_invalidpropfinder_args.get("filename", None)
smart_print = chsearch._get_smart_print(filename)
find_cipher_invalid_prop = kwargs.pop("find_cipher_invalid_prop", False)
exclude_zero_input_prop_external_E = kwargs.pop("exclude_zero_input_prop_external_E", None)
# ignore_trivial_E1 = kwargs.pop("ignore_trivial_E1", False)
assert not (exclude_zero_input_prop_external_E is not None and find_cipher_invalid_prop is False)
if kwargs:
raise ValueError(f"invalid arguments: {kwargs}")
from cascada.differential.difference import XorDiff, RXDiff
from cascada.linear.mask import LinearMask
# from cascada.algebraic.value import BitValue, WordValue
if prop_type in [XorDiff, RXDiff]:
from cascada.differential.chmodel import ChModel, EncryptionChModel, CipherChModel
elif prop_type == LinearMask:
assert find_cipher_invalid_prop is False
from cascada.linear.chmodel import ChModel, EncryptionChModel
# elif prop_type in [BitValue, WordValue]:
# from cascada.algebraic.chmodel import ChModel, EncryptionChModel, CipherChModel
else:
raise ValueError(f"prop_type not in {[XorDiff, RXDiff, LinearMask]}")
#
bad_partitions = []
def get_a_b_c_d_partition(my_num_S_rounds, my_num_E0_rounds, my_num_E1_rounds, my_num_E2_rounds):
"""Get the tuple (a, b, c, d) where a/b is the start/end rounds of E0 and similar for c/d and E2."""
a = my_num_S_rounds
b = my_num_S_rounds + my_num_E0_rounds
c = my_num_S_rounds + my_num_E0_rounds + my_num_E1_rounds
d = my_num_S_rounds + my_num_E0_rounds + my_num_E1_rounds + my_num_E2_rounds
return a, b, c, d, [my_num_S_rounds, my_num_E0_rounds, my_num_E1_rounds, my_num_E2_rounds]
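    # Illustrative check (numbers chosen here for illustration only): with 1 skipped round
    # and (2, 1, 2) rounds for (E0, E1, E2), get_a_b_c_d_partition(1, 2, 1, 2) returns
    # (1, 3, 4, 6, [1, 2, 1, 2]), i.e. E0 covers rounds 1->3 and E2 covers rounds 4->6.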
#
num_E_rounds = initial_num_rounds
while True: # loop over num_E_rounds (not a for loop due to INCREMENT_NUM_ROUNDS)
found_invalidprop = False
found_INCREMENT_NUM_ROUNDS = False
for num_S_rounds in range(0, max_num_skipped_rounds + 1):
func.set_num_rounds(num_S_rounds + num_E_rounds)
external_chmodel = None
if issubclass(func, blockcipher.Cipher):
if find_cipher_invalid_prop:
aux_SE_ch_model = CipherChModel(func, prop_type, **extra_chmodel_args)
external_chmodel, SE_ch_model = aux_SE_ch_model.ks_ch_model, aux_SE_ch_model.enc_ch_model
else:
SE_ch_model = EncryptionChModel(func, prop_type, **extra_chmodel_args)
else:
prefix = EncryptionChModel._prefix
input_prop_names = [f"{prefix}p{i}" for i in range(len(func.input_widths))]
SE_ch_model = ChModel(func, prop_type, input_prop_names, **extra_chmodel_args)
if num_S_rounds == 0:
S_ch_model, E_ch_model = None, SE_ch_model
else:
SE_all_rs = SE_ch_model.get_round_separators()
if SE_all_rs is None or len(SE_all_rs) < 2:
raise ValueError(f"{SE_all_rs.func.get_name()} cannot be decomposed in 3 or more rounds")
assert len(SE_all_rs) == num_S_rounds + num_E_rounds - 1
# the end of the i-th round (i=1,2,...) is round_separators[i-1]
S_ch_model, E_ch_model = SE_ch_model.split([SE_all_rs[num_S_rounds - 1]])
# S/E_ch_model.func is not a RoundBasedFunction (no num_rounds, no get_round_separators)
if printing_mode != PrintingMode.Silent:
if num_E_rounds != initial_num_rounds:
smart_print("")
if printing_mode == PrintingMode.Debug:
smart_print("")
smart_print(f"Current number of rounds of (S, E): "
f"({num_S_rounds}, {num_E_rounds})", prepend_time=True)
if printing_mode == PrintingMode.Debug:
if num_S_rounds > 0:
smart_print(f"Characteristic model over E \circ S: {SE_ch_model}")
smart_print(f"Output of S: {SE_all_rs[num_S_rounds - 1]}")
smart_print(f"Characteristic model over S: {S_ch_model}")
smart_print(f"Characteristic model over E: {E_ch_model}")
if external_chmodel:
smart_print(f"External characteristic model (over the key schedule): {external_chmodel}")
for num_E1_rounds in range(1, num_E_rounds - 2 + 1): # - 2 to reserve 1 round for E0 and 1 for E2
aux_num_E0_rounds_num_E2_rounds = []
for num_E0_rounds in range(1, num_E_rounds - num_E1_rounds - 1 + 1): # - 1 to reserve 1 for E2
num_E2_rounds = num_E_rounds - num_E0_rounds - num_E1_rounds
assert num_E2_rounds > 0 and num_E0_rounds + num_E1_rounds + num_E2_rounds == num_E_rounds
if num_E0_rounds < min_num_E0_rounds or num_E2_rounds < min_num_E2_rounds:
continue
aux_num_E0_rounds_num_E2_rounds.append([num_E0_rounds, num_E2_rounds])
# sorting ensure first (E0,E2) pair where each Ei has roughly half the rounds of E0+E2
aux_num_E0_rounds_num_E2_rounds.sort(key=lambda x: (abs(x[0] - x[1]), x[1]))
for num_E0_rounds, num_E2_rounds in aux_num_E0_rounds_num_E2_rounds:
a, b, c, d, _ = get_a_b_c_d_partition(num_S_rounds, num_E0_rounds, num_E1_rounds, num_E2_rounds)
for a_prime, b_prime, c_prime, d_prime, bad_partition in bad_partitions:
if a <= a_prime and b_prime <= b and c <= c_prime and d_prime <= d:
if printing_mode != PrintingMode.Silent:
if printing_mode == PrintingMode.Debug:
smart_print("")
# EX_ch_model.func.get_name() doesn't give useful information
smart_print(f"Ignoring current number of rounds "
f"({num_S_rounds}, {num_E0_rounds}, {num_E1_rounds}, {num_E2_rounds}) ",
f"of (S, E0, E1, E2) due to previous (S', E0', E1', E2') "
f"with number of rounds {tuple(bad_partition)} "
f"that did not contain any universally-invalid characteristics", prepend_time=True)
continue
E_all_rs = E_ch_model.get_round_separators()
if E_all_rs is None or len(E_all_rs) < 2:
raise ValueError(f"{E_ch_model.func.get_name()} cannot be decomposed in 3 or more rounds")
assert len(E_all_rs) == num_E_rounds - 1
E0_rs = E_all_rs[num_E0_rounds - 1]
E1_rs = E_all_rs[num_E0_rounds + num_E1_rounds - 1]
E0_ch_model, E1_ch_model, E2_ch_model = E_ch_model.split([E0_rs, E1_rs])
E1_non_id_opmodels = []
for op_model in E1_ch_model.assign_outprop2op_model.values():
if not isinstance(op_model, abstractproperty.opmodel.ModelIdentity):
E1_non_id_opmodels.append(op_model)
# if ignore_trivial_E1 and len(E1_non_id_opmodels) == 0:
# continue
if len(E1_non_id_opmodels) > 1:
E1_ch_model = get_wrapped_chmodel(E1_ch_model)
if printing_mode != PrintingMode.Silent:
if printing_mode == PrintingMode.Debug:
smart_print(f"\nCharacteristic model over E0: {E0_ch_model}")
smart_print(f"Characteristic model over E1: {E1_ch_model}")
smart_print(f"Characteristic model over E2: {E2_ch_model}")
# EX_ch_model.func.get_name() doesn't give useful information
smart_print(f"Current number of rounds of (E0, E1, E2): "
f"({num_E0_rounds}, {num_E1_rounds}, {num_E2_rounds})", prepend_time=True)
invalid_prop_finder = InvalidPropFinder(E1_ch_model, solver_name, **extra_invalidpropfinder_args)
if printing_mode == PrintingMode.Debug:
smart_print("Size of the base SMT problem:", invalid_prop_finder.formula_size())
smart_print(f"Base SMT problem:\n{invalid_prop_finder.hrepr()}")
iterator = invalid_prop_finder.find_next_invalidprop_miss_in_the_middle
for tuple_chs in iterator(E0_ch_model, E2_ch_model, ch_model_E=E_ch_model, ch_model_external_E=external_chmodel,
exclude_zero_input_prop_E0=exclude_zero_input_prop_E0,
exclude_zero_input_prop_E2=exclude_zero_input_prop_E2,
exclude_zero_input_prop_external_E=exclude_zero_input_prop_external_E):
if num_S_rounds == 0 and find_cipher_invalid_prop:
prone_ch_E0, uni_inv_ch_E1, prone_ch_E2, external_prone_ch_E = tuple_chs
var_prop2ct_prop = collections.OrderedDict()
for vp, cp in itertools.chain(
zip(aux_SE_ch_model.enc_ch_model.input_prop, prone_ch_E0.input_prop),
zip(aux_SE_ch_model.enc_ch_model.output_prop, prone_ch_E2.output_prop),
zip(aux_SE_ch_model.ks_ch_model.input_prop, external_prone_ch_E.input_prop)
):
var_prop2ct_prop[vp] = cp
cipher_ch_finder = chsearch.CipherChFinder(
aux_SE_ch_model, ks_assert_type=chsearch.ChModelAssertType.Validity,
enc_assert_type=chsearch.ChModelAssertType.Validity, solver_name=solver_name,
var_prop2ct_prop=var_prop2ct_prop, raise_exception_missing_var=False,
printing_mode=printing_mode, filename=filename
)
for valid_cipher_ch_found in cipher_ch_finder.find_next_ch():
raise ValueError(
"the concatenation of the last characteristic tuple found,"
f"\n - prone_ch_E0: {prone_ch_E0}, {prone_ch_E0.ch_model}"
f"\n - uni_inv_ch_E1: {uni_inv_ch_E1}, {uni_inv_ch_E1.ch_model}"
f"\n - prone_ch_E2: {prone_ch_E2}, {prone_ch_E2.ch_model}"
f"\n - external_prone_ch_E: {external_prone_ch_E}, {external_prone_ch_E.ch_model}"
f"\n - cipher ch model: {aux_SE_ch_model}\n - var_prop2ct_prop: {var_prop2ct_prop}"
f"\n - cipher_ch_finder: {cipher_ch_finder.initial_constraints+list(cipher_ch_finder.chmodel_asserts)},"
f"\n is not universally-invalid (found compatible valid cipher characteristic {valid_cipher_ch_found})")
del cipher_ch_finder
found_invalidprop = True
tuple_rounds = (num_S_rounds, num_E0_rounds, num_E1_rounds, num_E2_rounds)
sent_value = (yield (tuple_rounds, tuple_chs))
if sent_value is not None:
if sent_value == INCREMENT_NUM_ROUNDS:
found_INCREMENT_NUM_ROUNDS = True
yield None
break
else:
warnings.warn(f"value {sent_value} is sent to the generator "
f"but only sending INCREMENT_NUM_ROUNDS"
f" affects the generator")
if found_INCREMENT_NUM_ROUNDS:
assert found_invalidprop
break
if not found_invalidprop:
assert found_INCREMENT_NUM_ROUNDS is False
bad_partitions.append(get_a_b_c_d_partition(num_S_rounds, num_E0_rounds, num_E1_rounds, num_E2_rounds))
if printing_mode == PrintingMode.Debug:
smart_print("No universally-invalid characteristic found for number of rounds "
f"({num_E0_rounds}, {num_E1_rounds}, {num_E2_rounds}) of (E0, E1, E2)",
prepend_time=True)
if found_INCREMENT_NUM_ROUNDS:
assert found_invalidprop is True
break
if found_INCREMENT_NUM_ROUNDS:
assert found_invalidprop is True
break
if not found_invalidprop or num_E_rounds == final_num_rounds:
break
else:
num_E_rounds += 1
assert num_E_rounds <= final_num_rounds
def round_based_invalidcipherprop_search(
cipher, initial_num_rounds, final_num_rounds, prop_type, solver_name,
max_num_skipped_rounds=0, min_num_E0_rounds=1, min_num_E2_rounds=1,
extra_cipherchmodel_args=None,
extra_invalidcipherpropfinder_args=None,
exclude_zero_input_prop_E0=True,
exclude_zero_input_prop_E2=True,
exclude_zero_input_prop_external_E=True,
# **kwargs
):
"""Search for zero-probability (invalid) properties of iterated ciphers over multiple number of rounds.
.. note::
The `Cipher.encryption` of ``cipher`` must be a
`RoundBasedFunction` including `add_round_outputs`
calls in its ``eval``.
This function is similar to `round_based_invalidprop_search`.
The only differences are:
- The function ``func`` (i.e., :math:`E \circ S`) is the
`Cipher.encryption` of the given ``cipher``.
Thus, :math:`S` denote the skipped rounds of the encryption function.
- Let :math:`K` denote the `Cipher.key_schedule` of ``cipher``, that is,
the function whose outputs are the round keys used in :math:`E \circ S`.
The generator `InvalidPropFinder.find_next_invalidprop_miss_in_the_middle`
is called with the argument ``ch_model_external_E`` given as the
characteristic model over :math:`K` and with the argument
``exclude_zero_input_prop_external_E``.
- This function yields 2-length tuples where the 2nd element is a
4-length tuple; the last characteristic is the characteristic
with probability 1 over :math:`K`.
Thus, the concatenation of the first 3 characteristics is a universally-invalid
characteristic over :math:`E` where the round key properties
are given by the outputs of the probability-one characteristic
over :math:`K`.
Note that initial rounds are only skipped in the encryption function
and not in the key-schedule function.
.. note::
Let ``(tuple_nr, tuple_ch)`` be an element
yielded by `round_based_invalidcipherprop_search`.
Let :math:`\\alpha_{K}` be the input property of the
4-th characteristic in ``tuple_ch``,
and let :math:`(\\alpha_{E}, \\beta_{E})` be the input-output
property pair of the concatenation of the first three characteristic
in ``tuple_ch``.
If ``tuple_nr[0]`` is 0, no initial rounds are skipped,
and :math:`(\\alpha_{K}, \\alpha_{E}) \mapsto \\beta_{E}`
is a universally-invalid cipher characteristic
(as defined in `InvalidCipherPropFinder`) of
``cipher.set_num_rounds_and_return(tuple_nr[1]+tuple_nr[2]+tuple_nr[3])``,
that is, the ``cipher`` with number of rounds
``tuple_nr[1]+tuple_nr[2]+tuple_nr[3]``.
If ``tuple_nr[0]`` is not zero then a universally-invalid cipher characteristic
is also obtained but the underlying cipher is more difficult
to generate due to the skipped initial rounds.
::
>>> from cascada.differential.difference import XorDiff
>>> from cascada.smt.invalidpropsearch import round_based_invalidcipherprop_search
>>> from cascada.primitives import speck
>>> Speck32 = speck.get_Speck_instance(speck.SpeckInstance.speck_32_64)
>>> iterator = round_based_invalidcipherprop_search(Speck32, 3, 3, XorDiff, "btor",
... extra_invalidcipherpropfinder_args={"solver_seed":0})
>>> for i, (tuple_rounds, tuple_chs) in enumerate(iterator):
... print(tuple_rounds, ":", ', '.join([ch.srepr() for ch in tuple_chs]))
... if i == 1: break # doctest: +NORMALIZE_WHITESPACE, +ELLIPSIS
(0, 1, 1, 1) : Ch(w=0, id=0000 8000, od=8000 8002),
Ch(w=Infinity, id=8000 8002, od=0000 8000),
Ch(w=0, id=0000 8000, od=0002 0000),
Ch(w=0, id=0000 0040 0000, od=0000 8000 8002)
(0, 1, 1, 1) : Ch(w=0, id=0000 8000, od=8000 8002),
Ch(w=Infinity, id=8000 8002, od=0040 8000),
Ch(w=0, id=0040 8000, od=8002 8000),
Ch(w=0, id=0000 0040 0000, od=0000 8000 8002)
.. Implementation details:
A universally-invalid cipher characteristic cannot be returned since
the underlying cipher cannot be generated due to the
skipped rounds.
Initial rounds are not skipped in the key-schedule function
since ``split`` changes the names of the variables and afterwards
these names don't match the round key names in the
encryption characteristics.
The key-schedule and the encryption computations cannot be
easily merged into a single bit-vector function with the ``eval``
method because in that case first the whole key-schedule
would be computed (and thus the whole key-schedule would
be part of the first split characteristic).
"""
return round_based_invalidprop_search(
cipher, initial_num_rounds, final_num_rounds, prop_type, solver_name,
max_num_skipped_rounds=max_num_skipped_rounds,
min_num_E0_rounds=min_num_E0_rounds,
min_num_E2_rounds=min_num_E2_rounds,
extra_chmodel_args=extra_cipherchmodel_args,
extra_invalidpropfinder_args=extra_invalidcipherpropfinder_args,
exclude_zero_input_prop_E0=exclude_zero_input_prop_E0,
exclude_zero_input_prop_E2=exclude_zero_input_prop_E2,
exclude_zero_input_prop_external_E=exclude_zero_input_prop_external_E,
find_cipher_invalid_prop=True,
# **kwargs
)
| [
"itertools.chain",
"enum.auto",
"cascada.smt.chsearch.ChFinder",
"cascada.smt.chsearch.environment.pop_env",
"cascada.smt.chsearch.CipherChFinder",
"itertools.product",
"cascada.smt.wrappedchmodel.get_wrapped_chmodel",
"warnings.warn",
"cascada.bitvector.core.Constant",
"cascada.bitvector.operation.BvNot",
"collections.OrderedDict",
"cascada.smt.chsearch.environment.push_env",
"cascada.linear.chmodel.ChModel",
"cascada.bitvector.context.Simplification",
"cascada.smt.chsearch.environment.get_env",
"cascada.bitvector.operation.BvComp",
"cascada.smt.chsearch._get_smart_print",
"cascada.smt.pysmttypes.pysmt_model2bv_model",
"cascada.linear.chmodel.EncryptionChModel",
"functools.partial",
"cascada.differential.chmodel.CipherChModel"
] | [((693, 728), 'functools.partial', 'functools.partial', (['zip'], {'strict': '(True)'}), '(zip, strict=True)\n', (710, 728), False, 'import functools\n'), ((1278, 1289), 'enum.auto', 'enum.auto', ([], {}), '()\n', (1287, 1289), False, 'import enum\n'), ((1306, 1317), 'enum.auto', 'enum.auto', ([], {}), '()\n', (1315, 1317), False, 'import enum\n'), ((1330, 1341), 'enum.auto', 'enum.auto', ([], {}), '()\n', (1339, 1341), False, 'import enum\n'), ((1353, 1364), 'enum.auto', 'enum.auto', ([], {}), '()\n', (1362, 1364), False, 'import enum\n'), ((100387, 100422), 'cascada.smt.chsearch._get_smart_print', 'chsearch._get_smart_print', (['filename'], {}), '(filename)\n', (100412, 100422), False, 'from cascada.smt import chsearch\n'), ((13477, 13547), 'itertools.chain', 'itertools.chain', (['self._ch_model_input_prop', 'self._ch_model_output_prop'], {}), '(self._ch_model_input_prop, self._ch_model_output_prop)\n', (13492, 13547), False, 'import itertools\n'), ((15068, 15099), 'cascada.smt.chsearch.environment.push_env', 'chsearch.environment.push_env', ([], {}), '()\n', (15097, 15099), False, 'from cascada.smt import chsearch\n'), ((15114, 15144), 'cascada.smt.chsearch.environment.get_env', 'chsearch.environment.get_env', ([], {}), '()\n', (15142, 15144), False, 'from cascada.smt import chsearch\n'), ((15224, 15299), 'functools.partial', 'functools.partial', (['pysmttypes.bv2pysmt'], {'env': 'env', 'parse_shifts_rotations': 'psr'}), '(pysmttypes.bv2pysmt, env=env, parse_shifts_rotations=psr)\n', (15241, 15299), False, 'import functools\n'), ((17745, 17775), 'cascada.smt.chsearch.environment.pop_env', 'chsearch.environment.pop_env', ([], {}), '()\n', (17773, 17775), False, 'from cascada.smt import chsearch\n'), ((24777, 24817), 'cascada.smt.chsearch._get_smart_print', 'chsearch._get_smart_print', (['self.filename'], {}), '(self.filename)\n', (24802, 24817), False, 'from cascada.smt import chsearch\n'), ((24952, 25056), 'functools.partial', 'functools.partial', (['pysmttypes.bv2pysmt'], {'env': 'self.env', 'parse_shifts_rotations': 'parse_shifts_rotations'}), '(pysmttypes.bv2pysmt, env=self.env, parse_shifts_rotations\n =parse_shifts_rotations)\n', (24969, 25056), False, 'import functools\n'), ((25536, 25599), 'itertools.chain', 'itertools.chain', (['self.initial_constraints', 'self.chmodel_asserts'], {}), '(self.initial_constraints, self.chmodel_asserts)\n', (25551, 25599), False, 'import itertools\n'), ((45650, 45942), 'cascada.smt.chsearch.ChFinder', 'chsearch.ChFinder', (['ch_model_E0', 'chsearch.ChModelAssertType.ProbabilityOne', 'self.solver_name'], {'exclude_zero_input_prop': 'exclude_zero_input_prop_E0', 'raise_exception_missing_var': '(False)', 'printing_mode': 'self.printing_mode', 'filename': 'self.filename', 'solver_seed': 'self.solver_seed', 'env': 'self.env'}), '(ch_model_E0, chsearch.ChModelAssertType.ProbabilityOne,\n self.solver_name, exclude_zero_input_prop=exclude_zero_input_prop_E0,\n raise_exception_missing_var=False, printing_mode=self.printing_mode,\n filename=self.filename, solver_seed=self.solver_seed, env=self.env)\n', (45667, 45942), False, 'from cascada.smt import chsearch\n'), ((46090, 46382), 'cascada.smt.chsearch.ChFinder', 'chsearch.ChFinder', (['ch_model_E2', 'chsearch.ChModelAssertType.ProbabilityOne', 'self.solver_name'], {'exclude_zero_input_prop': 'exclude_zero_input_prop_E2', 'raise_exception_missing_var': '(False)', 'printing_mode': 'self.printing_mode', 'filename': 'self.filename', 'solver_seed': 'self.solver_seed', 'env': 'self.env'}), '(ch_model_E2, 
chsearch.ChModelAssertType.ProbabilityOne,\n self.solver_name, exclude_zero_input_prop=exclude_zero_input_prop_E2,\n raise_exception_missing_var=False, printing_mode=self.printing_mode,\n filename=self.filename, solver_seed=self.solver_seed, env=self.env)\n', (46107, 46382), False, 'from cascada.smt import chsearch\n'), ((47012, 47139), 'functools.partial', 'functools.partial', (['pysmttypes.bv2pysmt'], {'env': 'self.env', 'parse_shifts_rotations': "(True if self.solver_name == 'btor' else False)"}), "(pysmttypes.bv2pysmt, env=self.env, parse_shifts_rotations\n =True if self.solver_name == 'btor' else False)\n", (47029, 47139), False, 'import functools\n'), ((47646, 47709), 'itertools.chain', 'itertools.chain', (['self.initial_constraints', 'self.chmodel_asserts'], {}), '(self.initial_constraints, self.chmodel_asserts)\n', (47661, 47709), False, 'import itertools\n'), ((52527, 52567), 'cascada.smt.chsearch._get_smart_print', 'chsearch._get_smart_print', (['self.filename'], {}), '(self.filename)\n', (52552, 52567), False, 'from cascada.smt import chsearch\n'), ((62593, 62633), 'cascada.smt.chsearch._get_smart_print', 'chsearch._get_smart_print', (['self.filename'], {}), '(self.filename)\n', (62618, 62633), False, 'from cascada.smt import chsearch\n'), ((62963, 63067), 'functools.partial', 'functools.partial', (['pysmttypes.bv2pysmt'], {'env': 'self.env', 'parse_shifts_rotations': 'parse_shifts_rotations'}), '(pysmttypes.bv2pysmt, env=self.env, parse_shifts_rotations\n =parse_shifts_rotations)\n', (62980, 63067), False, 'import functools\n'), ((63591, 63654), 'itertools.chain', 'itertools.chain', (['self.initial_constraints', 'self.chmodel_asserts'], {}), '(self.initial_constraints, self.chmodel_asserts)\n', (63606, 63654), False, 'import itertools\n'), ((13029, 13054), 'collections.OrderedDict', 'collections.OrderedDict', ([], {}), '()\n', (13052, 13054), False, 'import collections\n'), ((14329, 14351), 'cascada.bitvector.operation.BvComp', 'operation.BvComp', (['v', 'c'], {}), '(v, c)\n', (14345, 14351), False, 'from cascada.bitvector import operation\n'), ((17615, 17655), 'cascada.smt.chsearch._get_smart_print', 'chsearch._get_smart_print', (['self.filename'], {}), '(self.filename)\n', (17640, 17655), False, 'from cascada.smt import chsearch\n'), ((17791, 17821), 'cascada.smt.chsearch.environment.get_env', 'chsearch.environment.get_env', ([], {}), '()\n', (17819, 17821), False, 'from cascada.smt import chsearch\n'), ((18880, 18905), 'collections.OrderedDict', 'collections.OrderedDict', ([], {}), '()\n', (18903, 18905), False, 'import collections\n'), ((19637, 19895), 'cascada.smt.chsearch.ChFinder', 'chsearch.ChFinder', (['ch_model'], {'assert_type': 'self.assert_type', 'solver_name': 'self.solver_name', 'var_prop2ct_prop': 'var_prop2ct_prop', 'raise_exception_missing_var': '(False)', 'printing_mode': 'self.printing_mode', 'filename': 'self.filename', 'solver_seed': 'self.solver_seed'}), '(ch_model, assert_type=self.assert_type, solver_name=self.\n solver_name, var_prop2ct_prop=var_prop2ct_prop,\n raise_exception_missing_var=False, printing_mode=self.printing_mode,\n filename=self.filename, solver_seed=self.solver_seed)\n', (19654, 19895), False, 'from cascada.smt import chsearch\n'), ((20601, 20631), 'cascada.smt.chsearch.environment.get_env', 'chsearch.environment.get_env', ([], {}), '()\n', (20629, 20631), False, 'from cascada.smt import chsearch\n'), ((46520, 46835), 'cascada.smt.chsearch.ChFinder', 'chsearch.ChFinder', (['ch_model_external_E', 
'chsearch.ChModelAssertType.ProbabilityOne', 'self.solver_name'], {'exclude_zero_input_prop': 'exclude_zero_input_prop_external_E', 'raise_exception_missing_var': '(False)', 'printing_mode': 'self.printing_mode', 'filename': 'self.filename', 'solver_seed': 'self.solver_seed', 'env': 'self.env'}), '(ch_model_external_E, chsearch.ChModelAssertType.\n ProbabilityOne, self.solver_name, exclude_zero_input_prop=\n exclude_zero_input_prop_external_E, raise_exception_missing_var=False,\n printing_mode=self.printing_mode, filename=self.filename, solver_seed=\n self.solver_seed, env=self.env)\n', (46537, 46835), False, 'from cascada.smt import chsearch\n'), ((50260, 50285), 'collections.OrderedDict', 'collections.OrderedDict', ([], {}), '()\n', (50283, 50285), False, 'import collections\n'), ((50616, 50881), 'cascada.smt.chsearch.ChFinder', 'chsearch.ChFinder', (['ch_model_E'], {'assert_type': 'self.assert_type', 'solver_name': 'self.solver_name', 'var_prop2ct_prop': 'my_var_prop2ct_prop_E', 'raise_exception_missing_var': '(False)', 'printing_mode': 'self.printing_mode', 'filename': 'self.filename', 'solver_seed': 'self.solver_seed'}), '(ch_model_E, assert_type=self.assert_type, solver_name=\n self.solver_name, var_prop2ct_prop=my_var_prop2ct_prop_E,\n raise_exception_missing_var=False, printing_mode=self.printing_mode,\n filename=self.filename, solver_seed=self.solver_seed)\n', (50633, 50881), False, 'from cascada.smt import chsearch\n'), ((74743, 74791), 'cascada.smt.pysmttypes.pysmt_model2bv_model', 'pysmttypes.pysmt_model2bv_model', (['solution_var2ct'], {}), '(solution_var2ct)\n', (74774, 74791), False, 'from cascada.smt import pysmttypes\n'), ((76420, 76460), 'cascada.smt.chsearch._get_smart_print', 'chsearch._get_smart_print', (['self.filename'], {}), '(self.filename)\n', (76445, 76460), False, 'from cascada.smt import chsearch\n'), ((81879, 81904), 'collections.OrderedDict', 'collections.OrderedDict', ([], {}), '()\n', (81902, 81904), False, 'import collections\n'), ((82232, 82544), 'cascada.smt.chsearch.CipherChFinder', 'chsearch.CipherChFinder', (['cipher_ch_model'], {'ks_assert_type': 'self.assert_type', 'enc_assert_type': 'self.assert_type', 'solver_name': 'self.solver_name', 'var_prop2ct_prop': 'var_prop2ct_prop', 'raise_exception_missing_var': '(False)', 'printing_mode': 'self.printing_mode', 'filename': 'self.filename', 'solver_seed': 'self.solver_seed'}), '(cipher_ch_model, ks_assert_type=self.assert_type,\n enc_assert_type=self.assert_type, solver_name=self.solver_name,\n var_prop2ct_prop=var_prop2ct_prop, raise_exception_missing_var=False,\n printing_mode=self.printing_mode, filename=self.filename, solver_seed=\n self.solver_seed)\n', (82255, 82544), False, 'from cascada.smt import chsearch\n'), ((82956, 82986), 'cascada.smt.chsearch.environment.get_env', 'chsearch.environment.get_env', ([], {}), '()\n', (82984, 82986), False, 'from cascada.smt import chsearch\n'), ((2504, 2523), 'cascada.bitvector.core.Constant', 'core.Constant', (['(0)', 'w'], {}), '(0, w)\n', (2517, 2523), False, 'from cascada.bitvector import core\n'), ((3057, 3086), 'itertools.product', 'itertools.product', (['*iterables'], {}), '(*iterables)\n', (3074, 3086), False, 'import itertools\n'), ((16273, 16317), 'cascada.smt.pysmttypes.pysmt_model2bv_model', 'pysmttypes.pysmt_model2bv_model', (['pysmt_model'], {}), '(pysmt_model)\n', (16304, 16317), False, 'from cascada.smt import pysmttypes\n'), ((41675, 41854), 'warnings.warn', 'warnings.warn', (['f"""{name_prone_ch_model} might contain too many characteristics 
with probability 1 since {name_prone_ch_model}.max_weight() is 0 \n{aux_prone_ch_model}"""'], {}), '(\n f"""{name_prone_ch_model} might contain too many characteristics with probability 1 since {name_prone_ch_model}.max_weight() is 0 \n{aux_prone_ch_model}"""\n )\n', (41688, 41854), False, 'import warnings\n'), ((52415, 52445), 'cascada.smt.chsearch.environment.get_env', 'chsearch.environment.get_env', ([], {}), '()\n', (52443, 52445), False, 'from cascada.smt import chsearch\n'), ((52980, 53005), 'collections.OrderedDict', 'collections.OrderedDict', ([], {}), '()\n', (53003, 53005), False, 'import collections\n'), ((53423, 53448), 'collections.OrderedDict', 'collections.OrderedDict', ([], {}), '()\n', (53446, 53448), False, 'import collections\n'), ((63731, 63801), 'itertools.chain', 'itertools.chain', (['self._ch_model_input_prop', 'self._ch_model_output_prop'], {}), '(self._ch_model_input_prop, self._ch_model_output_prop)\n', (63746, 63801), False, 'import itertools\n'), ((64015, 64050), 'cascada.bitvector.operation.BvNot', 'operation.BvNot', (['compact_constraint'], {}), '(compact_constraint)\n', (64030, 64050), False, 'from cascada.bitvector import operation\n'), ((75987, 76012), 'cascada.bitvector.core.Constant', 'core.Constant', (['(0)', 'v.width'], {}), '(0, v.width)\n', (76000, 76012), False, 'from cascada.bitvector import core\n'), ((103068, 103132), 'cascada.linear.chmodel.ChModel', 'ChModel', (['func', 'prop_type', 'input_prop_names'], {}), '(func, prop_type, input_prop_names, **extra_chmodel_args)\n', (103075, 103132), False, 'from cascada.linear.chmodel import ChModel, EncryptionChModel\n'), ((4062, 4081), 'cascada.bitvector.core.Constant', 'core.Constant', (['x', 'n'], {}), '(x, n)\n', (4075, 4081), False, 'from cascada.bitvector import core\n'), ((15579, 15608), 'cascada.bitvector.context.Simplification', 'context.Simplification', (['(False)'], {}), '(False)\n', (15601, 15608), False, 'from cascada.bitvector import context\n'), ((41459, 41478), 'cascada.bitvector.core.Constant', 'core.Constant', (['(0)', '(1)'], {}), '(0, 1)\n', (41472, 41478), False, 'from cascada.bitvector import core\n'), ((53573, 53639), 'cascada.bitvector.operation.BvComp', 'operation.BvComp', (['ext_var_E1', 'output_var2ct_external_E[ext_var_E1]'], {}), '(ext_var_E1, output_var2ct_external_E[ext_var_E1])\n', (53589, 53639), False, 'from cascada.bitvector import operation\n'), ((64629, 64681), 'warnings.warn', 'warnings.warn', (['f"""empty signature of {self.ch_model}"""'], {}), "(f'empty signature of {self.ch_model}')\n", (64642, 64681), False, 'import warnings\n'), ((64864, 64893), 'cascada.bitvector.context.Simplification', 'context.Simplification', (['(False)'], {}), '(False)\n', (64886, 64893), False, 'from cascada.bitvector import context\n'), ((102601, 102653), 'cascada.differential.chmodel.CipherChModel', 'CipherChModel', (['func', 'prop_type'], {}), '(func, prop_type, **extra_chmodel_args)\n', (102614, 102653), False, 'from cascada.differential.chmodel import ChModel, EncryptionChModel, CipherChModel\n'), ((102820, 102876), 'cascada.linear.chmodel.EncryptionChModel', 'EncryptionChModel', (['func', 'prop_type'], {}), '(func, prop_type, **extra_chmodel_args)\n', (102837, 102876), False, 'from cascada.linear.chmodel import ChModel, EncryptionChModel\n'), ((16488, 16526), 'cascada.bitvector.operation.BvComp', 'operation.BvComp', (['model_var', 'model_val'], {}), '(model_var, model_val)\n', (16504, 16526), False, 'from cascada.bitvector import operation\n'), ((27898, 27932), 
'cascada.bitvector.operation.BvComp', 'operation.BvComp', (['var_prop.val', 'ct'], {}), '(var_prop.val, ct)\n', (27914, 27932), False, 'from cascada.bitvector import operation\n'), ((55168, 55193), 'collections.OrderedDict', 'collections.OrderedDict', ([], {}), '()\n', (55191, 55193), False, 'import collections\n'), ((55397, 55441), 'cascada.bitvector.operation.BvComp', 'operation.BvComp', (['var_prop_E1.val', 'ct_val_E0'], {}), '(var_prop_E1.val, ct_val_E0)\n', (55413, 55441), False, 'from cascada.bitvector import operation\n'), ((55655, 55699), 'cascada.bitvector.operation.BvComp', 'operation.BvComp', (['var_prop_E1.val', 'ct_val_E2'], {}), '(var_prop_E1.val, ct_val_E2)\n', (55671, 55699), False, 'from cascada.bitvector import operation\n'), ((65874, 65899), 'cascada.bitvector.core.Constant', 'core.Constant', (['(0)', 'v.width'], {}), '(0, v.width)\n', (65887, 65899), False, 'from cascada.bitvector import core\n'), ((107910, 107942), 'cascada.smt.wrappedchmodel.get_wrapped_chmodel', 'get_wrapped_chmodel', (['E1_ch_model'], {}), '(E1_ch_model)\n', (107929, 107942), False, 'from cascada.smt.wrappedchmodel import get_wrapped_chmodel, get_wrapped_cipher_chmodel\n'), ((28451, 28485), 'cascada.bitvector.operation.BvComp', 'operation.BvComp', (['var_prop.val', 'ct'], {}), '(var_prop.val, ct)\n', (28467, 28485), False, 'from cascada.bitvector import operation\n'), ((48419, 48460), 'cascada.bitvector.operation.BvComp', 'operation.BvComp', (['ext_v', 'my_var2ct[ext_v]'], {}), '(ext_v, my_var2ct[ext_v])\n', (48435, 48460), False, 'from cascada.bitvector import operation\n'), ((49378, 49419), 'cascada.bitvector.operation.BvComp', 'operation.BvComp', (['ext_v', 'my_var2ct[ext_v]'], {}), '(ext_v, my_var2ct[ext_v])\n', (49394, 49419), False, 'from cascada.bitvector import operation\n'), ((51250, 51304), 'cascada.bitvector.operation.BvComp', 'operation.BvComp', (['vp_external_E.val', 'cp_external_E.val'], {}), '(vp_external_E.val, cp_external_E.val)\n', (51266, 51304), False, 'from cascada.bitvector import operation\n'), ((109757, 109782), 'collections.OrderedDict', 'collections.OrderedDict', ([], {}), '()\n', (109780, 109782), False, 'import collections\n'), ((110304, 110610), 'cascada.smt.chsearch.CipherChFinder', 'chsearch.CipherChFinder', (['aux_SE_ch_model'], {'ks_assert_type': 'chsearch.ChModelAssertType.Validity', 'enc_assert_type': 'chsearch.ChModelAssertType.Validity', 'solver_name': 'solver_name', 'var_prop2ct_prop': 'var_prop2ct_prop', 'raise_exception_missing_var': '(False)', 'printing_mode': 'printing_mode', 'filename': 'filename'}), '(aux_SE_ch_model, ks_assert_type=chsearch.\n ChModelAssertType.Validity, enc_assert_type=chsearch.ChModelAssertType.\n Validity, solver_name=solver_name, var_prop2ct_prop=var_prop2ct_prop,\n raise_exception_missing_var=False, printing_mode=printing_mode,\n filename=filename)\n', (110327, 110610), False, 'from cascada.smt import chsearch\n'), ((3288, 3337), 'cascada.bitvector.core.Constant', 'core.Constant', (['(1 << w_combination[counter_w_c])', 'w'], {}), '(1 << w_combination[counter_w_c], w)\n', (3301, 3337), False, 'from cascada.bitvector import core\n'), ((3445, 3464), 'cascada.bitvector.core.Constant', 'core.Constant', (['(0)', 'w'], {}), '(0, w)\n', (3458, 3464), False, 'from cascada.bitvector import core\n'), ((65017, 65041), 'cascada.bitvector.operation.BvComp', 'operation.BvComp', (['ss', 'ls'], {}), '(ss, ls)\n', (65033, 65041), False, 'from cascada.bitvector import operation\n'), ((112399, 112530), 'warnings.warn', 'warnings.warn', (['f"""value 
{sent_value} is sent to the generator but only sending INCREMENT_NUM_ROUNDS affects the generator"""'], {}), "(\n f'value {sent_value} is sent to the generator but only sending INCREMENT_NUM_ROUNDS affects the generator'\n )\n", (112412, 112530), False, 'import warnings\n')] |
from flask import Flask, request, render_template, json
import os
import pickle
from sklearn.externals import joblib
import pandas as pd
import numpy as np
import azureml.train.automl
from azureml.core.model import Model
from inference_schema.schema_decorators import input_schema, output_schema
from inference_schema.parameter_types.numpy_parameter_type import NumpyParameterType
from inference_schema.parameter_types.pandas_parameter_type import PandasParameterType
input_sample = pd.DataFrame(data=[{'Column3': None, 'Column4': None, 'Column5': 37148.2, 'Column6': 2045.282, 'Column7': None, 'Column8': '', 'Column9': None, 'Column10': None, 'Column11': 7.446882968, 'Column12': None, 'Column13': 100.0, 'Column14': 32.95, 'Column15': 0.515, 'Column16': 8.226, 'Column17': 0.0132580645, 'Column18': 42744.25684, 'Column19': 42.41, 'Column20': 49.09144714, 'Column21': 9.691793869, 'Column22': None, 'Column23': 1.688895637, 'Column24': 1.427532412, 'Column25': 8696587915.0, 'Column26': 39.44102455, 'Column27': 2.611781593, 'Column28': 0.0339, 'Column29': 35.8170301, 'Column30': None, 'Column31': None, 'Column32': 97.17336466, 'Column33': 35.5573706, 'Column34': 44.5027166, 'Column35': 63.37726834, 'Column36': 1728388.673, 'Column37': 331927.5394, 'Column38': 0.1627315206, 'Column39': 40.5605634}])
output_sample = np.array([0])
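# The decorated run() below reads a global `model` that is never assigned in this file.
# In an Azure ML scoring script an init() such as the following sketch usually loads it;
# "<registered-model-name>" is a placeholder, not an actual registered model name.
def init():
    global model
    # resolve the path of the registered model inside the deployed container
    model_path = Model.get_model_path(model_name="<registered-model-name>")
    model = joblib.load(model_path)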
@input_schema('data', PandasParameterType(input_sample))
@output_schema(NumpyParameterType(output_sample))
def run(data):
try:
result = model.predict(data)
return json.dumps({"result": result.tolist()})
except Exception as e:
result = str(e)
return json.dumps({"error": result})
app = Flask(__name__)
def model_predict(file_path, model_path):
model= joblib.load(model_path)
data= pd.read_csv(file_path)
x=data.drop(columns=['Column3'])
y_test=data['Column3']
y_pred=model.predict(x)
print(y_pred)
print('Deleting File at Path: ' + file_path)
os.remove(file_path)
print('Deleting File at Path - Success - ')
return y_pred
@app.route('/')
def upload():
return render_template("main.html")
@app.route('/predict', methods = ['POST'])
def success():
if request.method == 'POST':
f = request.files['file']
f.save(os.path.join("static/uploads",f.filename))
print('Begin Model Prediction...')
file_path = "static/uploads/"+f.filename
model_path = "static/model/vote.pkl" #Edit model name
preds = model_predict(file_path, model_path)
print('End Model Prediction...')
print(preds)
return render_template("result.html", resGet=preds)
if __name__ == '__main__':
app.run(debug = True) | [
"pandas.read_csv",
"inference_schema.parameter_types.pandas_parameter_type.PandasParameterType",
"sklearn.externals.joblib.load",
"inference_schema.parameter_types.numpy_parameter_type.NumpyParameterType",
"os.path.join",
"numpy.array",
"pandas.DataFrame",
"os.remove"
] | [((513, 1387), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': "[{'Column3': None, 'Column4': None, 'Column5': 37148.2, 'Column6': 2045.282,\n 'Column7': None, 'Column8': '', 'Column9': None, 'Column10': None,\n 'Column11': 7.446882968, 'Column12': None, 'Column13': 100.0,\n 'Column14': 32.95, 'Column15': 0.515, 'Column16': 8.226, 'Column17': \n 0.0132580645, 'Column18': 42744.25684, 'Column19': 42.41, 'Column20': \n 49.09144714, 'Column21': 9.691793869, 'Column22': None, 'Column23': \n 1.688895637, 'Column24': 1.427532412, 'Column25': 8696587915.0,\n 'Column26': 39.44102455, 'Column27': 2.611781593, 'Column28': 0.0339,\n 'Column29': 35.8170301, 'Column30': None, 'Column31': None, 'Column32':\n 97.17336466, 'Column33': 35.5573706, 'Column34': 44.5027166, 'Column35':\n 63.37726834, 'Column36': 1728388.673, 'Column37': 331927.5394,\n 'Column38': 0.1627315206, 'Column39': 40.5605634}]"}), "(data=[{'Column3': None, 'Column4': None, 'Column5': 37148.2,\n 'Column6': 2045.282, 'Column7': None, 'Column8': '', 'Column9': None,\n 'Column10': None, 'Column11': 7.446882968, 'Column12': None, 'Column13':\n 100.0, 'Column14': 32.95, 'Column15': 0.515, 'Column16': 8.226,\n 'Column17': 0.0132580645, 'Column18': 42744.25684, 'Column19': 42.41,\n 'Column20': 49.09144714, 'Column21': 9.691793869, 'Column22': None,\n 'Column23': 1.688895637, 'Column24': 1.427532412, 'Column25': \n 8696587915.0, 'Column26': 39.44102455, 'Column27': 2.611781593,\n 'Column28': 0.0339, 'Column29': 35.8170301, 'Column30': None,\n 'Column31': None, 'Column32': 97.17336466, 'Column33': 35.5573706,\n 'Column34': 44.5027166, 'Column35': 63.37726834, 'Column36': \n 1728388.673, 'Column37': 331927.5394, 'Column38': 0.1627315206,\n 'Column39': 40.5605634}])\n", (525, 1387), True, 'import pandas as pd\n'), ((1355, 1368), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (1363, 1368), True, 'import numpy as np\n'), ((1394, 1427), 'inference_schema.parameter_types.pandas_parameter_type.PandasParameterType', 'PandasParameterType', (['input_sample'], {}), '(input_sample)\n', (1413, 1427), False, 'from inference_schema.parameter_types.pandas_parameter_type import PandasParameterType\n'), ((1445, 1478), 'inference_schema.parameter_types.numpy_parameter_type.NumpyParameterType', 'NumpyParameterType', (['output_sample'], {}), '(output_sample)\n', (1463, 1478), False, 'from inference_schema.parameter_types.numpy_parameter_type import NumpyParameterType\n'), ((1781, 1804), 'sklearn.externals.joblib.load', 'joblib.load', (['model_path'], {}), '(model_path)\n', (1792, 1804), False, 'from sklearn.externals import joblib\n'), ((1816, 1838), 'pandas.read_csv', 'pd.read_csv', (['file_path'], {}), '(file_path)\n', (1827, 1838), True, 'import pandas as pd\n'), ((2008, 2028), 'os.remove', 'os.remove', (['file_path'], {}), '(file_path)\n', (2017, 2028), False, 'import os\n'), ((2330, 2372), 'os.path.join', 'os.path.join', (['"""static/uploads"""', 'f.filename'], {}), "('static/uploads', f.filename)\n", (2342, 2372), False, 'import os\n')] |
import pickle
import concurrent.futures
import os
import cv2
from .builder import DATASETS
from .custom import CustomDataset
@DATASETS.register_module()
class TileTestDataset(CustomDataset):
CLASSES = (0, 1, 2, 3, 4, 5, 6)
def _load_slides_map(self):
self.slides_map = {
'CAM12':
[[720, 1440, 2160, 2880, 3600, 4320, 5040, 5760, 6480, 7200],
[0, 720, 1440, 2160, 2880, 3600, 4320, 5040]],
'CAM3': [[0, 720, 1440, 2160, 2880, 3196],
[0, 720, 1440, 2160, 2600]]
}
self.slides_map = {
'CAM12': [[i, j] for i in self.slides_map['CAM12'][0]
for j in self.slides_map['CAM12'][1]],
'CAM3': [[i, j] for i in self.slides_map['CAM3'][0]
for j in self.slides_map['CAM3'][1]]
}
def _crop_images(self, img_name):
if 'CAM3' in img_name:
slides = self.slides_map['CAM3']
else:
slides = self.slides_map['CAM12']
image = cv2.imread(os.path.join(self.ann_file, img_name))
crop_images = []
for slide in slides:
x, y = slide[0], slide[1]
crop_images.append(image[y:y + self.window_size,
x:x + self.window_size])
return [img_name, crop_images]
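    # Note: _crop_images assumes that _load_slides_map() has already been called and that
    # self.window_size has been set elsewhere (neither happens in this class as written).
    # For example, with a window_size of 720 the slide [720, 0] would yield the patch
    # image[0:720, 720:1440].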
def load_images(self, test_img_folder, max_worker=10):
self.all_img_names = os.listdir(test_img_folder)
def __getitem__(self, idx):
img = cv2.imread(os.path.join(self.ann_file, self.all_img_names[idx]))
results = dict(img=img)
if self.proposals is not None:
results['proposals'] = self.proposals[idx]
self.pre_pipeline(results)
return self.pipeline(results)
def get_test_img_names(self):
return self.all_img_names
def get_slide_sizes(self):
return {
'CAM12': len(self.slides_map['CAM12']),
'CAM3': len(self.slides_map['CAM3'])
}
def get_slides_positions(self, img_name):
if 'CAM3' in img_name:
return self.slides_map['CAM3']
else:
return self.slides_map['CAM12']
def __len__(self):
return len(self.all_img_names)
@DATASETS.register_module()
class TileTestDatasetV2(CustomDataset):
CLASSES = (0, 1, 2, 3, 4, 5, 6)
def load_images(self, test_img_folder, max_worker=10):
self.all_img_names = os.listdir(test_img_folder)
def __getitem__(self, idx):
img_name = self.all_img_names[idx]
image = cv2.imread(os.path.join(self.ann_file, img_name))
center = [image.shape[1] // 2, image.shape[0] // 2]
if 'CAM3' in img_name:
center[1] = int(image.shape[0] * 0.55)
length = int(image.shape[1] * 0.36)
left_up_position = [center[0] - length, max(center[1] - length, 0)]
image = image[max(center[1] -
length, 0):min(image.shape[0], center[1] + length +
1),
center[0] - length:center[0] + length + 1, :]
results = dict(img=image)
if self.proposals is not None:
results['proposals'] = self.proposals[idx]
self.pre_pipeline(results)
return self.pipeline(results)
def __len__(self):
return len(self.all_img_names)
def get_test_img_names(self):
return self.all_img_names
@DATASETS.register_module()
class TileTestDatasetV3(CustomDataset):
CLASSES = (0, 1, 2, 3, 4, 5, 6)
def load_images(self, test_img_folder, max_worker=10):
self.all_img_names = os.listdir(test_img_folder)
def __getitem__(self, idx):
img_name = self.all_img_names[idx]
image = cv2.imread(os.path.join(self.ann_file, img_name))
results = dict(img=image)
if self.proposals is not None:
results['proposals'] = self.proposals[idx]
self.pre_pipeline(results)
return self.pipeline(results)
def __len__(self):
return len(self.all_img_names)
def get_test_img_names(self):
return self.all_img_names | [
"os.listdir",
"os.path.join"
] | [((1431, 1458), 'os.listdir', 'os.listdir', (['test_img_folder'], {}), '(test_img_folder)\n', (1441, 1458), False, 'import os\n'), ((2436, 2463), 'os.listdir', 'os.listdir', (['test_img_folder'], {}), '(test_img_folder)\n', (2446, 2463), False, 'import os\n'), ((3627, 3654), 'os.listdir', 'os.listdir', (['test_img_folder'], {}), '(test_img_folder)\n', (3637, 3654), False, 'import os\n'), ((1049, 1086), 'os.path.join', 'os.path.join', (['self.ann_file', 'img_name'], {}), '(self.ann_file, img_name)\n', (1061, 1086), False, 'import os\n'), ((1517, 1569), 'os.path.join', 'os.path.join', (['self.ann_file', 'self.all_img_names[idx]'], {}), '(self.ann_file, self.all_img_names[idx])\n', (1529, 1569), False, 'import os\n'), ((2567, 2604), 'os.path.join', 'os.path.join', (['self.ann_file', 'img_name'], {}), '(self.ann_file, img_name)\n', (2579, 2604), False, 'import os\n'), ((3758, 3795), 'os.path.join', 'os.path.join', (['self.ann_file', 'img_name'], {}), '(self.ann_file, img_name)\n', (3770, 3795), False, 'import os\n')] |
import ipaddress
import os
import subprocess
import tempfile
import unittest
from lib.constants import PROXYCHAINS
class ProxychainsTestCase(unittest.TestCase):
""" Tests for proxychains. """
def test_proxychains(self):
""" Make sure proxychains can actually download something """
try:
os.environ['http_proxy'] = ''
os.environ['https_proxy'] = ''
temp = tempfile.NamedTemporaryFile()
url = 'https://wtfismyip.com/text'
command = '{} wget -O {} -T {} "{}"'.format(PROXYCHAINS, temp.name, 10, url)
subprocess.call(command, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL, shell=True)
with open(temp.name) as f:
self.assertTrue(ipaddress.ip_address(f.read().strip()))
temp.close()
except:
self.fail('Error when calling proxychains')
| [
"subprocess.call",
"tempfile.NamedTemporaryFile"
] | [((420, 449), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {}), '()\n', (447, 449), False, 'import tempfile\n'), ((598, 693), 'subprocess.call', 'subprocess.call', (['command'], {'stdout': 'subprocess.DEVNULL', 'stderr': 'subprocess.DEVNULL', 'shell': '(True)'}), '(command, stdout=subprocess.DEVNULL, stderr=subprocess.\n DEVNULL, shell=True)\n', (613, 693), False, 'import subprocess\n')] |
#!/usr/bin/env python
#
# Copyright 2010 Google Inc. All Rights Reserved.
"""Test utilities for the MOE scrubber."""
__author__ = '<EMAIL> (<NAME>)'
import os
import time
from google.apputils import file_util
import gflags as flags
from google.apputils import resources
from moe.scrubber import base
FLAGS = flags.FLAGS
def TestResourceName(name):
"""Return a resource name for a resource under the test data directory."""
prefix = __name__ + ':data/'
return prefix + name
def TestResourceFilename(name):
"""Return the filename of a resource under the test data directory.
Args:
name: A resource name under the test data directory. The path should end in
'/' if name is a directory, which additionally may result in an
expensive zipfile extraction.
Returns:
The name of a file or directory containing the contents of the requested
resource name.
"""
name = TestResourceName(name)
return resources.GetResourceFilename(name.rstrip('/'))
class FakeFile(object):
"""A fake file object that can be useful in unit tests."""
def __init__(self, contents=None, filename=None):
if contents is not None:
self._contents = contents
self.filename = filename or ('%d_%f.txt' % (hash(contents), time.time()))
elif filename is not None:
self._contents = file_util.Read(filename)
self.filename = os.path.basename(filename)
else:
raise base.Error('at least one of file or contents must be specified')
self._contents_filename = os.path.join(
FLAGS.test_tmpdir,
os.path.basename(self.filename))
self.new_contents = contents
self.deleted = False
self.written = False
def Contents(self):
return self._contents.decode('utf-8')
def ContentsFilename(self):
file_util.Write(self._contents_filename, self._contents)
return self._contents_filename
def WriteContents(self, new_contents):
self._contents = new_contents
self.new_contents = new_contents
self.written = True
def Delete(self):
self.deleted = True
| [
"google.apputils.file_util.Write",
"os.path.basename",
"moe.scrubber.base.Error",
"google.apputils.file_util.Read",
"time.time"
] | [((1788, 1844), 'google.apputils.file_util.Write', 'file_util.Write', (['self._contents_filename', 'self._contents'], {}), '(self._contents_filename, self._contents)\n', (1803, 1844), False, 'from google.apputils import file_util\n'), ((1572, 1603), 'os.path.basename', 'os.path.basename', (['self.filename'], {}), '(self.filename)\n', (1588, 1603), False, 'import os\n'), ((1331, 1355), 'google.apputils.file_util.Read', 'file_util.Read', (['filename'], {}), '(filename)\n', (1345, 1355), False, 'from google.apputils import file_util\n'), ((1378, 1404), 'os.path.basename', 'os.path.basename', (['filename'], {}), '(filename)\n', (1394, 1404), False, 'import os\n'), ((1427, 1491), 'moe.scrubber.base.Error', 'base.Error', (['"""at least one of file or contents must be specified"""'], {}), "('at least one of file or contents must be specified')\n", (1437, 1491), False, 'from moe.scrubber import base\n'), ((1263, 1274), 'time.time', 'time.time', ([], {}), '()\n', (1272, 1274), False, 'import time\n')] |
##############################################################################
# STEP 1:
# CREATING library DATABASE
# FROM file create_library_db.sql
# STEP 2:
# POPULATING library DATABASE
# FROM file populate_library_db.py
##############################################################################
import mysql.connector
from mysql.connector import Error
import re
import os
from os import environ
from populate_library_db import populate_library_db
import psutil
import time
##############################################################################
def create_library_db(filename, connect, cursor):
# reading file create_library_db.sql ...
with open(filename, 'r') as file:
mysql_file = file.read()
# ... and separate each query
mysql_queries = mysql_file.split(';')
# format queries
for k in range(len(mysql_queries)):
mysql_queries[k] = mysql_queries[k] + ";"
del mysql_queries[-1]
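    # Worked example of the splitting above: "CREATE TABLE a (x INT);\nDROP TABLE b;"
    # splits into ['CREATE TABLE a (x INT)', '\nDROP TABLE b', '']; each element then gets
    # ';' re-appended and the trailing lone ';' (from the final empty piece) is dropped.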
for query in mysql_queries:
# execute all queries except SELECT queries at the end of the file
        # which are used for debugging and verification
if query.find('SELECT') == -1:
try:
cursor.execute(query)
connect.commit()
except Error as err:
print("MySQL Error message: {}".format(err.msg))
##############################################################################
def create_library():
try:
user = environ.get('MYSQL_USER')
password = environ.get('MYSQL_PASSWORD')
# connect to library database
connect = mysql.connector.connect(
user = user,
password = password,
host = 'localhost',
database = 'library')
except Error as err:
print("MySQL Error message: {}".format(err.msg))
else:
cursor = connect.cursor()
cursor.execute("select database();")
db = cursor.fetchone()
# print("Info: Connected to MySQL database", db)
create_library_db('create_library_db.sql', connect, cursor)
cursor.close()
connect.close()
# print("Info: MySQL sever connection is closed")
##############################################################################
if __name__ == '__main__':
start_time = time.time()
print('1: CREATING library DATABASE ...')
create_library()
step_time1 = time.time()
print('Step 1 running time: ',
'{:.3f}'.format(step_time1 - start_time),
'sec', '\n')
print('2: POPULATING library DATABASE ...')
    populate_library_db()  # from populate_library_db.py
step_time2 = time.time()
print('Step 2 running time: ',
'{:.3f}'.format(step_time2 - step_time1),
'sec', '\n')
print('JOB DONE / Total running time: ',
'{:.3f}'.format(time.time() - start_time),
'sec', '\n')
| [
"populate_library_db.populate_library_db",
"time.time",
"os.environ.get"
] | [((2083, 2094), 'time.time', 'time.time', ([], {}), '()\n', (2092, 2094), False, 'import time\n'), ((2171, 2182), 'time.time', 'time.time', ([], {}), '()\n', (2180, 2182), False, 'import time\n'), ((2325, 2346), 'populate_library_db.populate_library_db', 'populate_library_db', ([], {}), '()\n', (2344, 2346), False, 'from populate_library_db import populate_library_db\n'), ((2401, 2412), 'time.time', 'time.time', ([], {}), '()\n', (2410, 2412), False, 'import time\n'), ((1357, 1382), 'os.environ.get', 'environ.get', (['"""MYSQL_USER"""'], {}), "('MYSQL_USER')\n", (1368, 1382), False, 'from os import environ\n'), ((1396, 1425), 'os.environ.get', 'environ.get', (['"""MYSQL_PASSWORD"""'], {}), "('MYSQL_PASSWORD')\n", (1407, 1425), False, 'from os import environ\n'), ((2571, 2582), 'time.time', 'time.time', ([], {}), '()\n', (2580, 2582), False, 'import time\n')] |
import copy
import numpy as np
import re
import pandas as pd
from functools import wraps
import time
def timeit(f):
@wraps(f)
def wrap(*args, **kw):
ts = time.time()
result = f(*args, **kw)
te = time.time()
print('{:s} took: {:.6f} seconds'.format(f.__name__, te-ts))
return result
return wrap
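# Minimal usage sketch for the decorator above (slow_add is a made-up example, kept
# commented out so that importing this module stays side-effect free):
# @timeit
# def slow_add(a, b):
#     time.sleep(0.1)
#     return a + b
# slow_add(1, 2)   # prints something like "slow_add took: 0.100123 seconds"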
def allreplace(s):
todelete = ["(", ")"]
for ch in todelete:
s = s.replace(ch, "")
s = s.replace(" - ", " ")
    s = s.replace("  ", " ")
s = s.replace(" ", "_")
return s
def partreplace(s):
#todelete = ["(", ")"]
#for ch in todelete:
# s = s.replace(ch, "")
s = s.replace(" - ", " ")
    return s.replace("  ", " ")
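# read_tissues below expects a tab-separated file where, judging from the indexing used,
# column 0 is a human-readable description, column 1 is the tissue identifier and
# column 2 is a display string; lines starting with '#' are treated as comments.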
def read_tissues(infile):
tissues = []
descriptions = []
tstrings = []
with open(infile) as instream:
for l in instream:
lsp = l.split("\t")
if re.search("^#", l):
continue
tissues.append(lsp[1].rstrip())
descriptions.append(lsp[0].rstrip())
tstrings.append(lsp[2].rstrip())
#tstrings = [partreplace(d) for d in descriptions]
descriptions = [allreplace(d) for d in descriptions]
return tissues, descriptions, tstrings
def read_matching_eid(infile):
matches = dict()
with open(infile) as instream:
for line in instream:
if re.search("^#", line):
continue
lsplit = line.split("\t")
tissue = lsplit[1].rstrip()
eids = list()
if len(lsplit) == 3 and not lsplit[2].rstrip() == 'NA':
eids = [x.rstrip() for x in lsplit[2].rstrip().split(",")]
matches[tissue] = eids
return matches
def read_rocfile(infile):
df = pd.read_table(infile, header=0)
nsel = np.array(df.nsel.tolist())
tpr = np.array(df.tpr.tolist())
ppv = np.array(df.ppv.tolist())
valids = np.array(df.valids.tolist())
return nsel, tpr, ppv, valids
@timeit
def get_compatible_snp_dicts(dict1, dict2):
k1 = set(dict1.keys())
k2 = set(dict2.keys())
intersection = k1 & k2
ndict1 = dict()
ndict2 = dict()
for k in intersection:
ndict1[k] = dict1[k]
ndict2[k] = dict2[k]
return ndict1, ndict2
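# Illustrative usage with made-up data (not part of the original module):
# d1 = {"rs1": 0.1, "rs2": 0.2, "rs3": 0.3}
# d2 = {"rs2": 1.0, "rs3": 2.0, "rs4": 3.0}
# nd1, nd2 = get_compatible_snp_dicts(d1, d2)
# # nd1 == {"rs2": 0.2, "rs3": 0.3} and nd2 == {"rs2": 1.0, "rs3": 2.0}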
# def get_compatible_snp_dicts_old(dict1, dict2):
# k1 = list(dict1.keys())
# k2 = list(dict2.keys())
# ndict1 = copy.deepcopy(dict1) # takes ~ 1.21 s
# ndict2 = copy.deepcopy(dict2)
# # see if snps in dict1 are in dict2
# for k in k1:
# val2 = ndict2.get(k, None)
# if val2 == None:
# del ndict1[k]
# for k in k2:
# val1 = ndict1.get(k, None)
# if val1 == None:
# del ndict2[k]
# return ndict1, ndict2
| [
"pandas.read_table",
"time.time",
"functools.wraps",
"re.search"
] | [((122, 130), 'functools.wraps', 'wraps', (['f'], {}), '(f)\n', (127, 130), False, 'from functools import wraps\n'), ((1762, 1793), 'pandas.read_table', 'pd.read_table', (['infile'], {'header': '(0)'}), '(infile, header=0)\n', (1775, 1793), True, 'import pandas as pd\n'), ((171, 182), 'time.time', 'time.time', ([], {}), '()\n', (180, 182), False, 'import time\n'), ((228, 239), 'time.time', 'time.time', ([], {}), '()\n', (237, 239), False, 'import time\n'), ((906, 924), 're.search', 're.search', (['"""^#"""', 'l'], {}), "('^#', l)\n", (915, 924), False, 'import re\n'), ((1377, 1398), 're.search', 're.search', (['"""^#"""', 'line'], {}), "('^#', line)\n", (1386, 1398), False, 'import re\n')] |
# -*- coding: utf-8 -*-
"""
Created on Tue Sept 3 01:31:07 2019
@author: abibeka
# Get Density on I-95 (After Validation)
"""
#0.0 Housekeeping. Clear variable space
from IPython import get_ipython #run magic commands
ipython = get_ipython()
ipython.magic("reset -f")
ipython = get_ipython()
import os
import pandas as pd
import numpy as np
import subprocess
import seaborn as sns
import matplotlib.pyplot as plt
# Get the time keys to convert Vissim intervals to Hours of the day
PathToVis = r'C:\Users\abibeka\OneDrive - Kittelson & Associates, Inc\Documents\LPGA\VISSIM-Files'
TimeConvFi = 'TravelTimeKeyValuePairs.xlsx'
TimeConvFi = pd.ExcelFile(os.path.join(PathToVis,TimeConvFi))
TimeConvFi.sheet_names
TimeKeys = TimeConvFi.parse('TimeVissimKey')
# Define Sort Order
I95_Segs = [ 'NB I-95 (US92 to NB OffRamp)',
'NB I-95 (NB OffRamp to NB LoopRamp)',
'NB I-95 ( NB LoopRamp to NB On-Ramp)',
'NB I-95 (NB On-Ramp to SR40)',
'SB I-95 (SR40 to SB OffRamp)',
'SB I-95 (SB OffRamp to SB LoopRamp)',
'SB I-95 (SB LoopRamp to SB On-Ramp)',
'SB I-95 (SB On-Ramp to US92)'
]
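# NOTE: TTSegLaneDat (a DataFrame with SegNO, SegName and NumLanes columns that
# maps Vissim travel-time segment numbers to the names above) is assumed to be
# loaded before this point; it is not created in this script.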
TTSegLaneDat.loc[:,'SegName'] = pd.Categorical(TTSegLaneDat.SegName,I95_Segs)
TTSegLaneDat.sort_values('SegName',inplace=True)
# Get the Weighted Density by Segments
def PreProcessVissimDensity(file, SegKeyVal = TTSegLaneDat):
'''
file : VISSIM Results file
SegKeyVal : Key value pair for segment # and TT segment name
Summarize Vissim Travel time results
'''
ExistingAMDat=pd.read_csv(file,sep =';',skiprows=17)
ExistingAMDat.columns
ExistingAMDat.rename(columns={'TRAVTM(ALL)':'VissimTT','VEHS(ALL)':'Veh','DISTTRAV(ALL)':'Len'},inplace=True)
mask=ExistingAMDat["$VEHICLETRAVELTIMEMEASUREMENTEVALUATION:SIMRUN"]=="AVG"
ExistingAMDat = ExistingAMDat[mask]
ExistingAMDat = SegKeyVal.merge(ExistingAMDat,left_on=['SegNO'],right_on = ['VEHICLETRAVELTIMEMEASUREMENT'],how= 'left')
ExistingAMDat.TIMEINT = pd.Categorical(ExistingAMDat.TIMEINT,['900-1800','1800-2700','2700-3600','3600-4500',
'4500-5400','5400-6300','6300-7200','7200-8100',
'8100-9000','9000-9900','9900-10800','10800-11700'])
ExistingAMDat.loc[:,"VissimSMS"] = (ExistingAMDat['Len']/ExistingAMDat['VissimTT']/1.47).round(1)
#Get flow rate and density
ExistingAMDat.loc[:,'FlowRate'] = ExistingAMDat.Veh *4
ExistingAMDat.loc[:,'DensityPerLane'] = (ExistingAMDat.FlowRate/ ExistingAMDat.VissimSMS/ExistingAMDat.NumLanes).round(1)
ExistingAMDat.loc[:,'LenByDensity'] = ExistingAMDat.DensityPerLane *ExistingAMDat.Len
ExistingAMDat.columns
DensityData = ExistingAMDat.groupby(['TIMEINT','SegName'])['Len','LenByDensity'].sum().reset_index()
DensityData.loc[:,'WeightedDensity'] = (DensityData.LenByDensity/ DensityData.Len).round(1)
return(DensityData)
# VISSIM File
#*********************************************************************************
PathToExist = r'C:\Users\abibeka\OneDrive - Kittelson & Associates, Inc\Documents\LPGA\VISSIM-Files\VISSIM - V2\Existing'
ExistingPMfi = '20834_Existing_PM--C1C2C3C4C5C6C7_Vehicle Travel Time Results.att'
ExistingPMfi = os.path.join(PathToExist,ExistingPMfi)
ExistingAMfi ='20834_Existing_AM--C1C2aC3C4C5C6C7C8_Vehicle Travel Time Results.att'
ExistingAMfi = os.path.join(PathToExist,ExistingAMfi)
file = ExistingAMfi
SegKeyVal = TTSegLaneDat
DenAM = PreProcessVissimDensity(ExistingAMfi,SegKeyVal)
DenPM = PreProcessVissimDensity(ExistingPMfi,SegKeyVal)
| [
"IPython.get_ipython",
"os.path.join",
"pandas.read_csv",
"pandas.Categorical"
] | [((231, 244), 'IPython.get_ipython', 'get_ipython', ([], {}), '()\n', (242, 244), False, 'from IPython import get_ipython\n'), ((281, 294), 'IPython.get_ipython', 'get_ipython', ([], {}), '()\n', (292, 294), False, 'from IPython import get_ipython\n'), ((1248, 1294), 'pandas.Categorical', 'pd.Categorical', (['TTSegLaneDat.SegName', 'I95_Segs'], {}), '(TTSegLaneDat.SegName, I95_Segs)\n', (1262, 1294), True, 'import pandas as pd\n'), ((3316, 3355), 'os.path.join', 'os.path.join', (['PathToExist', 'ExistingPMfi'], {}), '(PathToExist, ExistingPMfi)\n', (3328, 3355), False, 'import os\n'), ((3455, 3494), 'os.path.join', 'os.path.join', (['PathToExist', 'ExistingAMfi'], {}), '(PathToExist, ExistingAMfi)\n', (3467, 3494), False, 'import os\n'), ((659, 694), 'os.path.join', 'os.path.join', (['PathToVis', 'TimeConvFi'], {}), '(PathToVis, TimeConvFi)\n', (671, 694), False, 'import os\n'), ((1618, 1657), 'pandas.read_csv', 'pd.read_csv', (['file'], {'sep': '""";"""', 'skiprows': '(17)'}), "(file, sep=';', skiprows=17)\n", (1629, 1657), True, 'import pandas as pd\n'), ((2070, 2275), 'pandas.Categorical', 'pd.Categorical', (['ExistingAMDat.TIMEINT', "['900-1800', '1800-2700', '2700-3600', '3600-4500', '4500-5400',\n '5400-6300', '6300-7200', '7200-8100', '8100-9000', '9000-9900',\n '9900-10800', '10800-11700']"], {}), "(ExistingAMDat.TIMEINT, ['900-1800', '1800-2700', '2700-3600',\n '3600-4500', '4500-5400', '5400-6300', '6300-7200', '7200-8100',\n '8100-9000', '9000-9900', '9900-10800', '10800-11700'])\n", (2084, 2275), True, 'import pandas as pd\n')] |
"""Tools for working with async iterators."""
import asyncio
import collections.abc
import itertools
from typing import (
AsyncIterable, AsyncIterator, Awaitable, Iterable, TypeVar)
__all__ = ['aiter', 'anext', 'aiterclose', 'aiter_from_iter', 'product']
T = TypeVar('T')
# Get aiter() and anext() somewhere
try:
from builtins import aiter
except ImportError:
# https://bugs.python.org/issue31861 talks about potentially putting
# aiter() and anext() in the operator module
try:
from operator import aiter
except ImportError:
def aiter(aiterable: AsyncIterable[T]) -> AsyncIterator[T]:
"""Return an async iterator from an async iterable.
If an ``aiter`` function is available as a builtin or in the
:mod:`operator` module, it is imported into
:mod:`async_stagger.aitertools`, and this function will not be
defined.
Only when a stock ``aiter`` is not available will this
function be defined.
            Unlike the built-in :func:`iter()`, this only supports one argument,
and does not support the two-argument (callable, sentinel) usage.
Adapted from implementation attached to
https://bugs.python.org/issue31861 by <NAME>.
Args:
aiterable: The async iterable.
Returns:
The async iterator produced from the given async iterable.
"""
if not isinstance(aiterable, collections.abc.AsyncIterable):
raise TypeError(
f'{type(aiterable).__name__!r} object '
f'is not asynchronously iterable')
return aiterable.__aiter__()
try:
from builtins import anext
except ImportError:
try:
from operator import anext
except ImportError:
def anext(aiterator: AsyncIterator[T]) -> Awaitable[T]:
"""Return the next item from an async iterator.
If an ``anext`` function is available as a builtin or in the
:mod:`operator` module, it is imported into
:mod:`async_stagger.aitertools`, and this function will not be
defined.
Only when a stock ``anext`` is not available will this
function be defined.
Unlike the built-in :func:`next`, this does not support providing a
default value.
This is a regular function that returns an awaitable, so usually
you should await its result: ``await anext(it)``
Adapted from implementation attached to
https://bugs.python.org/issue31861 by <NAME>.
Args:
aiterator: the async iterator.
Returns:
An awaitable that will return the next item in the iterator.
"""
if not isinstance(aiterator, collections.abc.AsyncIterator):
raise TypeError(f'{type(aiterator).__name__!r} object '
f'is not an asynchronous iterator')
return aiterator.__anext__()
async def aiterclose(aiterator: AsyncIterator):
"""Close the async iterator if possible.
Async generators have an ``aclose()`` method that closes the generator and
cleans up associated resources. Plain async iterators do not have anything
similar, but :pep:`533` suggests adding an ``__aiterclose__()`` method, and having
it called automatically when exiting from an ``async with`` loop.
This function tries to close the async iterator using either method, and
if neither is available, does nothing.
Args:
aiterator: the async iterator to close.
"""
if not isinstance(aiterator, collections.abc.AsyncIterator):
raise TypeError(f'{type(aiterator).__name__!r} object '
f'is not an asynchronous iterator')
if hasattr(aiterator, "__aiterclose__"):
# PEP 533 recommends that "__aiterclose__ calls self.aclose()",
# so we assume it does, and do not call aclose() ourselves
return await aiterator.__aiterclose__()
if hasattr(aiterator, "aclose"):
return await aiterator.aclose()
async def aiter_from_iter(
iterable: Iterable[T],
) -> AsyncIterator[T]:
"""Wrap an async iterator around a regular iterator.
Args:
iterable: a regular iterable.
Returns:
An async iterator yielding the same items as the original iterable.
"""
for item in iterable:
yield item
async def product(
*aiterables: AsyncIterable,
repeat: int = 1,
) -> AsyncIterator:
"""Async version of :func:`itertools.product`.
Compute the cartesian product of input iterables. The arguments are
analogous to its :mod:`itertools` counterpart.
The input async iterables are evaluated lazily. As a result the last
input iterable is iterated and exhausted first, then the next-to-last is
iterated, and so on.
Args:
aiterables: input async iterables.
repeat: used to compute the product of input async iterables with
themselves.
"""
if not isinstance(repeat, int):
raise TypeError(
f'integer argument expected, got {type(repeat).__name__}')
if repeat < 0:
raise ValueError('repeat argument cannot be negative')
if not aiterables:
# no arguments: yield an empty tuple to match itertools.product.
yield ()
return
if repeat == 0:
# yield empty tuple to match itertools.product.
yield ()
return
aiterators = [aiter(a) for a in aiterables]
try:
try:
initial_values = await asyncio.gather(*(anext(a) for a in aiterators))
except StopAsyncIteration:
# some of the aiterators are empty:
# yield nothing to match itertools.product
return
initial_prefix = initial_values * (repeat - 1)
yield tuple(itertools.chain(initial_prefix, initial_values))
rev_values = list([v] for v in reversed(initial_values))
for rev_idx, aiter_to_exhaust in enumerate(reversed(aiterators)):
async for item in aiter_to_exhaust:
rev_values[rev_idx].append(item)
for exhausted_product in itertools.product(
*reversed(rev_values[:rev_idx])):
yield tuple(itertools.chain(
initial_prefix,
initial_values[:-1-rev_idx],
(item,),
exhausted_product,
))
values = list(reversed(rev_values))
prefix_product = itertools.product(*values, repeat=repeat-1)
next(prefix_product)
for prefix in prefix_product:
for p in itertools.product(*values):
yield tuple(itertools.chain(prefix, p))
finally:
for it in aiterators:
await aiterclose(it)
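# A minimal usage sketch (an illustrative assumption, not part of the library):
# product() pulls items lazily, exhausting the last input iterable first.
if __name__ == '__main__':
    async def _demo():
        async for pair in product(aiter_from_iter([1, 2]), aiter_from_iter('ab')):
            print(pair)  # (1, 'a'), (1, 'b'), (2, 'a'), (2, 'b')
    asyncio.run(_demo())  # assumes Python 3.7+ for asyncio.run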
| [
"itertools.chain",
"operator.aiter",
"itertools.product",
"operator.anext",
"typing.TypeVar"
] | [((266, 278), 'typing.TypeVar', 'TypeVar', (['"""T"""'], {}), "('T')\n", (273, 278), False, 'from typing import AsyncIterable, AsyncIterator, Awaitable, Iterable, TypeVar\n'), ((5611, 5619), 'operator.aiter', 'aiter', (['a'], {}), '(a)\n', (5616, 5619), False, 'from operator import aiter\n'), ((6693, 6738), 'itertools.product', 'itertools.product', (['*values'], {'repeat': '(repeat - 1)'}), '(*values, repeat=repeat - 1)\n', (6710, 6738), False, 'import itertools\n'), ((6825, 6851), 'itertools.product', 'itertools.product', (['*values'], {}), '(*values)\n', (6842, 6851), False, 'import itertools\n'), ((5978, 6025), 'itertools.chain', 'itertools.chain', (['initial_prefix', 'initial_values'], {}), '(initial_prefix, initial_values)\n', (5993, 6025), False, 'import itertools\n'), ((6881, 6907), 'itertools.chain', 'itertools.chain', (['prefix', 'p'], {}), '(prefix, p)\n', (6896, 6907), False, 'import itertools\n'), ((5715, 5723), 'operator.anext', 'anext', (['a'], {}), '(a)\n', (5720, 5723), False, 'from operator import anext\n'), ((6414, 6508), 'itertools.chain', 'itertools.chain', (['initial_prefix', 'initial_values[:-1 - rev_idx]', '(item,)', 'exhausted_product'], {}), '(initial_prefix, initial_values[:-1 - rev_idx], (item,),\n exhausted_product)\n', (6429, 6508), False, 'import itertools\n')] |
"""`main` is the top level module for your Flask application."""
from __future__ import absolute_import, division, print_function
import logging
from flask import Flask, request
from .message import Message
from .bots.daniel import Daniel
app = Flask(__name__)
@app.route('/', methods=['GET'])
def index():
""" dummy page for checking that Flask is running"""
return "Hello Python Slack Bot"
@app.route('/daniel', methods=['POST'])
def daniel():
"""Daniel is a BOT, who reacts with calling his name
1) yo
you: daniel yo
daniel: yo
2) echo
you: daniel echo HelloWorld
daniel: HelloWorld
"""
msg = Message.parse(request)
logging.debug(msg)
# prevents infinite loop
if msg.user_name == "slackbot":
return ''
bot = Daniel(msg)
return bot.say()
| [
"logging.debug",
"flask.Flask"
] | [((250, 265), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (255, 265), False, 'from flask import Flask, request\n'), ((677, 695), 'logging.debug', 'logging.debug', (['msg'], {}), '(msg)\n', (690, 695), False, 'import logging\n')] |
import json
import yaml
# =========================================================================
# Data for testing WndLoadQuantitativeCalibration class
# Two sets of quantitative calibration data for demonstration of GUI layout
# Real data will be used when GUI is integrated with the program.
# The data represent the contents of JSON files, so they should be loaded
# using the 'json' module.
json_quant_calib_1 = """
{
"name": "Micromatter 41147",
"serial": "v41147",
"description": "GaP 21.2 (Ga=15.4, P=5.8) / CaF2 14.6 / V 26.4 / Mn 17.5 / Co 21.7 / Cu 20.3",
"element_lines": {
"Ga_K": {
"density": 15.4,
"fluorescence": 6.12047035678267e-05
},
"Ga_L": {
"density": 15.4,
"fluorescence": 1.1429814846741588e-05
},
"P_K": {
"density": 5.8,
"fluorescence": 3.177988019213722e-05
},
"F_K": {
"density": 7.105532786885246,
"fluorescence": 1.8688801284649113e-07
},
"Ca_K": {
"density": 7.494467213114753,
"fluorescence": 0.0005815345261894806
},
"V_K": {
"density": 26.4,
"fluorescence": 0.00030309931019669974
},
"Mn_K": {
"density": 17.5,
"fluorescence": 0.0018328847495676865
},
"Co_K": {
"density": 21.7,
"fluorescence": 0.0014660067400157218
},
"Cu_K": {
"density": 20.3,
"fluorescence": 6.435121428993609e-05
}
},
"incident_energy": 12.0,
"detector_channel": "sum",
"scaler_name": "i0",
"distance_to_sample": 1.0,
"creation_time_local": "2020-05-27T18:49:14+00:00",
"source_scan_id": null,
"source_scan_uid": null
}
"""
json_quant_calib_2 = """
{
"name": "Micromatter 41164 Name Is Long So It Has To Be Printed On Multiple Lines (Some More Words To Make The Name Longer)",
"serial": "41164",
"description": "CeF3 21.1 / Au 20.6",
"element_lines": {
"F_K": {
"density": 6.101050068482728,
"fluorescence": 2.1573457185882552e-07
},
"Ce_L": {
"density": 14.998949931517274,
"fluorescence": 0.0014368335445700924
},
"Au_L": {
"density": 20.6,
"fluorescence": 4.4655757003090785e-05
},
"Au_M": {
"density": 20.6,
"fluorescence": 3.611978659032483e-05
}
},
"incident_energy": 12.0,
"detector_channel": "sum",
"scaler_name": "i0",
"distance_to_sample": 2.0,
"creation_time_local": "2020-05-27T18:49:53+00:00",
"source_scan_id": null,
"source_scan_uid": null
}
""" # noqa: E501
# The data is structured the same way as in the actual program code, so transitioning
# to real data will be simple
quant_calib = [
[json.loads(json_quant_calib_1), {"file_path": "/path/to/quantitative/calibration/file/standard_41147.json"}],
[
json.loads(json_quant_calib_2),
{
"file_path": "/extremely/long/path/to"
"/quantitative/calibration/file/so/it/had/to/be/"
"printed/on/multiple/lines/standard_41164.json"
},
],
]
# The following list is to demonstrate how 'View' button works. Data is treated
# differently in the actual code, but the resulting format will be similar.
quant_calib_json = [
yaml.dump(quant_calib[0][0], default_flow_style=False, sort_keys=False, indent=4),
yaml.dump(quant_calib[1][0], default_flow_style=False, sort_keys=False, indent=4),
]
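# Minimal illustration (an assumption; the GUI code that consumes these lists is
# not shown here): each record pairs the parsed calibration dict with a dict
# holding the path of the file it came from.
if __name__ == "__main__":
    for entry, meta in quant_calib:
        print(entry["serial"], "->", meta["file_path"])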
| [
"json.loads",
"yaml.dump"
] | [((3822, 3907), 'yaml.dump', 'yaml.dump', (['quant_calib[0][0]'], {'default_flow_style': '(False)', 'sort_keys': '(False)', 'indent': '(4)'}), '(quant_calib[0][0], default_flow_style=False, sort_keys=False,\n indent=4)\n', (3831, 3907), False, 'import yaml\n'), ((3909, 3994), 'yaml.dump', 'yaml.dump', (['quant_calib[1][0]'], {'default_flow_style': '(False)', 'sort_keys': '(False)', 'indent': '(4)'}), '(quant_calib[1][0], default_flow_style=False, sort_keys=False,\n indent=4)\n', (3918, 3994), False, 'import yaml\n'), ((3278, 3308), 'json.loads', 'json.loads', (['json_quant_calib_1'], {}), '(json_quant_calib_1)\n', (3288, 3308), False, 'import json\n'), ((3402, 3432), 'json.loads', 'json.loads', (['json_quant_calib_2'], {}), '(json_quant_calib_2)\n', (3412, 3432), False, 'import json\n')] |
# Import libraries
import RPi.GPIO as GPIO
import time
# Set GPIO numbering mode
GPIO.setmode(GPIO.BOARD)
# Set pin 11 as an output, and set servo1 as pin 11 as PWM
GPIO.setup(11,GPIO.OUT)
servo1 = GPIO.PWM(11,50) # Note 11 is pin, 50 = 50Hz pulse
GPIO.setup(13,GPIO.OUT)
servo2 = GPIO.PWM(13,50) # Note 13 is pin, 50 = 50Hz pulse
#start PWM running, but with value of 0 (pulse off)
servo1.start(0)
servo2.start(0)
print ("Waiting for 2 seconds")
# Define variable duty
#duty = 2
# Loop for duty values from 2 to 12 (0 to 180 degrees)
#while duty <= 12:
# servo1.ChangeDutyCycle(duty)
# time.sleep(1)
# duty = duty + 1
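# For this 50 Hz signal the duty cycle maps roughly linearly onto the servo
# angle: about 2% duty at 0 degrees and 12% at 180 degrees. A small helper,
# assuming that approximation holds for the servos in use:
def angle_to_duty(angle):
    return 2 + angle / 18.0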
# Wait a couple of seconds
time.sleep(2)
# Run until interrupted; Ctrl+C breaks the loop so the cleanup below still runs
try:
  while True:
    # Move servo1 to 0 degrees and servo2 to 90 degrees
    print ("Moving servo1 to 0 degrees and servo2 to 90 degrees")
    servo1.ChangeDutyCycle(2)
    servo2.ChangeDutyCycle(7)
    time.sleep(0.4)
    # Swing back the other way
    print ("Moving servo1 to 90 degrees and servo2 to 0 degrees")
    servo1.ChangeDutyCycle(7)
    servo2.ChangeDutyCycle(2)
    time.sleep(0.4)
except KeyboardInterrupt:
  pass
#Clean things up at the end
servo1.stop()
servo2.stop()
GPIO.cleanup()
print ("Goodbye")
| [
"RPi.GPIO.cleanup",
"RPi.GPIO.setup",
"time.sleep",
"RPi.GPIO.PWM",
"RPi.GPIO.setmode"
] | [((84, 108), 'RPi.GPIO.setmode', 'GPIO.setmode', (['GPIO.BOARD'], {}), '(GPIO.BOARD)\n', (96, 108), True, 'import RPi.GPIO as GPIO\n'), ((169, 193), 'RPi.GPIO.setup', 'GPIO.setup', (['(11)', 'GPIO.OUT'], {}), '(11, GPIO.OUT)\n', (179, 193), True, 'import RPi.GPIO as GPIO\n'), ((202, 218), 'RPi.GPIO.PWM', 'GPIO.PWM', (['(11)', '(50)'], {}), '(11, 50)\n', (210, 218), True, 'import RPi.GPIO as GPIO\n'), ((252, 276), 'RPi.GPIO.setup', 'GPIO.setup', (['(13)', 'GPIO.OUT'], {}), '(13, GPIO.OUT)\n', (262, 276), True, 'import RPi.GPIO as GPIO\n'), ((285, 301), 'RPi.GPIO.PWM', 'GPIO.PWM', (['(13)', '(50)'], {}), '(13, 50)\n', (293, 301), True, 'import RPi.GPIO as GPIO\n'), ((666, 679), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (676, 679), False, 'import time\n'), ((1051, 1065), 'RPi.GPIO.cleanup', 'GPIO.cleanup', ([], {}), '()\n', (1063, 1065), True, 'import RPi.GPIO as GPIO\n'), ((842, 857), 'time.sleep', 'time.sleep', (['(0.4)'], {}), '(0.4)\n', (852, 857), False, 'import time\n'), ((990, 1005), 'time.sleep', 'time.sleep', (['(0.4)'], {}), '(0.4)\n', (1000, 1005), False, 'import time\n')] |
from flask import Flask
import random
from flask_sqlalchemy import SQLAlchemy
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:////tmp/test.db'
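# The Postgres URI below overrides the SQLite URI above; only the last
# assignment takes effect.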
app.config['SQLALCHEMY_DATABASE_URI'] = 'postgresql+psycopg2://rohan:1234@database:5432/rand_db'
db = SQLAlchemy(app)
class RandomNumbers(db.Model):
id = db.Column(db.Integer, primary_key=True)
number = db.Column(db.Integer)
@app.route('/')
def generate_random_number():
r_number = random.randint(0,100)
random_number = RandomNumbers(number=r_number)
db.session.add(random_number)
db.session.commit()
return str(r_number)
if __name__ == "__main__":
db.create_all()
app.run(host="0.0.0.0") | [
"flask_sqlalchemy.SQLAlchemy",
"random.randint",
"flask.Flask"
] | [((85, 100), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (90, 100), False, 'from flask import Flask\n'), ((268, 283), 'flask_sqlalchemy.SQLAlchemy', 'SQLAlchemy', (['app'], {}), '(app)\n', (278, 283), False, 'from flask_sqlalchemy import SQLAlchemy\n'), ((462, 484), 'random.randint', 'random.randint', (['(0)', '(100)'], {}), '(0, 100)\n', (476, 484), False, 'import random\n')] |
import hashlib
hash_input = "ffykfhsq"
found_hash = False
number = 0
numbers_left = 8
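# Brute-force search: append an increasing counter to the input string and,
# whenever the MD5 digest starts with five zeroes, print the sixth hex digit,
# until eight password characters have been printed.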
while numbers_left > 0:
hash_attempt = (hash_input + str(number)).encode("utf8")
hash_result = hashlib.md5(hash_attempt).hexdigest()
if hash_result[0:5] == "00000":
print(hash_result[5])
numbers_left -= 1
number += 1
| [
"hashlib.md5"
] | [((192, 217), 'hashlib.md5', 'hashlib.md5', (['hash_attempt'], {}), '(hash_attempt)\n', (203, 217), False, 'import hashlib\n')] |
import rospy
from clover import srv
from std_srvs.srv import Trigger
import math
from common import wait_arrival
rospy.init_node('flight')
get_telemetry = rospy.ServiceProxy('get_telemetry', srv.GetTelemetry)
navigate_global = rospy.ServiceProxy('navigate_global', srv.NavigateGlobal)
actual = get_telemetry()
print('Fly to GPS position')
# lonely landing pad
navigate_global(lat=47.397664, lon=8.5452953, z=actual.z, yaw=math.inf, speed=1)
wait_arrival()
rospy.loginfo('Arrive to 1. point')
rospy.sleep(5.0)
# landing pad with rover
navigate_global(lat=47.3975913, lon=8.5456449, z=actual.z, yaw=math.inf, speed=1)
wait_arrival()
rospy.loginfo('Arrive to 2. point')
rospy.sleep(5.0)
# landing pad with pickup
navigate_global(lat=47.3980788, lon=8.5457014, z=actual.z, yaw=math.inf, speed=1)
wait_arrival()
rospy.loginfo('Arrive to 3. point')
rospy.sleep(5.0) | [
"rospy.init_node",
"common.wait_arrival",
"rospy.ServiceProxy",
"rospy.sleep",
"rospy.loginfo"
] | [((114, 139), 'rospy.init_node', 'rospy.init_node', (['"""flight"""'], {}), "('flight')\n", (129, 139), False, 'import rospy\n'), ((157, 210), 'rospy.ServiceProxy', 'rospy.ServiceProxy', (['"""get_telemetry"""', 'srv.GetTelemetry'], {}), "('get_telemetry', srv.GetTelemetry)\n", (175, 210), False, 'import rospy\n'), ((229, 286), 'rospy.ServiceProxy', 'rospy.ServiceProxy', (['"""navigate_global"""', 'srv.NavigateGlobal'], {}), "('navigate_global', srv.NavigateGlobal)\n", (247, 286), False, 'import rospy\n'), ((446, 460), 'common.wait_arrival', 'wait_arrival', ([], {}), '()\n', (458, 460), False, 'from common import wait_arrival\n'), ((462, 497), 'rospy.loginfo', 'rospy.loginfo', (['"""Arrive to 1. point"""'], {}), "('Arrive to 1. point')\n", (475, 497), False, 'import rospy\n'), ((498, 514), 'rospy.sleep', 'rospy.sleep', (['(5.0)'], {}), '(5.0)\n', (509, 514), False, 'import rospy\n'), ((623, 637), 'common.wait_arrival', 'wait_arrival', ([], {}), '()\n', (635, 637), False, 'from common import wait_arrival\n'), ((639, 674), 'rospy.loginfo', 'rospy.loginfo', (['"""Arrive to 2. point"""'], {}), "('Arrive to 2. point')\n", (652, 674), False, 'import rospy\n'), ((675, 691), 'rospy.sleep', 'rospy.sleep', (['(5.0)'], {}), '(5.0)\n', (686, 691), False, 'import rospy\n'), ((802, 816), 'common.wait_arrival', 'wait_arrival', ([], {}), '()\n', (814, 816), False, 'from common import wait_arrival\n'), ((818, 853), 'rospy.loginfo', 'rospy.loginfo', (['"""Arrive to 3. point"""'], {}), "('Arrive to 3. point')\n", (831, 853), False, 'import rospy\n'), ((854, 870), 'rospy.sleep', 'rospy.sleep', (['(5.0)'], {}), '(5.0)\n', (865, 870), False, 'import rospy\n')] |
import re
ABBA = re.compile(r"([a-z])([a-z])\2\1")
def _is_abba(s):
return (len(s) == 4 and s[0] == s[3] and s[1] == s[2] and s[0] != s[1])
def _is_aba(s):
return (len(s) == 3 and s[0] == s[2] and s[0] != s[1] and
"[" not in s and "]" not in s)
def has_tls_support(ip):
has_abba = False
currently_squared = False
last_4 = []
for c in ip:
if c == "[":
currently_squared = True
elif c == "]":
currently_squared = False
last_4 = last_4[-3:] + [c]
if _is_abba(last_4):
if currently_squared:
return False
else:
has_abba = True
return has_abba
def has_ssl_support(ip):
abas = set()
babs = set()
currently_squared = False
last_3 = []
for c in ip:
if c == "[":
currently_squared = True
elif c == "]":
currently_squared = False
last_3 = last_3[-2:] + [c]
if _is_aba(last_3):
if currently_squared:
babs.add((last_3[1], last_3[0]))
else:
abas.add((last_3[0], last_3[1]))
return bool(abas.intersection(babs))
def count_supporting_ips(seq, checkfun):
return len([ip for ip in seq if checkfun(ip)])
if __name__ == '__main__':
with open("data/7.txt") as inp:
input_data = list(inp)
print(count_supporting_ips(input_data, has_tls_support))
print(count_supporting_ips(input_data, has_ssl_support))
| [
"re.compile"
] | [((18, 54), 're.compile', 're.compile', (['"""([a-z])([a-z])\x02\x01"""'], {}), "('([a-z])([a-z])\\x02\\x01')\n", (28, 54), False, 'import re\n')] |
import os
import sys
import random
import math
import re
import time
import numpy as np
import cv2
import matplotlib
import matplotlib.pyplot as plt
import tensorflow as tf
from mrcnn.config import Config
#import utils
from mrcnn import model as modellib,utils
from mrcnn import visualize
import yaml
from mrcnn.model import log
from PIL import Image
ROOT_DIR = os.path.abspath("/home/user/Mask_RCNN/")
# ROOT_DIR = os.getcwd()
sys.path.append(ROOT_DIR) # To find local version of the library
MODEL_DIR = os.path.join(ROOT_DIR, "logs")
iter_num = 0
# Path to trained weights file
COCO_MODEL_PATH = os.path.join(ROOT_DIR, "mask_rcnn_coco.h5")
if not os.path.exists(COCO_MODEL_PATH):
utils.download_trained_weights(COCO_MODEL_PATH)
print("download**************")
class ShapesConfig(Config):
"""Configuration for training on the dataset.
Derives from the base Config class and overrides some values.
"""
# Give the configuration a recognizable name
NAME = "shapes"
# We use a GPU with 12GB memory, which can fit two images.
# Adjust down if you use a smaller GPU.
GPU_COUNT = 1
IMAGES_PER_GPU = 1
# Number of classes (including background)
NUM_CLASSES = 1 + 1 # Background + (Horse and Man)
# Number of training steps per epoch
# Use small images for faster training. Set the limits of the small side
# the large side, and that determines the image shape.
IMAGE_MIN_DIM = 1024
IMAGE_MAX_DIM = 1024
# # Use smaller anchors because our image and objects are small
RPN_ANCHOR_SCALES = ( 32, 64 , 128 ,256,512) # anchor side in pixels
TRAIN_ROIS_PER_IMAGE =100
STEPS_PER_EPOCH = 100
# Skip detections with < 90% confidence
# DETECTION_MIN_CONFIDENCE = 0.9
VALIDATION_STEPS = 20
config = ShapesConfig()
config.display()
def get_ax(rows=1, cols=1, size=8):
"""Return a Matplotlib Axes array to be used in
all visualizations in the notebook. Provide a
central point to control graph sizes.
Change the default size attribute to control the size
of rendered images
"""
_, ax = plt.subplots(rows, cols, figsize=(size * cols, size * rows))
return ax
def list2array(list):
b = np.array(list[0])
for i in range(1, len(list)):
b = np.append(b, list[i],axis=0)
return b
def text_save(filename, data):  # filename is the path of the CSV file to write, data is the list of data to write
file = open(filename,'a')
for i in range(len(data)):
        s = str(data[i]).replace('[','').replace(']','')  # strip the square brackets; these two lines are optional depending on the data
        s = s.replace("'",'').replace(',','') +'\n'   # strip single quotes and commas, append a newline at the end of each line
file.write(s)
file.close()
    print("txt file saved successfully")
# def display_image_samples(dataset_train):
# # Load and display random samples
# image_ids = np.random.choice(dataset_train.image_ids, 4)
# for image_id in image_ids:
# image = dataset_train.load_image(image_id)
# mask, class_ids = dataset_train.load_mask(image_id)
# visualize.display_top_masks(image, mask, class_ids, dataset_train.class_names)
class DrugDataset(utils.Dataset):
    # Get the number of instances (objects) in the image
def get_obj_index(self,image):
n = np.max(image)
return n
    # Parse the yaml file generated by labelme to get the instance label for each mask layer
def from_yaml_get_class(self, image_id):
info = self.image_info[image_id]
# print(info)
with open(info['yaml_path']) as f:
temp = yaml.load(f.read(),Loader=yaml.FullLoader)
labels = temp['label_names']
del labels[0]
return labels
def draw_mask(self, num_obj, mask, image,image_id):
#print("draw_mask-->",image_id)
#print("self.image_info",self.image_info)
info = self.image_info[image_id]
#print("info-->",info)
#print("info[width]----->",info['width'],"-info[height]--->",info['height'])
for index in range(num_obj):
for i in range(info['width']):
for j in range(info['height']):
#print("image_id-->",image_id,"-i--->",i,"-j--->",j)
#print("info[width]----->",info['width'],"-info[height]--->",info['height'])
at_pixel = image.getpixel((i, j))
if at_pixel == index + 1:
mask[j, i, index] = 1
return mask
def load_shapes(self, count, img_floder, mask_floder, imglist, dataset_root_path):
"""Generate the requested number of synthetic images.
count: number of images to generate.
height, width: the size of the generated images.
"""
        # Add classes
self.add_class("shapes", 1, "pen")
for i in range(count):
            # Get the image width and height
# print(i)
filestr = imglist[i].split(".")[0]
# print(filestr)
# print(imglist[i],"-->",cv_img.shape[1],"--->",cv_img.shape[0])
# print("id-->", i, " imglist[", i, "]-->", imglist[i],"filestr-->",filestr)
# filestr = filestr.split("_")[1]
mask_path = mask_floder +"/" + filestr + ".png"
# print(mask_path)
yaml_path = dataset_root_path + "/labelme_json/" + filestr + "_json/info.yaml"
# print(yaml_path)
# print(dataset_root_path + "/labelme_json/" + filestr + "_json/info.png")
cv_img = cv2.imread(dataset_root_path +"/labelme_json/" + filestr + "_json/img.png")
# print(cv_img.shape[1])
# print(cv_img.shape[0])
# print(cv_img)
# path=img_floder +"/"+ imglist[i]
self.add_image(source="shapes", image_id=i, path=img_floder +"/"+ imglist[i],
width=cv_img.shape[1], height=cv_img.shape[0], mask_path=mask_path, yaml_path=yaml_path)
def load_mask(self, image_id):
"""Generate instance masks for shapes of the given image ID.
"""
global iter_num
# print("image_id", image_id)
info = self.image_info[image_id]
count = 1 # number of object
img = Image.open(info['mask_path'])
num_obj = self.get_obj_index(img)
mask = np.zeros([info['height'], info['width'], num_obj], dtype=np.uint8)
mask = self.draw_mask(num_obj, mask, img, image_id)
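        # Handle occlusion between instances: later instances sit on top, so each
        # earlier mask is ANDed with the complement of everything drawn above it.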
occlusion = np.logical_not(mask[:, :, -1]).astype(np.uint8)
for i in range(count - 2, -1, -1):
mask[:, :, i] = mask[:, :, i] * occlusion
occlusion = np.logical_and(occlusion, np.logical_not(mask[:, :, i]))
labels = []
labels = self.from_yaml_get_class(image_id)
# print(len(labels))
labels_form = []
for i in range(len(labels)):
if labels[i].find("pen") != -1:
labels_form.append("pen")
# elif labels[i].find("triangle")!=-1:
# #print "column"
# labels_form.append("triangle")
class_ids = np.array([self.class_names.index(s) for s in labels_form])
# print("class_id: ",class_ids)
return mask, class_ids.astype(np.int32)
class InferenceConfig(ShapesConfig):
GPU_COUNT = 1
IMAGES_PER_GPU = 1
def test_model():
# 基礎設定
dataset_root_path = r"/home/user/TODGX/test_data"
img_floder = os.path.join(dataset_root_path, "pic")
mask_floder = os.path.join(dataset_root_path, "cv2_mask")
# yaml_floder = dataset_root_path
    imglist = os.listdir(img_floder)  # number of photos
# print(imglist)
count = len(imglist)
# print(count)
    # prepare the train and val datasets
# dataset_train = DrugDataset()
# dataset_train.load_shapes(count, img_floder, mask_floder, imglist, dataset_root_path)
# dataset_train.prepare()
# display_image_samples(dataset_train)
# print("dataset_train-->",dataset_train._image_ids)
dataset_test = DrugDataset()
dataset_test.load_shapes(count, img_floder, mask_floder, imglist, dataset_root_path)
dataset_test.prepare()
inference_config = InferenceConfig()
model_path = "/home/user/Mask_RCNN/mask_rcnn_coco_0060.h5"
model = modellib.MaskRCNN(mode="inference", config=inference_config,model_dir=MODEL_DIR)
print("Loading weights from ", model_path)
model.load_weights(model_path, by_name=True)
print(dataset_test.image_ids)
img_list = np.random.choice(dataset_test.image_ids, 85)
APs = []
count1 = 0
for image_id in img_list:
        # Load the ground truth of the test set
image, image_meta, gt_class_id, gt_bbox, gt_mask = \
modellib.load_image_gt(dataset_test, inference_config,image_id, use_mini_mask=False)
        # Load and save all of the ground truth
if count1 == 0:
save_box, save_class, save_mask = gt_bbox, gt_class_id, gt_mask
else:
save_box = np.concatenate((save_box, gt_bbox), axis=0)
save_class = np.concatenate((save_class, gt_class_id), axis=0)
save_mask = np.concatenate((save_mask, gt_mask), axis=2)
molded_images = np.expand_dims(modellib.mold_image(image, inference_config), 0)
        # Run detection
results = model.detect([image], verbose=0)
r = results[0]
        # Save all of the detection results
if count1 == 0:
save_roi, save_id, save_score, save_m = r["rois"], r["class_ids"], r["scores"], r['masks']
else:
save_roi = np.concatenate((save_roi, r["rois"]), axis=0)
save_id = np.concatenate((save_id, r["class_ids"]), axis=0)
save_score = np.concatenate((save_score, r["scores"]), axis=0)
save_m = np.concatenate((save_m, r['masks']), axis=2)
count1 += 1
    # Compute AP, precision, recall
AP, precisions, recalls, overlaps = \
utils.compute_ap(save_box, save_class, save_mask,save_roi, save_id, save_score, save_m,iou_threshold=0.5)
print("AP: ", AP)
print("mAP: ", np.mean(AP))
plt.plot(recalls, precisions, 'b', label='PR')
plt.title('precision-recall curve')
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.legend()
plt.show()
text_save('Kpreci.txt', precisions)
text_save('Krecall.txt', recalls)
# inference_config = InferenceConfig()
# model = modellib.MaskRCNN(mode="inference",
# config=inference_config,
# model_dir=MODEL_DIR)
    # model_path = os.path.join(MODEL_DIR, "KL1000.h5")  # change this to your own trained model
# # Load trained weights
# print("Loading weights from ", model_path)
# model.load_weights(model_path, by_name=True)
# img_list = np.random.choice(dataset_test.image_ids, 85)
# APs = []
if __name__ == "__main__":
test_model()
# test_model, inference_config = load_test_model(class_number)
# test_random_image(test_model, dataset_val, inference_config)
| [
"mrcnn.model.MaskRCNN",
"mrcnn.utils.download_trained_weights",
"matplotlib.pyplot.ylabel",
"numpy.logical_not",
"numpy.array",
"sys.path.append",
"os.path.exists",
"numpy.mean",
"os.listdir",
"mrcnn.model.mold_image",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.max",
"numpy.concatenate",
"numpy.random.choice",
"matplotlib.pyplot.title",
"cv2.imread",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show",
"mrcnn.utils.compute_ap",
"PIL.Image.open",
"mrcnn.model.load_image_gt",
"os.path.join",
"numpy.append",
"numpy.zeros",
"os.path.abspath",
"matplotlib.pyplot.subplots"
] | [((364, 404), 'os.path.abspath', 'os.path.abspath', (['"""/home/user/Mask_RCNN/"""'], {}), "('/home/user/Mask_RCNN/')\n", (379, 404), False, 'import os\n'), ((431, 456), 'sys.path.append', 'sys.path.append', (['ROOT_DIR'], {}), '(ROOT_DIR)\n', (446, 456), False, 'import sys\n'), ((509, 539), 'os.path.join', 'os.path.join', (['ROOT_DIR', '"""logs"""'], {}), "(ROOT_DIR, 'logs')\n", (521, 539), False, 'import os\n'), ((604, 647), 'os.path.join', 'os.path.join', (['ROOT_DIR', '"""mask_rcnn_coco.h5"""'], {}), "(ROOT_DIR, 'mask_rcnn_coco.h5')\n", (616, 647), False, 'import os\n'), ((655, 686), 'os.path.exists', 'os.path.exists', (['COCO_MODEL_PATH'], {}), '(COCO_MODEL_PATH)\n', (669, 686), False, 'import os\n'), ((692, 739), 'mrcnn.utils.download_trained_weights', 'utils.download_trained_weights', (['COCO_MODEL_PATH'], {}), '(COCO_MODEL_PATH)\n', (722, 739), False, 'from mrcnn import model as modellib, utils\n'), ((2123, 2183), 'matplotlib.pyplot.subplots', 'plt.subplots', (['rows', 'cols'], {'figsize': '(size * cols, size * rows)'}), '(rows, cols, figsize=(size * cols, size * rows))\n', (2135, 2183), True, 'import matplotlib.pyplot as plt\n'), ((2229, 2246), 'numpy.array', 'np.array', (['list[0]'], {}), '(list[0])\n', (2237, 2246), True, 'import numpy as np\n'), ((7263, 7301), 'os.path.join', 'os.path.join', (['dataset_root_path', '"""pic"""'], {}), "(dataset_root_path, 'pic')\n", (7275, 7301), False, 'import os\n'), ((7320, 7363), 'os.path.join', 'os.path.join', (['dataset_root_path', '"""cv2_mask"""'], {}), "(dataset_root_path, 'cv2_mask')\n", (7332, 7363), False, 'import os\n'), ((7416, 7438), 'os.listdir', 'os.listdir', (['img_floder'], {}), '(img_floder)\n', (7426, 7438), False, 'import os\n'), ((8061, 8147), 'mrcnn.model.MaskRCNN', 'modellib.MaskRCNN', ([], {'mode': '"""inference"""', 'config': 'inference_config', 'model_dir': 'MODEL_DIR'}), "(mode='inference', config=inference_config, model_dir=\n MODEL_DIR)\n", (8078, 8147), True, 'from mrcnn import model as modellib, utils\n'), ((8287, 8331), 'numpy.random.choice', 'np.random.choice', (['dataset_test.image_ids', '(85)'], {}), '(dataset_test.image_ids, 85)\n', (8303, 8331), True, 'import numpy as np\n'), ((9648, 9759), 'mrcnn.utils.compute_ap', 'utils.compute_ap', (['save_box', 'save_class', 'save_mask', 'save_roi', 'save_id', 'save_score', 'save_m'], {'iou_threshold': '(0.5)'}), '(save_box, save_class, save_mask, save_roi, save_id,\n save_score, save_m, iou_threshold=0.5)\n', (9664, 9759), False, 'from mrcnn import model as modellib, utils\n'), ((9815, 9861), 'matplotlib.pyplot.plot', 'plt.plot', (['recalls', 'precisions', '"""b"""'], {'label': '"""PR"""'}), "(recalls, precisions, 'b', label='PR')\n", (9823, 9861), True, 'import matplotlib.pyplot as plt\n'), ((9866, 9901), 'matplotlib.pyplot.title', 'plt.title', (['"""precision-recall curve"""'], {}), "('precision-recall curve')\n", (9875, 9901), True, 'import matplotlib.pyplot as plt\n'), ((9906, 9926), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Recall"""'], {}), "('Recall')\n", (9916, 9926), True, 'import matplotlib.pyplot as plt\n'), ((9931, 9954), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Precision"""'], {}), "('Precision')\n", (9941, 9954), True, 'import matplotlib.pyplot as plt\n'), ((9959, 9971), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (9969, 9971), True, 'import matplotlib.pyplot as plt\n'), ((9976, 9986), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9984, 9986), True, 'import matplotlib.pyplot as plt\n'), ((2293, 2322), 
'numpy.append', 'np.append', (['b', 'list[i]'], {'axis': '(0)'}), '(b, list[i], axis=0)\n', (2302, 2322), True, 'import numpy as np\n'), ((3166, 3179), 'numpy.max', 'np.max', (['image'], {}), '(image)\n', (3172, 3179), True, 'import numpy as np\n'), ((6072, 6101), 'PIL.Image.open', 'Image.open', (["info['mask_path']"], {}), "(info['mask_path'])\n", (6082, 6101), False, 'from PIL import Image\n'), ((6159, 6225), 'numpy.zeros', 'np.zeros', (["[info['height'], info['width'], num_obj]"], {'dtype': 'np.uint8'}), "([info['height'], info['width'], num_obj], dtype=np.uint8)\n", (6167, 6225), True, 'import numpy as np\n'), ((8489, 8578), 'mrcnn.model.load_image_gt', 'modellib.load_image_gt', (['dataset_test', 'inference_config', 'image_id'], {'use_mini_mask': '(False)'}), '(dataset_test, inference_config, image_id,\n use_mini_mask=False)\n', (8511, 8578), True, 'from mrcnn import model as modellib, utils\n'), ((9797, 9808), 'numpy.mean', 'np.mean', (['AP'], {}), '(AP)\n', (9804, 9808), True, 'import numpy as np\n'), ((5363, 5439), 'cv2.imread', 'cv2.imread', (["(dataset_root_path + '/labelme_json/' + filestr + '_json/img.png')"], {}), "(dataset_root_path + '/labelme_json/' + filestr + '_json/img.png')\n", (5373, 5439), False, 'import cv2\n'), ((8738, 8781), 'numpy.concatenate', 'np.concatenate', (['(save_box, gt_bbox)'], {'axis': '(0)'}), '((save_box, gt_bbox), axis=0)\n', (8752, 8781), True, 'import numpy as np\n'), ((8807, 8856), 'numpy.concatenate', 'np.concatenate', (['(save_class, gt_class_id)'], {'axis': '(0)'}), '((save_class, gt_class_id), axis=0)\n', (8821, 8856), True, 'import numpy as np\n'), ((8881, 8925), 'numpy.concatenate', 'np.concatenate', (['(save_mask, gt_mask)'], {'axis': '(2)'}), '((save_mask, gt_mask), axis=2)\n', (8895, 8925), True, 'import numpy as np\n'), ((8967, 9011), 'mrcnn.model.mold_image', 'modellib.mold_image', (['image', 'inference_config'], {}), '(image, inference_config)\n', (8986, 9011), True, 'from mrcnn import model as modellib, utils\n'), ((9285, 9330), 'numpy.concatenate', 'np.concatenate', (["(save_roi, r['rois'])"], {'axis': '(0)'}), "((save_roi, r['rois']), axis=0)\n", (9299, 9330), True, 'import numpy as np\n'), ((9353, 9402), 'numpy.concatenate', 'np.concatenate', (["(save_id, r['class_ids'])"], {'axis': '(0)'}), "((save_id, r['class_ids']), axis=0)\n", (9367, 9402), True, 'import numpy as np\n'), ((9428, 9477), 'numpy.concatenate', 'np.concatenate', (["(save_score, r['scores'])"], {'axis': '(0)'}), "((save_score, r['scores']), axis=0)\n", (9442, 9477), True, 'import numpy as np\n'), ((9499, 9543), 'numpy.concatenate', 'np.concatenate', (["(save_m, r['masks'])"], {'axis': '(2)'}), "((save_m, r['masks']), axis=2)\n", (9513, 9543), True, 'import numpy as np\n'), ((6306, 6336), 'numpy.logical_not', 'np.logical_not', (['mask[:, :, -1]'], {}), '(mask[:, :, -1])\n', (6320, 6336), True, 'import numpy as np\n'), ((6501, 6530), 'numpy.logical_not', 'np.logical_not', (['mask[:, :, i]'], {}), '(mask[:, :, i])\n', (6515, 6530), True, 'import numpy as np\n')] |
from django.urls import re_path
from autism_gene_profiles_api import views
urlpatterns = [
re_path(
r"^/configuration/?$",
views.ConfigurationView.as_view(),
name="agp_configuration",
),
re_path(
r"^/genes/?$",
views.QueryProfilesView.as_view(),
name="agp_profiles_query",
),
re_path(
r"^/genes/(?P<gene_symbol>.+)/?$",
views.ProfileView.as_view(),
name="agp_profile"
)
]
| [
"autism_gene_profiles_api.views.ProfileView.as_view",
"autism_gene_profiles_api.views.QueryProfilesView.as_view",
"autism_gene_profiles_api.views.ConfigurationView.as_view"
] | [((144, 177), 'autism_gene_profiles_api.views.ConfigurationView.as_view', 'views.ConfigurationView.as_view', ([], {}), '()\n', (175, 177), False, 'from autism_gene_profiles_api import views\n'), ((264, 297), 'autism_gene_profiles_api.views.QueryProfilesView.as_view', 'views.QueryProfilesView.as_view', ([], {}), '()\n', (295, 297), False, 'from autism_gene_profiles_api import views\n'), ((405, 432), 'autism_gene_profiles_api.views.ProfileView.as_view', 'views.ProfileView.as_view', ([], {}), '()\n', (430, 432), False, 'from autism_gene_profiles_api import views\n')] |
# -*- coding: utf-8 -*-
"""
The Synchronous Runner
~~~~~~~~~~~~~~~~~~~~~~
The synchronous runner is a backend for the sync/async experiment that provides
a "run until complete"-like interface for entirely synchronous code.
Essentially, it does not actually yield the flow of control to an event loop:
instead, it entirely synchronously steps through the I/O.
To do this, it takes an object that it can repeatedly call ``next()`` on, which
will provide bytes each time that ``next()`` call is made, and returns a
generator. This generator is used essentially as a coroutine: each time
``next()`` is called, the generator will issue a call to ``select()``. The
``select()`` call has four possible outcomes:
1. timeout, which will yield an exception. This allows the calling code to make
a decision about whether to consider the timeout a problem or not.
2. socket readable, in which case the code will issue a read and yield the
read bytes.
3. socket writeable, in which case the code will call ``next()`` and write the
returned bytes to the socket.
4. socket closed (readable with empty read), in which case the generator will
exit.
"""
# TODO: Move to selectors module.
# TODO: Remember to force sockets to nonblocking.
import select
def socket_send_loop(sock, byte_source, timeout=5):
"""
The socket sending loop.
That timeout should be more clever, but for now it'll do.
"""
rlist = [sock]
wlist = [sock]
buffered_bytes = b''
while True:
        readable, writeable, _ = select.select(rlist, wlist, [], timeout)
        if readable:
            read_data = sock.recv(8192)
            if not read_data:
                # Socket closed.
                return
            yield read_data
        elif writeable:
if buffered_bytes:
data_to_send = buffered_bytes
else:
try:
data_to_send = next(byte_source)
except StopIteration:
# Sending is done. We should stop checking if the socket is
# writeable.
wlist = []
continue
sent_bytes = sock.send(data_to_send)
buffered_bytes = data_to_send[sent_bytes:]
def _request_generator(request, data_handler):
"""
Transforms a request into a generator of bytes. This allows the sending
loop to "pull" request bytes when it has room to send them.
"""
# First, the request header.
yield data_handler.request_to_bytes(request)
# Then, for the body. The body can be bytes or an iterator, but that's it.
# The iterator is the more general case, so let's transform the bytes into
# an iterator via my friend the list.
if isinstance(request.body, bytes):
body = [request.body]
else:
body = request.body
for data_chunk in body:
yield data_handler.body_chunk_to_bytes(data_chunk)
yield data_handler.end_of_body()
def run(request, data_handler, sock):
"""
A synchronous request/response sender.
This method takes a request and a data handler. The request codifies the
request to be sent.
    The data handler contains most of the intelligence in the code. It is a
    complex object that must provide the serialization hooks used below:
    - request_to_bytes, body_chunk_to_bytes and end_of_body, which build the
      outgoing byte stream for the request.
    - receive_bytes, which consumes incoming bytes and returns a response
      object once one is complete, or None until then.
This will run the socket loop indefinitely until a response object is
returned from the data handler.
This does not yet handle:
- 100 continue (not clear we should handle that at all)
- HTTP/2, which has some concerns about this interface
- plenty of error cases!
- socket connection
- connection pooling
"""
rgen = _request_generator(request, data_handler)
sock_loop = socket_send_loop(sock, rgen)
for byte_chunk in sock_loop:
response = data_handler.receive_bytes(byte_chunk)
if response is not None:
return response
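# A minimal sketch of the data handler contract used above (an illustration
# only, with placeholder byte formats rather than a real HTTP serializer).
class EchoDataHandler:
    def request_to_bytes(self, request):
        # Serialize the request head.
        return b"REQUEST\r\n"
    def body_chunk_to_bytes(self, chunk):
        # Pass body chunks through unchanged.
        return chunk
    def end_of_body(self):
        # Nothing extra to send after the body.
        return b""
    def receive_bytes(self, data):
        # Treat any received bytes as a complete "response"; otherwise keep waiting.
        return data or None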
| [
"select.select"
] | [((1516, 1558), 'select.select', 'select.select', (['[sock]', '[sock]', '[]', 'timeout'], {}), '([sock], [sock], [], timeout)\n', (1529, 1558), False, 'import select\n')] |
import unittest
from snlp.mwes.am import extract_ncs_from_sent, calculate_am, get_counts
class TestAms(unittest.TestCase):
def test_extract_nsc(self):
self.assertRaises(TypeError, extract_ncs_from_sent, 5)
self.assertRaises(TypeError, extract_ncs_from_sent, -2)
self.assertRaises(TypeError, extract_ncs_from_sent, [1, 2, 3])
self.assertRaises(TypeError, extract_ncs_from_sent, [1, 2, 'test'])
self.assertRaises(ValueError, extract_ncs_from_sent, '')
if __name__ == '__main__':
unittest.main() | [
"unittest.main"
] | [((529, 544), 'unittest.main', 'unittest.main', ([], {}), '()\n', (542, 544), False, 'import unittest\n')] |
#Dialog for Results of Calculation.
import wx
from App_DB_Bridge import send2DB
'''
---------- RESULT DIALOG ------------
'''
class resultDialog(wx.Dialog):
def __init__(self,userData, resultData):
super(resultDialog,self).__init__(None,wx.ID_ANY,"Results")
#labels
self.data_to_display = list()
self.num_labels = 0
        #Dialog Panel
result_dg_panel = wx.Panel(self,wx.ID_ANY,size=(600,700))
        #Dialog StaticText Widgets
st_widgets = list()
#Dialog BoxSizer
dg_bx_sizer = wx.BoxSizer(wx.VERTICAL)
        #Setting up data to be displayed in the dialog.
self.data_to_display, self.num_labels = self.prepare_labels(userData, resultData)
#Create Widgets
for i in range(self.num_labels):
st_widgets.append(wx.StaticText(result_dg_panel,wx.ID_ANY,self.data_to_display[i]))
#Create Sizers for Widgets
for ii in range(self.num_labels):
dg_bx_sizer.Add(st_widgets[ii],0,wx.ALL,0)
#Create "Save Data" Radio Button
#SaveDataRadioButton = wx.RadioButton(self,wx.ID_ANY,label="SAVE DATA?")
#Add it to the BoxSizer
#dg_bx_sizer.Add(SaveDataRadioButton,0,wx.ALL,3)
'''MAKE BOX TO SAVE THAT by USER CHOICE'''
send2DB(userData, resultData)
result_dg_panel.SetSizer(dg_bx_sizer)
def prepare_labels(self,userData,resultData):
dg_labels=list()
#Extract userData & resultData keys
ud_keys = list(userData.keys())
rd_keys = list(resultData.keys())
ud_value=list()
rd_value=list()
#Extract values from userData & resultData
for ud_key in ud_keys:
ud_value.append(userData[ud_key])
for rd_key in rd_keys:
rd_value.append(resultData[rd_key])
#Concatenate Labels
for i in range(len(ud_keys)):
tmp = ud_keys[i]+": "+str(ud_value[i])
dg_labels.append(tmp)
for ii in range(len(rd_keys)):
tmp = rd_keys[ii]+": "+str(rd_value[ii])
dg_labels.append(tmp)
return dg_labels, len(dg_labels)
'''
----------- RUN RESULT DIALOG ------------
'''
def result_Dialog(userData, resultData):
RtDlg = resultDialog(userData,resultData)
RtDlg.ShowModal()
RtDlg.Destroy()
| [
"wx.StaticText",
"wx.Panel",
"App_DB_Bridge.send2DB",
"wx.BoxSizer"
] | [((409, 451), 'wx.Panel', 'wx.Panel', (['self', 'wx.ID_ANY'], {'size': '(600, 700)'}), '(self, wx.ID_ANY, size=(600, 700))\n', (417, 451), False, 'import wx\n'), ((560, 584), 'wx.BoxSizer', 'wx.BoxSizer', (['wx.VERTICAL'], {}), '(wx.VERTICAL)\n', (571, 584), False, 'import wx\n'), ((1303, 1332), 'App_DB_Bridge.send2DB', 'send2DB', (['userData', 'resultData'], {}), '(userData, resultData)\n', (1310, 1332), False, 'from App_DB_Bridge import send2DB\n'), ((823, 889), 'wx.StaticText', 'wx.StaticText', (['result_dg_panel', 'wx.ID_ANY', 'self.data_to_display[i]'], {}), '(result_dg_panel, wx.ID_ANY, self.data_to_display[i])\n', (836, 889), False, 'import wx\n')] |
import asyncio, time
import asyncmrcache
import tracemalloc
tracemalloc.start()
import uvloop
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
def lcb(client):
pass
#test( client.debug_data )
async def run(loop):
#print("1")
#await asyncio.sleep(2)
#print("1")
#await asyncio.sleep(2)
#print("1")
#rc = await asyncmrcache.create_client( [("localhost",7000),("localhost",7001)], loop, lost_cb=lcb)
#rc = await asyncmrcache.create_client( [("localhost",7000)], loop, pool_size=2,lost_cb=lcb)
rc = await asyncmrcache.create_client( [("localhost",7000)], loop, lost_cb=lcb)
print(await rc.get(b"AeeRmoU2PqMdfg0ACeCqkE7gbCuc3J94"))
exit()
if 1:
await rc.set(b"test1",b"tets1")
print(await rc.get(b"test1"))
await rc.set(b"test2",b"tets2")
print(await rc.get(b"test2"))
print(await rc.get(b"test1"))
print(await rc.get(b"test1"))
print(await rc.get(b"test2"))
print(await rc.get(b"test2"))
for x in range(2):
futs = []
futs.append( rc.get(b"test1") )
futs.append( rc.get(b"test2") )
futs.append( rc.get(b"test1") )
futs.append( rc.get(b"test2") )
futs.append( rc.get(b"test1") )
futs.append( rc.get(b"test2") )
futs.append( rc.get(b"test2") )
futs.append( rc.get(b"test2") )
futs.append( rc.get(b"test2") )
futs.append( rc.get(b"test1") )
ret = await asyncio.gather(*futs)
for v in ret:
print(v)
#rc.stat()
await asyncio.sleep(1)
exit()
await rc.set(b"test1",b"tets1")
await rc.set(b"test2",b"tets2")
await rc.set(b"test3",b"tets3")
await rc.set(b"test4",b"tets4")
await rc.set(b"test5",b"tets5")
await rc.set(b"test6",b"tets6")
await rc.set(b"test7",b"tets7")
await rc.set(b"test8",b"tets8")
await rc.set(b"test9",b"tets9")
await rc.set(b"test10",b"tets10")
await rc.set(b"test11",b"tets11")
while 1:
print("top")
futs = []
#print(await rc.get(b"test1"))
futs.append( rc.get(b"test1") )
futs.append( rc.get(b"test2") )
futs.append( rc.get(b"test3") )
futs.append( rc.get(b"test4") )
futs.append( rc.get(b"test5") )
futs.append( rc.get(b"test6") )
futs.append( rc.get(b"test7") )
futs.append( rc.get(b"test8") )
futs.append( rc.get(b"test9") )
futs.append( rc.get(b"test10") )
try:
ret = await asyncio.gather(*futs)
except Exception as e:
print(" Connection failed waiting 5: ",e)
await asyncio.sleep(5)
continue
futs = []
for v in ret:
print(v)
await asyncio.sleep(1)
print("before close")
await rc.close()
print("after close")
exit()
await rc.set(b"test1",b"test1")
await rc.set(b"test2",b"test2")
await rc.set(b"test3",b"test3")
await rc.set(b"test4",b"test4")
print(await rc.get(b"test1"))
print(await rc.get(b"test2"))
print(await rc.get(b"test3"))
print(await rc.get(b"test4"))
exit()
num_items = 2000
item_sz = 10000
#print( await rc.get(b"test541") )
#print( await rc.get(b"test615") )
if 1:
for x in range(num_items):
k = b"test" + str(x).encode()
v = b"test" + str(x).encode()
for y in range(item_sz-len(v)):
v += b'a'
await rc.set(k, v)
#print(k)
#if (x%10000)==0: print(k)
await asyncio.sleep(2)
rc.stat()
if 1:
missed = 0
for x in range(num_items):
k = b"test" + str(x).encode()
exp = b"test" + str(x).encode()
for y in range(item_sz-len(exp)):
exp += b'a'
v = await rc.get(k)
if v == None: missed += 1
if v != exp:
if v != None: print(exp[:10], " != ", v[:10])
print( "Missed ", missed )
print( "hit ", num_items-missed )
await asyncio.sleep(2)
rc.stat()
exit()
print( await rc.get(b"test22") )
print( await rc.get(b"test25") )
print( await rc.get(b"test26") )
print( await rc.get(b"test27") )
rc.set(b"test", b"good")
rc.set(b"test22", b"good")
rc.set(b"test25", b"good")
rc.set(b"test1", b"good")
rc.set(b"test212", b"good")
rc.set(b"test500", b"good")
exit()
print("A")
for x in range(1):
futs = []
print("1")
futs.append( rc.get(b"test") )
futs.append( rc.get(b"test212") )
futs.append( rc.get(b"test1") )
futs.append( rc.get(b"test") )
futs.append( rc.get(b"test") )
futs.append( rc.get(b"test500") )
futs.append( rc.get(b"test") )
futs.append( rc.get(b"test") )
ret = await asyncio.gather(*futs)
for v in ret:
if v != b"good":
print("NO",v)
exit()
#rc.set(b"ffdsad",b"dfadsfwee")
#rc.set(b"ffdsad",b"dfadsfwee")
#rc.set(b"ffdsad",b"dfadsfwee")
print("DELME")
print(rc.q)
await rc.close()
print(rc.q)
return
if __name__ == '__main__':
loop = asyncio.get_event_loop()
loop.run_until_complete(run(loop))
loop.close()
print("DONE")
| [
"tracemalloc.start",
"asyncio.gather",
"asyncmrcache.create_client",
"uvloop.EventLoopPolicy",
"asyncio.sleep",
"asyncio.get_event_loop"
] | [((62, 81), 'tracemalloc.start', 'tracemalloc.start', ([], {}), '()\n', (79, 81), False, 'import tracemalloc\n'), ((127, 151), 'uvloop.EventLoopPolicy', 'uvloop.EventLoopPolicy', ([], {}), '()\n', (149, 151), False, 'import uvloop\n'), ((4808, 4832), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (4830, 4832), False, 'import asyncio, time\n'), ((536, 604), 'asyncmrcache.create_client', 'asyncmrcache.create_client', (["[('localhost', 7000)]", 'loop'], {'lost_cb': 'lcb'}), "([('localhost', 7000)], loop, lost_cb=lcb)\n", (562, 604), False, 'import asyncmrcache\n'), ((2560, 2576), 'asyncio.sleep', 'asyncio.sleep', (['(1)'], {}), '(1)\n', (2573, 2576), False, 'import asyncio, time\n'), ((3309, 3325), 'asyncio.sleep', 'asyncio.sleep', (['(2)'], {}), '(2)\n', (3322, 3325), False, 'import asyncio, time\n'), ((3742, 3758), 'asyncio.sleep', 'asyncio.sleep', (['(2)'], {}), '(2)\n', (3755, 3758), False, 'import asyncio, time\n'), ((4485, 4506), 'asyncio.gather', 'asyncio.gather', (['*futs'], {}), '(*futs)\n', (4499, 4506), False, 'import asyncio, time\n'), ((1397, 1418), 'asyncio.gather', 'asyncio.gather', (['*futs'], {}), '(*futs)\n', (1411, 1418), False, 'import asyncio, time\n'), ((1485, 1501), 'asyncio.sleep', 'asyncio.sleep', (['(1)'], {}), '(1)\n', (1498, 1501), False, 'import asyncio, time\n'), ((2362, 2383), 'asyncio.gather', 'asyncio.gather', (['*futs'], {}), '(*futs)\n', (2376, 2383), False, 'import asyncio, time\n'), ((2471, 2487), 'asyncio.sleep', 'asyncio.sleep', (['(5)'], {}), '(5)\n', (2484, 2487), False, 'import asyncio, time\n')] |
#!/usr/bin/python
# -*- coding: utf-8 -*-
from PyQt4 import QtCore
from PyQt4 import QtGui
from modules import conf
from modules import network
cat_emoticon = {}
cat_emoticon["welcome"] = """
∧_∧ / ̄ ̄ ̄ ̄ ̄ ̄ ̄ ̄
( ´∀`) < Welcome!
/ | \ I'm your cat of
/ .| \ the day!
/ "⌒ヽ |.イ | \________
__ | .ノ | || |__
ノく__つ∪∪ \
_((________\
 ̄ ̄ヽつ ̄ ̄ ̄ ̄ ̄ ̄ | | ̄
^ _____
/ I'm fuzzy. \
\______/
"""
cat_emoticon["connection_error"] = """
∧_∧ / ̄ ̄ ̄ ̄ ̄ ̄ ̄ ̄
( ´∀`) < Connection Failure!
/ | \ Check for network
/ .| \ connection!
/ "⌒ヽ |.イ | \________
__ | .ノ | || |__
ノく__つ∪∪ \
_((________\
 ̄ ̄ヽつ ̄ ̄ ̄ ̄ ̄ ̄ | | ̄
^ _____
/ I'm fuzzy. \
\______/
"""
cat_emoticon["login_error"] = """
∧_∧ / ̄ ̄ ̄ ̄ ̄ ̄ ̄ ̄
( ´∀`) < Log-in Failure!
/ | \ Check for ID ||
/ .| \ password!
/ "⌒ヽ |.イ | \________
__ | .ノ | || |__
ノく__つ∪∪ \
_((________\
 ̄ ̄ヽつ ̄ ̄ ̄ ̄ ̄ ̄ | | ̄
^ _____
/ I'm fuzzy. \
\______/
"""
cat_emoticon["complete"] = """
∧_∧ / ̄ ̄ ̄ ̄ ̄ ̄ ̄ ̄
( ´∀`) < Process complete!
/ | \ You did the right thing.
/ .| \ He deserved more views.
/ "⌒ヽ |.イ | \________
__ | .ノ | || |__
ノく__つ∪∪ \
_((________\
 ̄ ̄ヽつ ̄ ̄ ̄ ̄ ̄ ̄ | | ̄
^ _____
/ I'm fuzzy. \
\______/
"""
cat_emoticon["counting"] = """
∧_∧ / ̄ ̄ ̄ ̄ ̄ ̄ ̄ ̄
( ´∀`) < Processing ...
/ | \ ? / ? ...
/ .| \ Please wait
/ "⌒ヽ |.イ | \________
__ | .ノ | || |__
ノく__つ∪∪ \
_((________\
 ̄ ̄ヽつ ̄ ̄ ̄ ̄ ̄ ̄ | | ̄
^ _____
/ I'm fuzzy. \
\______/
"""
class MainFrame(QtGui.QWidget):
# POST method variables
data = {"usr_id" : "",
"usr_pwd" : ""}
# GET method variables
headers = {'Referer' : conf.referer}
pools = 1
interval = 1
view_count = 0
# gui-variables
_btn = {}
_spinbox = {}
_title = {}
_textbox = {}
def __init__(self, master=None):
super(MainFrame, self).__init__()
QtCore.pyqtSignal(int)
self.grid = QtGui.QGridLayout()
self.set_title()
self.set_textbox()
self.set_spinbox()
self.set_button()
self.add_widget()
self.connect_activity()
self.setLayout(self.grid)
self.show()
def add_widget(self):
self.grid.addWidget(self._title["id"], 0, 0)
self.grid.addWidget(self._title["pwd"], 1, 0)
self.grid.addWidget(self._textbox["id"], 0, 1, 1, 3)
self.grid.addWidget(self._textbox["pwd"], 1, 1, 1, 3)
self.grid.addWidget(self._title["interval"], 2, 0)
self.grid.addWidget(self._title["cnt"], 3, 0)
self.grid.addWidget(self._spinbox["interval"], 2, 1)
self.grid.addWidget(self._spinbox["cnt"], 3, 1)
self.grid.addWidget(self._title["pool"], 2, 2)
self.grid.addWidget(self._spinbox["pool"], 2, 3)
self.grid.addWidget(self._btn["run"], 3, 2, 1, 2)
self.grid.addWidget(self._textbox["status"], 4, 0, 5, 4)
def connect_activity(self):
self._spinbox["interval"].valueChanged.connect(self.update_interval)
self._spinbox["cnt"].valueChanged.connect(self.update_view_count)
self._spinbox["pool"].valueChanged.connect(self.update_pool_number)
self._textbox["id"].textChanged[str].connect(self.update_id)
self._textbox["pwd"].textChanged[str].connect(self.update_pwd)
self._btn["run"].clicked.connect(self.run)
def set_textbox(self):
self._textbox["id"] = QtGui.QLineEdit(self)
self._textbox["pwd"] = QtGui.QLineEdit(self)
self._textbox["pwd"].setEchoMode(QtGui.QLineEdit.Password)
self._textbox["status"] = QtGui.QTextEdit(self)
self._textbox["status"].setReadOnly(True)
self._textbox["status"].setLineWrapMode(QtGui.QTextEdit.NoWrap)
if network.check_network_connection():
self._textbox["status"].setPlainText(cat_emoticon["welcome"])
else:
self._textbox["status"].setPlainText(cat_emoticon["connection_error"])
self._textbox["status"].moveCursor(QtGui.QTextCursor.Start)
def set_title(self):
self.setWindowTitle("Eclass' Got Talent")
self._title["id"] = QtGui.QLabel("Id: ")
self._title["id"].setStyleSheet("font: bold")
self._title["id"].setAlignment(QtCore.Qt.AlignCenter)
self._title["pwd"] = QtGui.QLabel("Pwd: ")
self._title["pwd"].setStyleSheet("font: bold")
self._title["pwd"].setAlignment(QtCore.Qt.AlignCenter)
self._title["interval"] = QtGui.QLabel("Interval (sec): ")
self._title["interval"].setStyleSheet("font: bold")
self._title["interval"].setAlignment(QtCore.Qt.AlignCenter)
self._title["cnt"] = QtGui.QLabel("View counts: ")
self._title["cnt"].setStyleSheet("font: bold")
self._title["cnt"].setAlignment(QtCore.Qt.AlignCenter)
self._title["pool"] = QtGui.QLabel("Pools : ")
self._title["pool"].setStyleSheet("font: bold")
self._title["pool"].setAlignment(QtCore.Qt.AlignCenter)
def set_button(self):
self._btn["run"] = QtGui.QPushButton("Let him shine", self)
self._btn["run"].setStyleSheet("font: bold")
def set_spinbox(self):
self._spinbox["interval"] = QtGui.QSpinBox()
self._spinbox["interval"].setRange(0, 100)
self._spinbox["interval"].setSingleStep(1)
self._spinbox["interval"].setValue(1)
self._spinbox["cnt"] = QtGui.QSpinBox()
self._spinbox["cnt"].setRange(0, 9999)
self._spinbox["cnt"].setSingleStep(1)
self._spinbox["cnt"].setValue(0)
self._spinbox["pool"] = QtGui.QSpinBox()
self._spinbox["pool"].setRange(0, 10)
self._spinbox["pool"].setSingleStep(1)
self._spinbox["pool"].setValue(1)
def update_pool_number(self, value):
self.pools = value
def update_view_count(self, value):
self.view_count = value
def update_interval(self, value):
self.interval = value
def update_id(self, text):
self.data["usr_id"] = text
def update_pwd(self, text):
self.data["usr_pwd"] = text
def run(self):
import time
if not network.check_network_connection():
self._textbox["status"].setPlainText(cat_emoticon["connection_error"])
return
cookie = network.authorize_session(self.data, self.headers)
if len(cookie) == 0:
self._textbox["status"].setPlainText(cat_emoticon["login_error"])
return
else:
"""
cat_emoticon["counting"].replace("?", "0", 1)
cat_emoticon["counting"].replace("?", str(self.view_count), 1)
self._textbox["status"].setPlainText(cat_emoticon["counting"])
"""
for i in range(self.view_count):
network.request_lecture(cookie, self.interval)
self._textbox["status"].setPlainText(cat_emoticon["complete"])
| [
"PyQt4.QtCore.pyqtSignal",
"PyQt4.QtGui.QSpinBox",
"modules.network.authorize_session",
"PyQt4.QtGui.QLabel",
"PyQt4.QtGui.QPushButton",
"modules.network.request_lecture",
"PyQt4.QtGui.QLineEdit",
"PyQt4.QtGui.QGridLayout",
"modules.network.check_network_connection",
"PyQt4.QtGui.QTextEdit"
] | [((2482, 2504), 'PyQt4.QtCore.pyqtSignal', 'QtCore.pyqtSignal', (['int'], {}), '(int)\n', (2499, 2504), False, 'from PyQt4 import QtCore\n'), ((2526, 2545), 'PyQt4.QtGui.QGridLayout', 'QtGui.QGridLayout', ([], {}), '()\n', (2543, 2545), False, 'from PyQt4 import QtGui\n'), ((4008, 4029), 'PyQt4.QtGui.QLineEdit', 'QtGui.QLineEdit', (['self'], {}), '(self)\n', (4023, 4029), False, 'from PyQt4 import QtGui\n'), ((4061, 4082), 'PyQt4.QtGui.QLineEdit', 'QtGui.QLineEdit', (['self'], {}), '(self)\n', (4076, 4082), False, 'from PyQt4 import QtGui\n'), ((4185, 4206), 'PyQt4.QtGui.QTextEdit', 'QtGui.QTextEdit', (['self'], {}), '(self)\n', (4200, 4206), False, 'from PyQt4 import QtGui\n'), ((4341, 4375), 'modules.network.check_network_connection', 'network.check_network_connection', ([], {}), '()\n', (4373, 4375), False, 'from modules import network\n'), ((4722, 4742), 'PyQt4.QtGui.QLabel', 'QtGui.QLabel', (['"""Id: """'], {}), "('Id: ')\n", (4734, 4742), False, 'from PyQt4 import QtGui\n'), ((4889, 4910), 'PyQt4.QtGui.QLabel', 'QtGui.QLabel', (['"""Pwd: """'], {}), "('Pwd: ')\n", (4901, 4910), False, 'from PyQt4 import QtGui\n'), ((5064, 5096), 'PyQt4.QtGui.QLabel', 'QtGui.QLabel', (['"""Interval (sec): """'], {}), "('Interval (sec): ')\n", (5076, 5096), False, 'from PyQt4 import QtGui\n'), ((5255, 5284), 'PyQt4.QtGui.QLabel', 'QtGui.QLabel', (['"""View counts: """'], {}), "('View counts: ')\n", (5267, 5284), False, 'from PyQt4 import QtGui\n'), ((5434, 5458), 'PyQt4.QtGui.QLabel', 'QtGui.QLabel', (['"""Pools : """'], {}), "('Pools : ')\n", (5446, 5458), False, 'from PyQt4 import QtGui\n'), ((5634, 5674), 'PyQt4.QtGui.QPushButton', 'QtGui.QPushButton', (['"""Let him shine"""', 'self'], {}), "('Let him shine', self)\n", (5651, 5674), False, 'from PyQt4 import QtGui\n'), ((5793, 5809), 'PyQt4.QtGui.QSpinBox', 'QtGui.QSpinBox', ([], {}), '()\n', (5807, 5809), False, 'from PyQt4 import QtGui\n'), ((5990, 6006), 'PyQt4.QtGui.QSpinBox', 'QtGui.QSpinBox', ([], {}), '()\n', (6004, 6006), False, 'from PyQt4 import QtGui\n'), ((6174, 6190), 'PyQt4.QtGui.QSpinBox', 'QtGui.QSpinBox', ([], {}), '()\n', (6188, 6190), False, 'from PyQt4 import QtGui\n'), ((6891, 6941), 'modules.network.authorize_session', 'network.authorize_session', (['self.data', 'self.headers'], {}), '(self.data, self.headers)\n', (6916, 6941), False, 'from modules import network\n'), ((6735, 6769), 'modules.network.check_network_connection', 'network.check_network_connection', ([], {}), '()\n', (6767, 6769), False, 'from modules import network\n'), ((7385, 7431), 'modules.network.request_lecture', 'network.request_lecture', (['cookie', 'self.interval'], {}), '(cookie, self.interval)\n', (7408, 7431), False, 'from modules import network\n')] |
from zdppy_requests import requests
response = requests.get("https://www.baidu.com/")
print(response.status_code)
print(response.text)
| [
"zdppy_requests.requests.get"
] | [((48, 86), 'zdppy_requests.requests.get', 'requests.get', (['"""https://www.baidu.com/"""'], {}), "('https://www.baidu.com/')\n", (60, 86), False, 'from zdppy_requests import requests\n')] |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
# __author__ = 'Liantian'
# __email__ = "<EMAIL>"
#
# MIT License
#
# Copyright (c) 2018 liantian
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import mimetypes
import random
import string
import base62
# from google.appengine.api import images
from google.appengine.runtime.apiproxy_errors import RequestTooLargeError
from flask import Flask, request, jsonify, render_template, url_for
from flask_bootstrap import Bootstrap
from forms import PostForm
from models import Post, Attachment
app = Flask(__name__)
app.debug = True
app.config['SECRET_KEY'] = ''.join(random.choice(string.ascii_uppercase + string.digits) for i in range(32))
Bootstrap(app)
@app.route('/admin/', methods=['POST', "GET"])
def index():
post = Post.query().get()
form = PostForm(obj=post)
if form.validate_on_submit():
form.save()
return render_template("admin.html", post=post, form=form)
@app.route('/admin/editormd_image_upload/', methods=['POST'], endpoint="admin.editormd_image_upload")
def editormd_image_upload():
mimetypes.init()
if 'editormd-image-file' not in request.files:
return jsonify({"success": 0, "message": u"No file part"})
file = request.files['editormd-image-file']
if file.filename == '':
return jsonify({"success": 0, "message": u"No selected file"})
if file:
filename = file.filename
mime_type = mimetypes.guess_type(filename)[0] or "application/octet-stream"
att = Attachment()
att.filename = filename
att.mime_type = mime_type
f = file.read()
if mime_type in ('image/jpeg', 'image/png', 'image/gif'):
# f = images.im_feeling_lucky(f)
pass
att.file = f
try:
att_key = att.put()
except RequestTooLargeError:
return jsonify({"success": 0, "message": u"RequestTooLargeError"})
url = url_for("download", key=base62.encode(att_key.integer_id()), filename=filename)
return jsonify({"success": 1, "message": u"No allowed_file", "url": url})
return jsonify({"success": 0, "message": u"No allowed_file"})
@app.route('/att/<key>/<filename>', methods=['GET'], endpoint="download")
def download(key, filename):
return "None"
| [
"flask.render_template",
"random.choice",
"mimetypes.init",
"models.Attachment",
"models.Post.query",
"flask.Flask",
"forms.PostForm",
"flask_bootstrap.Bootstrap",
"mimetypes.guess_type",
"flask.jsonify"
] | [((1556, 1571), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (1561, 1571), False, 'from flask import Flask, request, jsonify, render_template, url_for\n'), ((1698, 1712), 'flask_bootstrap.Bootstrap', 'Bootstrap', (['app'], {}), '(app)\n', (1707, 1712), False, 'from flask_bootstrap import Bootstrap\n'), ((1816, 1834), 'forms.PostForm', 'PostForm', ([], {'obj': 'post'}), '(obj=post)\n', (1824, 1834), False, 'from forms import PostForm\n'), ((1900, 1951), 'flask.render_template', 'render_template', (['"""admin.html"""'], {'post': 'post', 'form': 'form'}), "('admin.html', post=post, form=form)\n", (1915, 1951), False, 'from flask import Flask, request, jsonify, render_template, url_for\n'), ((2089, 2105), 'mimetypes.init', 'mimetypes.init', ([], {}), '()\n', (2103, 2105), False, 'import mimetypes\n'), ((3117, 3171), 'flask.jsonify', 'jsonify', (["{'success': 0, 'message': u'No allowed_file'}"], {}), "({'success': 0, 'message': u'No allowed_file'})\n", (3124, 3171), False, 'from flask import Flask, request, jsonify, render_template, url_for\n'), ((1624, 1677), 'random.choice', 'random.choice', (['(string.ascii_uppercase + string.digits)'], {}), '(string.ascii_uppercase + string.digits)\n', (1637, 1677), False, 'import random\n'), ((2172, 2223), 'flask.jsonify', 'jsonify', (["{'success': 0, 'message': u'No file part'}"], {}), "({'success': 0, 'message': u'No file part'})\n", (2179, 2223), False, 'from flask import Flask, request, jsonify, render_template, url_for\n'), ((2315, 2370), 'flask.jsonify', 'jsonify', (["{'success': 0, 'message': u'No selected file'}"], {}), "({'success': 0, 'message': u'No selected file'})\n", (2322, 2370), False, 'from flask import Flask, request, jsonify, render_template, url_for\n'), ((2515, 2527), 'models.Attachment', 'Attachment', ([], {}), '()\n', (2525, 2527), False, 'from models import Post, Attachment\n'), ((3038, 3104), 'flask.jsonify', 'jsonify', (["{'success': 1, 'message': u'No allowed_file', 'url': url}"], {}), "({'success': 1, 'message': u'No allowed_file', 'url': url})\n", (3045, 3104), False, 'from flask import Flask, request, jsonify, render_template, url_for\n'), ((1786, 1798), 'models.Post.query', 'Post.query', ([], {}), '()\n', (1796, 1798), False, 'from models import Post, Attachment\n'), ((2437, 2467), 'mimetypes.guess_type', 'mimetypes.guess_type', (['filename'], {}), '(filename)\n', (2457, 2467), False, 'import mimetypes\n'), ((2869, 2928), 'flask.jsonify', 'jsonify', (["{'success': 0, 'message': u'RequestTooLargeError'}"], {}), "({'success': 0, 'message': u'RequestTooLargeError'})\n", (2876, 2928), False, 'from flask import Flask, request, jsonify, render_template, url_for\n')] |
# -*- coding: utf-8 -*-
# Python imports
import codecs
import csv
from datetime import timedelta, datetime, time
from functools import wraps
import logging
# Django imports
from django.conf import settings
from django.core.cache import cache
from django.core.exceptions import ValidationError
from django.db import transaction
from django.utils.encoding import force_text
from django.utils.safestring import mark_safe
from django.utils.translation import override, ugettext_lazy as _
# 3rd party imports
from pytz import timezone
# This project's imports
from .forms import RegistrationCenterCSVForm, CSV_FIELDS
from .models import Blacklist, Whitelist, Registration, RegistrationCenter
from libya_elections.constants import CENTER_ID_MAX_INT_VALUE, CENTER_ID_MIN_INT_VALUE
from libya_elections.phone_numbers import canonicalize_phone_number
from voting.models import Election, RegistrationPeriod
logger = logging.getLogger(__name__)
STATUS_MESSAGE = _(
"Imported {created} new centers. Updated {updated} centers. "
"There were {dupes} duplicates skipped. Blank rows {blank}")
ERRORS_OCCURRED_MESSAGE = _(
"Errors occurred while parsing the uploaded file. No centers were imported or updated."
)
PARSING_ERROR = _(
"Error found in line {line_number}: row does not contain the exact number of columns "
"required or it contains blank lines. The row should only have the following "
" columns: {columns}.")
COULD_NOT_PARSE_ERROR = _(
"Could not parse as a CSV file."
)
FORM_FIELD_ERROR = _(
'Error in row {line_number}. Field: {field_name}. Value: {value}. Error: {error}')
FORM_ERROR = _(
'Error in row {line_number}. Error: {error}')
MIDNIGHT = time(0, 0, 0)
def registration_in_progress(as_of=None):
return RegistrationPeriod.objects.in_progress(as_of=as_of)
def registration_allowed(msg):
"""
Return True if registration is allowed in any form:
- Regular (Tool 1) registration
- Registration changes during SMS Polling (for selected voters)
- These voters will have msg.fields['registration_allowed'] set to True
"""
return (tool_1_enabled()
or msg.fields.get('registration_allowed'))
def tool_1_enabled(as_of=None):
"""SMS voter registration"""
return settings.ENABLE_ALL_TOOLS \
or registration_in_progress(as_of)
def addressed_to_us(func):
"""Handles messages that are addressed to us."""
@wraps(func)
def wrapper(cls, router, msg):
if cls.is_addressed_to_us(msg):
return func(cls, router, msg)
return wrapper
def center_checkin_times(election):
"""The center check-in time starts at midnight local time, 2 days before polling starts,
and ends at the end of polling.
Return the start and stop times for check-in.
"""
two_days_before = election.polling_start_time.date() - timedelta(days=2)
tz = timezone(settings.TIME_ZONE)
activation_start = tz.localize(datetime.combine(two_days_before, MIDNIGHT))
return activation_start, election.polling_end_time
def center_opening_enabled(as_of=None):
"""
The center opening period is from midnight, two days before polling starts,
to the end of polling, for in-person elections.
(This can be overridden by ENABLE_ALL_TOOLS.)
"""
return (settings.ENABLE_ALL_TOOLS
or Election.objects.get_elections_with_center_opening_enabled(as_of).exists())
def phone_activation_enabled(as_of=None):
"""
The phone activation period is from midnight, two days before polling starts,
to the end of polling.
(This can be overridden by ENABLE_ALL_TOOLS.)
"""
return (settings.ENABLE_ALL_TOOLS
or Election.objects.get_elections_with_phone_activation_enabled(as_of).exists())
def preliminary_vote_counts_enabled(as_of=None):
"""
The preliminary vote count submitting period is the same as the polling
reporting period.
(This can be overridden by ENABLE_ALL_TOOLS.)
"""
return polling_reports_enabled(as_of)
def polling_reports_enabled(as_of=None):
"""
The polling reporting period is from the time polling starts until 16 hours
after polling ends.
"""
return (settings.ENABLE_ALL_TOOLS
or Election.objects.get_elections_with_polling_reports_enabled(as_of).exists())
def is_blacklisted(number):
"""
Return True if 'number' is on the blacklist.
"""
blacklist = cache.get('blacklist')
if blacklist is None:
blacklist = set(Blacklist.objects.values_list('phone_number', flat=True))
cache.set('blacklist', blacklist)
return number in blacklist
def is_whitelisted(number):
"""
Return True if 'number' is on the whitelist.
"""
cache_key = 'whitelist:%s' % number
whitelisted = cache.get(cache_key)
if whitelisted is None:
whitelisted = Whitelist.objects.filter(phone_number=number).exists()
if whitelisted:
# Only cache if number is on the whitelist
cache.set(cache_key, whitelisted)
return whitelisted
def remaining_registrations(number):
"""Return how many more registrations can be made using this phone"""
num_already = Registration.objects.filter(sms__from_number=number).count()
remaining = settings.MAX_REGISTRATIONS_PER_PHONE - num_already
return max(0, remaining)
def is_center_id_valid(center_id):
try:
int_center_id = int(center_id)
assert CENTER_ID_MIN_INT_VALUE <= int_center_id <= CENTER_ID_MAX_INT_VALUE
except (AssertionError, TypeError, ValueError):
return False
return True
def import_center_csv_row(columns, row, line_number, stats, errors):
"""Import a CSV row and add, update, ignore, or raise an error as appropriate.
This is a support function for update_center_table().
"""
if any(row):
if len(row) != len(CSV_FIELDS):
errors.append(PARSING_ERROR.format(line_number=line_number, columns=columns))
return
# create a dictionary analogous to request.POST to feed to form
data = dict(list(zip(CSV_FIELDS, row)))
try:
# pull center_id and see if this center exists (so we know whether to update or insert)
center = RegistrationCenter.objects.get(center_id=data['center_id'])
except RegistrationCenter.DoesNotExist:
center = None
except ValueError:
# bad center_id, but we'll validate it properly below
center = None
if center:
# This is an update
action = 'num_updated'
# Set the initial values of our non-model form fields
# so we can tell if they've changed later
with override('ar'):
old_center_type = force_text(center.get_center_type_display())
initial = {
'office_id': center.office.id,
'office': center.office,
'constituency_id': center.constituency.id,
'constituency': center.constituency,
'subconstituency_id': center.subconstituency.id,
'subconstituency': center.subconstituency,
'copy_of': center.copy_of,
'center_type': old_center_type
}
if center.copy_of:
initial['copy_of_id'] = center.copy_of.center_id
form = RegistrationCenterCSVForm(instance=center, initial=initial, data=data)
else:
# This is an insert
action = 'num_created'
form = RegistrationCenterCSVForm(data=data)
if form.is_valid():
if form.has_changed():
logger.debug('The following fields on center have changed %s', form.changed_data)
stats[action] += 1
form.save()
else:
stats['num_dupes'] += 1
else:
for field_name, form_errors in form.errors.items():
for error in form_errors:
if field_name in data:
# this is a field-specific error
errors.append(FORM_FIELD_ERROR.format(line_number=line_number,
field_name=field_name,
value=data[field_name],
error=error))
else:
# this is non-field error
errors.append(FORM_ERROR.format(line_number=line_number, error=error))
else:
stats['num_blank'] += 1
class CenterImportFailedError(Exception):
"""Custom exception raised when CSV center import was not successful"""
pass
def update_center_table(_file):
"""
Import voting centers from a CSV file. It creates or updates.
Safe to run repeatedly; if a voting center already exists with the
center ID being imported it will update it if needed.
Returns a 2-tuple of (message, successful), where message is status information (including
errors, if any) and successful is a Boolean.
If any errors are reported, no imports occur.
"""
errors = []
reader = csv.reader(codecs.iterdecode(_file, 'utf-8'))
stats = {
'num_blank': 0,
'num_created': 0,
'num_dupes': 0,
'num_updated': 0,
}
line_number = 1
columns = ", ".join(CSV_FIELDS)
try:
headers = next(reader) # gets rid of the header row
except UnicodeDecodeError:
# this can happen when the file is not a CSV file jpg png etc...
return COULD_NOT_PARSE_ERROR, False
if not len(headers) == len(CSV_FIELDS):
return PARSING_ERROR.format(line_number=1, columns=columns), False
for index, header in enumerate(headers):
if not header == CSV_FIELDS[index]:
return PARSING_ERROR.format(line_number=1, columns=columns), False
# If errors happen during the import and we want Django to roll
# back the transaction, we need to exit the transaction context
# with an exception (eventually).
try:
with transaction.atomic():
for row in reader:
line_number += 1
import_center_csv_row(columns, row, line_number, stats, errors)
if errors:
errors.insert(0, force_text(ERRORS_OCCURRED_MESSAGE))
message = mark_safe('<br><br>'.join(errors))
logger.debug(errors)
# trigger rollback:
raise CenterImportFailedError
else:
message = STATUS_MESSAGE.format(blank=stats['num_blank'],
created=stats['num_created'],
dupes=stats['num_dupes'],
updated=stats['num_updated'])
except CenterImportFailedError:
# Just to trigger a rollback
logger.debug("Rolled back all imported centers due to errors.")
else:
logger.debug("No errors during import, will commit changes if nothing else goes wrong "
"during the request.")
return message, not bool(errors)
def process_blackwhitelisted_numbers_file(model, import_file):
"""Process a text file with one phone number per line, adding each phone number to the model.
The model must be one of Blacklist or Whitelist. The import_file must be an open file object.
"""
imported = skipped = 0
errors = []
for line_number, line in enumerate(import_file.read().splitlines()):
phone_number = canonicalize_phone_number(line.decode())
if phone_number:
if model.objects.filter(phone_number=phone_number).exists():
skipped += 1
else:
obj = model(phone_number=phone_number)
try:
obj.full_clean()
except ValidationError:
errors.append(str(line_number + 1))
else:
obj.save()
imported += 1
return (imported, skipped, errors)
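# Illustrative call of the helper above (hypothetical path; Blacklist and Whitelist are the
# two supported models, and the file is read as bytes, one phone number per line):
# with open("/tmp/blacklist_numbers.txt", "rb") as fh:
#     imported, skipped, errors = process_blackwhitelisted_numbers_file(Blacklist, fh)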
| [
"logging.getLogger",
"pytz.timezone",
"codecs.iterdecode",
"django.utils.translation.ugettext_lazy",
"datetime.time",
"voting.models.Election.objects.get_elections_with_phone_activation_enabled",
"django.db.transaction.atomic",
"voting.models.Election.objects.get_elections_with_polling_reports_enabled",
"voting.models.RegistrationPeriod.objects.in_progress",
"functools.wraps",
"django.utils.encoding.force_text",
"django.utils.translation.override",
"voting.models.Election.objects.get_elections_with_center_opening_enabled",
"django.core.cache.cache.set",
"datetime.timedelta",
"datetime.datetime.combine",
"django.core.cache.cache.get"
] | [((910, 937), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (927, 937), False, 'import logging\n'), ((956, 1082), 'django.utils.translation.ugettext_lazy', '_', (['"""Imported {created} new centers. Updated {updated} centers. There were {dupes} duplicates skipped. Blank rows {blank}"""'], {}), "('Imported {created} new centers. Updated {updated} centers. There were {dupes} duplicates skipped. Blank rows {blank}'\n )\n", (957, 1082), True, 'from django.utils.translation import override, ugettext_lazy as _\n'), ((1116, 1211), 'django.utils.translation.ugettext_lazy', '_', (['"""Errors occurred while parsing the uploaded file. No centers were imported or updated."""'], {}), "('Errors occurred while parsing the uploaded file. No centers were imported or updated.'\n )\n", (1117, 1211), True, 'from django.utils.translation import override, ugettext_lazy as _\n'), ((1229, 1419), 'django.utils.translation.ugettext_lazy', '_', (['"""Error found in line {line_number}: row does not contain the exact number of columns required or it contains blank lines. The row should only have the following columns: {columns}."""'], {}), "('Error found in line {line_number}: row does not contain the exact number of columns required or it contains blank lines. The row should only have the following columns: {columns}.'\n )\n", (1230, 1419), True, 'from django.utils.translation import override, ugettext_lazy as _\n'), ((1458, 1493), 'django.utils.translation.ugettext_lazy', '_', (['"""Could not parse as a CSV file."""'], {}), "('Could not parse as a CSV file.')\n", (1459, 1493), True, 'from django.utils.translation import override, ugettext_lazy as _\n'), ((1519, 1608), 'django.utils.translation.ugettext_lazy', '_', (['"""Error in row {line_number}. Field: {field_name}. Value: {value}. Error: {error}"""'], {}), "('Error in row {line_number}. Field: {field_name}. Value: {value}. Error: {error}'\n )\n", (1520, 1608), True, 'from django.utils.translation import override, ugettext_lazy as _\n'), ((1622, 1669), 'django.utils.translation.ugettext_lazy', '_', (['"""Error in row {line_number}. Error: {error}"""'], {}), "('Error in row {line_number}. 
Error: {error}')\n", (1623, 1669), True, 'from django.utils.translation import override, ugettext_lazy as _\n'), ((1687, 1700), 'datetime.time', 'time', (['(0)', '(0)', '(0)'], {}), '(0, 0, 0)\n', (1691, 1700), False, 'from datetime import timedelta, datetime, time\n'), ((1756, 1807), 'voting.models.RegistrationPeriod.objects.in_progress', 'RegistrationPeriod.objects.in_progress', ([], {'as_of': 'as_of'}), '(as_of=as_of)\n', (1794, 1807), False, 'from voting.models import Election, RegistrationPeriod\n'), ((2421, 2432), 'functools.wraps', 'wraps', (['func'], {}), '(func)\n', (2426, 2432), False, 'from functools import wraps\n'), ((2881, 2909), 'pytz.timezone', 'timezone', (['settings.TIME_ZONE'], {}), '(settings.TIME_ZONE)\n', (2889, 2909), False, 'from pytz import timezone\n'), ((4428, 4450), 'django.core.cache.cache.get', 'cache.get', (['"""blacklist"""'], {}), "('blacklist')\n", (4437, 4450), False, 'from django.core.cache import cache\n'), ((4785, 4805), 'django.core.cache.cache.get', 'cache.get', (['cache_key'], {}), '(cache_key)\n', (4794, 4805), False, 'from django.core.cache import cache\n'), ((2854, 2871), 'datetime.timedelta', 'timedelta', ([], {'days': '(2)'}), '(days=2)\n', (2863, 2871), False, 'from datetime import timedelta, datetime, time\n'), ((2945, 2988), 'datetime.datetime.combine', 'datetime.combine', (['two_days_before', 'MIDNIGHT'], {}), '(two_days_before, MIDNIGHT)\n', (2961, 2988), False, 'from datetime import timedelta, datetime, time\n'), ((4567, 4600), 'django.core.cache.cache.set', 'cache.set', (['"""blacklist"""', 'blacklist'], {}), "('blacklist', blacklist)\n", (4576, 4600), False, 'from django.core.cache import cache\n'), ((9253, 9286), 'codecs.iterdecode', 'codecs.iterdecode', (['_file', '"""utf-8"""'], {}), "(_file, 'utf-8')\n", (9270, 9286), False, 'import codecs\n'), ((5002, 5035), 'django.core.cache.cache.set', 'cache.set', (['cache_key', 'whitelisted'], {}), '(cache_key, whitelisted)\n', (5011, 5035), False, 'from django.core.cache import cache\n'), ((10169, 10189), 'django.db.transaction.atomic', 'transaction.atomic', ([], {}), '()\n', (10187, 10189), False, 'from django.db import transaction\n'), ((3339, 3404), 'voting.models.Election.objects.get_elections_with_center_opening_enabled', 'Election.objects.get_elections_with_center_opening_enabled', (['as_of'], {}), '(as_of)\n', (3397, 3404), False, 'from voting.models import Election, RegistrationPeriod\n'), ((3688, 3755), 'voting.models.Election.objects.get_elections_with_phone_activation_enabled', 'Election.objects.get_elections_with_phone_activation_enabled', (['as_of'], {}), '(as_of)\n', (3748, 3755), False, 'from voting.models import Election, RegistrationPeriod\n'), ((4240, 4306), 'voting.models.Election.objects.get_elections_with_polling_reports_enabled', 'Election.objects.get_elections_with_polling_reports_enabled', (['as_of'], {}), '(as_of)\n', (4299, 4306), False, 'from voting.models import Election, RegistrationPeriod\n'), ((6722, 6736), 'django.utils.translation.override', 'override', (['"""ar"""'], {}), "('ar')\n", (6730, 6736), False, 'from django.utils.translation import override, ugettext_lazy as _\n'), ((10392, 10427), 'django.utils.encoding.force_text', 'force_text', (['ERRORS_OCCURRED_MESSAGE'], {}), '(ERRORS_OCCURRED_MESSAGE)\n', (10402, 10427), False, 'from django.utils.encoding import force_text\n')] |
# -*- coding: utf-8 -*-
#
# Copyright 2017-2021- Swiss Data Science Center (SDSC)
# A partnership between École Polytechnique Fédérale de Lausanne (EPFL) and
# Eidgenössische Technische Hochschule Zürich (ETHZ).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test Immutable classes."""
import pytest
from renku.core.metadata.immutable import Immutable, Slots
class A(Slots):
"""Test class."""
__slots__ = ("a_member",)
class B(A):
"""Test class."""
__slots__ = "b_member"
def __init__(self, *, b_member=None, **kwargs):
super().__init__(b_member=b_member, **kwargs)
class C(Immutable):
"""Test class."""
__slots__ = ("c_member",)
def test_instantiate():
"""Test instantiating Slots subclasses."""
b = B(a_member=42, b_member=43)
assert {"a_member": 42, "b_member": 43} == b.__getstate__()
def test_instantiate_incomplete():
"""Test instantiating Slots subclasses without setting all members."""
b = B(a_member=42)
assert {"a_member": 42, "b_member": None} == b.__getstate__()
def test_instantiate_invalid_member():
"""Test instantiating Slots subclasses and passing a non-member."""
with pytest.raises(AttributeError) as e:
B(c_member=42)
assert "object has no attribute 'c_member'" in str(e)
def test_get_all_slots():
"""Test get all slots from an Immutable subclasses."""
b = B(a_member=42, b_member=43)
_ = b.__getstate__()
assert {"b_member", "a_member", "__weakref__"} == set(B.__all_slots__)
def test_immutable_object_id():
"""Test Immutable subclasses have an `id` field."""
c = C(id=42, c_member=43)
assert {"c_member": 43, "id": 42} == c.__getstate__()
def test_cannot_mutate():
"""Test cannot mutate an Immutable subclasses."""
c = C(c_member=42)
with pytest.raises(TypeError) as e:
c.c_member = None
assert "Cannot modify an immutable class" in str(e)
assert 42 == c.c_member
def test_immutable_objects_cache():
"""Test Immutable objects are cached once created."""
data = {"id": 42, "c_member": 43}
o1 = C.make_instance(**data)
o2 = C.make_instance(**data)
assert o1 is o2
def test_immutable_objects_cache_without_id():
"""Test Immutable objects cannot be cached if id is not set."""
data = {"c_member": 43}
o1 = C.make_instance(**data)
o2 = C.make_instance(**data)
assert o1 is not o2
| [
"pytest.raises"
] | [((1678, 1707), 'pytest.raises', 'pytest.raises', (['AttributeError'], {}), '(AttributeError)\n', (1691, 1707), False, 'import pytest\n'), ((2314, 2338), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (2327, 2338), False, 'import pytest\n')] |
#Get all errors from Errors class
from errors import Errors
#Get all configurations from Configuration Class
from config import Configuration
#Import Stock Data and Operations
from stock_database import stock_data
import pandas as pd
#Select the mode of application : 1. Command Line 2. GUI
def user_menu():
operation_menu_txt = Configuration.tabulate_output("OPMENU")
try:
op_mode = int(input(operation_menu_txt+"\nSelect: "))
print("\n"+Errors.INVALID_CHOICE) if (op_mode != 1 and op_mode != 2) else run_app(op_mode)
except ValueError:
print("\n"+Errors.ONLY_NUMBERS)
#Run the application based on user choice
def run_app(app_mode):
command_line() if app_mode == 2 else start_gui()
def command_line():
display_options()
def start_gui():
print("GUI is under construction. Exiting now.")
#Display all the operations for command line
def display_options():
option = 0
disp_menu_txt = Configuration.tabulate_output("DISPOPTMENU")
while (option != 4):
try:
option = int(input("\n"+disp_menu_txt+"\nSelect: "))
perform_operation(option)
except ValueError:
print("\n"+Errors.ONLY_NUMBERS)
#Perform CLI Operations
def perform_operation(op):
if op not in range(1,5):
print("\n"+Errors.INVALID_CHOICE)
elif op == 1:
stock_data.stock_main()
elif op == 2:
report()
elif op == 3:
meta()
else:
pass
def report():
print("\nFeature Rollback")
def meta():
print("\n'https://github.com/skjoshi267/ucd_20200200_shreyankjoshi'")
| [
"stock_database.stock_data.stock_main",
"config.Configuration.tabulate_output"
] | [((334, 373), 'config.Configuration.tabulate_output', 'Configuration.tabulate_output', (['"""OPMENU"""'], {}), "('OPMENU')\n", (363, 373), False, 'from config import Configuration\n'), ((946, 990), 'config.Configuration.tabulate_output', 'Configuration.tabulate_output', (['"""DISPOPTMENU"""'], {}), "('DISPOPTMENU')\n", (975, 990), False, 'from config import Configuration\n'), ((1356, 1379), 'stock_database.stock_data.stock_main', 'stock_data.stock_main', ([], {}), '()\n', (1377, 1379), False, 'from stock_database import stock_data\n')] |
# -*- coding: utf-8 -*-
"""
INTRO
<NAME> (C)
Created on Fri Dec 14 20:40:12 2018
Aerodynamics, AE
TU Delft
"""
from screws.freeze.main import FrozenOnly
from importlib import import_module
class DomainInputAllocator(FrozenOnly):
""" We use this finder to get a `DomainInput`."""
def __init__(self, ID):
assert ID in self.___defined_DI___(), f" <DomainInputFinder> : mesh ID = {ID} is wrong."
cls_name = self.___defined_DI___()[ID]
cls_path = self.___DI_path___()[ID]
self._DomainInput_ = getattr(import_module(cls_path), cls_name)
self._freeze_self_()
def __call__(self, *args, **kwargs):
""""""
return self._DomainInput_(*args, **kwargs)
@classmethod
def ___defined_DI___(cls):
"""Here we store all defined meshComponents. Whenever we define a new meshComponents (actually, a new
domain_input), we add a nickname for it here.
"""
_dict_ = {'crazy': "Crazy",
'crazy_periodic': "CrazyPeriodic",
'bridge_arch_cracked': "BridgeArchCracked",
'psc': "Periodic_Square_Channel",
'pwc': "Parallel_Wall_Channel",
'LDC': "Lid_Driven_Cavity",
'cuboid': "Cuboid",
'cuboid_periodic': "CuboidPeriodic",
}
return _dict_
@classmethod
def ___DI_path___(cls):
""" """
base_path = '.'.join(str(cls).split(' ')[1][1:-2].split('.')[:-2]) + '.'
return {'crazy' : base_path + "crazy",
'crazy_periodic' : base_path + "crazy_periodic",
'bridge_arch_cracked': base_path + "bridge_arch_cracked",
'psc': base_path + "psc",
'pwc': base_path + "pwc",
'LDC': base_path + "LDC",
'cuboid': base_path + "cuboid",
'cuboid_periodic': base_path + "cuboid_periodic",
} | [
"importlib.import_module"
] | [((540, 563), 'importlib.import_module', 'import_module', (['cls_path'], {}), '(cls_path)\n', (553, 563), False, 'from importlib import import_module\n')] |
from conans import ConanFile, CMake, tools
import re
def parse_cmakelists(regex, from_match):
try:
cmakelists = tools.load('CMakeLists.txt')
data = from_match(re.search(regex, cmakelists))
return data.strip()
except:
return None
def cmakelists_version():
return parse_cmakelists(r'project\(.*VERSION\s+(\S*).*\)',
lambda m: m.group(1))
def cmakelists_description():
return parse_cmakelists(r'project\(.*DESCRIPTION\s+"([^"]*?)".*\)',
lambda m: m.group(1))
class NickelConanFile(ConanFile):
name = 'nickel'
version = cmakelists_version()
description = cmakelists_description()
url = 'https://github.com/Quincunx271/nickel'
license = 'BSL-1.0'
no_copy_source = True
generators = 'cmake'
build_requires = (
'Catch2/2.5.0@catchorg/stable',
'boost/1.74.0',
)
default_options = {'boost:header_only': True}
exports_sources = 'pmm.cmake', 'cmake/*', 'include/*', 'CMakeLists.txt', 'LICENSE.txt'
def _configure_cmake(self):
cmake = CMake(self)
cmake.configure(defs={
'BUILD_TESTING': False,
})
return cmake
def build(self):
cmake = self._configure_cmake()
cmake.build()
def package(self):
cmake = self._configure_cmake()
cmake.install()
self.copy('LICENSE.txt', 'licenses')
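# Worked example for the CMakeLists parsing helpers above (hypothetical file contents):
#   project(nickel VERSION 1.2.3 DESCRIPTION "A header-only library" LANGUAGES CXX)
# cmakelists_version() -> "1.2.3", cmakelists_description() -> "A header-only library";
# both helpers return None when the pattern is absent or CMakeLists.txt cannot be read.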
| [
"conans.tools.load",
"conans.CMake",
"re.search"
] | [((126, 154), 'conans.tools.load', 'tools.load', (['"""CMakeLists.txt"""'], {}), "('CMakeLists.txt')\n", (136, 154), False, 'from conans import ConanFile, CMake, tools\n'), ((1067, 1078), 'conans.CMake', 'CMake', (['self'], {}), '(self)\n', (1072, 1078), False, 'from conans import ConanFile, CMake, tools\n'), ((181, 209), 're.search', 're.search', (['regex', 'cmakelists'], {}), '(regex, cmakelists)\n', (190, 209), False, 'import re\n')] |
#!/usr/bin/env python
# coding: utf-8
# # Make the master table
import os
import sys
import pybedtools as pbt
import pandas as pd
import numpy as np
import subprocess as sp
import json
os.chdir('/mnt/BioHome/jreyna/jreyna/projects/dchallenge/')
pbt.set_bedtools_path('/mnt/BioApps/bedtools/bin/')
bgzip = '/mnt/BioApps/tabix/tabix-0.2.6/bgzip'
tabix = '/mnt/BioApps/tabix/tabix-0.2.6/tabix'
bedpe_6cols = ['chrA', 'startA', 'endA', 'chrB', 'startB', 'endB']
bedpe_10cols = ['chrA', 'startA', 'endA', 'chrB', 'startB', 'endB', 'name', 'score', 'strand1', 'strand2']
## default values for the command line
#sys.argv = [0] * 8
#sys.argv[1] = 'results/main/2021_Nikhil_eQTL/Results/Colocalization/T1D_34012112_Gaulton/'
#sys.argv[1] += 'BLUEPRINT_eQTL_Monocyte/FINAL_Summary_Coloc_Gene_SNP_Pairs.bed'
#sys.argv[2] = 'results/refs/ensembl/gencode.v19.annotation.bed'
#sys.argv[3] = 'results/main/2021_Nikhil_eQTL/Data/FitHiChIP_Loops/CM/FitHiChIP_L/FitHiChIP.interactions_FitHiC_Q0.01.bed'
#sys.argv[4] = 'results/refs/spp/SPP_D-Challenge_networks.xlsx'
#sys.argv[5] = 'results/refs/hg19/hg19.chrom.sizes'
#sys.argv[6] = 'results/main/2021_Nikhil_eQTL/Data/eqtl_sqtl_summ_stats/BLUEPRINT_eQTL/Monocyte.txt.gz'
#sys.argv[7] = 'results/main/loop_analysis/washU/'
# parsing the commandline arguments
coloc_fn = sys.argv[1]
genes_fn = sys.argv[2]
loop_fn = sys.argv[3]
spp_fn = sys.argv[4]
gs_fn = sys.argv[5]
eqtl_fn = sys.argv[6]
outdir = sys.argv[7]
# setting the output file names
os.makedirs(outdir, exist_ok=True)
# ## Load the colocalization data
# load the colocalization data
coloc = pd.read_table(coloc_fn)
# extract the most significant according the H4
coloc_sig_df = coloc[coloc['pp_H4_Coloc_Summary'] > 0.75]
coloc_sig_df.rename(columns={'pos': 'end'}, inplace=True)
coloc_sig_df.loc[:, 'start'] = coloc_sig_df.loc[:, 'end'] - 1
coloc_sig_full = coloc_sig_df.copy(deep=True)
coloc_sig_df = coloc_sig_df[['chr', 'start', 'end', 'rs_id', 'variant_id']]
coloc_sig_df = coloc_sig_df.loc[~coloc_sig_df.duplicated(subset='rs_id'),]
coloc_sig_pbt = pbt.BedTool.from_dataframe(coloc_sig_df.iloc[:, 0:4]).sort()
#csnp_slop_pbt = coloc_sig_pbt.slop(b=500000, g=gs_fn)
# ## Load the gene data
# load the gencode coords
cols = ['chrom', 'start', 'end', 'strand', 'type', 'gene_id', 'gene_name']
gencode = pd.read_table(genes_fn, header=None, names=cols)
# extract just the genes
genes_df = gencode.loc[gencode.type.isin(['gene'])]
genes_df = genes_df.loc[~genes_df.duplicated(subset='gene_id'), :]
genes_df.loc[:, 'chrom'] = genes_df['chrom'].astype(str)
genes_df = genes_df.iloc[:, [0,1,2,6,5]]
genes_pbt = pbt.BedTool.from_dataframe(genes_df).sort()
# ## Find the closest gene
closest_gene = coloc_sig_pbt.closest(genes_pbt, d=True)
closest_gene = closest_gene.to_dataframe()
closest_gene = closest_gene.iloc[:, [3,7,8,9]]
closest_gene.columns = ['rs_id', 'cls_gname', 'cls_id', 'cls_dist']
closest_gene.head()
uniq_cls_gname = closest_gene.groupby(['rs_id']).cls_gname.apply(lambda x: ','.join(x))
uniq_cls_ids = closest_gene.groupby(['rs_id']).cls_id.apply(lambda x: ','.join(x))
uniq_cls_dist = closest_gene.groupby(['rs_id']).cls_dist.apply(lambda x: ','.join([str(i) for i in x]))
uniq_cls = pd.merge(uniq_cls_gname, uniq_cls_ids, left_index=True, right_index=True)
uniq_cls = pd.merge(uniq_cls, uniq_cls_dist, left_index=True, right_index=True)
uniq_cls.reset_index(inplace=True)
# In[9]:
uniq_cls
# ## Find all genes +/- 500kb
# In[10]:
# get a list of gene names within +- 500kb of the SNPs
fivekb_gnames = coloc_sig_pbt.slop(b=500000, g=gs_fn)
fivekb_gnames = fivekb_gnames.map(genes_pbt, c=4, o='collapse')
fivekb_gnames = fivekb_gnames.to_dataframe()
fivekb_gnames = fivekb_gnames.iloc[:, [3,4]]
fivekb_gnames.columns = ['rs_id', 'gene_name']
# get a list of gene ids within +- 500kb of the SNPs
fivekb_gids = coloc_sig_pbt.slop(b=500000, g=gs_fn)
fivekb_gids = fivekb_gids.map(genes_pbt, c=5, o='collapse')
fivekb_gids = fivekb_gids.to_dataframe()
fivekb_gids = fivekb_gids.iloc[:, [3,4]]
fivekb_gids.columns = ['rs_id', 'gene_id']
# merge the two above results
fivekb_genes = fivekb_gnames.merge(fivekb_gids, on='rs_id')
fivekb_genes.columns = ['rs_id', '5kb_gname', '5kb_gid']
# In[11]:
# get eQTL's
eqtls = pd.read_table(eqtl_fn)
eqtls.columns = ['eqtl_gname', 'nvar', 'shape1', 'shape2', 'dummy',
'sid', 'dist', 'npval', 'slope', 'ppval', 'bpval', 'qval']
# ## Get the loops
# In[12]:
# load the loop data
loops = pd.read_table(loop_fn)
tmp_loops = loops[['chr1', 's1', 'e1', 'chr2', 's2', 'e2']]
tmp_loops.rename(columns={'p': 'score'}, inplace=True)
tmp_loops.loc[:, 'name'] = '.'
tmp_loops.loc[:, 'score'] = loops['p']
tmp_loops.loc[:, 'strand1'] = '.'
tmp_loops.loc[:, 'strand2'] = '.'
loops = pbt.BedTool.from_dataframe(tmp_loops)
print('FitHiChIP found {} significant loops.'.format(tmp_loops.shape[0]))
# #### Find out SNP - 5kb gene pairs with loops
# In[13]:
fivekb_genesI = coloc_sig_pbt.slop(b=500000, g=gs_fn)
fivekb_genesI = fivekb_genesI.intersect(genes_pbt, wa=True, wb=True)
fivekb_genesI = fivekb_genesI.to_dataframe()
fivekb_genesI['start'] += 500000
fivekb_genesI['end'] -= 500000
# re-arranging to fit bedpe format
fivekb_genesI = fivekb_genesI.iloc[:, [0,1,2,4,5,6,3,7,8]]
fivekb_genesI['dummy'] = 'drop'
# loading into pbt
fivekb_genesI = pbt.BedTool.from_dataframe(fivekb_genesI)
fivekb_genesI = fivekb_genesI.pair_to_pair(loops, type='both', **{'is':True})
fivekb_genesI = fivekb_genesI.to_dataframe(disable_auto_names=True, header=None)
fivekb_genesI = fivekb_genesI.iloc[:, [10, 11, 12, 13, 14, 15,6,7,8,17]]
fivekb_genesI.columns = bedpe_6cols + ['rs_id', 'gname', 'gid', 'fithic_qval']
# #### Find out SNP - eQTL gene pairs with loops
# ## Generate the master table
master = coloc_sig_full.copy()
# add sid which is the chr:position of the SNP
master['sid'] = master['chr'].str.replace('chr', '') + ':' + master['end'].astype(str)
# add the closest gene
master = master.merge(uniq_cls, on='rs_id', how='left')
# add the +/- fivekb genes
master = master.merge(fivekb_genes, on='rs_id', how='left')
# add the eQTL data
eqtl_genes = master.merge(eqtls[['sid', 'eqtl_gname']], on='sid')
eqtl_genes = eqtl_genes.groupby('rs_id').eqtl_gname.unique()
eqtl_genes = eqtl_genes.apply(lambda x: ','.join(x))
master = master.merge(eqtl_genes, on='rs_id', how='left')
new_colnames = [
'rs_id',
'variant_id',
'chr',
'start',
'end',
'geneName',
'eqtl_gname',
'cls_gname',
'5kb_gname',
'pp_H0_Coloc_Summary',
'pp_H1_Coloc_Summary',
'pp_H2_Coloc_Summary',
'pp_H3_Coloc_Summary',
'pp_H4_Coloc_Summary',
'dist',
'pvalue',
'FDR',
'slope_snp',
'ref',
'alt',
'AC',
'AF',
'AN',
'slope_se_snp',
'slope_gwas',
'slope_se_gwas',
'pval_nominal',
'SampleSize',
'sid',
'cls_id',
'cls_dist',
'5kb_gid'
]
master = master.loc[:, new_colnames]
master.rename(columns={'geneName': 'coloc_gname',
'end': 'pos',
'eqtl_gname': 'eqtl_gnames',
'cls_gname': 'cls_gnames',
'cls_id': 'cls_ids'}, inplace=True)
master.drop(['start'], axis=1, inplace=True)
sg_with_loops = fivekb_genesI[['rs_id', 'gname']].values.tolist()
sg_with_loops = set([tuple(x) for x in sg_with_loops])
coloc_loops = []
eqtl_loops = []
closest_loops = []
fivekb_loops = []
for i, sr in master.iterrows():
# analyze colocs
gene = sr.coloc_gname
bools = 0
if (sr.rs_id, gene) in sg_with_loops:
bools = 1
coloc_loops.append(bools)
# analyze qtls
bools = []
for gene in sr.eqtl_gnames.split(','):
eqtl_bool = 0
if (sr.rs_id, gene) in sg_with_loops:
eqtl_bool = 1
bools.append(eqtl_bool)
eqtl_loops.append(bools)
# analyze closest genes
bools = []
for gene in sr.cls_gnames.split(','):
cls_bool = 0
if (sr.rs_id, gene) in sg_with_loops:
cls_bool = 1
bools.append(cls_bool)
closest_loops.append(bools)
# analyze closest genes
bools = []
for gene in sr['5kb_gname'].split(','):
cls_bool = 0
if (sr.rs_id, gene) in sg_with_loops:
cls_bool = 1
bools.append(cls_bool)
fivekb_loops.append(bools)
master['coloc_loops'] = coloc_loops
master['eqtl_loops'] = eqtl_loops
master['closest_loops'] = closest_loops
master['fivekb_loops'] = fivekb_loops
master['fivekb_loops'].iloc[2]
fn = os.path.join(outdir, 'master.tsv')
master.to_csv(fn, sep='\t', header=True, index=False)
fn = os.path.join(outdir, 'master.xlsx')
excel_master = master.sort_values('rs_id').set_index('rs_id')
excel_master.to_excel(fn)
| [
"pybedtools.BedTool.from_dataframe",
"os.makedirs",
"pybedtools.set_bedtools_path",
"pandas.merge",
"os.path.join",
"os.chdir",
"pandas.read_table"
] | [((188, 247), 'os.chdir', 'os.chdir', (['"""/mnt/BioHome/jreyna/jreyna/projects/dchallenge/"""'], {}), "('/mnt/BioHome/jreyna/jreyna/projects/dchallenge/')\n", (196, 247), False, 'import os\n'), ((248, 299), 'pybedtools.set_bedtools_path', 'pbt.set_bedtools_path', (['"""/mnt/BioApps/bedtools/bin/"""'], {}), "('/mnt/BioApps/bedtools/bin/')\n", (269, 299), True, 'import pybedtools as pbt\n'), ((1484, 1518), 'os.makedirs', 'os.makedirs', (['outdir'], {'exist_ok': '(True)'}), '(outdir, exist_ok=True)\n', (1495, 1518), False, 'import os\n'), ((1594, 1617), 'pandas.read_table', 'pd.read_table', (['coloc_fn'], {}), '(coloc_fn)\n', (1607, 1617), True, 'import pandas as pd\n'), ((2314, 2362), 'pandas.read_table', 'pd.read_table', (['genes_fn'], {'header': 'None', 'names': 'cols'}), '(genes_fn, header=None, names=cols)\n', (2327, 2362), True, 'import pandas as pd\n'), ((3212, 3285), 'pandas.merge', 'pd.merge', (['uniq_cls_gname', 'uniq_cls_ids'], {'left_index': '(True)', 'right_index': '(True)'}), '(uniq_cls_gname, uniq_cls_ids, left_index=True, right_index=True)\n', (3220, 3285), True, 'import pandas as pd\n'), ((3297, 3365), 'pandas.merge', 'pd.merge', (['uniq_cls', 'uniq_cls_dist'], {'left_index': '(True)', 'right_index': '(True)'}), '(uniq_cls, uniq_cls_dist, left_index=True, right_index=True)\n', (3305, 3365), True, 'import pandas as pd\n'), ((4252, 4274), 'pandas.read_table', 'pd.read_table', (['eqtl_fn'], {}), '(eqtl_fn)\n', (4265, 4274), True, 'import pandas as pd\n'), ((4482, 4504), 'pandas.read_table', 'pd.read_table', (['loop_fn'], {}), '(loop_fn)\n', (4495, 4504), True, 'import pandas as pd\n'), ((4766, 4803), 'pybedtools.BedTool.from_dataframe', 'pbt.BedTool.from_dataframe', (['tmp_loops'], {}), '(tmp_loops)\n', (4792, 4803), True, 'import pybedtools as pbt\n'), ((5336, 5377), 'pybedtools.BedTool.from_dataframe', 'pbt.BedTool.from_dataframe', (['fivekb_genesI'], {}), '(fivekb_genesI)\n', (5362, 5377), True, 'import pybedtools as pbt\n'), ((8489, 8523), 'os.path.join', 'os.path.join', (['outdir', '"""master.tsv"""'], {}), "(outdir, 'master.tsv')\n", (8501, 8523), False, 'import os\n'), ((8584, 8619), 'os.path.join', 'os.path.join', (['outdir', '"""master.xlsx"""'], {}), "(outdir, 'master.xlsx')\n", (8596, 8619), False, 'import os\n'), ((2060, 2113), 'pybedtools.BedTool.from_dataframe', 'pbt.BedTool.from_dataframe', (['coloc_sig_df.iloc[:, 0:4]'], {}), '(coloc_sig_df.iloc[:, 0:4])\n', (2086, 2113), True, 'import pybedtools as pbt\n'), ((2618, 2654), 'pybedtools.BedTool.from_dataframe', 'pbt.BedTool.from_dataframe', (['genes_df'], {}), '(genes_df)\n', (2644, 2654), True, 'import pybedtools as pbt\n')] |
import time
import search_engine.utils as utils
import multiprocessing
from search_engine import seq_list_numeric
from search_engine import seq_list
from .math_exp_eval_engine import NumericStringParser
"""
This file contains the implementations for formula lookup algorithm
"""
return_dic: dict
progress: multiprocessing.Value
lock: multiprocessing.Manager().Lock
_NUMBER_OF_THREADS = 8
def is_expression_correct(exp_input: str):
"""
    A function to validate the syntax of the input that represents the terms lookup formula
exp_input: The terms lookup formula.
returns: True if the syntax is valid, False otherwise.
"""
try:
nsp = NumericStringParser()
# Parse terms and trim spaces
exp_input = exp_input.lower().split(',')
exp_input = list(map(lambda term_of_sequence: str(term_of_sequence).strip(), exp_input))
# Calculate and Compare
for i in range(len(exp_input)):
term = exp_input[i].replace('n', '1000')
            nsp.eval(term)  # raises if the term cannot be parsed/evaluated
        return True
except:
return False
def formula_lookup_terms_by_terms(nsp: NumericStringParser, exp_input: list, sequence: list, n_index: int):
"""
    This method will search for sequences using the terms lookup formula (n - 10, n, n * 10, n * 20)
nsp: Instance of NumericStringParser
exp_input: The terms lookup formula.
sequence: A sequence that must have the same length of terms as the terms lookup formula
n_index: The index of the term the represents n
returns: True if matched, False otherwise.
"""
n = sequence[n_index]
# Calculate and Compare
for i in range(len(exp_input)):
term = exp_input[i].replace('n', str(n))
if nsp.eval(term) != sequence[i]:
return False
return True
def parse_expression(exp_input: str):
exp_input = exp_input.lower().split(',')
exp_input = list(map(lambda x: str(x).strip(), exp_input))
return exp_input
def get_index_of_term_n(exp_input: list):
for i in range(len(exp_input)):
if exp_input[i] == 'n':
return i
def formula_lookup_linear_search(exp_input: str, range_list):
"""
    This method will search for sequences using the terms lookup formula (n - 10, n, n * 10, n * 20)
    exp_input: The terms lookup formula.
    range_list: The indices of seq_list_numeric to search over (e.g. one chunk produced by range_split).
returns: A dictionary where the key represents the sequence and the value represents terms index where the
formula has matched.
"""
# Parse terms and trim spaces
global progress
global return_dic
global lock
if not ('lock' in vars() or 'lock' in globals()):
        # Make it work without a direct call from formula_lookup
lock = multiprocessing.Manager().Lock()
progress = multiprocessing.Value('i', 0)
return_dic = multiprocessing.Manager().dict()
exp_input = parse_expression(exp_input)
n_index = get_index_of_term_n(exp_input)
number_of_terms = len(exp_input)
nsp = NumericStringParser()
# Iterate over sequences
for i in range_list:
seq = list(seq_list_numeric[i])
with lock:
progress.value += 1
# Iterate over terms one by one
for d in range(len(seq) - number_of_terms):
seq_cut = seq[d:(d + number_of_terms)]
if formula_lookup_terms_by_terms(nsp, exp_input, seq_cut, n_index):
return_dic[seq_list[i]] = d
break
return return_dic
def range_split(range_to_split, number_of_groups):
k, m = divmod(len(range_to_split), number_of_groups)
return (range_to_split[i * k + min(i, m):(i + 1) * k + min(i + 1, m)] for i in range(number_of_groups))
def formula_lookup(exp_input: str):
"""
    This method will search for sequences using the terms lookup formula (n - 10, n, n * 10, n * 20)
exp_input: The terms lookup formula.
returns: A dictionary where the key represents the sequence and the value represents terms index where the
formula has matched.
"""
global return_dic
global progress
global lock
lock = multiprocessing.Manager().Lock()
progress = multiprocessing.Value('i', 0)
return_dic = multiprocessing.Manager().dict()
if not is_expression_correct(exp_input):
raise ValueError("The expression syntax is wrong!")
# Divide Range
process_list = []
number_of_sequences = len(seq_list_numeric)
indices_range = list(range_split(range(number_of_sequences), _NUMBER_OF_THREADS))
# Start Processes
for i in range(_NUMBER_OF_THREADS):
proc = multiprocessing.Process(target=formula_lookup_linear_search, args=(exp_input, indices_range[i]))
proc.start()
process_list.append(proc)
# Wait and Echo
index = 0
while progress.value != number_of_sequences:
utils.waiting_with_index(index, progress.value, number_of_sequences)
index += 1
time.sleep(0.25)
print("")
# Join
for process in process_list:
process.join()
# Return
return return_dic
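# Illustrative usage (not in the original module). The expression follows the documented
# term-formula pattern; a real run also needs the seq_list / seq_list_numeric data that
# the search_engine package loads, and the __main__ guard matters because formula_lookup
# spawns worker processes.
if __name__ == "__main__":
    expression = "n - 10, n, n * 10, n * 20"
    if is_expression_correct(expression):
        matches = formula_lookup(expression)
        print("Matched {} sequences".format(len(matches)))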
| [
"multiprocessing.Process",
"multiprocessing.Value",
"time.sleep",
"multiprocessing.Manager",
"search_engine.utils.waiting_with_index"
] | [((337, 362), 'multiprocessing.Manager', 'multiprocessing.Manager', ([], {}), '()\n', (360, 362), False, 'import multiprocessing\n'), ((4358, 4387), 'multiprocessing.Value', 'multiprocessing.Value', (['"""i"""', '(0)'], {}), "('i', 0)\n", (4379, 4387), False, 'import multiprocessing\n'), ((2987, 3016), 'multiprocessing.Value', 'multiprocessing.Value', (['"""i"""', '(0)'], {}), "('i', 0)\n", (3008, 3016), False, 'import multiprocessing\n'), ((4798, 4899), 'multiprocessing.Process', 'multiprocessing.Process', ([], {'target': 'formula_lookup_linear_search', 'args': '(exp_input, indices_range[i])'}), '(target=formula_lookup_linear_search, args=(\n exp_input, indices_range[i]))\n', (4821, 4899), False, 'import multiprocessing\n'), ((5042, 5110), 'search_engine.utils.waiting_with_index', 'utils.waiting_with_index', (['index', 'progress.value', 'number_of_sequences'], {}), '(index, progress.value, number_of_sequences)\n', (5066, 5110), True, 'import search_engine.utils as utils\n'), ((5138, 5154), 'time.sleep', 'time.sleep', (['(0.25)'], {}), '(0.25)\n', (5148, 5154), False, 'import time\n'), ((4310, 4335), 'multiprocessing.Manager', 'multiprocessing.Manager', ([], {}), '()\n', (4333, 4335), False, 'import multiprocessing\n'), ((4405, 4430), 'multiprocessing.Manager', 'multiprocessing.Manager', ([], {}), '()\n', (4428, 4430), False, 'import multiprocessing\n'), ((2935, 2960), 'multiprocessing.Manager', 'multiprocessing.Manager', ([], {}), '()\n', (2958, 2960), False, 'import multiprocessing\n'), ((3038, 3063), 'multiprocessing.Manager', 'multiprocessing.Manager', ([], {}), '()\n', (3061, 3063), False, 'import multiprocessing\n')] |
# coding: utf8
import os
import torch
import torch.nn as nn
import torch.optim as optim
from tqdm import tqdm_notebook
from argparse import Namespace
from examples.commons import helper
from examples.surname_classification_with_cnn.cnn import CNN
from examples.surname_classification_with_cnn.surname_dataset import SurnameDataset
args = Namespace(
# Data and path information
surname_csv="../../data/surnames/output_munged.csv",
vectorizer_file="vectorizer.json",
model_state_file="model.pth",
save_dir="../../model_storage/surnames_classification/cnn",
# Model hyper parameters
hidden_dim=100,
num_channels=256,
# Training hyper parameters
seed=7676,
num_epochs=100,
early_stopping_criteria=5,
learning_rate=0.001,
batch_size=128,
dropout_probability=0.1,
# Runtime options
cuda=False,
reload_from_files=False,
expand_filepaths_to_save_dir=True,
catch_keyboard_interrupt=True
)
def predict_nationality(surname, classifier, vectorizer):
vectorized_surname = vectorizer.vectorize(surname)
vectorized_surname = torch.tensor(vectorized_surname).view(1, -1)
result = classifier(vectorized_surname, apply_softmax=True)
probability_value, index = result.max(dim=1)
index = index.item()
predicted_nationality = vectorizer.nationality_vocab.lookup_index(index)
probability_value = probability_value.item()
return {"nationality": predicted_nationality,
"probability": probability_value}
def predict_topk_nationalities(surname, classifier, vectorizer, k=5):
vectorized_surname = vectorizer.vectorize(surname)
vectorized_surname = torch.tensor(vectorized_surname).view(1, -1)
prediction_vector = classifier(vectorized_surname, apply_softmax=True)
probability_values, indices = torch.topk(prediction_vector, k=k)
# returned size is 1, k
probability_values = probability_values.detach().numpy()[0]
indices = indices.detach().numpy()[0]
results = []
for prob_value, index in zip(probability_values, indices):
nationality = vectorizer.nationality_vocab.lookup_index(index)
results.append({"nationality": nationality,
"probability": prob_value})
return results
if args.expand_filepaths_to_save_dir:
args.vectorizer_file = os.path.join(args.save_dir, args.vectorizer_file)
args.model_state_file = os.path.join(args.save_dir, args.model_state_file)
print("Expanded filepaths:")
print("\t{}".format(args.vectorizer_file))
print("\t{}".format(args.model_state_file))
# Check CUDA
if not torch.cuda.is_available():
args.cuda = False
args.device = torch.device("cuda" if args.cuda else "cpu")
print("Using CUDA: {}".format(args.cuda))
# Set seed for reproducibility
helper.set_seed_everywhere(args.seed, args.cuda)
# handle dirs
helper.handle_dirs(args.save_dir)
# Initializations
if args.reload_from_files:
# training from a checkpoint
print("Loading dataset and vectorizer")
dataset = SurnameDataset.load_dataset_and_load_vectorizer(
args.surname_csv, args.vectorizer_file)
else:
print("Loading dataset and creating vectorizer")
# create dataset and vectorizer
dataset = SurnameDataset.load_dataset_and_make_vectorizer(args.surname_csv)
dataset.save_vectorizer(args.vectorizer_file)
vectorizer = dataset.get_vectorizer()
# model
classifier = CNN(initial_num_channels=len(vectorizer.character_vocab),
num_classes=len(vectorizer.nationality_vocab),
num_channels=args.num_channels)
classifier = classifier.to(args.device)
dataset.class_weights = dataset.class_weights.to(args.device)
# loss and optimizer
loss_func = nn.CrossEntropyLoss(dataset.class_weights)
optimizer = optim.Adam(classifier.parameters(), lr=args.learning_rate)
scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer=optimizer,
mode="min",
factor=0.5,
patience=1)
# Training loop
train_state = helper.make_train_state(args)
epoch_bar = tqdm_notebook(desc="training routine",
total=args.num_epochs,
position=0)
dataset.set_split("train")
train_bar = tqdm_notebook(desc="split=train",
total=dataset.get_num_batches(args.batch_size),
position=1,
leave=True)
dataset.set_split("val")
val_bar = tqdm_notebook(desc="split=val",
total=dataset.get_num_batches(args.batch_size),
position=1,
leave=True)
try:
for epoch_index in range(args.num_epochs):
train_state["epoch_index"] = epoch_index
#
# Iterate over training dataset
#
# setup: batch generator, set loss and acc to 0, set train mode on
dataset.set_split("train")
batch_generator = helper.generate_batches(dataset,
batch_size=args.batch_size,
device=args.device)
running_loss = 0.0
running_acc = 0.0
classifier.train()
for batch_index, batch_dict in enumerate(batch_generator):
# the training routine is 5 steps:
# step 1. zero the gradients
optimizer.zero_grad()
# step 2. compute the output
y_pred = classifier(x_surname=batch_dict["x_surname"])
# step 3. compute the loss
loss = loss_func(y_pred, batch_dict["y_nationality"])
loss_batch = loss.item()
running_loss += (loss_batch - running_loss) / (batch_index + 1)
# step 4. use loss to produce gradients
loss.backward()
# step 5. use optimizer to take gradient step
optimizer.step()
# -------------------------------------------
# compute the accuracy
acc_batch = helper.compute_accuracy(y_pred, batch_dict["y_nationality"])
running_acc += (acc_batch - running_acc) / (batch_index + 1)
# update bar
train_bar.set_postfix(loss=running_loss,
acc=running_acc,
epoch=epoch_index)
train_bar.update()
train_state["train_loss"].append(running_loss)
train_state["train_acc"].append(running_acc)
#
# Iterate over val dataset
#
# setup: batch generator, set loss and acc to 0, set eval mode on
dataset.set_split('val')
batch_generator = helper.generate_batches(dataset,
batch_size=args.batch_size,
device=args.device)
running_loss = 0.0
running_acc = 0.0
classifier.eval()
for batch_index, batch_dict in enumerate(batch_generator):
# step 1. compute the output
y_pred = classifier(x_surname=batch_dict["x_surname"])
# step 2. compute the loss
loss = loss_func(y_pred, batch_dict["y_nationality"])
loss_batch = loss.item()
running_loss += (loss_batch - running_loss) / (batch_index + 1)
# step 3. compute the accuracy
acc_batch = helper.compute_accuracy(y_pred, batch_dict["y_nationality"])
running_acc += (acc_batch - running_acc) / (batch_index + 1)
# update val_bar
val_bar.set_postfix(loss=running_loss,
acc=running_acc,
epoch=epoch_index)
val_bar.update()
train_state["val_loss"].append(running_loss)
train_state["val_acc"].append(running_acc)
train_state = helper.update_train_state(args=args,
model=classifier,
train_state=train_state)
scheduler.step(train_state["val_loss"][-1])
if train_state["stop_early"]:
break
train_bar.n = 0
val_bar.n = 0
epoch_bar.update()
print(f"Epoch {epoch_index + 1}")
except KeyboardInterrupt:
print("Exiting loop")
# Test
classifier.load_state_dict(torch.load(train_state["model_filename"]))
classifier = classifier.to(args.device)
dataset.set_split("test")
batch_generator = helper.generate_batches(dataset,
batch_size=args.batch_size,
device=args.device)
running_loss = 0.0
running_acc = 0.0
classifier.eval()
for batch_index, batch_dict in enumerate(batch_generator):
# compute the output
y_pred = classifier(x_surname=batch_dict["x_surname"])
# compute the loss
loss = loss_func(y_pred, batch_dict["y_nationality"])
loss_batch = loss.item()
running_loss += (loss_batch - running_loss) / (batch_index + 1)
# compute the accuracy
acc_batch = helper.compute_accuracy(y_pred, batch_dict["y_nationality"])
running_acc += (acc_batch - running_acc) / (batch_index + 1)
train_state["test_loss"] = running_loss
train_state["test_acc"] = running_acc
print(f"Test loss: {train_state['test_loss']}")
print(f"Test acc: {train_state['test_acc']}")
# Inference
new_surname = input("Enter a surname to classify: ")
classifier = classifier.to("cpu")
prediction = predict_nationality(new_surname, classifier, vectorizer)
print("{} -> {} (p={:0.2f})".format(new_surname,
prediction["nationality"],
prediction["probability"]))
# Top-K Inference
# new_surname = input("Enter a surname to classify: ")
# classifier = classifier.to("cpu")
k = int(input("How many of the top predictions to see? "))
if k > len(vectorizer.nationality_vocab):
print("Sorry! That's more than the # of nationalities we have.. defaulting you to max size :)")
k = len(vectorizer.nationality_vocab)
predictions = predict_topk_nationalities(new_surname, classifier, vectorizer, k=k)
print("Top {} predictions: ".format(k))
print("==============================")
for prediction in predictions:
print("{} -> {} (p={:0.2f})".format(new_surname,
prediction["nationality"],
prediction["probability"]))
| [
"torch.optim.lr_scheduler.ReduceLROnPlateau",
"torch.nn.CrossEntropyLoss",
"torch.device",
"tqdm.tqdm_notebook",
"examples.commons.helper.handle_dirs",
"examples.commons.helper.generate_batches",
"torch.topk",
"torch.load",
"os.path.join",
"examples.commons.helper.update_train_state",
"examples.surname_classification_with_cnn.surname_dataset.SurnameDataset.load_dataset_and_make_vectorizer",
"torch.tensor",
"torch.cuda.is_available",
"argparse.Namespace",
"examples.commons.helper.make_train_state",
"examples.surname_classification_with_cnn.surname_dataset.SurnameDataset.load_dataset_and_load_vectorizer",
"examples.commons.helper.compute_accuracy",
"examples.commons.helper.set_seed_everywhere"
] | [((341, 806), 'argparse.Namespace', 'Namespace', ([], {'surname_csv': '"""../../data/surnames/output_munged.csv"""', 'vectorizer_file': '"""vectorizer.json"""', 'model_state_file': '"""model.pth"""', 'save_dir': '"""../../model_storage/surnames_classification/cnn"""', 'hidden_dim': '(100)', 'num_channels': '(256)', 'seed': '(7676)', 'num_epochs': '(100)', 'early_stopping_criteria': '(5)', 'learning_rate': '(0.001)', 'batch_size': '(128)', 'dropout_probability': '(0.1)', 'cuda': '(False)', 'reload_from_files': '(False)', 'expand_filepaths_to_save_dir': '(True)', 'catch_keyboard_interrupt': '(True)'}), "(surname_csv='../../data/surnames/output_munged.csv',\n vectorizer_file='vectorizer.json', model_state_file='model.pth',\n save_dir='../../model_storage/surnames_classification/cnn', hidden_dim=\n 100, num_channels=256, seed=7676, num_epochs=100,\n early_stopping_criteria=5, learning_rate=0.001, batch_size=128,\n dropout_probability=0.1, cuda=False, reload_from_files=False,\n expand_filepaths_to_save_dir=True, catch_keyboard_interrupt=True)\n", (350, 806), False, 'from argparse import Namespace\n'), ((2673, 2717), 'torch.device', 'torch.device', (["('cuda' if args.cuda else 'cpu')"], {}), "('cuda' if args.cuda else 'cpu')\n", (2685, 2717), False, 'import torch\n'), ((2792, 2840), 'examples.commons.helper.set_seed_everywhere', 'helper.set_seed_everywhere', (['args.seed', 'args.cuda'], {}), '(args.seed, args.cuda)\n', (2818, 2840), False, 'from examples.commons import helper\n'), ((2856, 2889), 'examples.commons.helper.handle_dirs', 'helper.handle_dirs', (['args.save_dir'], {}), '(args.save_dir)\n', (2874, 2889), False, 'from examples.commons import helper\n'), ((3717, 3759), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', (['dataset.class_weights'], {}), '(dataset.class_weights)\n', (3736, 3759), True, 'import torch.nn as nn\n'), ((3843, 3940), 'torch.optim.lr_scheduler.ReduceLROnPlateau', 'optim.lr_scheduler.ReduceLROnPlateau', ([], {'optimizer': 'optimizer', 'mode': '"""min"""', 'factor': '(0.5)', 'patience': '(1)'}), "(optimizer=optimizer, mode='min',\n factor=0.5, patience=1)\n", (3879, 3940), True, 'import torch.optim as optim\n'), ((4116, 4145), 'examples.commons.helper.make_train_state', 'helper.make_train_state', (['args'], {}), '(args)\n', (4139, 4145), False, 'from examples.commons import helper\n'), ((4159, 4232), 'tqdm.tqdm_notebook', 'tqdm_notebook', ([], {'desc': '"""training routine"""', 'total': 'args.num_epochs', 'position': '(0)'}), "(desc='training routine', total=args.num_epochs, position=0)\n", (4172, 4232), False, 'from tqdm import tqdm_notebook\n'), ((8549, 8634), 'examples.commons.helper.generate_batches', 'helper.generate_batches', (['dataset'], {'batch_size': 'args.batch_size', 'device': 'args.device'}), '(dataset, batch_size=args.batch_size, device=args.device\n )\n', (8572, 8634), False, 'from examples.commons import helper\n'), ((1817, 1851), 'torch.topk', 'torch.topk', (['prediction_vector'], {'k': 'k'}), '(prediction_vector, k=k)\n', (1827, 1851), False, 'import torch\n'), ((2330, 2379), 'os.path.join', 'os.path.join', (['args.save_dir', 'args.vectorizer_file'], {}), '(args.save_dir, args.vectorizer_file)\n', (2342, 2379), False, 'import os\n'), ((2409, 2459), 'os.path.join', 'os.path.join', (['args.save_dir', 'args.model_state_file'], {}), '(args.save_dir, args.model_state_file)\n', (2421, 2459), False, 'import os\n'), ((2610, 2635), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2633, 2635), False, 'import torch\n'), ((3028, 
3120), 'examples.surname_classification_with_cnn.surname_dataset.SurnameDataset.load_dataset_and_load_vectorizer', 'SurnameDataset.load_dataset_and_load_vectorizer', (['args.surname_csv', 'args.vectorizer_file'], {}), '(args.surname_csv, args.\n vectorizer_file)\n', (3075, 3120), False, 'from examples.surname_classification_with_cnn.surname_dataset import SurnameDataset\n'), ((3234, 3299), 'examples.surname_classification_with_cnn.surname_dataset.SurnameDataset.load_dataset_and_make_vectorizer', 'SurnameDataset.load_dataset_and_make_vectorizer', (['args.surname_csv'], {}), '(args.surname_csv)\n', (3281, 3299), False, 'from examples.surname_classification_with_cnn.surname_dataset import SurnameDataset\n'), ((8421, 8462), 'torch.load', 'torch.load', (["train_state['model_filename']"], {}), "(train_state['model_filename'])\n", (8431, 8462), False, 'import torch\n'), ((9137, 9197), 'examples.commons.helper.compute_accuracy', 'helper.compute_accuracy', (['y_pred', "batch_dict['y_nationality']"], {}), "(y_pred, batch_dict['y_nationality'])\n", (9160, 9197), False, 'from examples.commons import helper\n'), ((5020, 5105), 'examples.commons.helper.generate_batches', 'helper.generate_batches', (['dataset'], {'batch_size': 'args.batch_size', 'device': 'args.device'}), '(dataset, batch_size=args.batch_size, device=args.device\n )\n', (5043, 5105), False, 'from examples.commons import helper\n'), ((6735, 6820), 'examples.commons.helper.generate_batches', 'helper.generate_batches', (['dataset'], {'batch_size': 'args.batch_size', 'device': 'args.device'}), '(dataset, batch_size=args.batch_size, device=args.device\n )\n', (6758, 6820), False, 'from examples.commons import helper\n'), ((7930, 8009), 'examples.commons.helper.update_train_state', 'helper.update_train_state', ([], {'args': 'args', 'model': 'classifier', 'train_state': 'train_state'}), '(args=args, model=classifier, train_state=train_state)\n', (7955, 8009), False, 'from examples.commons import helper\n'), ((1103, 1135), 'torch.tensor', 'torch.tensor', (['vectorized_surname'], {}), '(vectorized_surname)\n', (1115, 1135), False, 'import torch\n'), ((1663, 1695), 'torch.tensor', 'torch.tensor', (['vectorized_surname'], {}), '(vectorized_surname)\n', (1675, 1695), False, 'import torch\n'), ((6087, 6147), 'examples.commons.helper.compute_accuracy', 'helper.compute_accuracy', (['y_pred', "batch_dict['y_nationality']"], {}), "(y_pred, batch_dict['y_nationality'])\n", (6110, 6147), False, 'from examples.commons import helper\n'), ((7458, 7518), 'examples.commons.helper.compute_accuracy', 'helper.compute_accuracy', (['y_pred', "batch_dict['y_nationality']"], {}), "(y_pred, batch_dict['y_nationality'])\n", (7481, 7518), False, 'from examples.commons import helper\n')] |
from rest_framework import serializers
from companies.serializers import (
CenterShortSerializer, CompanyShortSerializer, DivisionShortSerializer, EmployeeShortSerializer
)
from contacts.serializers import EmailSerializer, PhoneSerializer
from .models import Employee
class EmployeeSerializer(serializers.ModelSerializer):
firstname = serializers.StringRelatedField()
patronymic = serializers.StringRelatedField()
surname = serializers.StringRelatedField()
position = serializers.StringRelatedField()
company = CompanyShortSerializer()
center = CenterShortSerializer()
division = DivisionShortSerializer()
secretaries = EmployeeShortSerializer(many=True)
phones = PhoneSerializer(many=True)
emails = EmailSerializer(many=True)
class Meta:
model = Employee
fields = ('id', 'firstname', 'patronymic', 'surname', 'position',
'company', 'center', 'division', 'place', 'is_retired',
'secretaries', 'phones', 'emails', 'birthday', 'comment')
| [
"contacts.serializers.PhoneSerializer",
"companies.serializers.EmployeeShortSerializer",
"rest_framework.serializers.StringRelatedField",
"contacts.serializers.EmailSerializer",
"companies.serializers.CompanyShortSerializer",
"companies.serializers.DivisionShortSerializer",
"companies.serializers.CenterShortSerializer"
] | [((347, 379), 'rest_framework.serializers.StringRelatedField', 'serializers.StringRelatedField', ([], {}), '()\n', (377, 379), False, 'from rest_framework import serializers\n'), ((397, 429), 'rest_framework.serializers.StringRelatedField', 'serializers.StringRelatedField', ([], {}), '()\n', (427, 429), False, 'from rest_framework import serializers\n'), ((444, 476), 'rest_framework.serializers.StringRelatedField', 'serializers.StringRelatedField', ([], {}), '()\n', (474, 476), False, 'from rest_framework import serializers\n'), ((492, 524), 'rest_framework.serializers.StringRelatedField', 'serializers.StringRelatedField', ([], {}), '()\n', (522, 524), False, 'from rest_framework import serializers\n'), ((539, 563), 'companies.serializers.CompanyShortSerializer', 'CompanyShortSerializer', ([], {}), '()\n', (561, 563), False, 'from companies.serializers import CenterShortSerializer, CompanyShortSerializer, DivisionShortSerializer, EmployeeShortSerializer\n'), ((577, 600), 'companies.serializers.CenterShortSerializer', 'CenterShortSerializer', ([], {}), '()\n', (598, 600), False, 'from companies.serializers import CenterShortSerializer, CompanyShortSerializer, DivisionShortSerializer, EmployeeShortSerializer\n'), ((616, 641), 'companies.serializers.DivisionShortSerializer', 'DivisionShortSerializer', ([], {}), '()\n', (639, 641), False, 'from companies.serializers import CenterShortSerializer, CompanyShortSerializer, DivisionShortSerializer, EmployeeShortSerializer\n'), ((660, 694), 'companies.serializers.EmployeeShortSerializer', 'EmployeeShortSerializer', ([], {'many': '(True)'}), '(many=True)\n', (683, 694), False, 'from companies.serializers import CenterShortSerializer, CompanyShortSerializer, DivisionShortSerializer, EmployeeShortSerializer\n'), ((708, 734), 'contacts.serializers.PhoneSerializer', 'PhoneSerializer', ([], {'many': '(True)'}), '(many=True)\n', (723, 734), False, 'from contacts.serializers import EmailSerializer, PhoneSerializer\n'), ((748, 774), 'contacts.serializers.EmailSerializer', 'EmailSerializer', ([], {'many': '(True)'}), '(many=True)\n', (763, 774), False, 'from contacts.serializers import EmailSerializer, PhoneSerializer\n')] |
import pdb
import math
DEC_BASE = 10
def is_lucky(num):
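    # "lucky" here means every decimal digit of num appears at most once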
l = [0] * DEC_BASE
while num > 0:
q = num % 10
if l[q] > 0:
return False
l[q] += 1
num //= 10
return True
# print(is_lucky(123456)) # * True
# print(is_lucky(375746)) # * False
################################################################################
def is_prime(num):
    if num < 2:
        return False
    for ele in range(2, math.floor(math.sqrt(num)) + 1):
        if num % ele == 0:
            return False
    return True
# print(is_prime(13))
# * T.C. = O(sqrt(n))
################################################################################
def print_prime(n):
prime = [True] * (n + 1)
prime[0] = False
prime[1] = False
for ele in range(2, n+1):
if prime[ele]:
temp = ele * 2
while temp < n+1:
prime[temp] = False
temp += ele
    for ele in range(n + 1):
if prime[ele]:
print(ele)
print_prime(99)
# * T.C. = < O(n * sqrt(n))
################################################################################
| [
"math.sqrt"
] | [((433, 447), 'math.sqrt', 'math.sqrt', (['num'], {}), '(num)\n', (442, 447), False, 'import math\n')] |
# import pytest, sys
from helpers_for_tests import reset_queries, run_args_on_parser as runargs
# sys.path.insert(1, './backup')
# from parser import create_parser
def test_no_args():
result = runargs([])
assert "No arguments were provided." in result.err
def test_check_if_enter_something_other_than_config_add_update_remove_run():
# with pytest.raises(SystemExit):
# parser.parse_args(['foo'])
# out, err = capfd.readouterr()
result = runargs(["foo"])
assert "invalid choice: 'foo'" in result.err
# assert "error: argument command: invalid choice: 'foo'" in out | [
"helpers_for_tests.run_args_on_parser"
] | [((197, 208), 'helpers_for_tests.run_args_on_parser', 'runargs', (['[]'], {}), '([])\n', (204, 208), True, 'from helpers_for_tests import reset_queries, run_args_on_parser as runargs\n'), ((456, 472), 'helpers_for_tests.run_args_on_parser', 'runargs', (["['foo']"], {}), "(['foo'])\n", (463, 472), True, 'from helpers_for_tests import reset_queries, run_args_on_parser as runargs\n')] |
# This Python file uses the following encoding: utf-8
"""autogenerated by genpy from pr2_gazebo_plugins/SetModelsJointsStatesRequest.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
import pr2_gazebo_plugins.msg
import geometry_msgs.msg
class SetModelsJointsStatesRequest(genpy.Message):
_md5sum = "ecf71b483df7b70447575a8231727200"
_type = "pr2_gazebo_plugins/SetModelsJointsStatesRequest"
_has_header = False #flag to mark the presence of a Header object
_full_text = """string[] model_names
pr2_gazebo_plugins/ModelJointsState[] model_joints_states
================================================================================
MSG: pr2_gazebo_plugins/ModelJointsState
geometry_msgs/Pose[] model_pose # set as single element array if user wishes to specify model pose, otherwise, leave empty
string[] joint_names # list of joint names
float64[] joint_positions # list of desired joint positions, should match joint_names
================================================================================
MSG: geometry_msgs/Pose
# A representation of pose in free space, composed of position and orientation.
Point position
Quaternion orientation
================================================================================
MSG: geometry_msgs/Point
# This contains the position of a point in free space
float64 x
float64 y
float64 z
================================================================================
MSG: geometry_msgs/Quaternion
# This represents an orientation in free space in quaternion form.
float64 x
float64 y
float64 z
float64 w
"""
__slots__ = ['model_names','model_joints_states']
_slot_types = ['string[]','pr2_gazebo_plugins/ModelJointsState[]']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommend
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
model_names,model_joints_states
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(SetModelsJointsStatesRequest, self).__init__(*args, **kwds)
#message fields cannot be None, assign default values for those that are
if self.model_names is None:
self.model_names = []
if self.model_joints_states is None:
self.model_joints_states = []
else:
self.model_names = []
self.model_joints_states = []
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
length = len(self.model_names)
buff.write(_struct_I.pack(length))
for val1 in self.model_names:
length = len(val1)
if python3 or type(val1) == unicode:
val1 = val1.encode('utf-8')
length = len(val1)
buff.write(struct.pack('<I%ss'%length, length, val1))
length = len(self.model_joints_states)
buff.write(_struct_I.pack(length))
for val1 in self.model_joints_states:
length = len(val1.model_pose)
buff.write(_struct_I.pack(length))
for val2 in val1.model_pose:
_v1 = val2.position
_x = _v1
buff.write(_get_struct_3d().pack(_x.x, _x.y, _x.z))
_v2 = val2.orientation
_x = _v2
buff.write(_get_struct_4d().pack(_x.x, _x.y, _x.z, _x.w))
length = len(val1.joint_names)
buff.write(_struct_I.pack(length))
for val2 in val1.joint_names:
length = len(val2)
if python3 or type(val2) == unicode:
val2 = val2.encode('utf-8')
length = len(val2)
buff.write(struct.pack('<I%ss'%length, length, val2))
length = len(val1.joint_positions)
buff.write(_struct_I.pack(length))
pattern = '<%sd'%length
buff.write(struct.pack(pattern, *val1.joint_positions))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
try:
if self.model_joints_states is None:
self.model_joints_states = None
end = 0
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
self.model_names = []
for i in range(0, length):
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
val1 = str[start:end].decode('utf-8')
else:
val1 = str[start:end]
self.model_names.append(val1)
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
self.model_joints_states = []
for i in range(0, length):
val1 = pr2_gazebo_plugins.msg.ModelJointsState()
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
val1.model_pose = []
for i in range(0, length):
val2 = geometry_msgs.msg.Pose()
_v3 = val2.position
_x = _v3
start = end
end += 24
(_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end])
_v4 = val2.orientation
_x = _v4
start = end
end += 32
(_x.x, _x.y, _x.z, _x.w,) = _get_struct_4d().unpack(str[start:end])
val1.model_pose.append(val2)
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
val1.joint_names = []
for i in range(0, length):
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
val2 = str[start:end].decode('utf-8')
else:
val2 = str[start:end]
val1.joint_names.append(val2)
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
pattern = '<%sd'%length
start = end
end += struct.calcsize(pattern)
val1.joint_positions = struct.unpack(pattern, str[start:end])
self.model_joints_states.append(val1)
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
length = len(self.model_names)
buff.write(_struct_I.pack(length))
for val1 in self.model_names:
length = len(val1)
if python3 or type(val1) == unicode:
val1 = val1.encode('utf-8')
length = len(val1)
buff.write(struct.pack('<I%ss'%length, length, val1))
length = len(self.model_joints_states)
buff.write(_struct_I.pack(length))
for val1 in self.model_joints_states:
length = len(val1.model_pose)
buff.write(_struct_I.pack(length))
for val2 in val1.model_pose:
_v5 = val2.position
_x = _v5
buff.write(_get_struct_3d().pack(_x.x, _x.y, _x.z))
_v6 = val2.orientation
_x = _v6
buff.write(_get_struct_4d().pack(_x.x, _x.y, _x.z, _x.w))
length = len(val1.joint_names)
buff.write(_struct_I.pack(length))
for val2 in val1.joint_names:
length = len(val2)
if python3 or type(val2) == unicode:
val2 = val2.encode('utf-8')
length = len(val2)
buff.write(struct.pack('<I%ss'%length, length, val2))
length = len(val1.joint_positions)
buff.write(_struct_I.pack(length))
pattern = '<%sd'%length
buff.write(val1.joint_positions.tostring())
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
try:
if self.model_joints_states is None:
self.model_joints_states = None
end = 0
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
self.model_names = []
for i in range(0, length):
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
val1 = str[start:end].decode('utf-8')
else:
val1 = str[start:end]
self.model_names.append(val1)
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
self.model_joints_states = []
for i in range(0, length):
val1 = pr2_gazebo_plugins.msg.ModelJointsState()
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
val1.model_pose = []
for i in range(0, length):
val2 = geometry_msgs.msg.Pose()
_v7 = val2.position
_x = _v7
start = end
end += 24
(_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end])
_v8 = val2.orientation
_x = _v8
start = end
end += 32
(_x.x, _x.y, _x.z, _x.w,) = _get_struct_4d().unpack(str[start:end])
val1.model_pose.append(val2)
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
val1.joint_names = []
for i in range(0, length):
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
val2 = str[start:end].decode('utf-8')
else:
val2 = str[start:end]
val1.joint_names.append(val2)
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
pattern = '<%sd'%length
start = end
end += struct.calcsize(pattern)
val1.joint_positions = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=length)
self.model_joints_states.append(val1)
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
_struct_I = genpy.struct_I
def _get_struct_I():
global _struct_I
return _struct_I
_struct_4d = None
def _get_struct_4d():
global _struct_4d
if _struct_4d is None:
_struct_4d = struct.Struct("<4d")
return _struct_4d
_struct_3d = None
def _get_struct_3d():
global _struct_3d
if _struct_3d is None:
_struct_3d = struct.Struct("<3d")
return _struct_3d
# This Python file uses the following encoding: utf-8
"""autogenerated by genpy from pr2_gazebo_plugins/SetModelsJointsStatesResponse.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
class SetModelsJointsStatesResponse(genpy.Message):
_md5sum = "2ec6f3eff0161f4257b808b12bc830c2"
_type = "pr2_gazebo_plugins/SetModelsJointsStatesResponse"
_has_header = False #flag to mark the presence of a Header object
_full_text = """bool success
string status_message
"""
__slots__ = ['success','status_message']
_slot_types = ['bool','string']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommend
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
success,status_message
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(SetModelsJointsStatesResponse, self).__init__(*args, **kwds)
#message fields cannot be None, assign default values for those that are
if self.success is None:
self.success = False
if self.status_message is None:
self.status_message = ''
else:
self.success = False
self.status_message = ''
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
buff.write(_get_struct_B().pack(self.success))
_x = self.status_message
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
try:
end = 0
start = end
end += 1
(self.success,) = _get_struct_B().unpack(str[start:end])
self.success = bool(self.success)
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.status_message = str[start:end].decode('utf-8')
else:
self.status_message = str[start:end]
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
buff.write(_get_struct_B().pack(self.success))
_x = self.status_message
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
try:
end = 0
start = end
end += 1
(self.success,) = _get_struct_B().unpack(str[start:end])
self.success = bool(self.success)
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.status_message = str[start:end].decode('utf-8')
else:
self.status_message = str[start:end]
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
_struct_I = genpy.struct_I
def _get_struct_I():
global _struct_I
return _struct_I
_struct_B = None
def _get_struct_B():
global _struct_B
if _struct_B is None:
_struct_B = struct.Struct("<B")
return _struct_B
class SetModelsJointsStates(object):
_type = 'pr2_gazebo_plugins/SetModelsJointsStates'
_md5sum = 'b3f4760ee77e28f605915bcee447b72d'
_request_class = SetModelsJointsStatesRequest
_response_class = SetModelsJointsStatesResponse
| [
"struct.calcsize",
"struct.pack",
"struct.unpack",
"genpy.DeserializationError",
"struct.Struct"
] | [((11441, 11461), 'struct.Struct', 'struct.Struct', (['"""<4d"""'], {}), "('<4d')\n", (11454, 11461), False, 'import struct\n'), ((11594, 11614), 'struct.Struct', 'struct.Struct', (['"""<3d"""'], {}), "('<3d')\n", (11607, 11614), False, 'import struct\n'), ((16386, 16405), 'struct.Struct', 'struct.Struct', (['"""<B"""'], {}), "('<B')\n", (16399, 16405), False, 'import struct\n'), ((6726, 6750), 'struct.calcsize', 'struct.calcsize', (['pattern'], {}), '(pattern)\n', (6741, 6750), False, 'import struct\n'), ((6782, 6820), 'struct.unpack', 'struct.unpack', (['pattern', 'str[start:end]'], {}), '(pattern, str[start:end])\n', (6795, 6820), False, 'import struct\n'), ((6927, 6956), 'genpy.DeserializationError', 'genpy.DeserializationError', (['e'], {}), '(e)\n', (6953, 6956), False, 'import genpy\n'), ((10950, 10974), 'struct.calcsize', 'struct.calcsize', (['pattern'], {}), '(pattern)\n', (10965, 10974), False, 'import struct\n'), ((11180, 11209), 'genpy.DeserializationError', 'genpy.DeserializationError', (['e'], {}), '(e)\n', (11206, 11209), False, 'import genpy\n'), ((13619, 13660), 'struct.pack', 'struct.pack', (["('<I%ss' % length)", 'length', '_x'], {}), "('<I%ss' % length, length, _x)\n", (13630, 13660), False, 'import struct\n'), ((14593, 14622), 'genpy.DeserializationError', 'genpy.DeserializationError', (['e'], {}), '(e)\n', (14619, 14622), False, 'import genpy\n'), ((15077, 15118), 'struct.pack', 'struct.pack', (["('<I%ss' % length)", 'length', '_x'], {}), "('<I%ss' % length, length, _x)\n", (15088, 15118), False, 'import struct\n'), ((16130, 16159), 'genpy.DeserializationError', 'genpy.DeserializationError', (['e'], {}), '(e)\n', (16156, 16159), False, 'import genpy\n'), ((3304, 3347), 'struct.pack', 'struct.pack', (["('<I%ss' % length)", 'length', 'val1'], {}), "('<I%ss' % length, length, val1)\n", (3315, 3347), False, 'import struct\n'), ((4294, 4337), 'struct.pack', 'struct.pack', (['pattern', '*val1.joint_positions'], {}), '(pattern, *val1.joint_positions)\n', (4305, 4337), False, 'import struct\n'), ((7461, 7504), 'struct.pack', 'struct.pack', (["('<I%ss' % length)", 'length', 'val1'], {}), "('<I%ss' % length, length, val1)\n", (7472, 7504), False, 'import struct\n'), ((4114, 4157), 'struct.pack', 'struct.pack', (["('<I%ss' % length)", 'length', 'val2'], {}), "('<I%ss' % length, length, val2)\n", (4125, 4157), False, 'import struct\n'), ((8271, 8314), 'struct.pack', 'struct.pack', (["('<I%ss' % length)", 'length', 'val2'], {}), "('<I%ss' % length, length, val2)\n", (8282, 8314), False, 'import struct\n')] |
from django.utils.functional import cached_property
from wagtail.core.blocks import ChooserBlock
class VideoChooserBlock(ChooserBlock):
@cached_property
def target_model(self):
from wagtailvideos import get_video_model
return get_video_model()
@cached_property
def widget(self):
from wagtailvideos.widgets import AdminVideoChooser
return AdminVideoChooser
def render_basic(self, value, context=None):
if value:
return value.video_tag(attrs={"controls": True})
else:
return ""
class Meta:
icon = 'media'
| [
"wagtailvideos.get_video_model"
] | [((252, 269), 'wagtailvideos.get_video_model', 'get_video_model', ([], {}), '()\n', (267, 269), False, 'from wagtailvideos import get_video_model\n')] |
# https://leetcode.com/problems/number-of-rectangles-that-can-form-the-largest-square/
# You are given an array rectangles where rectangles[i] = [li, wi] represents the ith rectangle of length li and width wi.
# You can cut the ith rectangle to form a square with a side length of k if both k <= li and k <= wi.
# For example, if you have a rectangle [4,6], you can cut it to get a square with a side length of at most 4.
# Let maxLen be the side length of the largest square you can obtain from any of the given rectangles.
# Return the number of rectangles that can make a square with a side length of maxLen.
import pytest
class Solution:
def countGoodRectangles(self, rectangles: list[list[int]]) -> int:
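        # count[k] = number of rectangles whose largest possible square side is k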
count = {0: 0}
max_side = 0
for l, w in rectangles:
val = l if l < w else w
max_side = val if val > max_side else max_side
try:
count[val] += 1
except KeyError:
count[val] = 1
return count[max_side]
@pytest.mark.parametrize(
("rectangles", "expected"),
[([[5, 8], [3, 9], [5, 12], [16, 5]], 3), ([[2, 3], [3, 7], [4, 3], [3, 7]], 3)],
)
def test_basic(rectangles: list[list[int]], expected: int):
assert expected == Solution().countGoodRectangles(rectangles)
| [
"pytest.mark.parametrize"
] | [((1037, 1175), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (["('rectangles', 'expected')", '[([[5, 8], [3, 9], [5, 12], [16, 5]], 3), ([[2, 3], [3, 7], [4, 3], [3, 7]], 3)\n ]'], {}), "(('rectangles', 'expected'), [([[5, 8], [3, 9], [5, \n 12], [16, 5]], 3), ([[2, 3], [3, 7], [4, 3], [3, 7]], 3)])\n", (1060, 1175), False, 'import pytest\n')] |
# python script to batch add emoji to slack.
# see README.md for instructions
import getpass
import os
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import sys
# Get user input
slack_team_name = raw_input("Slack team name: ")
img_path = raw_input("Path to image directory: ")
email = raw_input("Email address: ")
# Get slack password safely using getpass
# https://docs.python.org/2/library/getpass.html
password = getpass.getpass('Slack Password:')
# open the browser and get the page
driver = webdriver.Chrome()
driver.get("https://%s.slack.com/emoji" % slack_team_name)
# login
elem = driver.find_element_by_id("email")
elem.send_keys(email)
elem = driver.find_element_by_id("password")
elem.send_keys(password)
elem.send_keys(Keys.RETURN)
# loop and upload the images in user given path
for filename in os.listdir(img_path):
if filename.lower().endswith(('.png', '.jpg', '.jpeg', '.gif')):
# put in file name
elem = driver.find_element_by_id('emojiname')
elem.send_keys(filename.split('.')[0])
# put in path to file
elem = driver.find_element_by_id('emojiimg')
path = os.getcwd() + '/' + img_path + '/' + filename
elem.send_keys(path)
# submit the form
driver.find_element_by_xpath("//input[@type='submit' and @value='Save New Emoji']").click()
# Close the browser session
driver.close()
| [
"selenium.webdriver.Chrome",
"os.listdir",
"getpass.getpass",
"os.getcwd"
] | [((449, 483), 'getpass.getpass', 'getpass.getpass', (['"""Slack Password:"""'], {}), "('Slack Password:')\n", (464, 483), False, 'import getpass\n'), ((530, 548), 'selenium.webdriver.Chrome', 'webdriver.Chrome', ([], {}), '()\n', (546, 548), False, 'from selenium import webdriver\n'), ((843, 863), 'os.listdir', 'os.listdir', (['img_path'], {}), '(img_path)\n', (853, 863), False, 'import os\n'), ((1160, 1171), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1169, 1171), False, 'import os\n')] |
#! /usr/bin/env python3
import sys
import re
if sys.version_info <= (3, 0):
print("This program only runs in python3", file=sys.stderr)
exit(1)
mappings = {}
with open("arpa-ipa.map") as f:
for line in f:
arpa, ipa = line.strip().split("\t")
mappings[arpa] = ipa
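# regexes to strip "(2)"-style variant markers, 0/1/2 stress digits, and trailing " #" comments from cmudict entries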
brackets = re.compile(r'\(\d+\)')
numbers = re.compile('[012]')
comments = re.compile(' #.+$')
with open("cmudict.dict") as f:
for line in f:
line = line.strip()
line = comments.sub("", line)
word, arpa = line.split(" ", 1)
word = brackets.sub("", word)
arpa = numbers.sub("", arpa).split(" ")
mapped = []
for part in arpa:
ipa = mappings.get(part)
if ipa:
mapped.append(ipa)
else:
print("Could not map symbol %s in phrase: %s" % (part, word),
file=sys.stderr)
continue
print("%s\t%s" % (word, " ".join(mapped)))
| [
"re.compile"
] | [((306, 330), 're.compile', 're.compile', (['"""\\\\(\\\\d+\\\\)"""'], {}), "('\\\\(\\\\d+\\\\)')\n", (316, 330), False, 'import re\n'), ((338, 357), 're.compile', 're.compile', (['"""[012]"""'], {}), "('[012]')\n", (348, 357), False, 'import re\n'), ((369, 388), 're.compile', 're.compile', (['""" #.+$"""'], {}), "(' #.+$')\n", (379, 388), False, 'import re\n')] |
#!/usr/bin/env python3
"""
TODO:
- config from params
- sensor data publish
- diagnostics
"""
import rospy
from std_msgs.msg import Float64
from std_msgs.msg import String
from geometry_msgs.msg import Twist
from geometry_msgs.msg import TwistWithCovarianceStamped
import time
import sys
from odrive_driver import ODriveDriver
class ODriveNode:
wheel_separation = 0.42
wheel_radius = 0.05
left_speed = 0.0
right_speed = 0.0
topic_timer = 0.0
topic_timeout = 0.5
is_timed_out = True
frequency = 15.0
def __init__(self):
rospy.init_node("odrive")
rospy.Subscriber("cmd_vel", Twist, self.cmd_vel_callback, queue_size=2)
odom_pub = rospy.Publisher("odom", TwistWithCovarianceStamped, queue_size=2)
odom_msg = TwistWithCovarianceStamped()
odom_msg.header.frame_id = "base_link"
voltage_pub = rospy.Publisher("status/voltage", Float64, queue_size=2)
voltage_msg = Float64()
current_pub = rospy.Publisher("status/current", Float64, queue_size=2)
current_msg = Float64()
# ~ temp_l_pub = rospy.Publisher("status/temperature_left", Float64, queue_size=2)
# ~ temp_l_msg = Float64()
# ~ temp_r_pub = rospy.Publisher("status/temperature_right", Float64, queue_size=2)
# ~ temp_r_msg = Float64()
status_pub = rospy.Publisher("status/status", String, queue_size=2)
status_msg = String()
error_pub = rospy.Publisher("status/errors", String, queue_size=2)
error_msg = String()
odrive = ODriveDriver()
rate = rospy.Rate(self.frequency)
while not rospy.is_shutdown():
try:
odrive.update()
if self.topic_timer < self.topic_timeout:
odrive.set_velocity(self.left_speed, self.right_speed)
self.is_timed_out = False
else:
odrive.set_velocity(0, 0)
self.is_timed_out = True
self.topic_timer += 1.0 / self.frequency
odom_msg.header.stamp = rospy.Time.now()
odom_msg.twist.twist.linear.x = -((((odrive.get_velocity_right() * 2 * 3.14159265) + (odrive.get_velocity_left() * 2 * 3.14159265)) / 2.0) * self.wheel_radius)
odom_msg.twist.twist.angular.z = ((((odrive.get_velocity_right() * 2 * 3.14159265) - (odrive.get_velocity_left() * 2 * 3.14159265)) / 2.0) / (self.wheel_separation / 2)) * self.wheel_radius
odom_pub.publish(odom_msg)
voltage_msg.data = odrive.get_voltage()
voltage_pub.publish(voltage_msg)
current_msg.data = odrive.get_current()
current_pub.publish(current_msg)
# ~ temp_l_msg.data = odrive.get_temperature_left()
# ~ temp_l_pub.publish(temp_l_msg)
# ~ temp_r_msg.data = odrive.get_temperature_right()
# ~ temp_r_pub.publish(temp_r_msg)
status_msg.data = odrive.get_status_string()
if not self.is_timed_out:
status_msg.data += " (Active)"
else:
status_msg.data += " (Timed out)"
status_pub.publish(status_msg)
error_msg.data = str(odrive.get_errors())
error_pub.publish(error_msg)
except Exception as e:
status_msg.data = "Connection error."
status_pub.publish(status_msg)
print(e)
rate.sleep()
odrive.disengage()
def cmd_vel_callback(self, msg):
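        # convert the commanded body twist into per-wheel speeds in revolutions per second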
self.left_speed = ((-msg.linear.x - (msg.angular.z * self.wheel_separation / 2)) / self.wheel_radius) / (2 * 3.14159265)
self.right_speed = ((-msg.linear.x + (msg.angular.z * self.wheel_separation / 2)) / self.wheel_radius) / (2 * 3.14159265)
self.topic_timer = 0.0
if __name__ == '__main__':
odrvnode = ODriveNode()
print("ODrive node exiting.")
| [
"std_msgs.msg.String",
"rospy.Subscriber",
"std_msgs.msg.Float64",
"rospy.is_shutdown",
"odrive_driver.ODriveDriver",
"rospy.init_node",
"geometry_msgs.msg.TwistWithCovarianceStamped",
"rospy.Time.now",
"rospy.Rate",
"rospy.Publisher"
] | [((601, 626), 'rospy.init_node', 'rospy.init_node', (['"""odrive"""'], {}), "('odrive')\n", (616, 626), False, 'import rospy\n'), ((635, 706), 'rospy.Subscriber', 'rospy.Subscriber', (['"""cmd_vel"""', 'Twist', 'self.cmd_vel_callback'], {'queue_size': '(2)'}), "('cmd_vel', Twist, self.cmd_vel_callback, queue_size=2)\n", (651, 706), False, 'import rospy\n'), ((735, 800), 'rospy.Publisher', 'rospy.Publisher', (['"""odom"""', 'TwistWithCovarianceStamped'], {'queue_size': '(2)'}), "('odom', TwistWithCovarianceStamped, queue_size=2)\n", (750, 800), False, 'import rospy\n'), ((820, 848), 'geometry_msgs.msg.TwistWithCovarianceStamped', 'TwistWithCovarianceStamped', ([], {}), '()\n', (846, 848), False, 'from geometry_msgs.msg import TwistWithCovarianceStamped\n'), ((927, 983), 'rospy.Publisher', 'rospy.Publisher', (['"""status/voltage"""', 'Float64'], {'queue_size': '(2)'}), "('status/voltage', Float64, queue_size=2)\n", (942, 983), False, 'import rospy\n'), ((1006, 1015), 'std_msgs.msg.Float64', 'Float64', ([], {}), '()\n', (1013, 1015), False, 'from std_msgs.msg import Float64\n'), ((1047, 1103), 'rospy.Publisher', 'rospy.Publisher', (['"""status/current"""', 'Float64'], {'queue_size': '(2)'}), "('status/current', Float64, queue_size=2)\n", (1062, 1103), False, 'import rospy\n'), ((1126, 1135), 'std_msgs.msg.Float64', 'Float64', ([], {}), '()\n', (1133, 1135), False, 'from std_msgs.msg import Float64\n'), ((1437, 1491), 'rospy.Publisher', 'rospy.Publisher', (['"""status/status"""', 'String'], {'queue_size': '(2)'}), "('status/status', String, queue_size=2)\n", (1452, 1491), False, 'import rospy\n'), ((1513, 1521), 'std_msgs.msg.String', 'String', ([], {}), '()\n', (1519, 1521), False, 'from std_msgs.msg import String\n'), ((1551, 1605), 'rospy.Publisher', 'rospy.Publisher', (['"""status/errors"""', 'String'], {'queue_size': '(2)'}), "('status/errors', String, queue_size=2)\n", (1566, 1605), False, 'import rospy\n'), ((1626, 1634), 'std_msgs.msg.String', 'String', ([], {}), '()\n', (1632, 1634), False, 'from std_msgs.msg import String\n'), ((1653, 1667), 'odrive_driver.ODriveDriver', 'ODriveDriver', ([], {}), '()\n', (1665, 1667), False, 'from odrive_driver import ODriveDriver\n'), ((1692, 1718), 'rospy.Rate', 'rospy.Rate', (['self.frequency'], {}), '(self.frequency)\n', (1702, 1718), False, 'import rospy\n'), ((1746, 1765), 'rospy.is_shutdown', 'rospy.is_shutdown', ([], {}), '()\n', (1763, 1765), False, 'import rospy\n'), ((2253, 2269), 'rospy.Time.now', 'rospy.Time.now', ([], {}), '()\n', (2267, 2269), False, 'import rospy\n')] |
# Generated by Django 2.0.2 on 2018-06-22 00:01
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('api', '0004_auto_20180524_2056'),
]
operations = [
migrations.CreateModel(
name='Support',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('message', models.CharField(max_length=300)),
('student_from', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='supports_given', to=settings.AUTH_USER_MODEL)),
('student_to', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='supports_received', to=settings.AUTH_USER_MODEL)),
],
),
]
| [
"django.db.models.AutoField",
"django.db.models.CharField",
"django.db.models.ForeignKey"
] | [((393, 486), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (409, 486), False, 'from django.db import migrations, models\n'), ((513, 545), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(300)'}), '(max_length=300)\n', (529, 545), False, 'from django.db import migrations, models\n'), ((581, 708), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""supports_given"""', 'to': 'settings.AUTH_USER_MODEL'}), "(on_delete=django.db.models.deletion.CASCADE, related_name\n ='supports_given', to=settings.AUTH_USER_MODEL)\n", (598, 708), False, 'from django.db import migrations, models\n'), ((737, 867), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""supports_received"""', 'to': 'settings.AUTH_USER_MODEL'}), "(on_delete=django.db.models.deletion.CASCADE, related_name\n ='supports_received', to=settings.AUTH_USER_MODEL)\n", (754, 867), False, 'from django.db import migrations, models\n')] |
import numpy as np
import math
import numpy.random as random
import matplotlib.pyplot as plt
import sys
import os
import random as rand
import mlayers as ml
#import mnist.py
#FIX THIS --- Filter back-propagation results in numbers too large; the np.exp in the softmax layer cannot be computed for such large numbers
from scipy import misc, ndimage
EPOCHS = 20000
LEARN_RATE = 0.00001
ml.LEARN_RATE = 0.001
GRADIENT_THRESHOLD = 1
debug_mode = False
class ConvolutionalLayer():
cache = np.array([0]) #Used to store the values for back-propagation
weights = np.array([0]) #Weights for each connection between neurons represented as a matrix
def __init__(self, width, height, depth, filter_num, fsize, stride, zero_padding):
#width, height = dimensions of input
#depth = number of inputs
#filters = number of filters, fsize = side length of filter
#stride = number of units moved during convolution by filter
#zero_padding = number of zero "outlines" to surround input with during convolution
self.width = width
self.height = height
self.depth = depth
self.filter_num = filter_num
self.fsize = fsize
self.stride = stride
self.zero_padding = zero_padding
self.filters = [[np.random.uniform(0, math.sqrt(2/(self.height * self.width)), (self.fsize,self.fsize)) for layer in range(self.depth)] for filter_col in range(self.filter_num)]
self.bias = np.random.uniform(0, 1, self.filter_num)
#self.cache = np.zeros((rows,1))
#self.weights = np.random.uniform(-np.sqrt(1./cols), np.sqrt(1./cols), (rows, cols+1))
#self.mem_weights = np.zeros(self.weights.shape)
#self.filters =
def forward(self, inputArr):
#filters = list of all filters
#outputs = list(?) of outputs
self.cache = inputArr
self.o_width = int((self.width - self.fsize)/self.stride) + 1
self.o_height = int((self.height - self.fsize)/self.stride) + 1
output = np.zeros((self.filter_num, self.o_height, self.o_width))
for f in range(self.filter_num):
for layer in range(self.depth):
if(debug_mode):
print("filter\n",self.filters[f][layer])
print("bias\n", self.bias[f])
for i in range(self.o_height):
for j in range(self.o_width):
#section = input section (x_ij)
#section = np.zeros((self.fsize,self.fsize))
section = inputArr[layer, i*self.stride:i*self.stride + self.fsize:1, j*self.stride:j*self.stride + self.fsize:1]
"""
for m in range(self.fsize):
for n in range(self.fsize):
section[m][n] = inputArr[m + i*self.stride][n + j*self.stride][layer]
"""
#print(np.shape(inputArr), np.shape(section), np.shape(self.filters[f][layer]))
output[f][i][j] += np.sum(np.multiply(section, self.filters[f][layer])) + self.bias[f] #use the proper filter for each one
#print(i)
#sys.stdout.flush()
return output
def backward(self, gradient):
dCdx = np.zeros((self.depth, self.height, self.width))
"""
#Gradient Clipping
if(np.abs(np.linalg.norm(gradient)) > GRADIENT_THRESHOLD):
gradient = GRADIENT_THRESHOLD * gradient / np.linalg.norm(gradient)
"""
for f in range(self.filter_num):
for layer in range(self.depth):
dCdf = np.zeros((self.fsize, self.fsize))
#dzdx = np.zeros((self.o_height, self.o_width))
for i in range(self.fsize):
for j in range(self.fsize):
#iteration TODO
for m in range(self.o_height):
for n in range(self.o_width):
dCdf[i][j] += self.cache[layer][i + m*self.stride][j + n*self.stride] * gradient[f][m][n]
self.bias[f] -= LEARN_RATE * gradient[f][m][n]
#Rotating filter for convolution
dCdx[layer][m*self.stride + i][n*self.stride + j] += self.filters[f][layer][-i][-j] * gradient[f][m][n]
if(f == 0 and debug_mode):
#print("gradient\n", np.mean(gradient))
print("dCdf\n", dCdf)
self.filters[f][layer] -= LEARN_RATE * dCdf
return dCdx#np.dot(dCdx, gradient)
class MaxPoolingLayer():
def __init__(self, chunk_width, chunk_height, averageValues=False):
self.chunk_width = chunk_width
self.chunk_height = chunk_height
self.averageValues = averageValues
def forward(self, inputArr):
self.new_height = int(len(inputArr[0]) / self.chunk_height)
self.new_width = int(len(inputArr[0][0]) / self.chunk_width)
self.overhang_h = len(inputArr[0]) % self.chunk_height
self.overhang_w = len(inputArr[0][0]) % self.chunk_width
#print(self.new_height, self.new_width, self.overhang_h, self.overhang_w)
self.depth = len(inputArr)
pooled_arr = np.zeros((self.depth, self.new_height + np.sign(self.overhang_h), self.new_width + np.sign(self.overhang_w)))
self.max_positions = [[[np.zeros(2) for x in range(self.new_width + np.sign(self.overhang_w))] for y in range(self.new_height + np.sign(self.overhang_h))] for layer in range(self.depth)]
for layer in range(self.depth):
for i in range(self.new_height + np.sign(self.overhang_h)):
for j in range(self.new_width + np.sign(self.overhang_w)):
max_value = 0
max_x = 0
max_y = 0
for m in range(self.chunk_height if (i < self.new_height) else self.overhang_h):
for n in range(self.chunk_width if (j < self.new_width) else self.overhang_w):
#print("point\n", max_value, layer, i*self.chunk_height + m, j*self.chunk_width + n)
if(inputArr[layer][i*self.chunk_height + m][j*self.chunk_width + n] > max_value):
max_value = inputArr[layer][i*self.chunk_height + m][j*self.chunk_width + n]
max_x = j*self.chunk_width + n
max_y = i*self.chunk_height + m
pooled_arr[layer][i][j] = max_value
self.max_positions[layer][i][j] = np.array([max_x, max_y])
return pooled_arr
def backward(self, gradient):
dCdP = np.zeros((self.depth, self.new_height * self.chunk_height + self.overhang_h, self.new_width * self.chunk_width + self.overhang_w))
for layer in range(self.depth):
for i in range(self.new_height + np.sign(self.overhang_h)):
for j in range(self.new_width + np.sign(self.overhang_w)):
#Searching for max value position from input to distribute the error to
dCdP[layer][self.max_positions[layer][i][j][1]][self.max_positions[layer][i][j][0]] = gradient[layer][i][j]
return dCdP
class ReLULayer():
def __init__(self):
print("kek")
#self.cache
def forward(self, inputArr):
self.cache = np.maximum(inputArr, 0)
return self.cache
def backward(self, gradient):
#print(np.multiply(np.sign(self.cache), gradient))
return np.multiply(np.sign(self.cache), gradient)
class LeakyReLULayer():
def __init__(self):
print("kek")
#self.cache
def forward(self, inputArr):
self.cache = np.maximum(inputArr, 0.1*inputArr)
return self.cache
def backward(self, gradient):
#print(np.multiply(np.sign(self.cache), gradient))
return np.multiply(np.sign(self.cache), gradient)
class FullyConnectedLayer():
cache = np.array([0]) #Used to store the values for back-propagation
weights = np.array([0]) #Weights for each connection between neurons represented as a matrix
def __init__(self, input_depth, input_height, input_width, new_dim):
#rows = hidden layer size
#cols = number of unique classifications - size of input vector
self.old_height = input_height
self.old_width = input_width
self.cols = input_height * input_width * input_depth
self.rows = new_dim
self.depth = input_depth
self.cache = np.zeros((self.rows,1))
self.weights = np.random.uniform(-np.sqrt(1./self.cols), np.sqrt(1./self.cols), (self.rows, self.cols+1))
self.mem_weights = np.zeros(self.weights.shape)
def forward(self, inputArr):
flatArr = np.ndarray.flatten(inputArr)
self.cache = np.resize(np.append(flatArr, [1]), (len(flatArr) + 1, 1))
self.mem_weights = 0.9*self.mem_weights + 0.1*(self.weights ** 2) #incrementing for adagrad
return np.dot(self.weights, self.cache)
def backward(self, gradient):
self.weights -= np.outer(gradient, self.cache.T) * LEARN_RATE / np.sqrt(self.mem_weights + 1e-8)
return np.reshape(np.dot(self.weights.T, gradient)[:len(np.dot(self.weights.T, gradient)) - 1], (self.depth, self.old_height, self.old_width))
def subsample_layer(array, layer):
newArray = np.zeros((1, len(array[0]), len(array[0][0])))
for i in range(len(array)):
for j in range(len(array[0])):
newArray[0][i][j] = array[layer][i][j]
return newArray
def seperate_layers(array):
newArray = np.zeros((len(array[0][0]), len(array), len(array[0])))
for i in range(len(array)):
for j in range(len(array[0])):
for k in range(len(array[0][0])):
newArray[k][i][j] = array[i][j][k]
return newArray
training_data = []
index = 0
for root, dirnames, filenames in os.walk("training_data"):
for filename in filenames:
filepath = os.path.join(root, filename)
image = seperate_layers(ndimage.imread(filepath, mode="RGB"))
training_data.append((index, image))
index += 1
possible_classifications = len(training_data)
layers = [ConvolutionalLayer(16,16,1,10,3,1,0), LeakyReLULayer(), MaxPoolingLayer(2,2), FullyConnectedLayer(10,7,7,30), LeakyReLULayer(), ml.InnerLayer(possible_classifications, 30), ml.SoftmaxLayer()]
#layers = [ConvolutionalLayer(64,64,3,3,7,2,0), ReLULayer(), ConvolutionalLayer(58,58,3,3,5,1,0), ReLULayer(), FullyConnectedLayer(2,7,7,10), ml.InnerLayer(possible_classifications, 10), ml.SoftmaxLayer()]
#layers = [ConvolutionalLayer(32,32,1,5,5,1,0), LeakyReLULayer(), MaxPoolingLayer(2,2), ConvolutionalLayer(14,14,5,10,5,1,0), LeakyReLULayer(), MaxPoolingLayer(2,2), FullyConnectedLayer(10,5,5,20), LeakyReLULayer(), ml.InnerLayer(possible_classifications, 20), ml.SoftmaxLayer()]
error = np.zeros((0,2))
for i in range(EPOCHS):
sample = rand.choice(training_data)
#print(sample[1].shape)
temp = np.divide(sample[1],255)
temp = subsample_layer(temp, 0)
expected = np.zeros((possible_classifications, 1))
expected[sample[0]] = 1
for layer in layers:
temp = layer.forward(temp)
if(debug_mode):
print("forward pass", layer, np.mean(temp), temp.shape)
#print("average value of weights", np.mean(layers[2].weights), np.mean(layers[3].weights))
loss = np.subtract(temp, expected)
#print(np.argmax(expected), np.argmax(temp))
if(i%1 == 0):
print(i, temp.T, expected.T)
temp = expected
layers.reverse()
for layer in layers:
temp = layer.backward(temp)
if(debug_mode):
print("backprop", layer, np.linalg.norm(temp), temp.shape)#, "\n", temp)
layers.reverse()
error = np.append(error, np.absolute(np.array([[i, np.sum(np.abs(loss))]])), axis=0)
plt.plot(error[:,0], error[:,1])
plt.xlabel("Iteration")
plt.ylabel("Error")
plt.show()
for fil_layer in layers[0].filters:
for fil in fil_layer:
plt.imshow(fil)
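# ----------------------------------------------------------------------------
# Illustrative sanity check (added sketch, not part of the original script).
# It exercises the LeakyReLULayer defined above on a tiny hand-made array so the
# forward/backward shapes are easy to verify; the input values below are
# assumptions chosen only for illustration.
_demo_layer = LeakyReLULayer()
_demo_in = np.array([[[-2.0, 3.0], [0.5, -1.0]]])       # shape (depth=1, 2, 2)
_demo_out = _demo_layer.forward(_demo_in)                # negatives scaled by 0.1
_demo_grad = _demo_layer.backward(np.ones_like(_demo_in))  # slope 1 where input > 0, else 0.1
print("LeakyReLU demo shapes:", _demo_out.shape, _demo_grad.shape)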
| [
"numpy.sqrt",
"matplotlib.pyplot.ylabel",
"math.sqrt",
"scipy.ndimage.imread",
"mlayers.InnerLayer",
"numpy.array",
"numpy.linalg.norm",
"numpy.divide",
"os.walk",
"matplotlib.pyplot.imshow",
"numpy.mean",
"numpy.multiply",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.subtract",
"numpy.dot",
"numpy.maximum",
"numpy.abs",
"random.choice",
"mlayers.SoftmaxLayer",
"numpy.ndarray.flatten",
"numpy.outer",
"numpy.sign",
"matplotlib.pyplot.show",
"os.path.join",
"numpy.append",
"numpy.zeros",
"numpy.random.uniform"
] | [((10092, 10116), 'os.walk', 'os.walk', (['"""training_data"""'], {}), "('training_data')\n", (10099, 10116), False, 'import os\n'), ((11082, 11098), 'numpy.zeros', 'np.zeros', (['(0, 2)'], {}), '((0, 2))\n', (11090, 11098), True, 'import numpy as np\n'), ((12070, 12104), 'matplotlib.pyplot.plot', 'plt.plot', (['error[:, 0]', 'error[:, 1]'], {}), '(error[:, 0], error[:, 1])\n', (12078, 12104), True, 'import matplotlib.pyplot as plt\n'), ((12103, 12126), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Iteration"""'], {}), "('Iteration')\n", (12113, 12126), True, 'import matplotlib.pyplot as plt\n'), ((12127, 12146), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Error"""'], {}), "('Error')\n", (12137, 12146), True, 'import matplotlib.pyplot as plt\n'), ((12148, 12158), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (12156, 12158), True, 'import matplotlib.pyplot as plt\n'), ((495, 508), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (503, 508), True, 'import numpy as np\n'), ((571, 584), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (579, 584), True, 'import numpy as np\n'), ((8136, 8149), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (8144, 8149), True, 'import numpy as np\n'), ((8212, 8225), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (8220, 8225), True, 'import numpy as np\n'), ((10517, 10560), 'mlayers.InnerLayer', 'ml.InnerLayer', (['possible_classifications', '(30)'], {}), '(possible_classifications, 30)\n', (10530, 10560), True, 'import mlayers as ml\n'), ((10562, 10579), 'mlayers.SoftmaxLayer', 'ml.SoftmaxLayer', ([], {}), '()\n', (10577, 10579), True, 'import mlayers as ml\n'), ((11137, 11163), 'random.choice', 'rand.choice', (['training_data'], {}), '(training_data)\n', (11148, 11163), True, 'import random as rand\n'), ((11203, 11228), 'numpy.divide', 'np.divide', (['sample[1]', '(255)'], {}), '(sample[1], 255)\n', (11212, 11228), True, 'import numpy as np\n'), ((11280, 11319), 'numpy.zeros', 'np.zeros', (['(possible_classifications, 1)'], {}), '((possible_classifications, 1))\n', (11288, 11319), True, 'import numpy as np\n'), ((11609, 11636), 'numpy.subtract', 'np.subtract', (['temp', 'expected'], {}), '(temp, expected)\n', (11620, 11636), True, 'import numpy as np\n'), ((1475, 1515), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)', 'self.filter_num'], {}), '(0, 1, self.filter_num)\n', (1492, 1515), True, 'import numpy as np\n'), ((2037, 2093), 'numpy.zeros', 'np.zeros', (['(self.filter_num, self.o_height, self.o_width)'], {}), '((self.filter_num, self.o_height, self.o_width))\n', (2045, 2093), True, 'import numpy as np\n'), ((3348, 3395), 'numpy.zeros', 'np.zeros', (['(self.depth, self.height, self.width)'], {}), '((self.depth, self.height, self.width))\n', (3356, 3395), True, 'import numpy as np\n'), ((6829, 6963), 'numpy.zeros', 'np.zeros', (['(self.depth, self.new_height * self.chunk_height + self.overhang_h, self.\n new_width * self.chunk_width + self.overhang_w)'], {}), '((self.depth, self.new_height * self.chunk_height + self.overhang_h,\n self.new_width * self.chunk_width + self.overhang_w))\n', (6837, 6963), True, 'import numpy as np\n'), ((7530, 7553), 'numpy.maximum', 'np.maximum', (['inputArr', '(0)'], {}), '(inputArr, 0)\n', (7540, 7553), True, 'import numpy as np\n'), ((7879, 7915), 'numpy.maximum', 'np.maximum', (['inputArr', '(0.1 * inputArr)'], {}), '(inputArr, 0.1 * inputArr)\n', (7889, 7915), True, 'import numpy as np\n'), ((8698, 8722), 'numpy.zeros', 'np.zeros', (['(self.rows, 1)'], {}), '((self.rows, 
1))\n', (8706, 8722), True, 'import numpy as np\n'), ((8864, 8892), 'numpy.zeros', 'np.zeros', (['self.weights.shape'], {}), '(self.weights.shape)\n', (8872, 8892), True, 'import numpy as np\n'), ((8944, 8972), 'numpy.ndarray.flatten', 'np.ndarray.flatten', (['inputArr'], {}), '(inputArr)\n', (8962, 8972), True, 'import numpy as np\n'), ((9169, 9201), 'numpy.dot', 'np.dot', (['self.weights', 'self.cache'], {}), '(self.weights, self.cache)\n', (9175, 9201), True, 'import numpy as np\n'), ((10168, 10196), 'os.path.join', 'os.path.join', (['root', 'filename'], {}), '(root, filename)\n', (10180, 10196), False, 'import os\n'), ((12230, 12245), 'matplotlib.pyplot.imshow', 'plt.imshow', (['fil'], {}), '(fil)\n', (12240, 12245), True, 'import matplotlib.pyplot as plt\n'), ((7701, 7720), 'numpy.sign', 'np.sign', (['self.cache'], {}), '(self.cache)\n', (7708, 7720), True, 'import numpy as np\n'), ((8061, 8080), 'numpy.sign', 'np.sign', (['self.cache'], {}), '(self.cache)\n', (8068, 8080), True, 'import numpy as np\n'), ((8787, 8811), 'numpy.sqrt', 'np.sqrt', (['(1.0 / self.cols)'], {}), '(1.0 / self.cols)\n', (8794, 8811), True, 'import numpy as np\n'), ((9005, 9028), 'numpy.append', 'np.append', (['flatArr', '[1]'], {}), '(flatArr, [1])\n', (9014, 9028), True, 'import numpy as np\n'), ((9310, 9343), 'numpy.sqrt', 'np.sqrt', (['(self.mem_weights + 1e-08)'], {}), '(self.mem_weights + 1e-08)\n', (9317, 9343), True, 'import numpy as np\n'), ((10229, 10265), 'scipy.ndimage.imread', 'ndimage.imread', (['filepath'], {'mode': '"""RGB"""'}), "(filepath, mode='RGB')\n", (10243, 10265), False, 'from scipy import misc, ndimage\n'), ((3704, 3738), 'numpy.zeros', 'np.zeros', (['(self.fsize, self.fsize)'], {}), '((self.fsize, self.fsize))\n', (3712, 3738), True, 'import numpy as np\n'), ((8764, 8788), 'numpy.sqrt', 'np.sqrt', (['(1.0 / self.cols)'], {}), '(1.0 / self.cols)\n', (8771, 8788), True, 'import numpy as np\n'), ((9262, 9294), 'numpy.outer', 'np.outer', (['gradient', 'self.cache.T'], {}), '(gradient, self.cache.T)\n', (9270, 9294), True, 'import numpy as np\n'), ((9370, 9402), 'numpy.dot', 'np.dot', (['self.weights.T', 'gradient'], {}), '(self.weights.T, gradient)\n', (9376, 9402), True, 'import numpy as np\n'), ((11474, 11487), 'numpy.mean', 'np.mean', (['temp'], {}), '(temp)\n', (11481, 11487), True, 'import numpy as np\n'), ((11908, 11928), 'numpy.linalg.norm', 'np.linalg.norm', (['temp'], {}), '(temp)\n', (11922, 11928), True, 'import numpy as np\n'), ((1315, 1356), 'math.sqrt', 'math.sqrt', (['(2 / (self.height * self.width))'], {}), '(2 / (self.height * self.width))\n', (1324, 1356), False, 'import math\n'), ((5407, 5431), 'numpy.sign', 'np.sign', (['self.overhang_h'], {}), '(self.overhang_h)\n', (5414, 5431), True, 'import numpy as np\n'), ((5450, 5474), 'numpy.sign', 'np.sign', (['self.overhang_w'], {}), '(self.overhang_w)\n', (5457, 5474), True, 'import numpy as np\n'), ((5510, 5521), 'numpy.zeros', 'np.zeros', (['(2)'], {}), '(2)\n', (5518, 5521), True, 'import numpy as np\n'), ((5759, 5783), 'numpy.sign', 'np.sign', (['self.overhang_h'], {}), '(self.overhang_h)\n', (5766, 5783), True, 'import numpy as np\n'), ((6728, 6752), 'numpy.array', 'np.array', (['[max_x, max_y]'], {}), '([max_x, max_y])\n', (6736, 6752), True, 'import numpy as np\n'), ((7045, 7069), 'numpy.sign', 'np.sign', (['self.overhang_h'], {}), '(self.overhang_h)\n', (7052, 7069), True, 'import numpy as np\n'), ((5834, 5858), 'numpy.sign', 'np.sign', (['self.overhang_w'], {}), '(self.overhang_w)\n', (5841, 5858), True, 'import 
numpy as np\n'), ((7120, 7144), 'numpy.sign', 'np.sign', (['self.overhang_w'], {}), '(self.overhang_w)\n', (7127, 7144), True, 'import numpy as np\n'), ((5614, 5638), 'numpy.sign', 'np.sign', (['self.overhang_h'], {}), '(self.overhang_h)\n', (5621, 5638), True, 'import numpy as np\n'), ((9408, 9440), 'numpy.dot', 'np.dot', (['self.weights.T', 'gradient'], {}), '(self.weights.T, gradient)\n', (9414, 9440), True, 'import numpy as np\n'), ((12041, 12053), 'numpy.abs', 'np.abs', (['loss'], {}), '(loss)\n', (12047, 12053), True, 'import numpy as np\n'), ((3106, 3150), 'numpy.multiply', 'np.multiply', (['section', 'self.filters[f][layer]'], {}), '(section, self.filters[f][layer])\n', (3117, 3150), True, 'import numpy as np\n'), ((5554, 5578), 'numpy.sign', 'np.sign', (['self.overhang_w'], {}), '(self.overhang_w)\n', (5561, 5578), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# this script runs all examples and checks that they all
# run without throwing an exception
from __future__ import print_function
import os
import sys
from glob import glob
import subprocess as subp
if not os.path.isdir('examples'):
raise IOError('Must run from root dir of none repo')
# check for venv activations
cmd = 'if [ -z "$VIRTUAL_ENV" ];then exit 1;else exit 0;fi'
if subp.call(cmd, shell=True) > 0:
raise IOError('Need to activate the virtualenv')
benchmarks = glob('examples/convnet-benchmarks/*.py')
results = []
for ex in benchmarks:
for dt_arg in ['f16', 'f32']:
print((ex, dt_arg))
ex_bn = os.path.basename(ex)
cmd = "python {} -d {}".format(ex, dt_arg)
rc = subp.call(cmd, shell=True)
results.append([ex, rc])
print('\n\n')
errors = 0
for dat in results:
if dat[1] != 0:
print('FAILURE on {}'.format(dat[0]))
errors += 1
sys.exit(errors)
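# Example of a command this script constructs and runs (illustrative only; the
# actual file names depend on what lives under examples/convnet-benchmarks/):
#   python examples/convnet-benchmarks/<benchmark>.py -d f16
#   python examples/convnet-benchmarks/<benchmark>.py -d f32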
| [
"os.path.isdir",
"os.path.basename",
"subprocess.call",
"sys.exit",
"glob.glob"
] | [((507, 547), 'glob.glob', 'glob', (['"""examples/convnet-benchmarks/*.py"""'], {}), "('examples/convnet-benchmarks/*.py')\n", (511, 547), False, 'from glob import glob\n'), ((949, 965), 'sys.exit', 'sys.exit', (['errors'], {}), '(errors)\n', (957, 965), False, 'import sys\n'), ((231, 256), 'os.path.isdir', 'os.path.isdir', (['"""examples"""'], {}), "('examples')\n", (244, 256), False, 'import os\n'), ((408, 434), 'subprocess.call', 'subp.call', (['cmd'], {'shell': '(True)'}), '(cmd, shell=True)\n', (417, 434), True, 'import subprocess as subp\n'), ((662, 682), 'os.path.basename', 'os.path.basename', (['ex'], {}), '(ex)\n', (678, 682), False, 'import os\n'), ((748, 774), 'subprocess.call', 'subp.call', (['cmd'], {'shell': '(True)'}), '(cmd, shell=True)\n', (757, 774), True, 'import subprocess as subp\n')] |
import dash_core_components as dcc
import dash_html_components as html
import dash
from dash.dependencies import Input, Output, State
import pandas as pd
import components
import data
import figures
from app import app
dfNutririon = data.dfNutririon
jumbotron = html.Div([
html.H4("Recipes", className='display-4'),
html.P("This application is for exploring the recipe data scraped from www.jamieoliver.com.", className='lead'),
html.Hr(className="my-4"),
], className='jumbotron mt-2 pb-1 pt-4')
plot1text = dcc.Markdown(
"""
### Nutrition by category
This is a violin plot showing the distribution of nutritional values of recipes as categorized on [www.jamieoliver.com](https://www.jamieoliver.com).
Use the dropdown boxes to select the category, nutritional variable and unit. The distributions are ordered by their median
value decreasing from left to right. They are also coloured according to their median value.
"""
)
plot1 = html.Div([
html.Div([
html.Div([
html.Label('Category'),
components.nutritionViolinGroupDropDown
], style={'float': 'left', 'width': 150, 'padding-left': '20px'}),
html.Div([
html.Label('Variable'),
components.nutritionViolinTypeDropDown
], style={'float': 'left', 'width': 150, 'padding-left': '10px'}),
html.Div([
html.Label('Unit'),
components.nutritionViolinMetricDropDown
], style={'float': 'left', 'width': 150, 'padding-left': '10px'}),
], className='row justify-content-center'),
html.Div([components.nutritionViolinGraph], style={'width': '100%'})
])
plot2text = dcc.Markdown(
"""
### Ingredient frequency by category
This bar chart shows the frequency of the selected ingredients in each subcategory.
Select multiple ingredients to see the frequency of combinations of ingredients.
"""
)
plot2 = html.Div([
html.Div([
html.Div([
html.Label('Category'),
components.ingredientsGroupDropDown
], style={'float': 'left', 'width': 150, 'padding-left': '20px'}),
html.Div([
html.Label('Ingredient'),
components.ingredientsDropDown
], style={'float': 'left', 'width': 500, 'padding-left': '10px'}),
], className='row justify-content-center'),
html.Div([components.ingredientGraph], style={'width': '100%'})
])
layout = html.Div([
html.Div([
jumbotron,
plot1text
], className='container'),
html.Div([plot1], className='container-fluid mt-8'),
html.Div([plot2text,
plot2], className='container mt-8')
])
@app.callback(Output('ingredientGraph', 'figure'),
[Input('ingredientsGroupDropDown', 'value'),
Input('ingredientsDropDown', 'value')])
def updateIngredientGraph(group, ingredients):
return figures.ingredientDist(dfNutririon, group, ingredients)
@app.callback(Output('nutritionViolinGraph', 'figure'),
[Input('nutritionViolinGroupDropDown', 'value'),
Input('nutritionViolinMetricDropDown', 'value'),
Input('nutritionViolinTypeDropDown', 'value')])
def updateNutritionViolinGraph(group, metric, nutritionType):
return figures.nutritionViolin(dfNutririon, metric, group, nutritionType)
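# ----------------------------------------------------------------------------
# Note (added sketch, not part of the original module): this file only defines
# `layout` and registers callbacks on the shared `app` imported above. Serving
# is assumed to happen in a separate entry-point module, roughly along the
# lines of:
#
#   app.layout = layout
#   if __name__ == "__main__":
#       app.run_server(debug=True)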
| [
"figures.ingredientDist",
"dash.dependencies.Output",
"dash.dependencies.Input",
"dash_html_components.Div",
"dash_html_components.Label",
"dash_core_components.Markdown",
"dash_html_components.Hr",
"dash_html_components.P",
"figures.nutritionViolin",
"dash_html_components.H4"
] | [((525, 950), 'dash_core_components.Markdown', 'dcc.Markdown', (['"""\n### Nutrition by category\n\nThis is a violin plot showing the distribution of nutritional values of recipes as categorized on [www.jamieoliver.com](https://www.jamieoliver.com).\nUse the dropdown boxes to select the category, nutritional variable and unit. The distributions are ordered by their median\nvalue decreasing from left to right. They are also coloured according to their median value.\n"""'], {}), '(\n """\n### Nutrition by category\n\nThis is a violin plot showing the distribution of nutritional values of recipes as categorized on [www.jamieoliver.com](https://www.jamieoliver.com).\nUse the dropdown boxes to select the category, nutritional variable and unit. The distributions are ordered by their median\nvalue decreasing from left to right. They are also coloured according to their median value.\n"""\n )\n', (537, 950), True, 'import dash_core_components as dcc\n'), ((1787, 2021), 'dash_core_components.Markdown', 'dcc.Markdown', (['"""\n### Ingredient frequency by category\n\nThis bar chart shows the frequency of the selected ingridents in each subcategory. \nSelect multiple ingredients to see the frequency of combinations of ingredients.\n"""'], {}), '(\n """\n### Ingredient frequency by category\n\nThis bar chart shows the frequency of the selected ingridents in each subcategory. \nSelect multiple ingredients to see the frequency of combinations of ingredients.\n"""\n )\n', (1799, 2021), True, 'import dash_core_components as dcc\n'), ((3150, 3205), 'figures.ingredientDist', 'figures.ingredientDist', (['dfNutririon', 'group', 'ingredients'], {}), '(dfNutririon, group, ingredients)\n', (3172, 3205), False, 'import figures\n'), ((2936, 2971), 'dash.dependencies.Output', 'Output', (['"""ingredientGraph"""', '"""figure"""'], {}), "('ingredientGraph', 'figure')\n", (2942, 2971), False, 'from dash.dependencies import Input, Output, State\n'), ((3527, 3593), 'figures.nutritionViolin', 'figures.nutritionViolin', (['dfNutririon', 'metric', 'group', 'nutritionType'], {}), '(dfNutririon, metric, group, nutritionType)\n', (3550, 3593), False, 'import figures\n'), ((3222, 3262), 'dash.dependencies.Output', 'Output', (['"""nutritionViolinGraph"""', '"""figure"""'], {}), "('nutritionViolinGraph', 'figure')\n", (3228, 3262), False, 'from dash.dependencies import Input, Output, State\n'), ((280, 321), 'dash_html_components.H4', 'html.H4', (['"""Recipes"""'], {'className': '"""display-4"""'}), "('Recipes', className='display-4')\n", (287, 321), True, 'import dash_html_components as html\n'), ((327, 448), 'dash_html_components.P', 'html.P', (['"""This application is for exploring the recipe data scraped from www.jamieoliver.com."""'], {'className': '"""lead"""'}), "(\n 'This application is for exploring the recipe data scraped from www.jamieoliver.com.'\n , className='lead')\n", (333, 448), True, 'import dash_html_components as html\n'), ((444, 469), 'dash_html_components.Hr', 'html.Hr', ([], {'className': '"""my-4"""'}), "(className='my-4')\n", (451, 469), True, 'import dash_html_components as html\n'), ((1694, 1762), 'dash_html_components.Div', 'html.Div', (['[components.nutritionViolinGraph]'], {'style': "{'width': '100%'}"}), "([components.nutritionViolinGraph], style={'width': '100%'})\n", (1702, 1762), True, 'import dash_html_components as html\n'), ((2543, 2606), 'dash_html_components.Div', 'html.Div', (['[components.ingredientGraph]'], {'style': "{'width': '100%'}"}), "([components.ingredientGraph], 
style={'width': '100%'})\n", (2551, 2606), True, 'import dash_html_components as html\n'), ((2651, 2706), 'dash_html_components.Div', 'html.Div', (['[jumbotron, plot1text]'], {'className': '"""container"""'}), "([jumbotron, plot1text], className='container')\n", (2659, 2706), True, 'import dash_html_components as html\n'), ((2766, 2817), 'dash_html_components.Div', 'html.Div', (['[plot1]'], {'className': '"""container-fluid mt-8"""'}), "([plot1], className='container-fluid mt-8')\n", (2774, 2817), True, 'import dash_html_components as html\n'), ((2831, 2887), 'dash_html_components.Div', 'html.Div', (['[plot2text, plot2]'], {'className': '"""container mt-8"""'}), "([plot2text, plot2], className='container mt-8')\n", (2839, 2887), True, 'import dash_html_components as html\n'), ((2988, 3030), 'dash.dependencies.Input', 'Input', (['"""ingredientsGroupDropDown"""', '"""value"""'], {}), "('ingredientsGroupDropDown', 'value')\n", (2993, 3030), False, 'from dash.dependencies import Input, Output, State\n'), ((3047, 3084), 'dash.dependencies.Input', 'Input', (['"""ingredientsDropDown"""', '"""value"""'], {}), "('ingredientsDropDown', 'value')\n", (3052, 3084), False, 'from dash.dependencies import Input, Output, State\n'), ((3279, 3325), 'dash.dependencies.Input', 'Input', (['"""nutritionViolinGroupDropDown"""', '"""value"""'], {}), "('nutritionViolinGroupDropDown', 'value')\n", (3284, 3325), False, 'from dash.dependencies import Input, Output, State\n'), ((3342, 3389), 'dash.dependencies.Input', 'Input', (['"""nutritionViolinMetricDropDown"""', '"""value"""'], {}), "('nutritionViolinMetricDropDown', 'value')\n", (3347, 3389), False, 'from dash.dependencies import Input, Output, State\n'), ((3406, 3451), 'dash.dependencies.Input', 'Input', (['"""nutritionViolinTypeDropDown"""', '"""value"""'], {}), "('nutritionViolinTypeDropDown', 'value')\n", (3411, 3451), False, 'from dash.dependencies import Input, Output, State\n'), ((1034, 1056), 'dash_html_components.Label', 'html.Label', (['"""Category"""'], {}), "('Category')\n", (1044, 1056), True, 'import dash_html_components as html\n'), ((1249, 1271), 'dash_html_components.Label', 'html.Label', (['"""Variable"""'], {}), "('Variable')\n", (1259, 1271), True, 'import dash_html_components as html\n'), ((1462, 1480), 'dash_html_components.Label', 'html.Label', (['"""Unit"""'], {}), "('Unit')\n", (1472, 1480), True, 'import dash_html_components as html\n'), ((2104, 2126), 'dash_html_components.Label', 'html.Label', (['"""Category"""'], {}), "('Category')\n", (2114, 2126), True, 'import dash_html_components as html\n'), ((2315, 2339), 'dash_html_components.Label', 'html.Label', (['"""Ingredient"""'], {}), "('Ingredient')\n", (2325, 2339), True, 'import dash_html_components as html\n')] |
import math
x = float(input('digite o cateto adjacente '))
y = float(input('digite o cateto oposto '))
a = math.hypot(x, y)
print(f'o valor da hipotenusa é {a}')
# calculation by hand (without math.hypot)
co = float(input('digite o cateto oposto '))
ca = float(input('digite o cateto adjacente '))
hip = co*co + ca*ca
hipo = math.sqrt(hip)
print(f'a hipotenusa é {hipo}')
# now using the cosine
cosseno = float(input('digite o angulo do cosseno '))
cateto = float(input('digite o tamanho do cateto '))
hipotenusa = cateto/math.cos(math.radians(cosseno))  # the angle is entered in degrees, so convert to radians before cos()
print(f'o valor da hipotenusa é {hipotenusa}')
print(f'cosseno de {cosseno} é {math.cos(math.radians(cosseno))}')
# now using the sine
seno = float(input('digite o angulo do seno '))
catsen = float(input('digite o tamanho do cateto '))
hipotsen = catsen/math.sin(math.radians(seno))  # convert the angle from degrees to radians before sin()
print(f'o valor da hipotenusa é {hipotsen}')
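# Worked example (added for illustration): for the classic 3-4-5 right triangle,
# math.hypot(3, 4) == 5.0 and math.sqrt(3*3 + 4*4) == 5.0. Using the cosine form
# with a 60 degree angle and an adjacent side of 2, the hypotenuse is
# 2 / math.cos(math.radians(60)) = 4.0.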
| [
"math.hypot",
"math.radians",
"math.sqrt",
"math.degrees"
] | [((108, 124), 'math.hypot', 'math.hypot', (['x', 'y'], {}), '(x, y)\n', (118, 124), False, 'import math\n'), ((303, 317), 'math.sqrt', 'math.sqrt', (['hip'], {}), '(hip)\n', (312, 317), False, 'import math\n'), ((511, 532), 'math.degrees', 'math.degrees', (['cosseno'], {}), '(cosseno)\n', (523, 532), False, 'import math\n'), ((793, 811), 'math.degrees', 'math.degrees', (['seno'], {}), '(seno)\n', (805, 811), False, 'import math\n'), ((624, 645), 'math.radians', 'math.radians', (['cosseno'], {}), '(cosseno)\n', (636, 645), False, 'import math\n')] |
"""
<NAME>
MSc Bioinformatics
University of Copenhagen
November 2017
"""
import glob
import os
import numpy as np
from pathlib import Path
from .image_pair import ImagePair
from mpunet.logging import ScreenLogger
class ImagePairLoader(object):
"""
ImagePair data loader object
Represents a collection of ImagePairs
"""
def __init__(self,
base_dir="./",
img_subdir="images",
label_subdir="labels",
logger=None,
sample_weight=1.0,
bg_class=0,
predict_mode=False,
initialize_empty=False,
no_log=False,
identifier=None,
**kwargs):
"""
Initializes the ImagePairLoader object from all .nii files in a folder
or pair of folders if labels are also specified.
If initialize_empty=False, the following actions are taken immediately
on initialization:
- All .nii/.nii.gz image files are found in base_dir/img_subdir
- Unless predict_mode=True, finds all .nii/.nii.gz label files in
base_dir/label_subdir
- ImagePair objects are established for all images/image-label
pairs. Not that since ImagePairs do not eagerly load data,
the ImagePairLoader also does not immediately load data into mem
If initialize_empty=True, the class is initialized but no images are
loaded. Images can be manually added through the add_image and
add_files methods.
Args:
base_dir: A path to a directory storing the 'img_subdir'
and 'label_subdir' sub-folders
img_subdir: Name of sub-folder storing .nii images files
label_subdir: Name of sub-folder storing .nii labels files
logger: mpunet logger object
sample_weight: A float giving a global sample weight assigned
to all images loaded by the ImagePairLoader
bg_class Background class integer to pass to all
ImagePair objects. Usually int(0).
predict_mode: Boolean whether labels exist for the images.
If True, the labels are assumed stored in the
label_subdir with names identical to the images
initialize_empty: Boolean, if True do not load any images at init
This may be useful for manually assigning
individual image files to the object.
no_log: Boolean, whether to not log to screen/file
identifier: Optional name for the dataset
**kwargs: Other keywords arguments
"""
self.logger = logger if logger is not None else ScreenLogger()
# Set absolute paths to main folder, image folder and label folder
self.data_dir = Path(base_dir).absolute()
self.images_path = self.data_dir / img_subdir
self.identifier = self.data_dir.name
# Labels included?
self.predict_mode = predict_mode or not label_subdir
if not predict_mode:
self.labels_path = self.data_dir / label_subdir
else:
self.labels_path = None
# Load images unless initialize_empty is specified
if not initialize_empty:
# Get paths to all images
self.image_paths = self.get_image_paths()
if not predict_mode:
# Get paths to labels if included
self.label_paths = self.get_label_paths(img_subdir,
label_subdir)
else:
self.label_paths = None
# Load all nii objects
self.images = self.get_image_objects(sample_weight, bg_class)
else:
self.images = []
if not initialize_empty and not self.image_paths:
raise OSError("No image files found at %s." % self.images_path)
if not initialize_empty and not predict_mode and not self.label_paths:
raise OSError("No label files found at %s." % self.labels_path)
self._id_to_image = self.get_id_to_images_dict()
if not no_log:
self._log()
def __str__(self):
return "ImagePairLoader(id={}, images={}, data_dir={})".format(
self.identifier, len(self), self.data_dir
)
def __repr__(self):
return self.__str__()
def __getitem__(self, item):
return self.images[item]
def __iter__(self):
for im in self.images:
yield im
def __len__(self):
return len(self.images)
def _log(self):
self.logger(str(self))
self.logger("--- Image subdir: %s\n--- Label subdir: %s" % (self.images_path,
self.labels_path))
def load(self):
""" Invokes the 'load' method on all ImagePairs """
for image in self:
image.load()
def unload(self):
""" Invokes the 'unload' method on all ImagePairs """
for image in self:
image.unload()
@property
def id_to_image(self):
"""
Returns:
A dictionary of image IDs pointing to image objects
"""
return self._id_to_image
def get_id_to_images_dict(self):
return {image.identifier: image for image in self}
@property
def n_loaded(self):
return sum([image.is_loaded for image in self.images])
def get_by_id(self, image_id):
"""
Get a specific ImagePair by its string identifier
Args:
image_id: String identifier of an ImagePair
Returns:
An ImagePair
"""
return self.id_to_image[image_id]
def get_random(self, N=1, unique=False):
"""
        Yield N random images, with or without re-sampling (this method is a
        generator).
        Args:
            N:      Int, number of randomly sampled images to yield
            unique: Bool, whether the sampled images should be all unique
        Yields:
            ImagePair objects, one at a time
"""
returned = []
while len(returned) < N:
if self.queue:
with self.queue.get() as image:
if unique and image in returned:
continue
else:
returned.append(image)
yield image
else:
image = self.images[np.random.randint(len(self))]
if unique and image in returned:
continue
else:
returned.append(image)
yield image
def _get_paths_from_list_file(self, base_path, fname="LIST_OF_FILES.txt"):
"""
Loads a set of paths pointing to .nii files in 'base_path'.
This method is used in the rare cases that images are not directly
        stored in self.images_path or self.labels_path, but those folders store
        a file named 'fname' with one absolute path per line pointing to the
images to load.
Args:
base_path: A path to a folder
fname: The filename of the file at 'base_path' that stores the
paths to return
Returns:
A list of path strings
"""
# Check if a file listing paths exists instead of actual files at the
# image sub folder path
list_file_path = base_path / fname
images = []
if os.path.exists(list_file_path):
with open(list_file_path, "r") as in_f:
for path in in_f:
path = path.strip()
if not path:
continue
images.append(path)
else:
raise OSError("File '%s' does not exist. Did you specify "
"the correct img_subdir?" % list_file_path)
return images
def get_image_paths(self):
"""
Return a list of paths to all image files in the self.images_path folder
Returns:
A list of pathlib.Path
"""
images = sorted(glob.glob(str(self.images_path / "*.nii*")))
if not images:
# Try to load from a file listing paths at the location
# This is sometimes a format created by the cv_split.py script
images = self._get_paths_from_list_file(self.images_path)
return [Path(p) for p in images]
def get_label_paths(self, img_subdir, label_subdir):
"""
Return a list of paths to all label files in the self.labels_path folder
The label paths are assumed to be identical to the image paths with the
image subdir name replaced by the label subdir name.
Args:
img_subdir: String, name of the image sub-folder
label_subdir: String, name of the label sub-folder
Returns:
A list of pathlib.Path
"""
if any([img_subdir not in str(p) for p in self.image_paths]):
raise ValueError("Mismatch between image paths and specified "
"img_subdir. The subdir was not found in one or"
" more image paths - Do the paths in "
"LIST_OF_FILES.txt point to a subdir of name "
"'%s'?" % img_subdir)
return [p.parent.parent / label_subdir / p.name for p in self.image_paths]
def get_image_objects(self, sample_weight, bg_class):
"""
Initialize all ImagePair objects from paths at self.image_paths and
self.label_paths (if labels exist). Note that data is not loaded
eagerly.
Args:
sample_weight: A float giving the weight to assign to the ImagePair
bg_class: Background (integer) class
Returns:
A list of initialized ImagePairs
"""
image_objects = []
if self.predict_mode:
for img_path in self.image_paths:
image = ImagePair(img_path,
sample_weight=sample_weight,
bg_class=bg_class,
logger=self.logger)
image_objects.append(image)
else:
for img_path, label_path in zip(self.image_paths, self.label_paths):
image = ImagePair(img_path, label_path,
sample_weight=sample_weight,
bg_class=bg_class,
logger=self.logger)
image_objects.append(image)
return image_objects
def add_image(self, image_pair):
"""
Add a single ImagePair object to the ImagePairLoader
Args:
image_pair: An ImagePair
"""
self.images.append(image_pair)
# Update ID dict
self._id_to_image = self.get_id_to_images_dict()
def add_images(self, image_pair_loader):
"""
Add a set of ImagePair objects to the ImagePairLoader. Input can be
either a different ImagePairLoader object or a list of ImagePairs.
Args:
image_pair_loader: ImagePairLoader or list of ImagePairs
Returns:
self
"""
try:
self.images += image_pair_loader.images
except AttributeError:
# Passed as list?
self.images += list(image_pair_loader)
# Update ID dict
self._id_to_image = self.get_id_to_images_dict()
return self
def get_maximum_real_dim(self):
"""
Returns the longest distance in mm covered by any axis across images
of this ImagePairLoader.
Returns:
A float
"""
from mpunet.interpolation.sample_grid import get_maximum_real_dim
return np.max([get_maximum_real_dim(f.image_obj) for f in self])
def set_scaler_and_bg_values(self, bg_value, scaler, compute_now=False):
"""
Loads all images and prepares them for iso-live view interpolation
training by performing the following operations on each:
1) Loads the image and labels if not already loaded (transparent)
2) Define proper background value
3) Setting multi-channel scaler
4) Setting interpolator object
Args:
bg_value: See ImagePair.set_bg_value
scaler: See ImagePair.set_scaler
compute_now: TODO
"""
# Run over volumes: scale, set interpolator, check for affine
for image in self.id_to_image.values():
image.set_bg_value(bg_value, compute_now=compute_now)
image.set_scaler(scaler, compute_now=compute_now)
image.log_image()
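# ----------------------------------------------------------------------------
# Minimal usage sketch (added example, not part of the original module). The
# directory layout is an assumption: a base folder holding 'images' and
# 'labels' sub-folders with matching .nii/.nii.gz files; the import path is
# also an assumption.
#
#   from mpunet.image import ImagePairLoader
#   loader = ImagePairLoader(base_dir="./data_folder",
#                            img_subdir="images",
#                            label_subdir="labels")
#   for image_pair in loader:
#       print(image_pair.identifier)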
| [
"os.path.exists",
"mpunet.interpolation.sample_grid.get_maximum_real_dim",
"mpunet.logging.ScreenLogger",
"pathlib.Path"
] | [((7781, 7811), 'os.path.exists', 'os.path.exists', (['list_file_path'], {}), '(list_file_path)\n', (7795, 7811), False, 'import os\n'), ((2966, 2980), 'mpunet.logging.ScreenLogger', 'ScreenLogger', ([], {}), '()\n', (2978, 2980), False, 'from mpunet.logging import ScreenLogger\n'), ((8733, 8740), 'pathlib.Path', 'Path', (['p'], {}), '(p)\n', (8737, 8740), False, 'from pathlib import Path\n'), ((3081, 3095), 'pathlib.Path', 'Path', (['base_dir'], {}), '(base_dir)\n', (3085, 3095), False, 'from pathlib import Path\n'), ((12195, 12228), 'mpunet.interpolation.sample_grid.get_maximum_real_dim', 'get_maximum_real_dim', (['f.image_obj'], {}), '(f.image_obj)\n', (12215, 12228), False, 'from mpunet.interpolation.sample_grid import get_maximum_real_dim\n')] |
import argparse
import os
import sys
import time
from Utils import run_cmd, run_cmds, run_cmd_capture_output
from Utils import SUCCESS, FAILURE
from release_tools import find_conda_activate, create_fake_feedstock
from release_tools import prep_conda_env, check_if_conda_forge_pkg, clone_feedstock
from release_tools import clone_repo, prepare_recipe_in_local_feedstock_repo
from release_tools import copy_files_from_repo
from release_tools import prepare_recipe_in_local_repo, rerender, do_build
from release_tools import rerender_in_local_feedstock, build_in_local_feedstock
from release_tools import rerender_in_local_repo, build_in_local_repo, get_git_rev
l = time.localtime()
cwd = os.getcwd()
#
# This script is to be run under a CDAT project repo directory.
#
# This script can be used to build CDAT packages that go to cdat channel
# (ex: cdat/label/nightly) and CDAT packages that go to cdat channel
# but eventually will get uploaded to conda-forge (i.e. packages that have
# conda-forge feedstock repo.
#
# For conda-forge packages:
# + clone the feedstock to <workdir>/<pkg_name>-feedstock directory.
# + clone the project repo to <workdir>/<repo_name> directory.
# + if project repo has recipe/meta.yaml.in, will build using the project repo recipe.
# This should be the case when the project branch is modifying the recipe
# (i.e. different from the feedstock's recipe).
# IMPORTANT: when we release the package to conda-forge, we have to remove
# the project repo's recipe.
#
# For non conda-forge packages (packages that are uploaded to cdat/label/nightly
# or cdat/label/<release>:
# + clone the project repo to <workdir>/<repo_name> directory.
#
# The need to reclone the project repo is because rerendering will
# overwrite .circleci/config.yml, and this is problematic if we are running
# this script in CircleCI.
#
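#
# Illustrative invocation (added example; the script file name and the values
# shown are assumptions -- only the flags defined below are real):
#
#   python <this_script>.py -p vcs -r vcs -b master -l 8.2 --workdir /tmp/build \
#       --do_rerender --do_build --build_version 3.7
#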
conda_rc = os.path.join(os.getcwd(), "condarc")
parser = argparse.ArgumentParser(
description='conda build upload',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("-p", "--package_name",
help="Package name to build")
parser.add_argument("-o", "--organization",
help="github organization name", default="CDAT")
parser.add_argument("-r", "--repo_name",
help="repo name to build")
parser.add_argument("-b", "--branch", default='master', help="branch to build")
parser.add_argument("-v", "--version",
help="version are we building for")
parser.add_argument("-l", "--last_stable",
help="last stable (released) version, specify this when building for nightly")
parser.add_argument("-w", "--workdir", default=cwd, help="work full path directory")
parser.add_argument("-B", "--build", default="0", help="build number, this should be 0 for nightly")
parser.add_argument("-C", "--conda_clean", action='store_true', help="do 'conda clean --all'")
parser.add_argument("--do_rerender", action='store_true', help="do 'conda smithy rerender'")
parser.add_argument("--do_build", action='store_true', help="do 'conda build -m <variant file> ...'")
parser.add_argument("--build_version", default="3.7", help="specify python version to build 2.7, 3.7, 3.8")
parser.add_argument("--conda_env", default="base", help="Conda environment to use, will be created if it doesn't exist")
parser.add_argument("--extra_channels", nargs="+", type=str, default=[])
parser.add_argument("--ignore_conda_missmatch", action="store_true", help="Will skip checking if packages are uptodate when rerendering recipe.")
parser.add_argument("--conda_rc", default=conda_rc, help="File to use for condarc")
parser.add_argument("--conda_activate", help="Path to conda activate script.")
parser.add_argument("--copy_conda_package", help="Copies output conda package to directory")
parser.add_argument("--local_repo", help="Path to local project repository")
args = parser.parse_args(sys.argv[1:])
print(args)
pkg_name = args.package_name
branch = args.branch
workdir = args.workdir
build = args.build
do_conda_clean = args.conda_clean
local_repo = args.local_repo
if local_repo is not None and not os.path.exists(local_repo):
print("Local repository {} does not exist".format(local_repo))
sys.exit(FAILURE)
status = FAILURE
# for calling run_cmds
join_stderr = True
shell_cmd = False
verbose = True
version = None
def construct_pkg_ver(repo_dir, arg_version, arg_last_stable):
git_rev = get_git_rev(repo_dir)
if arg_version:
# we are building for a release of a non conda-forge package
version = arg_version
else:
# we are building for nightly
today2 = "%s.%.2i.%.2i.%.2i.%.2i.%.2i.%s" % (arg_last_stable, l.tm_year, l.tm_mon, l.tm_mday, l.tm_hour, l.tm_min, git_rev)
version = today2
return version
#
# main
#
kwargs = vars(args)
kwargs["conda_activate"] = args.conda_activate or find_conda_activate()
if kwargs["repo_name"] is None:
kwargs["repo_name"] = pkg_name
repo_name = kwargs["repo_name"]
if kwargs["conda_activate"] is None or not os.path.exists(kwargs["conda_activate"]):
print("Could not find conda activate script, try passing with --conda_activate argument and check file exists")
sys.exit(FAILURE)
is_conda_forge_pkg = check_if_conda_forge_pkg(pkg_name)
status = prep_conda_env(**kwargs)
if status != SUCCESS:
sys.exit(status)
if args.do_rerender:
if local_repo is None:
ret, repo_dir = clone_repo(**kwargs)
if ret != SUCCESS:
sys.exit(ret)
else:
repo_dir = local_repo
kwargs["version"] = version = construct_pkg_ver(repo_dir, args.version, args.last_stable)
else:
if local_repo is None:
repo_dir = os.path.join(workdir, repo_name)
else:
repo_dir = local_repo
print("repo_dir: {d}".format(d=repo_dir))
files = ["recipe/conda_build_config.yaml",
"recipe/build.sh",
".ci_support/migrations/python38.yaml",
".ci_support/migrations/hdf51106.yaml"]
if is_conda_forge_pkg:
if args.do_rerender:
status = clone_feedstock(**kwargs)
if status != SUCCESS:
sys.exit(status)
status = prepare_recipe_in_local_feedstock_repo(pkg_version=version, repo_dir=repo_dir, **kwargs)
if status != SUCCESS:
sys.exit(status)
status = copy_files_from_repo(repo_dir=repo_dir, filenames=files, **kwargs)
if status != SUCCESS:
sys.exit(status)
status = rerender_in_local_feedstock(**kwargs)
if args.do_build:
status = build_in_local_feedstock(**kwargs)
else:
print("Building non conda-forge package")
print("...branch: {b}".format(b=branch))
print("...build: {b}".format(b=build))
print("...repo_dir: {d}".format(d=repo_dir))
if args.do_rerender:
status = prepare_recipe_in_local_repo(repo_dir=repo_dir, **kwargs)
if status != SUCCESS:
sys.exit(status)
# Create a fake feedstock in the workdir to run conda smithy in
feedstock_dir = create_fake_feedstock(repo_dir=repo_dir, **kwargs)
status = copy_files_from_repo(repo_dir=repo_dir, filenames=files, **kwargs)
if status != SUCCESS:
sys.exit(status)
status = rerender_in_local_repo(repo_dir=feedstock_dir, **kwargs)
else:
feedstock_dir = os.path.join(workdir, "{}-feedstock".format(pkg_name))
if args.do_build:
status = build_in_local_repo(repo_dir=feedstock_dir, **kwargs)
sys.exit(status)
| [
"release_tools.rerender_in_local_repo",
"release_tools.get_git_rev",
"sys.exit",
"os.path.exists",
"argparse.ArgumentParser",
"release_tools.find_conda_activate",
"time.localtime",
"release_tools.prep_conda_env",
"release_tools.prepare_recipe_in_local_repo",
"release_tools.copy_files_from_repo",
"release_tools.create_fake_feedstock",
"release_tools.build_in_local_repo",
"release_tools.clone_repo",
"release_tools.prepare_recipe_in_local_feedstock_repo",
"release_tools.clone_feedstock",
"os.path.join",
"os.getcwd",
"release_tools.check_if_conda_forge_pkg",
"release_tools.build_in_local_feedstock",
"release_tools.rerender_in_local_feedstock"
] | [((666, 682), 'time.localtime', 'time.localtime', ([], {}), '()\n', (680, 682), False, 'import time\n'), ((689, 700), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (698, 700), False, 'import os\n'), ((1907, 2025), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""conda build upload"""', 'formatter_class': 'argparse.ArgumentDefaultsHelpFormatter'}), "(description='conda build upload', formatter_class=\n argparse.ArgumentDefaultsHelpFormatter)\n", (1930, 2025), False, 'import argparse\n'), ((5265, 5299), 'release_tools.check_if_conda_forge_pkg', 'check_if_conda_forge_pkg', (['pkg_name'], {}), '(pkg_name)\n', (5289, 5299), False, 'from release_tools import prep_conda_env, check_if_conda_forge_pkg, clone_feedstock\n'), ((5310, 5334), 'release_tools.prep_conda_env', 'prep_conda_env', ([], {}), '(**kwargs)\n', (5324, 5334), False, 'from release_tools import prep_conda_env, check_if_conda_forge_pkg, clone_feedstock\n'), ((7490, 7506), 'sys.exit', 'sys.exit', (['status'], {}), '(status)\n', (7498, 7506), False, 'import sys\n'), ((1873, 1884), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1882, 1884), False, 'import os\n'), ((4243, 4260), 'sys.exit', 'sys.exit', (['FAILURE'], {}), '(FAILURE)\n', (4251, 4260), False, 'import sys\n'), ((4448, 4469), 'release_tools.get_git_rev', 'get_git_rev', (['repo_dir'], {}), '(repo_dir)\n', (4459, 4469), False, 'from release_tools import rerender_in_local_repo, build_in_local_repo, get_git_rev\n'), ((4897, 4918), 'release_tools.find_conda_activate', 'find_conda_activate', ([], {}), '()\n', (4916, 4918), False, 'from release_tools import find_conda_activate, create_fake_feedstock\n'), ((5225, 5242), 'sys.exit', 'sys.exit', (['FAILURE'], {}), '(FAILURE)\n', (5233, 5242), False, 'import sys\n'), ((5361, 5377), 'sys.exit', 'sys.exit', (['status'], {}), '(status)\n', (5369, 5377), False, 'import sys\n'), ((4144, 4170), 'os.path.exists', 'os.path.exists', (['local_repo'], {}), '(local_repo)\n', (4158, 4170), False, 'import os\n'), ((5063, 5103), 'os.path.exists', 'os.path.exists', (["kwargs['conda_activate']"], {}), "(kwargs['conda_activate'])\n", (5077, 5103), False, 'import os\n'), ((5451, 5471), 'release_tools.clone_repo', 'clone_repo', ([], {}), '(**kwargs)\n', (5461, 5471), False, 'from release_tools import clone_repo, prepare_recipe_in_local_feedstock_repo\n'), ((5712, 5744), 'os.path.join', 'os.path.join', (['workdir', 'repo_name'], {}), '(workdir, repo_name)\n', (5724, 5744), False, 'import os\n'), ((6063, 6088), 'release_tools.clone_feedstock', 'clone_feedstock', ([], {}), '(**kwargs)\n', (6078, 6088), False, 'from release_tools import prep_conda_env, check_if_conda_forge_pkg, clone_feedstock\n'), ((6166, 6259), 'release_tools.prepare_recipe_in_local_feedstock_repo', 'prepare_recipe_in_local_feedstock_repo', ([], {'pkg_version': 'version', 'repo_dir': 'repo_dir'}), '(pkg_version=version, repo_dir=\n repo_dir, **kwargs)\n', (6204, 6259), False, 'from release_tools import clone_repo, prepare_recipe_in_local_feedstock_repo\n'), ((6332, 6398), 'release_tools.copy_files_from_repo', 'copy_files_from_repo', ([], {'repo_dir': 'repo_dir', 'filenames': 'files'}), '(repo_dir=repo_dir, filenames=files, **kwargs)\n', (6352, 6398), False, 'from release_tools import copy_files_from_repo\n'), ((6476, 6513), 'release_tools.rerender_in_local_feedstock', 'rerender_in_local_feedstock', ([], {}), '(**kwargs)\n', (6503, 6513), False, 'from release_tools import rerender_in_local_feedstock, build_in_local_feedstock\n'), ((6554, 6588), 
'release_tools.build_in_local_feedstock', 'build_in_local_feedstock', ([], {}), '(**kwargs)\n', (6578, 6588), False, 'from release_tools import rerender_in_local_feedstock, build_in_local_feedstock\n'), ((6822, 6879), 'release_tools.prepare_recipe_in_local_repo', 'prepare_recipe_in_local_repo', ([], {'repo_dir': 'repo_dir'}), '(repo_dir=repo_dir, **kwargs)\n', (6850, 6879), False, 'from release_tools import prepare_recipe_in_local_repo, rerender, do_build\n'), ((7036, 7086), 'release_tools.create_fake_feedstock', 'create_fake_feedstock', ([], {'repo_dir': 'repo_dir'}), '(repo_dir=repo_dir, **kwargs)\n', (7057, 7086), False, 'from release_tools import find_conda_activate, create_fake_feedstock\n'), ((7105, 7171), 'release_tools.copy_files_from_repo', 'copy_files_from_repo', ([], {'repo_dir': 'repo_dir', 'filenames': 'files'}), '(repo_dir=repo_dir, filenames=files, **kwargs)\n', (7125, 7171), False, 'from release_tools import copy_files_from_repo\n'), ((7249, 7305), 'release_tools.rerender_in_local_repo', 'rerender_in_local_repo', ([], {'repo_dir': 'feedstock_dir'}), '(repo_dir=feedstock_dir, **kwargs)\n', (7271, 7305), False, 'from release_tools import rerender_in_local_repo, build_in_local_repo, get_git_rev\n'), ((7435, 7488), 'release_tools.build_in_local_repo', 'build_in_local_repo', ([], {'repo_dir': 'feedstock_dir'}), '(repo_dir=feedstock_dir, **kwargs)\n', (7454, 7488), False, 'from release_tools import rerender_in_local_repo, build_in_local_repo, get_git_rev\n'), ((5511, 5524), 'sys.exit', 'sys.exit', (['ret'], {}), '(ret)\n', (5519, 5524), False, 'import sys\n'), ((6131, 6147), 'sys.exit', 'sys.exit', (['status'], {}), '(status)\n', (6139, 6147), False, 'import sys\n'), ((6297, 6313), 'sys.exit', 'sys.exit', (['status'], {}), '(status)\n', (6305, 6313), False, 'import sys\n'), ((6441, 6457), 'sys.exit', 'sys.exit', (['status'], {}), '(status)\n', (6449, 6457), False, 'import sys\n'), ((6922, 6938), 'sys.exit', 'sys.exit', (['status'], {}), '(status)\n', (6930, 6938), False, 'import sys\n'), ((7214, 7230), 'sys.exit', 'sys.exit', (['status'], {}), '(status)\n', (7222, 7230), False, 'import sys\n')] |
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2017-08-07 01:59
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('recipe', '0007_auto_20170723_2046'),
]
operations = [
migrations.CreateModel(
name='SubRecipe',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('quantity', models.IntegerField(blank=True, null=True, verbose_name='quantity')),
('measurement', models.TextField(blank=True, null=True, verbose_name='measurement')),
('child_recipe', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='child_recipe', to='recipe.Recipe', verbose_name='subrecipe')),
('parent_recipe', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='parent_recipe', to='recipe.Recipe', verbose_name='parent_recipe')),
],
),
migrations.AddField(
model_name='recipe',
name='subrecipes',
field=models.ManyToManyField(through='recipe.SubRecipe', to='recipe.Recipe', verbose_name='subrecipes'),
),
]
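# Illustrative queries against the schema created above (added sketch; the
# objects and values are assumptions, the field names come from the operations):
#
#   parent = Recipe.objects.get(pk=1)
#   parent.subrecipes.all()                  # recipes linked through SubRecipe
#   SubRecipe.objects.filter(parent_recipe=parent).values('quantity', 'measurement')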
| [
"django.db.models.TextField",
"django.db.models.ForeignKey",
"django.db.models.IntegerField",
"django.db.models.ManyToManyField",
"django.db.models.AutoField"
] | [((1232, 1333), 'django.db.models.ManyToManyField', 'models.ManyToManyField', ([], {'through': '"""recipe.SubRecipe"""', 'to': '"""recipe.Recipe"""', 'verbose_name': '"""subrecipes"""'}), "(through='recipe.SubRecipe', to='recipe.Recipe',\n verbose_name='subrecipes')\n", (1254, 1333), False, 'from django.db import migrations, models\n'), ((428, 521), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (444, 521), False, 'from django.db import migrations, models\n'), ((549, 616), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'blank': '(True)', 'null': '(True)', 'verbose_name': '"""quantity"""'}), "(blank=True, null=True, verbose_name='quantity')\n", (568, 616), False, 'from django.db import migrations, models\n'), ((651, 718), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)', 'verbose_name': '"""measurement"""'}), "(blank=True, null=True, verbose_name='measurement')\n", (667, 718), False, 'from django.db import migrations, models\n'), ((754, 906), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'null': '(True)', 'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""child_recipe"""', 'to': '"""recipe.Recipe"""', 'verbose_name': '"""subrecipe"""'}), "(null=True, on_delete=django.db.models.deletion.CASCADE,\n related_name='child_recipe', to='recipe.Recipe', verbose_name='subrecipe')\n", (771, 906), False, 'from django.db import migrations, models\n'), ((939, 1101), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'null': '(True)', 'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""parent_recipe"""', 'to': '"""recipe.Recipe"""', 'verbose_name': '"""parent_recipe"""'}), "(null=True, on_delete=django.db.models.deletion.CASCADE,\n related_name='parent_recipe', to='recipe.Recipe', verbose_name=\n 'parent_recipe')\n", (956, 1101), False, 'from django.db import migrations, models\n')] |
import logging
import time
from django.core.management import BaseCommand, CommandError
from polaris.deposit.utils import create_stellar_deposit
from polaris.integrations import registered_deposit_integration as rdi
from polaris.models import Transaction
logger = logging.getLogger(__name__)
def execute_deposit(transaction: Transaction) -> bool:
"""
The external deposit has been completed, so the transaction
status must now be updated to *pending_anchor*. Executes the
transaction by calling :func:`create_stellar_deposit`.
:param transaction: the transaction to be executed
:returns a boolean of whether or not the transaction was
completed successfully on the Stellar network.
"""
if transaction.kind != transaction.KIND.deposit:
raise ValueError("Transaction not a deposit")
elif transaction.status != transaction.STATUS.pending_user_transfer_start:
raise ValueError(
f"Unexpected transaction status: {transaction.status}, expecting "
f"{transaction.STATUS.pending_user_transfer_start}"
)
transaction.status = Transaction.STATUS.pending_anchor
transaction.status_eta = 5 # Ledger close time.
transaction.save()
# launch the deposit Stellar transaction.
return create_stellar_deposit(transaction.id)
class Command(BaseCommand):
"""
Polls the anchor's financial entity, gathers ready deposit transactions
for execution, and executes them. This process can be run in a loop,
restarting every 10 seconds (or a user-defined time period)
"""
def add_arguments(self, parser):
parser.add_argument(
"--loop",
action="store_true",
help="Continually restart command after a specified "
"number of seconds (10)",
)
parser.add_argument(
"--interval",
"-i",
type=int,
nargs=1,
help="The number of seconds to wait before "
"restarting command. Defaults to 10.",
)
def handle(self, *args, **options):
if options.get("loop"):
while True:
self.execute_deposits()
time.sleep(options.get("interval") or 10)
else:
self.execute_deposits()
@classmethod
def execute_deposits(cls):
pending_deposits = Transaction.objects.filter(
kind=Transaction.KIND.deposit,
status=Transaction.STATUS.pending_user_transfer_start,
)
try:
ready_transactions = rdi.poll_pending_deposits(pending_deposits)
except NotImplementedError as e:
raise CommandError(e)
for transaction in ready_transactions:
try:
success = execute_deposit(transaction)
except ValueError as e:
logger.error(f"poll_pending_transactions: {str(e)}")
continue
if success:
rdi.after_deposit(transaction)
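# Illustrative invocation (added example; the management command name is the
# name of this module file, shown here as a placeholder):
#
#   python manage.py <command_module_name> --loop --interval 30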
| [
"logging.getLogger",
"polaris.integrations.registered_deposit_integration.after_deposit",
"polaris.integrations.registered_deposit_integration.poll_pending_deposits",
"polaris.models.Transaction.objects.filter",
"polaris.deposit.utils.create_stellar_deposit",
"django.core.management.CommandError"
] | [((266, 293), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (283, 293), False, 'import logging\n'), ((1284, 1322), 'polaris.deposit.utils.create_stellar_deposit', 'create_stellar_deposit', (['transaction.id'], {}), '(transaction.id)\n', (1306, 1322), False, 'from polaris.deposit.utils import create_stellar_deposit\n'), ((2373, 2490), 'polaris.models.Transaction.objects.filter', 'Transaction.objects.filter', ([], {'kind': 'Transaction.KIND.deposit', 'status': 'Transaction.STATUS.pending_user_transfer_start'}), '(kind=Transaction.KIND.deposit, status=\n Transaction.STATUS.pending_user_transfer_start)\n', (2399, 2490), False, 'from polaris.models import Transaction\n'), ((2567, 2610), 'polaris.integrations.registered_deposit_integration.poll_pending_deposits', 'rdi.poll_pending_deposits', (['pending_deposits'], {}), '(pending_deposits)\n', (2592, 2610), True, 'from polaris.integrations import registered_deposit_integration as rdi\n'), ((2670, 2685), 'django.core.management.CommandError', 'CommandError', (['e'], {}), '(e)\n', (2682, 2685), False, 'from django.core.management import BaseCommand, CommandError\n'), ((2975, 3005), 'polaris.integrations.registered_deposit_integration.after_deposit', 'rdi.after_deposit', (['transaction'], {}), '(transaction)\n', (2992, 3005), True, 'from polaris.integrations import registered_deposit_integration as rdi\n')] |
from unittest import TestCase
from ..functions import (permutations, getPerms, check_data_format,
                         randomize_permutation_data, is_list_of_tuples)
import numpy as np
from pprint import pprint
class TestPermutations(TestCase):
def test_getPerms(self):
data = [(1,1),(2,2) ,(3,3),(4,4),(5,5)]
n = 2
res = getPerms(data, n)
pprint(res)
val = \
[([(1, 1), (2, 2)], [(3, 3), (4, 4), (5, 5)]),\
([(1, 1), (3, 3)], [(2, 2), (4, 4), (5, 5)]),\
([(1, 1), (4, 4)], [(2, 2), (3, 3), (5, 5)]),\
([(1, 1), (5, 5)], [(2, 2), (3, 3), (4, 4)]),\
([(2, 2), (3, 3)], [(1, 1), (4, 4), (5, 5)]),\
([(2, 2), (4, 4)], [(1, 1), (3, 3), (5, 5)]),\
([(2, 2), (5, 5)], [(1, 1), (3, 3), (4, 4)]),\
([(3, 3), (4, 4)], [(1, 1), (2, 2), (5, 5)]),\
([(3, 3), (5, 5)], [(1, 1), (2, 2), (4, 4)]),\
        ([(4, 4), (5, 5)], [(1, 1), (2, 2), (3, 3)])]
self.assertEqual(res,val)
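    # Note (added): choosing n=2 of the 5 data points gives C(5, 2) = 10
    # train/test splits, which matches the 10 tuples listed in `val` above.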
def test_check_data_format_1(self):
dat = \
[([(1, 1), (2, 2)], [(3, 3), (4, 4), (5, 5)]),\
([(1, 1), (3, 3)], [(2, 2), (4, 4), (5, 5)]),\
([(1, 1), (4, 4)], [(2, 2), (3, 3), (5, 5)])]
self.assertEqual(check_data_format(dat),2)
def test_check_data_format_2(self):
dat = \
[([1, 2], [3, 4, 5]),\
([1, 3], [2, 4, 5]),\
([1, 4], [2, 3, 5])]
self.assertEqual(check_data_format(dat),1)
def test_check_data_format_3(self):
dat = \
[([1, 2], [3, 4, 5]),\
([1, 3], [(2, 2), 4, 5]),\
([1, 4], [2, 3, 5])]
self.assertEqual(check_data_format(dat),0)
def test_check_data_format_4(self):
dat = \
[([1, 2], [3, 4, 5]),\
([1, 3], [2, 4, 5],[3,2]),\
([1, 4], [2, 3, 5])]
self.assertEqual(check_data_format(dat),0)
def test_randomize_permutation_data(self):
dat = ([(1, 0.01), (2, 0.01)], [(3, 0.01), (4, 0.01), (5, 0.01)])
val = ([1., 2.], [3., 4., 5.])
res = randomize_permutation_data(dat)
pprint(dat)
pprint(res)
#self.assertEqual(res, val)
def test_is_list_of_tuples_1(self):
a = [(1,2), (3,4), (5,6)]
self.assertTrue(is_list_of_tuples(a))
def test_is_list_of_tuples_2(self):
a = [(1,2), (3,4,4), (5,6)]
self.assertFalse(is_list_of_tuples(a))
def test_is_list_of_tuples_3(self):
a = [1,2,3,4]
self.assertFalse(is_list_of_tuples(a))
| [
"pprint.pprint"
] | [((452, 463), 'pprint.pprint', 'pprint', (['res'], {}), '(res)\n', (458, 463), False, 'from pprint import pprint\n'), ((2210, 2221), 'pprint.pprint', 'pprint', (['dat'], {}), '(dat)\n', (2216, 2221), False, 'from pprint import pprint\n'), ((2230, 2241), 'pprint.pprint', 'pprint', (['res'], {}), '(res)\n', (2236, 2241), False, 'from pprint import pprint\n')] |
import pytest
def test_string_list():
from pybind11_tests import StringList, ClassWithSTLVecProperty, print_opaque_list
l = StringList()
l.push_back("Element 1")
l.push_back("Element 2")
if print_opaque_list(l) != "Opaque list: [Element 1, Element 2]":
raise AssertionError
if l.back() != "Element 2":
raise AssertionError
for i, k in enumerate(l, start=1):
if k != "Element {}".format(i):
raise AssertionError
l.pop_back()
if print_opaque_list(l) != "Opaque list: [Element 1]":
raise AssertionError
cvp = ClassWithSTLVecProperty()
if print_opaque_list(cvp.stringList) != "Opaque list: []":
raise AssertionError
cvp.stringList = l
cvp.stringList.push_back("Element 3")
if print_opaque_list(cvp.stringList) != "Opaque list: [Element 1, Element 3]":
raise AssertionError
def test_pointers(msg):
from pybind11_tests import (return_void_ptr, get_void_ptr_value, ExampleMandA,
print_opaque_list, return_null_str, get_null_str_value,
return_unique_ptr, ConstructorStats)
living_before = ConstructorStats.get(ExampleMandA).alive()
if get_void_ptr_value(return_void_ptr()) != 0x1234:
raise AssertionError
if not get_void_ptr_value(ExampleMandA()):
raise AssertionError
if ConstructorStats.get(ExampleMandA).alive() != living_before:
raise AssertionError
with pytest.raises(TypeError) as excinfo:
get_void_ptr_value([1, 2, 3]) # This should not work
if msg(excinfo.value) != """
get_void_ptr_value(): incompatible function arguments. The following argument types are supported:
1. (arg0: capsule) -> int
Invoked with: [1, 2, 3]
""":
raise AssertionError
if return_null_str() is not None:
raise AssertionError
if get_null_str_value(return_null_str()) is None:
raise AssertionError
ptr = return_unique_ptr()
if "StringList" not in repr(ptr):
raise AssertionError
if print_opaque_list(ptr) != "Opaque list: [some value]":
raise AssertionError
| [
"pybind11_tests.get_void_ptr_value",
"pybind11_tests.print_opaque_list",
"pybind11_tests.return_void_ptr",
"pybind11_tests.return_null_str",
"pybind11_tests.return_unique_ptr",
"pytest.raises",
"pybind11_tests.ExampleMandA",
"pybind11_tests.StringList",
"pybind11_tests.ConstructorStats.get",
"pybind11_tests.ClassWithSTLVecProperty"
] | [((135, 147), 'pybind11_tests.StringList', 'StringList', ([], {}), '()\n', (145, 147), False, 'from pybind11_tests import StringList, ClassWithSTLVecProperty, print_opaque_list\n'), ((595, 620), 'pybind11_tests.ClassWithSTLVecProperty', 'ClassWithSTLVecProperty', ([], {}), '()\n', (618, 620), False, 'from pybind11_tests import StringList, ClassWithSTLVecProperty, print_opaque_list\n'), ((1999, 2018), 'pybind11_tests.return_unique_ptr', 'return_unique_ptr', ([], {}), '()\n', (2016, 2018), False, 'from pybind11_tests import return_void_ptr, get_void_ptr_value, ExampleMandA, print_opaque_list, return_null_str, get_null_str_value, return_unique_ptr, ConstructorStats\n'), ((213, 233), 'pybind11_tests.print_opaque_list', 'print_opaque_list', (['l'], {}), '(l)\n', (230, 233), False, 'from pybind11_tests import return_void_ptr, get_void_ptr_value, ExampleMandA, print_opaque_list, return_null_str, get_null_str_value, return_unique_ptr, ConstructorStats\n'), ((503, 523), 'pybind11_tests.print_opaque_list', 'print_opaque_list', (['l'], {}), '(l)\n', (520, 523), False, 'from pybind11_tests import return_void_ptr, get_void_ptr_value, ExampleMandA, print_opaque_list, return_null_str, get_null_str_value, return_unique_ptr, ConstructorStats\n'), ((628, 661), 'pybind11_tests.print_opaque_list', 'print_opaque_list', (['cvp.stringList'], {}), '(cvp.stringList)\n', (645, 661), False, 'from pybind11_tests import return_void_ptr, get_void_ptr_value, ExampleMandA, print_opaque_list, return_null_str, get_null_str_value, return_unique_ptr, ConstructorStats\n'), ((786, 819), 'pybind11_tests.print_opaque_list', 'print_opaque_list', (['cvp.stringList'], {}), '(cvp.stringList)\n', (803, 819), False, 'from pybind11_tests import return_void_ptr, get_void_ptr_value, ExampleMandA, print_opaque_list, return_null_str, get_null_str_value, return_unique_ptr, ConstructorStats\n'), ((1489, 1513), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (1502, 1513), False, 'import pytest\n'), ((1534, 1563), 'pybind11_tests.get_void_ptr_value', 'get_void_ptr_value', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (1552, 1563), False, 'from pybind11_tests import return_void_ptr, get_void_ptr_value, ExampleMandA, print_opaque_list, return_null_str, get_null_str_value, return_unique_ptr, ConstructorStats\n'), ((1845, 1862), 'pybind11_tests.return_null_str', 'return_null_str', ([], {}), '()\n', (1860, 1862), False, 'from pybind11_tests import return_void_ptr, get_void_ptr_value, ExampleMandA, print_opaque_list, return_null_str, get_null_str_value, return_unique_ptr, ConstructorStats\n'), ((2093, 2115), 'pybind11_tests.print_opaque_list', 'print_opaque_list', (['ptr'], {}), '(ptr)\n', (2110, 2115), False, 'from pybind11_tests import return_void_ptr, get_void_ptr_value, ExampleMandA, print_opaque_list, return_null_str, get_null_str_value, return_unique_ptr, ConstructorStats\n'), ((1178, 1212), 'pybind11_tests.ConstructorStats.get', 'ConstructorStats.get', (['ExampleMandA'], {}), '(ExampleMandA)\n', (1198, 1212), False, 'from pybind11_tests import return_void_ptr, get_void_ptr_value, ExampleMandA, print_opaque_list, return_null_str, get_null_str_value, return_unique_ptr, ConstructorStats\n'), ((1247, 1264), 'pybind11_tests.return_void_ptr', 'return_void_ptr', ([], {}), '()\n', (1262, 1264), False, 'from pybind11_tests import return_void_ptr, get_void_ptr_value, ExampleMandA, print_opaque_list, return_null_str, get_null_str_value, return_unique_ptr, ConstructorStats\n'), ((1336, 1350), 'pybind11_tests.ExampleMandA', 
'ExampleMandA', ([], {}), '()\n', (1348, 1350), False, 'from pybind11_tests import return_void_ptr, get_void_ptr_value, ExampleMandA, print_opaque_list, return_null_str, get_null_str_value, return_unique_ptr, ConstructorStats\n'), ((1931, 1948), 'pybind11_tests.return_null_str', 'return_null_str', ([], {}), '()\n', (1946, 1948), False, 'from pybind11_tests import return_void_ptr, get_void_ptr_value, ExampleMandA, print_opaque_list, return_null_str, get_null_str_value, return_unique_ptr, ConstructorStats\n'), ((1389, 1423), 'pybind11_tests.ConstructorStats.get', 'ConstructorStats.get', (['ExampleMandA'], {}), '(ExampleMandA)\n', (1409, 1423), False, 'from pybind11_tests import return_void_ptr, get_void_ptr_value, ExampleMandA, print_opaque_list, return_null_str, get_null_str_value, return_unique_ptr, ConstructorStats\n')] |
import os
import random
import shutil
# Path of the XML annotation files
XmlPath = r'xml_train'
# Path of the original images
pictureBasePath = r"E:\Insight-MVT_Annotation_Train"
# Path where the selected images are saved
saveBasePath = r"E:\picture_train"
total_xml = os.listdir(XmlPath)
num = len(total_xml)
list = range(num)
if not os.path.exists(saveBasePath):  # check whether the output folder exists
os.makedirs(saveBasePath)
from tqdm import tqdm
for xml in tqdm(total_xml):
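    # Annotation files are named "<sequence_folder>__<frame>.xml"; split on "__"
    # to recover the sequence folder and the image filename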
xml_temp = xml.split("__")
folder = xml_temp[0]
filename = xml_temp[1].split(".")[0] + ".jpg"
# print(folder)
# print(filename)
temp_pictureBasePath = os.path.join(pictureBasePath, folder)
filePath = os.path.join(temp_pictureBasePath, filename)
# print(filePath)
newfile = xml.split(".")[0] + ".jpg"
newfile_path = os.path.join(saveBasePath, newfile)
# print(newfile_path)
shutil.copyfile(filePath, newfile_path)
print("xml file total number", num) | [
"os.path.exists",
"os.listdir",
"os.makedirs",
"tqdm.tqdm",
"os.path.join",
"shutil.copyfile"
] | [((204, 223), 'os.listdir', 'os.listdir', (['XmlPath'], {}), '(XmlPath)\n', (214, 223), False, 'import os\n'), ((387, 402), 'tqdm.tqdm', 'tqdm', (['total_xml'], {}), '(total_xml)\n', (391, 402), False, 'from tqdm import tqdm\n'), ((269, 297), 'os.path.exists', 'os.path.exists', (['saveBasePath'], {}), '(saveBasePath)\n', (283, 297), False, 'import os\n'), ((326, 351), 'os.makedirs', 'os.makedirs', (['saveBasePath'], {}), '(saveBasePath)\n', (337, 351), False, 'import os\n'), ((585, 622), 'os.path.join', 'os.path.join', (['pictureBasePath', 'folder'], {}), '(pictureBasePath, folder)\n', (597, 622), False, 'import os\n'), ((639, 683), 'os.path.join', 'os.path.join', (['temp_pictureBasePath', 'filename'], {}), '(temp_pictureBasePath, filename)\n', (651, 683), False, 'import os\n'), ((769, 804), 'os.path.join', 'os.path.join', (['saveBasePath', 'newfile'], {}), '(saveBasePath, newfile)\n', (781, 804), False, 'import os\n'), ((837, 876), 'shutil.copyfile', 'shutil.copyfile', (['filePath', 'newfile_path'], {}), '(filePath, newfile_path)\n', (852, 876), False, 'import shutil\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Module for data simulation, to test algorithms utilized in diagnostics.
<NAME> <<EMAIL>>
2017-03-27 11:22:25 AM EDT
"""
from phantasy.library.physics import Point
import numpy as np
class Distribution(object):
"""Particle distribution for transverse plane, i.e. ``x-o-y`` plane,
default is Gaussian distribution.
Parameters
----------
x0 : float
Mean value along ``x`` direction.
y0 : float
Mean value along ``y`` direction.
sx : float
Standard deviation along ``x`` direction.
sy : float
Standard deviation along ``y`` direction.
N : int
Total point number of particle distribution.
Keyword Arguments
-----------------
mean : list
Central point, ``[x0, y0]``, overrides *x0* and *y0*.
cov : list
Covariance matrix, overrides *sx* and *sy*.
rho : float
Correlation between ``x`` and ``y``, should be within ``[-1, 1]``.
distfile : string
Name of data file to load distribution, contains x and y data,
if *distfile* is valid, the internal data generation would be
ignored.
distdata : array
Array with shape of ``(2,n)`` to initialize distribution.
"""
def __init__(self, x0=0, y0=0, sx=0.1, sy=0.1, N=1000, **kws):
self.distype = None
distfile = kws.get('distfile', None)
distdata = kws.get('distdata', None)
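        # Precedence of inputs: an explicit distdata array wins over an external
        # distfile, which in turn wins over internal Gaussian generation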
# try to load data from array
if distdata is not None:
self.particles = distdata
else:
# generate internally
if not self.load_distfile(distfile):
self._x, self._y = None, None
if kws.get('mean', None) is not None:
mean = kws.get('mean')
else:
mean = [x0, y0]
if kws.get('cov', None) is not None:
cov = kws.get('cov')
else:
rho = kws.get('rho', None)
                    if rho is not None and -1.0 <= rho <= 1.0:
cxy = rho * sx * sy
else:
cxy = 0
cov = [[sx ** 2, cxy], [cxy, sy ** 2]]
self.distype = 'gaussian'
self.particles = Distribution.generate_gaussian_distrubution(
mean, cov, N)
else:
# load from external file
print("Load distribution from '{}'".format(distfile))
def load_distfile(self, distfile):
try:
data = np.loadtxt(distfile)
if data.shape[0] == 2:
self._x, self._y = data
else:
self._x, self._y = data.T
self.distype = 'external'
return True
        except Exception:
return False
@property
def particles(self):
"""tuple: Array of x, y distribution."""
return self._x, self._y
@particles.setter
def particles(self, p):
self._x, self._y = p
@staticmethod
def generate_gaussian_distrubution(mean, cov, N):
"""Generate random two-dimensional distribution.
"""
x, y = np.random.multivariate_normal(mean, cov, N).T
return x, y
def draw(self):
"""Draw particles.
"""
if self._x is None:
print("Particle distribution is not ready yet.")
return 1
else:
import matplotlib.pyplot as plt
x, y = self.particles
plt.plot(x, y, '.')
plt.show()
@staticmethod
def get_covariance(xarr, yarr, **kws):
"""Get covariance matrix of 'x' and 'y' array.
Parameters
----------
xarr : array
X array.
yarr : array
Y array.
Keyword Arguments
-----------------
norm :
If set, return normalized covariance.
Returns
-------
ret : array
Covariance matrix.
"""
if kws.get('norm', None) is not None:
return np.corrcoef(xarr, yarr)
else:
return np.cov(xarr, yarr)
def get_cov(self, **kws):
"""Return covariance of x and y of distribution,
if *norm* keyword is set, return normalized one.
"""
return Distribution.get_covariance(self._x, self._y, **kws)
def resample(self):
"""Generate normal distribution by resampling.
Returns
-------
ret : Distribution
New Distribution instance.
"""
mean = [np.mean(self._x), np.mean(self._y)]
cov = np.cov(self._x, self._y)
N = self._x.size
return Distribution(mean=mean, cov=cov, N=N)
def rotate(self, angle, p0=None):
"""Rotate particle distribution of *angle* w.r.t. *p0*.
Parameters
----------
angle : float
            Anti-clockwise rotation angle, in degrees.
p0 : Point
Rotating central point, ``(0,0)`` by default.
Returns
-------
ret : Distribution
New Distribution after rotation.
"""
if p0 is None:
p0 = Point(0, 0)
data0 = np.array(self.particles)
disp = np.tile(p0[:], [int(data0.size / 2), 1]).T
theta = angle / 180.0 * np.pi
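        # Standard 2D rotation matrix; points are translated so that p0 sits at
        # the origin, rotated by theta, then translated back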
m = np.array([[np.cos(theta), -np.sin(theta)],
[np.sin(theta), np.cos(theta)]])
data1 = np.dot(m, data0 - disp) + disp
return Distribution(distdata=data1)
def __repr__(self):
x, y = self._x, self._y
cov = np.cov(x, y)
x0, y0 = x.mean(), y.mean()
sx, sy = x.std(ddof=1), y.std(ddof=1)
rho_xy = cov[0, 1] / cov[0, 0] ** 0.5 / cov[1, 1] ** 0.5
ret = '(x_0, y_0) = ({0:.3f},{1:.3f})\n'.format(x0, y0)
ret += 'sigma_x = {0:.3f}\n'.format(sx)
ret += 'sigma_y = {0:.3f}\n'.format(sy)
ret += '(x,y) correlation = {0:.3f}'.format(rho_xy)
return ret
if __name__ == '__main__':
# default
print("{0}{1}{0}".format('-' * 10, 'default'))
ds = Distribution()
print(ds.get_cov())
print(ds.get_cov(norm='True'))
ds.draw()
# internal gaussian w/o correlation
print("{0}{1}{0}".format('-' * 10, 'gaussian/rho=0'))
ds = Distribution(1, 1, 2, 3, 50000)
print(ds.get_cov())
print(ds.get_cov(norm='True'))
ds.draw()
# internal gaussian with correlation
print("{0}{1}{0}".format('-' * 10, 'gaussian/rho=0.5'))
ds = Distribution(1, 1, 2, 3, 50000, rho=0.5)
print(ds.get_cov())
print(ds.get_cov(norm='True'))
ds.draw()
# load external
print("{0}{1}{0}".format('-' * 10, 'external file'))
ds = Distribution(distfile='../../../tests/temp/dist.dat')
print(ds.distype)
print(ds.get_cov())
print(ds.get_cov(norm='True'))
ds.draw()
# resample
print("Resample external loaded dist")
ds1 = ds.resample()
ds1.draw()
| [
"numpy.mean",
"numpy.corrcoef",
"numpy.random.multivariate_normal",
"matplotlib.pyplot.plot",
"numpy.array",
"numpy.dot",
"numpy.cov",
"numpy.cos",
"phantasy.library.physics.Point",
"numpy.sin",
"numpy.loadtxt",
"matplotlib.pyplot.show"
] | [((4668, 4692), 'numpy.cov', 'np.cov', (['self._x', 'self._y'], {}), '(self._x, self._y)\n', (4674, 4692), True, 'import numpy as np\n'), ((5250, 5274), 'numpy.array', 'np.array', (['self.particles'], {}), '(self.particles)\n', (5258, 5274), True, 'import numpy as np\n'), ((5644, 5656), 'numpy.cov', 'np.cov', (['x', 'y'], {}), '(x, y)\n', (5650, 5656), True, 'import numpy as np\n'), ((2591, 2611), 'numpy.loadtxt', 'np.loadtxt', (['distfile'], {}), '(distfile)\n', (2601, 2611), True, 'import numpy as np\n'), ((3208, 3251), 'numpy.random.multivariate_normal', 'np.random.multivariate_normal', (['mean', 'cov', 'N'], {}), '(mean, cov, N)\n', (3237, 3251), True, 'import numpy as np\n'), ((3548, 3567), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y', '"""."""'], {}), "(x, y, '.')\n", (3556, 3567), True, 'import matplotlib.pyplot as plt\n'), ((3580, 3590), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3588, 3590), True, 'import matplotlib.pyplot as plt\n'), ((4110, 4133), 'numpy.corrcoef', 'np.corrcoef', (['xarr', 'yarr'], {}), '(xarr, yarr)\n', (4121, 4133), True, 'import numpy as np\n'), ((4167, 4185), 'numpy.cov', 'np.cov', (['xarr', 'yarr'], {}), '(xarr, yarr)\n', (4173, 4185), True, 'import numpy as np\n'), ((4618, 4634), 'numpy.mean', 'np.mean', (['self._x'], {}), '(self._x)\n', (4625, 4634), True, 'import numpy as np\n'), ((4636, 4652), 'numpy.mean', 'np.mean', (['self._y'], {}), '(self._y)\n', (4643, 4652), True, 'import numpy as np\n'), ((5221, 5232), 'phantasy.library.physics.Point', 'Point', (['(0)', '(0)'], {}), '(0, 0)\n', (5226, 5232), False, 'from phantasy.library.physics import Point\n'), ((5498, 5521), 'numpy.dot', 'np.dot', (['m', '(data0 - disp)'], {}), '(m, data0 - disp)\n', (5504, 5521), True, 'import numpy as np\n'), ((5395, 5408), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (5401, 5408), True, 'import numpy as np\n'), ((5450, 5463), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (5456, 5463), True, 'import numpy as np\n'), ((5465, 5478), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (5471, 5478), True, 'import numpy as np\n'), ((5411, 5424), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (5417, 5424), True, 'import numpy as np\n')] |
# coding: utf-8
import os
import sys
import logging
import requests
import torch
import argparse
import numpy as np
from PIL import Image
from typing import List
from tqdm import tqdm
m4_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), '../../')
sys.path.append(m4_dir)
from m4.models.CLIP import CLIPProcessor, CLIPModel
from datasets import Flrick30k
def calc_metric(sims: np.ndarray,
ground_trues: List[List]
):
"""
    Calculate recall@1, 5, 10, median rank (medr) and mean rank (meanr).
    sims: (N, M) matrix of similarity scores.
    ground_trues: (N, *) indices of the ground-truth matches for each query.
@ref: https://github.com/Paranioar/SGRAF/blob/main/evaluation.py
"""
num_query = sims.shape[0]
ranks = np.zeros(num_query)
top1 = np.zeros(num_query)
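    # For each query, keep the best (lowest) rank among its ground-truth indices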
for index in range(num_query):
inds = np.argsort(sims[index])[::-1]
rank = 1e20
for true_idx in ground_trues[index]:
tmp = np.where(inds == true_idx)[0][0]
if tmp < rank:
rank = tmp
ranks[index] = rank
top1[index] = inds[0]
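    # recall@k: fraction of queries whose best ground-truth match is ranked in the top k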
r1 = 100.0 * len(np.where(ranks < 1)[0]) / len(ranks)
r5 = 100.0 * len(np.where(ranks < 5)[0]) / len(ranks)
r10 = 100.0 * len(np.where(ranks < 10)[0]) / len(ranks)
med_rank = np.floor(np.median(ranks)) + 1
mean_rank = ranks.mean() + 1
return {'r@1': r1, 'r@5': r5, 'r@10': r10, 'med_rank': med_rank, 'mean_rank': mean_rank}
def evaluate():
parser = argparse.ArgumentParser()
parser.add_argument("--image_dir", required=True, type=str, help="Images directory path.")
parser.add_argument("--annot_path", required=True, type=str, help="Annotation file path.")
parser.add_argument("--model_dir", required=True, type=str, help="Model directory path.")
parser.add_argument("--split", default='test', type=str, help="train, val, test.")
args = parser.parse_args()
dataset = Flrick30k(image_dir=args.image_dir, annot_path=args.annot_path, split=args.split)
processor = CLIPProcessor.from_pretrained(args.model_dir)
model = CLIPModel.from_pretrained(args.model_dir)
model.eval()
images = dataset.get_all_images()
texts = dataset.get_all_texts()
img_embs = []
img_batch = []
for i, img_path in tqdm(enumerate(images), total=len(images), desc='Image encoding'):
img_batch.append(Image.open(img_path))
if len(img_batch) < 4 and i != len(images) - 1:
continue
else:
with torch.no_grad():
inputs = processor(images=img_batch, return_tensors='pt')
embs = model.get_image_features(**inputs, return_dict=True, output_hidden_states=False)
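                # L2-normalize so the image-text dot products below equal cosine similarities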
embs = embs / embs.norm(dim=-1, keepdim=True)
img_embs.append(embs)
img_batch = []
img_embs = torch.cat(img_embs, dim=0).numpy()
txt_embs = []
txt_batch = []
for i, sent in tqdm(enumerate(texts), total=len(texts), desc='Text encoding'):
txt_batch.append(sent)
if len(txt_batch) < 4 and i != len(texts) - 1:
continue
else:
with torch.no_grad():
inputs = processor(text=txt_batch, return_tensors='pt', padding=True, truncation=True, max_length=77)
embs = model.get_text_features(**inputs, return_dict=True, output_hidden_states=False)
embs = embs / embs.norm(dim=-1, keepdim=True)
txt_embs.append(embs)
txt_batch = []
txt_embs = torch.cat(txt_embs, dim=0).numpy()
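    # sims[i, j] is the cosine similarity between image i and caption j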
sims = np.dot(img_embs, txt_embs.transpose())
print("============Image to Text=============")
res = calc_metric(sims, dataset.get_ground_trues('i2t'))
for key, value in res.items():
print(f"{key}: {value}")
print("============Image to Text=============")
print("============Text to Image=============")
res = calc_metric(sims.transpose(), dataset.get_ground_trues('t2i'))
for key, value in res.items():
print(f"{key}: {value}")
if __name__ == "__main__":
evaluate()
| [
"numpy.median",
"PIL.Image.open",
"argparse.ArgumentParser",
"numpy.where",
"numpy.argsort",
"torch.cat",
"numpy.zeros",
"m4.models.CLIP.CLIPModel.from_pretrained",
"os.path.abspath",
"torch.no_grad",
"m4.models.CLIP.CLIPProcessor.from_pretrained",
"sys.path.append",
"datasets.Flrick30k"
] | [((261, 284), 'sys.path.append', 'sys.path.append', (['m4_dir'], {}), '(m4_dir)\n', (276, 284), False, 'import sys\n'), ((718, 737), 'numpy.zeros', 'np.zeros', (['num_query'], {}), '(num_query)\n', (726, 737), True, 'import numpy as np\n'), ((749, 768), 'numpy.zeros', 'np.zeros', (['num_query'], {}), '(num_query)\n', (757, 768), True, 'import numpy as np\n'), ((1462, 1487), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1485, 1487), False, 'import argparse\n'), ((1905, 1991), 'datasets.Flrick30k', 'Flrick30k', ([], {'image_dir': 'args.image_dir', 'annot_path': 'args.annot_path', 'split': 'args.split'}), '(image_dir=args.image_dir, annot_path=args.annot_path, split=args.\n split)\n', (1914, 1991), False, 'from datasets import Flrick30k\n'), ((2003, 2048), 'm4.models.CLIP.CLIPProcessor.from_pretrained', 'CLIPProcessor.from_pretrained', (['args.model_dir'], {}), '(args.model_dir)\n', (2032, 2048), False, 'from m4.models.CLIP import CLIPProcessor, CLIPModel\n'), ((2061, 2102), 'm4.models.CLIP.CLIPModel.from_pretrained', 'CLIPModel.from_pretrained', (['args.model_dir'], {}), '(args.model_dir)\n', (2086, 2102), False, 'from m4.models.CLIP import CLIPProcessor, CLIPModel\n'), ((223, 248), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (238, 248), False, 'import os\n'), ((820, 843), 'numpy.argsort', 'np.argsort', (['sims[index]'], {}), '(sims[index])\n', (830, 843), True, 'import numpy as np\n'), ((1283, 1299), 'numpy.median', 'np.median', (['ranks'], {}), '(ranks)\n', (1292, 1299), True, 'import numpy as np\n'), ((2349, 2369), 'PIL.Image.open', 'Image.open', (['img_path'], {}), '(img_path)\n', (2359, 2369), False, 'from PIL import Image\n'), ((2812, 2838), 'torch.cat', 'torch.cat', (['img_embs'], {'dim': '(0)'}), '(img_embs, dim=0)\n', (2821, 2838), False, 'import torch\n'), ((3482, 3508), 'torch.cat', 'torch.cat', (['txt_embs'], {'dim': '(0)'}), '(txt_embs, dim=0)\n', (3491, 3508), False, 'import torch\n'), ((2479, 2494), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2492, 2494), False, 'import torch\n'), ((3106, 3121), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3119, 3121), False, 'import torch\n'), ((933, 959), 'numpy.where', 'np.where', (['(inds == true_idx)'], {}), '(inds == true_idx)\n', (941, 959), True, 'import numpy as np\n'), ((1102, 1121), 'numpy.where', 'np.where', (['(ranks < 1)'], {}), '(ranks < 1)\n', (1110, 1121), True, 'import numpy as np\n'), ((1161, 1180), 'numpy.where', 'np.where', (['(ranks < 5)'], {}), '(ranks < 5)\n', (1169, 1180), True, 'import numpy as np\n'), ((1220, 1240), 'numpy.where', 'np.where', (['(ranks < 10)'], {}), '(ranks < 10)\n', (1228, 1240), True, 'import numpy as np\n')] |
# this file contains the import functions
# from threejs rendering to generate jsons
import os
import uuid
import sys
from OCC.Core.Visualization import Tesselator
# from OCC.Extend.TopologyUtils import is_edge, is_wire, discretize_edge, discretize_wire
def generate_json_from_shape(
shape,
uri,
export_edges=False,
color=(0.65, 0.65, 0.65),
specular_color=(1, 1, 1),
shininess=0.9,
transparency=0.0,
line_color=(0, 0.0, 0.0),
line_width=2.0,
mesh_quality=1.0,
filename="test",
):
# if the shape is an edge or a wire, use the related functions
# if is_edge(shape):
# print("discretize an edge")
# pnts = discretize_edge(shape)
# edge_hash = "edg%s" % uuid.uuid4().hex
# str_to_write = export_edgedata_to_json(edge_hash, pnts)
# edge_full_path = os.path.join(self._path, edge_hash + ".json")
# with open(edge_full_path, "w") as edge_file:
# edge_file.write(str_to_write)
# # store this edge hash
# self._3js_edges[edge_hash] = [color, line_width]
# return True
# elif is_wire(shape):
# print("discretize a wire")
# pnts = discretize_wire(shape)
# wire_hash = "wir%s" % uuid.uuid4().hex
# str_to_write = export_edgedata_to_json(wire_hash, pnts)
# wire_full_path = os.path.join(self._path, wire_hash + ".json")
# print(wire_full_path)
# with open(wire_full_path, "w") as wire_file:
# wire_file.write(str_to_write)
# # store this edge hash
# self._3js_edges[wire_hash] = [color, line_width]
# return True
# TODO change uuid here
# shape_uuid = uuid.uuid4().hex
shape_uuid = uri
# shape_hash = "shp%s" % shape_uuid
shape_hash = "shp%s" % uuid.uuid4().hex
# tesselate
tess = Tesselator(shape)
tess.Compute(
compute_edges=export_edges,
mesh_quality=mesh_quality,
uv_coords=False,
parallel=True,
)
sys.stdout.flush()
# export to 3JS
# shape_full_path = os.path.join(self._path, shape_hash + '.json')
# print(f'{shape_full_path} shape path')
dirpath = os.getcwd()
staticpath = "src/app/render/static/shapes"
shape_full_path = os.path.join(dirpath, staticpath, shape_hash + ".json")
# generate the mesh
# tess.ExportShapeToThreejs(shape_hash, shape_full_path)
# and also to JSON
with open(shape_full_path, "w") as json_file:
json_file.write(tess.ExportShapeToThreejsJSONString(shape_uuid))
# draw edges if necessary
# if export_edges:
# # export each edge to a single json
# # get number of edges
# nbr_edges = tess.ObjGetEdgeCount()
# for i_edge in range(nbr_edges):
# # after that, the file can be appended
# str_to_write = ""
# edge_point_set = []
# nbr_vertices = tess.ObjEdgeGetVertexCount(i_edge)
# for i_vert in range(nbr_vertices):
# edge_point_set.append(tess.GetEdgeVertex(i_edge, i_vert))
# # write to file
# #edge_hash = "edg%s" % uuid.uuid4().hex
# # str_to_write += export_edgedata_to_json(edge_hash, edge_point_set)
# # create the file
# # edge_full_path = os.path.join(self._path, edge_hash + ".json")
# with open(edge_full_path, "w") as edge_file:
# edge_file.write(str_to_write)
# # store this edge hash, with black color
# # self._3js_edges[hash] = [(0, 0, 0), line_width]
return shape_hash
| [
"os.path.join",
"uuid.uuid4",
"os.getcwd",
"OCC.Core.Visualization.Tesselator",
"sys.stdout.flush"
] | [((1836, 1853), 'OCC.Core.Visualization.Tesselator', 'Tesselator', (['shape'], {}), '(shape)\n', (1846, 1853), False, 'from OCC.Core.Visualization import Tesselator\n'), ((2002, 2020), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (2018, 2020), False, 'import sys\n'), ((2171, 2182), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (2180, 2182), False, 'import os\n'), ((2253, 2308), 'os.path.join', 'os.path.join', (['dirpath', 'staticpath', "(shape_hash + '.json')"], {}), "(dirpath, staticpath, shape_hash + '.json')\n", (2265, 2308), False, 'import os\n'), ((1792, 1804), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (1802, 1804), False, 'import uuid\n')] |
import os
import tempfile
from datetime import datetime
from shutil import copyfileobj
from tempfile import SpooledTemporaryFile
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.core.files.base import File
from django.core.files.storage import Storage
from django.db.models.fields.files import FieldFile
from django.utils._os import safe_join
from django.utils.deconstruct import deconstructible
from rclonestorage.rclone import RcloneRemote
def _setting(name, default=None):
return getattr(settings, name, default)
class RcloneRemoteFile(File):
def __init__(self, name, storage, mode='rb'):
self.name = name
self._storage = storage
self._file = None
self._mode = mode
def _get_file(self):
if self._file is None:
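            # Lazily fetch the remote file into a temporary directory, then copy
            # its contents into the SpooledTemporaryFile backing this File object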
self._file = SpooledTemporaryFile()
with tempfile.TemporaryDirectory() as tmpdirname:
filename = os.path.basename(self.name)
filepath = os.path.join(tmpdirname, filename)
self._storage.rclone.get_file(self.name, filepath)
with open(filepath, self._mode) as f:
copyfileobj(f, self._file)
self._file.seek(0)
return self._file
def _set_file(self, value):
self._file = value
file = property(_get_file, _set_file)
@deconstructible
class RcloneRemoteStorage(Storage):
"""RClone Storage class for Django pluggable storage system."""
remote = _setting('RCLONE_REMOTE')
location = _setting('RCLONE_REMOTE_ROOT', '/')
config = _setting('RCLONE_CONFIG_PATH')
def __init__(self, remote=remote, root_path=location, config_path=config):
if remote is None:
raise ImproperlyConfigured("You must configure an remote at 'settings.RCLONE_REMOTE'.")
self.root_path = root_path
self.rclone = RcloneRemote(remote, config_path=config_path)
def path(self, name):
if name == '/':
name = ''
return safe_join(self.root_path, name).replace('\\', '/')
def delete(self, name):
self.rclone.delete(self.path(name))
def exists(self, name):
return self.rclone.exists(self.path(name))
def listdir(self, path):
directories, files = [], []
full_path = self.path(path)
if full_path == '/':
full_path = ''
metadata = self.rclone.ls(full_path)
for entry in metadata:
if entry["IsDir"]:
directories.append(entry["Name"])
else:
files.append(entry["Name"])
return directories, files
def size(self, name):
return self.rclone.size(self.path(name))
@staticmethod
def _datetime_from_timestring(ts):
try:
return datetime.strptime(ts, "%Y-%m-%dT%H:%M:%S%z")
except ValueError:
return datetime.strptime(ts, "%Y-%m-%dT%H:%M:%S.%f%z")
def get_accessed_time(self, name):
return self._datetime_from_timestring(self.rclone.meta(self.path(name))["ModTime"])
def get_created_time(self, name):
return self._datetime_from_timestring(self.rclone.meta(self.path(name))["ModTime"])
def get_modified_time(self, name):
return self._datetime_from_timestring(self.rclone.meta(self.path(name))["ModTime"])
def _open(self, name, mode='rb'):
remote_file = RcloneRemoteFile(self.path(name), self, mode=mode)
return remote_file
def _save(self, name, content):
if isinstance(content, FieldFile):
try:
self.rclone.send_file(content.path, self.path(name))
return name
except ValueError:
pass
with tempfile.TemporaryDirectory() as tmpdir:
local_path = os.path.join(tmpdir, os.path.basename(name))
content.open()
with open(local_path, 'wb') as tmpfile:
copyfileobj(content, tmpfile)
content.close()
self.rclone.send_file(local_path, self.path(name))
return name
def force_save(self, name, content):
if name is None:
name = content.name
if not hasattr(content, 'chunks'):
content = File(content, name)
return self._save(name, content)
| [
"tempfile.TemporaryDirectory",
"rclonestorage.rclone.RcloneRemote",
"shutil.copyfileobj",
"django.utils._os.safe_join",
"datetime.datetime.strptime",
"os.path.join",
"tempfile.SpooledTemporaryFile",
"os.path.basename",
"django.core.files.base.File",
"django.core.exceptions.ImproperlyConfigured"
] | [((1902, 1947), 'rclonestorage.rclone.RcloneRemote', 'RcloneRemote', (['remote'], {'config_path': 'config_path'}), '(remote, config_path=config_path)\n', (1914, 1947), False, 'from rclonestorage.rclone import RcloneRemote\n'), ((849, 871), 'tempfile.SpooledTemporaryFile', 'SpooledTemporaryFile', ([], {}), '()\n', (869, 871), False, 'from tempfile import SpooledTemporaryFile\n'), ((1762, 1848), 'django.core.exceptions.ImproperlyConfigured', 'ImproperlyConfigured', (['"""You must configure an remote at \'settings.RCLONE_REMOTE\'."""'], {}), '(\n "You must configure an remote at \'settings.RCLONE_REMOTE\'.")\n', (1782, 1848), False, 'from django.core.exceptions import ImproperlyConfigured\n'), ((2819, 2863), 'datetime.datetime.strptime', 'datetime.strptime', (['ts', '"""%Y-%m-%dT%H:%M:%S%z"""'], {}), "(ts, '%Y-%m-%dT%H:%M:%S%z')\n", (2836, 2863), False, 'from datetime import datetime\n'), ((3751, 3780), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (3778, 3780), False, 'import tempfile\n'), ((4263, 4282), 'django.core.files.base.File', 'File', (['content', 'name'], {}), '(content, name)\n', (4267, 4282), False, 'from django.core.files.base import File\n'), ((889, 918), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (916, 918), False, 'import tempfile\n'), ((961, 988), 'os.path.basename', 'os.path.basename', (['self.name'], {}), '(self.name)\n', (977, 988), False, 'import os\n'), ((1016, 1050), 'os.path.join', 'os.path.join', (['tmpdirname', 'filename'], {}), '(tmpdirname, filename)\n', (1028, 1050), False, 'import os\n'), ((2036, 2067), 'django.utils._os.safe_join', 'safe_join', (['self.root_path', 'name'], {}), '(self.root_path, name)\n', (2045, 2067), False, 'from django.utils._os import safe_join\n'), ((2910, 2957), 'datetime.datetime.strptime', 'datetime.strptime', (['ts', '"""%Y-%m-%dT%H:%M:%S.%f%z"""'], {}), "(ts, '%Y-%m-%dT%H:%M:%S.%f%z')\n", (2927, 2957), False, 'from datetime import datetime\n'), ((3838, 3860), 'os.path.basename', 'os.path.basename', (['name'], {}), '(name)\n', (3854, 3860), False, 'import os\n'), ((3957, 3986), 'shutil.copyfileobj', 'copyfileobj', (['content', 'tmpfile'], {}), '(content, tmpfile)\n', (3968, 3986), False, 'from shutil import copyfileobj\n'), ((1192, 1218), 'shutil.copyfileobj', 'copyfileobj', (['f', 'self._file'], {}), '(f, self._file)\n', (1203, 1218), False, 'from shutil import copyfileobj\n')] |
# -*- coding: utf-8 -*-
from visigoth import Diagram
from visigoth.common import EmbeddedSvg
from visigoth.common import Text
svg = """<?xml version="1.0" encoding="utf-8"?>
<svg height="100" version="1.1" width="100" xmlns="http://www.w3.org/2000/svg" xmlns:svg="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink">
<circle r="45" cx="50" cy="50" fill="orange" stroke="purple" stroke-width="10" />
</svg>
"""
d = Diagram()
d.add(Text("Embedded SVG!"))
d.add(EmbeddedSvg(svg,400,40))
html = d.draw(format="html")
f = open("example.html", "w")
f.write(html)
f.close()
| [
"visigoth.common.EmbeddedSvg",
"visigoth.Diagram",
"visigoth.common.Text"
] | [((440, 449), 'visigoth.Diagram', 'Diagram', ([], {}), '()\n', (447, 449), False, 'from visigoth import Diagram\n'), ((456, 477), 'visigoth.common.Text', 'Text', (['"""Embedded SVG!"""'], {}), "('Embedded SVG!')\n", (460, 477), False, 'from visigoth.common import Text\n'), ((485, 510), 'visigoth.common.EmbeddedSvg', 'EmbeddedSvg', (['svg', '(400)', '(40)'], {}), '(svg, 400, 40)\n', (496, 510), False, 'from visigoth.common import EmbeddedSvg\n')] |
import json, os, pandas as pd, numpy as np, csv
from datasets import load_dataset
from collections import Counter
import requests, zipfile, tarfile
import utils_scorer, utils_misc
# SummaC Benchmark
class SummaCBenchmark:
def __init__(self, benchmark_folder="/home/phillab/data/summac_benchmark/", dataset_names=["cogensum", "xsumfaith", "polytope", "factcc", "summeval", "frank"], cut="val"):
assert cut in ["val", "test"], "Unrecognized cut for the Fact Checking Benchmark"
if not os.path.exists(benchmark_folder):
os.makedirs(benchmark_folder)
self.cut = cut
self.benchmark_folder = benchmark_folder
self.cnndm = None
self.xsum = None
self.datasets = []
for dataset_name in dataset_names:
if dataset_name == "cogensum":
self.load_cogensumm()
elif dataset_name == "xsumfaith":
self.load_xsumfaith()
elif dataset_name == "polytope":
self.load_polytope()
elif dataset_name == "factcc":
self.load_factcc()
elif dataset_name == "summeval":
self.load_summeval()
elif dataset_name == "frank":
self.load_frank()
else:
raise ValueError("Unrecognized dataset name: %s" % (dataset_name))
# Underlying dataset loader: CNN/DM and XSum
def get_cnndm_document(self, aid):
if self.cnndm is None:
self.cnndm = load_dataset("cnn_dailymail", "3.0.0")
self.cnndm_id2article = {}
for cut in ["test", "validation"]:
self.cnndm_id2article.update({d["id"]: d["article"] for d in self.cnndm[cut]})
return self.cnndm_id2article[aid]
def get_xsum_document(self, aid):
if self.xsum is None:
self.xsum = load_dataset("xsum")["test"]
self.xsumid2article = {d["id"]: d["document"] for d in self.xsum}
return self.xsumid2article[aid]
# Individual dataset loaders
def load_cogensumm(self):
# Correctness of Generated Summaries: https://www.aclweb.org/anthology/P19-1213.pdf
# CoGenSumm: https://tudatalib.ulb.tu-darmstadt.de/handle/tudatalib/2002
dataset_folder = os.path.join(self.benchmark_folder, "cogensumm/")
if not os.path.exists(dataset_folder):
print("==== CoGenSumm dataset not found, downloading from scratch")
os.makedirs(dataset_folder)
data = requests.get("https://tudatalib.ulb.tu-darmstadt.de/bitstream/handle/tudatalib/2002/summary-correctness-v1.0.zip?sequence=3&isAllowed=y")
zip_file = os.path.join(dataset_folder, "summary-correctness-v1.0.zip")
with open(zip_file, "wb") as f:
f.write(data.content)
with zipfile.ZipFile(zip_file, "r") as zip_ref:
zip_ref.extractall(dataset_folder)
os.remove(zip_file)
clean_dataset = []
for fn in os.listdir(dataset_folder):
if self.cut not in fn:
continue
with open(os.path.join(dataset_folder, fn), "r") as f:
dataset = json.load(f)
if "_org" in fn or fn == "test_chen18_reranked.json":
for aid in dataset:
document = self.get_cnndm_document(aid)
label = 0 if dataset[aid]["label"] == "Incorrect" else 1
sents = dataset[aid]["sents"]
summary = " ".join([sents[str(i)]["text"] for i in range(len(sents))])
clean_dataset.append({"filename": fn, "label": label, "document": document, "claim": summary, "cnndm_id": aid, "annotations": [label], "dataset": "cogensumm", "origin": "cnndm"})
elif fn == "val_reranking.json":
for aid in dataset:
document = self.get_cnndm_document(aid)
for idx, data in dataset[aid].items():
label = 0 if data["label"] == "Incorrect" else 1
summary = " ".join([data["sents"][str(i)]["text"] for i in range(len(data["sents"]))])
clean_dataset.append({"filename": fn, "label": label, "document": document, "claim": summary, "cnndm_id": aid, "annotations": [label], "dataset": "cogensumm", "origin": "cnndm"})
elif fn == "val_sentence_pairs.json":
for d in dataset:
aid = d["article_id"]
document = self.get_cnndm_document(aid)
clean_dataset.append({"filename": fn, "label": 1, "document": document, "claim": d["correct_sent"], "cnndm_id": aid, "annotations": [1], "dataset": "cogensumm", "origin": "cnndm"})
clean_dataset.append({"filename": fn, "label": 0, "document": document, "claim": d["incorrect_sent"], "cnndm_id": aid, "annotations": [0], "dataset": "cogensumm", "origin": "cnndm"})
self.datasets.append({"name": "cogensumm", "dataset": clean_dataset})
def load_xsumfaith(self):
# On Faithfulness and Factuality in Abstractive Summarization - ACL 2020
# https://github.com/google-research-datasets/xsum_hallucination_annotations
# https://aclanthology.org/2020.acl-main.173.pdf
dataset_folder = os.path.join(self.benchmark_folder, "xsumfaith/")
if not os.path.exists(dataset_folder):
print("==== XSum dataset not found, downloading from scratch")
os.makedirs(dataset_folder)
csv_file = requests.get("https://github.com/google-research-datasets/xsum_hallucination_annotations/raw/master/hallucination_annotations_xsum_summaries.csv")
with open(os.path.join(dataset_folder, "hallucination_annotations_xsum_summaries.csv"), "wb") as f:
f.write(csv_file.content)
path_to_annotation = os.path.join(dataset_folder, "hallucination_annotations_xsum_summaries.csv")
with open(path_to_annotation, "r") as f:
raw_data = list(csv.reader(f))
dataset = []
keys = raw_data[0]
for line in raw_data[1:]:
dataset.append({k: v for k, v in zip(keys, line)})
groups = {}
for d in dataset:
k = (d["bbcid"], d["system"])
if k not in groups:
groups[k] = []
groups[k].append(d)
clean_dataset = []
for k, vs in groups.items():
A = vs[0]
document = self.get_xsum_document(A["bbcid"])
labels = [v["hallucination_type"] for v in vs]
annotations = [1 if label == "NULL" else 0 for label in labels]
most_common_label = Counter(labels).most_common(1)[0][0]
label = 1 if most_common_label == "NULL" else 0
c = "val" if len(clean_dataset) % 2 == 0 else "test"
clean_dataset.append({"document": document, "claim": A["summary"], "bbcid": A["bbcid"], "model_name": A["system"], "label": label, "cut": c, "annotations": annotations, "dataset": "xsumfaith", "origin": "xsum"})
final_dataset = [d for d in clean_dataset if d["cut"]==self.cut]
self.datasets.append({"name": "xsumfaith", "dataset": final_dataset})
def load_polytope(self, which_label="overall"):
# What Have We Achieved on Text Summarization? [https://arxiv.org/abs/2010.04529]
# Dataset must be downloaded from the Github repo: https://github.com/hddbang/polytope
assert which_label in ["overall", "omission", "addition", "duplication", "inaccuracy"], "Unrecognized `which label`"
dataset_folder = os.path.join(self.benchmark_folder, "polytope")
if not os.path.exists(dataset_folder):
print("==== Polytope dataset not found, downloading from scratch")
os.makedirs(dataset_folder)
for model_name in ["BART", "Bert_Ext", "Bert_Ext_Abs", "BottomUp", "PG", "PG_Coverage", "Summa", "TextRank", "seq2seq"]:
url = "https://github.com/hddbang/PolyTope/raw/master/outputs_with_human_annotation/Human_Annotation_Summarization_%s.xlsm" % (model_name)
r = requests.get(url)
with open(os.path.join(dataset_folder, "Human_Annotation_Summarization_%s.xlsm" % (model_name)), "wb") as f:
f.write(r.content)
full_dataset = []
for fn in os.listdir(dataset_folder):
fn = os.path.join(dataset_folder, fn)
all_segments = pd.read_excel(fn, sheet_name="Scores per segment")
ID2row = {}
for i, segment in all_segments.iterrows():
c = "val" if i % 2 == 0 else "test"
if str(segment["ID"]) != "nan":
ID2row[segment["ID"]] = {"ID": segment["ID"], "document": segment["Source"], "claim": segment["Target"], "errors": [], "cut": c}
for i, row in pd.read_excel(fn, sheet_name="Error Log").iterrows():
if str(row["Subtypes"]) != "nan":
ID2row[row["ID"]]["errors"].append(row["Subtypes"])
for ID in ID2row:
d = ID2row[ID]
d["overall_label"] = 1 if len(d["errors"]) == 0 else 0
d["omission_label"] = 0 if "Omission" in d["errors"] else 1
d["addition_label"] = 0 if "Addition" in d["errors"] else 1
d["duplication_label"] = 0 if "Duplication" in d["errors"] else 1
d["inaccuracy_label"] = 0 if "Inaccuracy_internal" in d["errors"] or "Inaccuracy_external" in d["errors"] else 1
if which_label is not None:
d["label"] = d["%s_label" % (which_label)]
d["dataset"] = "polytope"
d["annotations"] = [d["label"]]
d["origin"] = "cnndm"
full_dataset.append(d)
cut_dataset = [d for d in full_dataset if d["cut"]==self.cut]
self.datasets.append({"name": "polytope", "dataset": cut_dataset})
def load_factcc(self, max_entries=-1):
# Evaluating the Factual Consistency of Abstractive Text Summarization [https://arxiv.org/abs/1910.12840]
# Dataset for each split must be downloaded from the Github repo: https://github.com/salesforce/factCC
dataset_folder = os.path.join(self.benchmark_folder, "factcc/")
if not os.path.exists(dataset_folder):
print("==== FactCC dataset not found, downloading from scratch")
os.makedirs(dataset_folder)
urls = ["https://storage.googleapis.com/sfr-factcc-data-research/unpaired_generated_data.tar.gz", "https://storage.googleapis.com/sfr-factcc-data-research/unpaired_annotated_data.tar.gz"]
for url in urls:
zip_name = url.split("/")[-1]
r = requests.get(url)
with open(os.path.join(dataset_folder, zip_name), "wb") as f:
f.write(r.content)
with tarfile.open(os.path.join(dataset_folder, zip_name), "r:gz") as f:
f.extractall(dataset_folder)
os.remove(os.path.join(dataset_folder, zip_name))
if self.cut == "train":
dataset = []
with open(os.path.join(dataset_folder, "unpaired_generated_data/data-original/data-train.jsonl"), "r") as f:
for i, line in enumerate(f):
if max_entries > 0 and i >= max_entries:
break
D = json.loads(line)
aid = D["filepath"].split("/")[-1].replace(".story", "")
full_text = self.get_cnndm_document(aid)
label = 1 if D["label"]=="CORRECT" else 0
datum = {"document": full_text, "claim": D["claim"], "cnndm_id": D["id"], "label": label, "dataset": "factcc", "origin": "cnndm"}
dataset.append(datum)
if self.cut in ["val", "test"]:
factcc_file = os.path.join(dataset_folder, "unpaired_annotated_data/%s/data-dev.jsonl" % (self.cut))
dataset = []
with open(factcc_file, "r") as f:
for line in f:
dataset.append(json.loads(line))
for d in dataset:
aid = d["filepath"].split("/")[-1].replace(".story", "")
d["document"] = self.get_cnndm_document(aid)
d["label"] = 1 if d["label"] == "CORRECT" else 0
d["annotations"] = [d["label"]]
d["dataset"] = "factcc"
d["origin"] = "cnndm"
self.datasets.append({"name": "factcc", "dataset": dataset})
def load_summeval(self, key_focus="consistency"):
assert key_focus in ["consistency", "coherence", "fluency", "relevance"]
# SummEval: Re-evaluating Summarization Evaluation [https://arxiv.org/abs/2007.12626]
# Data files must be downloaded from the following Github repository: https://github.com/Yale-LILY/SummEval
raw_dataset = []
dataset_folder = os.path.join(self.benchmark_folder, "summeval/")
fn = os.path.join(dataset_folder, "model_annotations.aligned.scored.jsonl")
if not os.path.exists(dataset_folder):
print("==== SummEval dataset not found, downloading from scratch")
os.makedirs(dataset_folder)
# From the 4/19/2020 update on the README: https://github.com/Yale-LILY/SummEval
utils_misc.download_file_from_google_drive("1d2Iaz3jNraURP1i7CfTqPIj8REZMJ3tS", fn)
with open(fn, "r") as f:
for line in f:
raw_dataset.append(json.loads(line))
clean_dataset = []
for i, d in enumerate(raw_dataset):
c = "val" if i % 2 == 0 else "test"
_, _, article_id = d["id"].split("-")
document = self.get_cnndm_document(article_id)
annotations = d["expert_annotations"]
consistencies = [a[key_focus] for a in annotations]
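            # Label the summary consistent (1) only when more than half of the expert
            # annotators gave it the maximum score of 5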
final_label = 1 if len([cons for cons in consistencies if cons==5]) > len(annotations)/2 else 0
annotations = [1 if cons == 5 else 0 for cons in consistencies]
error_type = "no error" if final_label == 1 else "error"
clean_dataset.append({"document": document, "claim": d["decoded"], "label": final_label, "model_name": d["model_id"], "cnndm_id": d["id"], "cut": c, "annotations": annotations, "dataset": "summeval", "origin": "cnndm", "error_type": error_type})
final_dataset = [d for d in clean_dataset if d["cut"] == self.cut]
self.datasets.append({"name": "summeval", "dataset": final_dataset})
def load_frank(self):
# FRANK: Factuality Evaluation Benchmark [https://aclanthology.org/2021.naacl-main.383.pdf]
# Files must be downloaded from the Github repository: https://github.com/artidoro/frank
dataset_folder = os.path.join(self.benchmark_folder, "frank/")
if not os.path.exists(dataset_folder):
print("==== Frank dataset not found, downloading from scratch")
os.makedirs(dataset_folder)
fns = ["human_annotations_sentence.json", "validation_split.txt", "test_split.txt"]
for fn in fns:
data = requests.get("https://raw.githubusercontent.com/artidoro/frank/main/data/%s" % fn)
with open(os.path.join(dataset_folder, fn), "w") as f:
f.write(data.text)
raw_file = os.path.join(dataset_folder, "human_annotations_sentence.json")
val_hash_file = os.path.join(dataset_folder, "validation_split.txt")
test_hash_file = os.path.join(dataset_folder, "test_split.txt")
with open(val_hash_file if self.cut=="val" else test_hash_file, "r") as f:
valid_hashes = set([line.strip() for line in f])
with open(raw_file, "r") as f:
raw_dataset = json.load(f)
dataset = []
for d in raw_dataset:
article = d["article"]
origin = "cnndm" if len(d["hash"]) >= 40 else "xsum"
if d["hash"] not in valid_hashes:
continue
summ_labels = []
annotator_labels = {}
for annot in d["summary_sentences_annotations"]:
annot_vals = [an for ans in annot.values() for an in ans]
noerror_count = len([an for an in annot_vals if an=="NoE"])
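                # A sentence counts as factual when at least two annotators marked it "NoE" (no error)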
label = 1 if noerror_count >= 2 else 0
summ_labels.append(label)
for anno_name, anno in annot.items():
if anno_name not in annotator_labels:
annotator_labels[anno_name] = []
annotator_labels[anno_name] += anno
annotations = [1 if all(a=="NoE" for a in annos) else 0 for annos in annotator_labels.values()]
label = 0 if any(sl==0 for sl in summ_labels) else 1
error_type = "NoE"
if label == 0:
errors = [anno for annos in annotator_labels.values() for anno in annos if anno != "NoE"]
error_type = Counter(errors).most_common(1)[0][0]
summary = d["summary"]
dataset.append({"document": article, "claim": summary, "label": label, "cut": self.cut, "hash": d["hash"], "model_name": d["model_name"], "annotations": annotations, "dataset": "frank", "origin": origin, "error_type": error_type})
self.datasets.append({"name": "frank", "dataset": dataset})
def get_dataset(self, dataset_name):
for dataset in self.datasets:
if dataset["name"] == dataset_name:
return dataset["dataset"]
raise ValueError("Unrecognized dataset name: %s" % (dataset_name))
def print_stats(self):
dataset_stats = []
for dataset in self.datasets:
N_pos, N_neg = len([d for d in dataset["dataset"] if d["label"]==1]), len([d for d in dataset["dataset"] if d["label"]==0])
dataset_stats.append({"name": dataset["name"], "N": len(dataset["dataset"]), "N_pos": N_pos, "N_neg": N_neg, "frac_pos": N_pos/(N_pos+N_neg)})
print(pd.DataFrame(dataset_stats))
def evaluate(self, scorer):
benchmark = []
for dataset in self.datasets:
dataset_labels = [d["label"] for d in dataset["dataset"]]
dataset_preds = scorer.score([d["document"] for d in dataset["dataset"]], [d["claim"] for d in dataset["dataset"]])["scores"]
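            # Sweep decision thresholds over the raw scores and keep the best-performing
            # one per dataset (presumably by F1, given the returned dataset_f1)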
dataset_thresh, dataset_f1 = utils_scorer.choose_best_threshold(dataset_labels, dataset_preds)
benchmark.append({"name": dataset["name"], "score": dataset_f1, "threshold": dataset_thresh})
return {"overall_score": np.mean([t["score"] for t in benchmark]), "benchmark": benchmark}
if __name__ == "__main__":
import random
for cut in ["val", "test"]:
summac_benchmark = SummaCBenchmark(benchmark_folder="/home/tingu/data/summac_benchmark2/", cut=cut)
print("============= SUMMAC %s ===============" % (cut.upper()))
summac_benchmark.print_stats()
for dataset in summac_benchmark.datasets:
print("\n============= %s ===============" % (dataset["name"]))
random.shuffle(dataset["dataset"])
print(dataset["dataset"][0]["document"][:400])
print("-------------")
print(dataset["dataset"][0]["claim"])
print("-------------")
print(dataset["dataset"][0]["label"])
| [
"os.path.exists",
"numpy.mean",
"os.listdir",
"json.loads",
"random.shuffle",
"os.makedirs",
"pandas.DataFrame",
"zipfile.ZipFile",
"utils_misc.download_file_from_google_drive",
"os.path.join",
"requests.get",
"collections.Counter",
"datasets.load_dataset",
"utils_scorer.choose_best_threshold",
"pandas.read_excel",
"json.load",
"csv.reader",
"os.remove"
] | [((2271, 2320), 'os.path.join', 'os.path.join', (['self.benchmark_folder', '"""cogensumm/"""'], {}), "(self.benchmark_folder, 'cogensumm/')\n", (2283, 2320), False, 'import json, os, pandas as pd, numpy as np, csv\n'), ((3001, 3027), 'os.listdir', 'os.listdir', (['dataset_folder'], {}), '(dataset_folder)\n', (3011, 3027), False, 'import json, os, pandas as pd, numpy as np, csv\n'), ((5311, 5360), 'os.path.join', 'os.path.join', (['self.benchmark_folder', '"""xsumfaith/"""'], {}), "(self.benchmark_folder, 'xsumfaith/')\n", (5323, 5360), False, 'import json, os, pandas as pd, numpy as np, csv\n'), ((5878, 5954), 'os.path.join', 'os.path.join', (['dataset_folder', '"""hallucination_annotations_xsum_summaries.csv"""'], {}), "(dataset_folder, 'hallucination_annotations_xsum_summaries.csv')\n", (5890, 5954), False, 'import json, os, pandas as pd, numpy as np, csv\n'), ((7633, 7680), 'os.path.join', 'os.path.join', (['self.benchmark_folder', '"""polytope"""'], {}), "(self.benchmark_folder, 'polytope')\n", (7645, 7680), False, 'import json, os, pandas as pd, numpy as np, csv\n'), ((8383, 8409), 'os.listdir', 'os.listdir', (['dataset_folder'], {}), '(dataset_folder)\n', (8393, 8409), False, 'import json, os, pandas as pd, numpy as np, csv\n'), ((10282, 10328), 'os.path.join', 'os.path.join', (['self.benchmark_folder', '"""factcc/"""'], {}), "(self.benchmark_folder, 'factcc/')\n", (10294, 10328), False, 'import json, os, pandas as pd, numpy as np, csv\n'), ((13025, 13073), 'os.path.join', 'os.path.join', (['self.benchmark_folder', '"""summeval/"""'], {}), "(self.benchmark_folder, 'summeval/')\n", (13037, 13073), False, 'import json, os, pandas as pd, numpy as np, csv\n'), ((13087, 13157), 'os.path.join', 'os.path.join', (['dataset_folder', '"""model_annotations.aligned.scored.jsonl"""'], {}), "(dataset_folder, 'model_annotations.aligned.scored.jsonl')\n", (13099, 13157), False, 'import json, os, pandas as pd, numpy as np, csv\n'), ((14888, 14933), 'os.path.join', 'os.path.join', (['self.benchmark_folder', '"""frank/"""'], {}), "(self.benchmark_folder, 'frank/')\n", (14900, 14933), False, 'import json, os, pandas as pd, numpy as np, csv\n'), ((15457, 15520), 'os.path.join', 'os.path.join', (['dataset_folder', '"""human_annotations_sentence.json"""'], {}), "(dataset_folder, 'human_annotations_sentence.json')\n", (15469, 15520), False, 'import json, os, pandas as pd, numpy as np, csv\n'), ((15545, 15597), 'os.path.join', 'os.path.join', (['dataset_folder', '"""validation_split.txt"""'], {}), "(dataset_folder, 'validation_split.txt')\n", (15557, 15597), False, 'import json, os, pandas as pd, numpy as np, csv\n'), ((15623, 15669), 'os.path.join', 'os.path.join', (['dataset_folder', '"""test_split.txt"""'], {}), "(dataset_folder, 'test_split.txt')\n", (15635, 15669), False, 'import json, os, pandas as pd, numpy as np, csv\n'), ((505, 537), 'os.path.exists', 'os.path.exists', (['benchmark_folder'], {}), '(benchmark_folder)\n', (519, 537), False, 'import json, os, pandas as pd, numpy as np, csv\n'), ((551, 580), 'os.makedirs', 'os.makedirs', (['benchmark_folder'], {}), '(benchmark_folder)\n', (562, 580), False, 'import json, os, pandas as pd, numpy as np, csv\n'), ((1505, 1543), 'datasets.load_dataset', 'load_dataset', (['"""cnn_dailymail"""', '"""3.0.0"""'], {}), "('cnn_dailymail', '3.0.0')\n", (1517, 1543), False, 'from datasets import load_dataset\n'), ((2336, 2366), 'os.path.exists', 'os.path.exists', (['dataset_folder'], {}), '(dataset_folder)\n', (2350, 2366), False, 'import json, os, pandas as 
pd, numpy as np, csv\n'), ((2460, 2487), 'os.makedirs', 'os.makedirs', (['dataset_folder'], {}), '(dataset_folder)\n', (2471, 2487), False, 'import json, os, pandas as pd, numpy as np, csv\n'), ((2507, 2654), 'requests.get', 'requests.get', (['"""https://tudatalib.ulb.tu-darmstadt.de/bitstream/handle/tudatalib/2002/summary-correctness-v1.0.zip?sequence=3&isAllowed=y"""'], {}), "(\n 'https://tudatalib.ulb.tu-darmstadt.de/bitstream/handle/tudatalib/2002/summary-correctness-v1.0.zip?sequence=3&isAllowed=y'\n )\n", (2519, 2654), False, 'import requests, zipfile, tarfile\n'), ((2668, 2728), 'os.path.join', 'os.path.join', (['dataset_folder', '"""summary-correctness-v1.0.zip"""'], {}), "(dataset_folder, 'summary-correctness-v1.0.zip')\n", (2680, 2728), False, 'import json, os, pandas as pd, numpy as np, csv\n'), ((2935, 2954), 'os.remove', 'os.remove', (['zip_file'], {}), '(zip_file)\n', (2944, 2954), False, 'import json, os, pandas as pd, numpy as np, csv\n'), ((5376, 5406), 'os.path.exists', 'os.path.exists', (['dataset_folder'], {}), '(dataset_folder)\n', (5390, 5406), False, 'import json, os, pandas as pd, numpy as np, csv\n'), ((5495, 5522), 'os.makedirs', 'os.makedirs', (['dataset_folder'], {}), '(dataset_folder)\n', (5506, 5522), False, 'import json, os, pandas as pd, numpy as np, csv\n'), ((5547, 5703), 'requests.get', 'requests.get', (['"""https://github.com/google-research-datasets/xsum_hallucination_annotations/raw/master/hallucination_annotations_xsum_summaries.csv"""'], {}), "(\n 'https://github.com/google-research-datasets/xsum_hallucination_annotations/raw/master/hallucination_annotations_xsum_summaries.csv'\n )\n", (5559, 5703), False, 'import requests, zipfile, tarfile\n'), ((7696, 7726), 'os.path.exists', 'os.path.exists', (['dataset_folder'], {}), '(dataset_folder)\n', (7710, 7726), False, 'import json, os, pandas as pd, numpy as np, csv\n'), ((7819, 7846), 'os.makedirs', 'os.makedirs', (['dataset_folder'], {}), '(dataset_folder)\n', (7830, 7846), False, 'import json, os, pandas as pd, numpy as np, csv\n'), ((8428, 8460), 'os.path.join', 'os.path.join', (['dataset_folder', 'fn'], {}), '(dataset_folder, fn)\n', (8440, 8460), False, 'import json, os, pandas as pd, numpy as np, csv\n'), ((8489, 8539), 'pandas.read_excel', 'pd.read_excel', (['fn'], {'sheet_name': '"""Scores per segment"""'}), "(fn, sheet_name='Scores per segment')\n", (8502, 8539), True, 'import json, os, pandas as pd, numpy as np, csv\n'), ((10344, 10374), 'os.path.exists', 'os.path.exists', (['dataset_folder'], {}), '(dataset_folder)\n', (10358, 10374), False, 'import json, os, pandas as pd, numpy as np, csv\n'), ((10465, 10492), 'os.makedirs', 'os.makedirs', (['dataset_folder'], {}), '(dataset_folder)\n', (10476, 10492), False, 'import json, os, pandas as pd, numpy as np, csv\n'), ((11960, 12048), 'os.path.join', 'os.path.join', (['dataset_folder', "('unpaired_annotated_data/%s/data-dev.jsonl' % self.cut)"], {}), "(dataset_folder, 'unpaired_annotated_data/%s/data-dev.jsonl' %\n self.cut)\n", (11972, 12048), False, 'import json, os, pandas as pd, numpy as np, csv\n'), ((13173, 13203), 'os.path.exists', 'os.path.exists', (['dataset_folder'], {}), '(dataset_folder)\n', (13187, 13203), False, 'import json, os, pandas as pd, numpy as np, csv\n'), ((13296, 13323), 'os.makedirs', 'os.makedirs', (['dataset_folder'], {}), '(dataset_folder)\n', (13307, 13323), False, 'import json, os, pandas as pd, numpy as np, csv\n'), ((13430, 13517), 'utils_misc.download_file_from_google_drive', 
'utils_misc.download_file_from_google_drive', (['"""1d2Iaz3jNraURP1i7CfTqPIj8REZMJ3tS"""', 'fn'], {}), "('1d2Iaz3jNraURP1i7CfTqPIj8REZMJ3tS',\n fn)\n", (13472, 13517), False, 'import utils_scorer, utils_misc\n'), ((14949, 14979), 'os.path.exists', 'os.path.exists', (['dataset_folder'], {}), '(dataset_folder)\n', (14963, 14979), False, 'import json, os, pandas as pd, numpy as np, csv\n'), ((15069, 15096), 'os.makedirs', 'os.makedirs', (['dataset_folder'], {}), '(dataset_folder)\n', (15080, 15096), False, 'import json, os, pandas as pd, numpy as np, csv\n'), ((15880, 15892), 'json.load', 'json.load', (['f'], {}), '(f)\n', (15889, 15892), False, 'import json, os, pandas as pd, numpy as np, csv\n'), ((18108, 18135), 'pandas.DataFrame', 'pd.DataFrame', (['dataset_stats'], {}), '(dataset_stats)\n', (18120, 18135), True, 'import json, os, pandas as pd, numpy as np, csv\n'), ((18482, 18547), 'utils_scorer.choose_best_threshold', 'utils_scorer.choose_best_threshold', (['dataset_labels', 'dataset_preds'], {}), '(dataset_labels, dataset_preds)\n', (18516, 18547), False, 'import utils_scorer, utils_misc\n'), ((18687, 18727), 'numpy.mean', 'np.mean', (["[t['score'] for t in benchmark]"], {}), "([t['score'] for t in benchmark])\n", (18694, 18727), True, 'import json, os, pandas as pd, numpy as np, csv\n'), ((19191, 19225), 'random.shuffle', 'random.shuffle', (["dataset['dataset']"], {}), "(dataset['dataset'])\n", (19205, 19225), False, 'import random\n'), ((1860, 1880), 'datasets.load_dataset', 'load_dataset', (['"""xsum"""'], {}), "('xsum')\n", (1872, 1880), False, 'from datasets import load_dataset\n'), ((2829, 2859), 'zipfile.ZipFile', 'zipfile.ZipFile', (['zip_file', '"""r"""'], {}), "(zip_file, 'r')\n", (2844, 2859), False, 'import requests, zipfile, tarfile\n'), ((3183, 3195), 'json.load', 'json.load', (['f'], {}), '(f)\n', (3192, 3195), False, 'import json, os, pandas as pd, numpy as np, csv\n'), ((6033, 6046), 'csv.reader', 'csv.reader', (['f'], {}), '(f)\n', (6043, 6046), False, 'import json, os, pandas as pd, numpy as np, csv\n'), ((8156, 8173), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (8168, 8173), False, 'import requests, zipfile, tarfile\n'), ((10789, 10806), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (10801, 10806), False, 'import requests, zipfile, tarfile\n'), ((15244, 15331), 'requests.get', 'requests.get', (["('https://raw.githubusercontent.com/artidoro/frank/main/data/%s' % fn)"], {}), "(\n 'https://raw.githubusercontent.com/artidoro/frank/main/data/%s' % fn)\n", (15256, 15331), False, 'import requests, zipfile, tarfile\n'), ((3112, 3144), 'os.path.join', 'os.path.join', (['dataset_folder', 'fn'], {}), '(dataset_folder, fn)\n', (3124, 3144), False, 'import json, os, pandas as pd, numpy as np, csv\n'), ((5716, 5792), 'os.path.join', 'os.path.join', (['dataset_folder', '"""hallucination_annotations_xsum_summaries.csv"""'], {}), "(dataset_folder, 'hallucination_annotations_xsum_summaries.csv')\n", (5728, 5792), False, 'import json, os, pandas as pd, numpy as np, csv\n'), ((8895, 8936), 'pandas.read_excel', 'pd.read_excel', (['fn'], {'sheet_name': '"""Error Log"""'}), "(fn, sheet_name='Error Log')\n", (8908, 8936), True, 'import json, os, pandas as pd, numpy as np, csv\n'), ((11104, 11142), 'os.path.join', 'os.path.join', (['dataset_folder', 'zip_name'], {}), '(dataset_folder, zip_name)\n', (11116, 11142), False, 'import json, os, pandas as pd, numpy as np, csv\n'), ((11224, 11314), 'os.path.join', 'os.path.join', (['dataset_folder', 
'"""unpaired_generated_data/data-original/data-train.jsonl"""'], {}), "(dataset_folder,\n 'unpaired_generated_data/data-original/data-train.jsonl')\n", (11236, 11314), False, 'import json, os, pandas as pd, numpy as np, csv\n'), ((11483, 11499), 'json.loads', 'json.loads', (['line'], {}), '(line)\n', (11493, 11499), False, 'import json, os, pandas as pd, numpy as np, csv\n'), ((13610, 13626), 'json.loads', 'json.loads', (['line'], {}), '(line)\n', (13620, 13626), False, 'import json, os, pandas as pd, numpy as np, csv\n'), ((8200, 8287), 'os.path.join', 'os.path.join', (['dataset_folder', "('Human_Annotation_Summarization_%s.xlsm' % model_name)"], {}), "(dataset_folder, 'Human_Annotation_Summarization_%s.xlsm' %\n model_name)\n", (8212, 8287), False, 'import json, os, pandas as pd, numpy as np, csv\n'), ((10833, 10871), 'os.path.join', 'os.path.join', (['dataset_folder', 'zip_name'], {}), '(dataset_folder, zip_name)\n', (10845, 10871), False, 'import json, os, pandas as pd, numpy as np, csv\n'), ((10975, 11013), 'os.path.join', 'os.path.join', (['dataset_folder', 'zip_name'], {}), '(dataset_folder, zip_name)\n', (10987, 11013), False, 'import json, os, pandas as pd, numpy as np, csv\n'), ((12184, 12200), 'json.loads', 'json.loads', (['line'], {}), '(line)\n', (12194, 12200), False, 'import json, os, pandas as pd, numpy as np, csv\n'), ((15353, 15385), 'os.path.join', 'os.path.join', (['dataset_folder', 'fn'], {}), '(dataset_folder, fn)\n', (15365, 15385), False, 'import json, os, pandas as pd, numpy as np, csv\n'), ((6705, 6720), 'collections.Counter', 'Counter', (['labels'], {}), '(labels)\n', (6712, 6720), False, 'from collections import Counter\n'), ((17081, 17096), 'collections.Counter', 'Counter', (['errors'], {}), '(errors)\n', (17088, 17096), False, 'from collections import Counter\n')] |
# -*- coding: utf-8 -*-
# Copyright (c) 2019, libermatic. and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
@frappe.whitelist()
def setup_defaults():
_update_settings()
def _update_settings():
def update(doctype, params):
doc = frappe.get_single(doctype)
doc.update(params)
doc.save(ignore_permissions=True)
settings = {
"Selling Settings": {"cust_master_name": "Naming Series"},
"Buying Settings": {"supp_master_name": "Naming Series"},
"Stock Settings": {"item_naming_by": "Naming Series"},
}
return [update(*x) for x in settings.items()]
| [
"frappe.get_single",
"frappe.whitelist"
] | [((184, 202), 'frappe.whitelist', 'frappe.whitelist', ([], {}), '()\n', (200, 202), False, 'import frappe\n'), ((321, 347), 'frappe.get_single', 'frappe.get_single', (['doctype'], {}), '(doctype)\n', (338, 347), False, 'import frappe\n')] |
# encoding: UTF-8
from datetime import datetime
from program_top.components.standalone_working_class.working_type_base import front_end_base
from program_top.utilities import my_datetime
class sub_trading_strategy(front_end_base):
    '''
    Base class for sub-strategies.
    '''
def __init__(self,hub_strategy_ref=None,strategy_config=None):
        '''
        Initialization tasks:
        1. Load the instrument information.
        2. Read the data series or data panel for the corresponding instrument.
        3. Load the current account state to determine existing positions, orders, and the capital quota this strategy may use (every order must carry the tag of the strategy it belongs to; after a fill, the corresponding position record kept in the master account must note which strategy produced it).
        4. One sub-strategy corresponds to exactly one traded instrument in one account.
        Initialization tasks of the concrete strategy:
        1. Load the strategy function and the handlers for the various events (the strategy logic itself).
        '''
super(sub_trading_strategy, self).__init__()
self.strategy_name=strategy_config['sub_strategy_name']
self.weight_percentatge=float(strategy_config['max_weight'])
        self._data_period=my_datetime.get_timedelta_from_string(strategy_config['period'])  # data period
        self._is_backtesting=(strategy_config['backtest_or_trading']=='backtest')  # whether this is a backtest; live or simulated trading uses 'trading', otherwise 'backtest'
if self._is_backtesting:
self._backtest_start=datetime.strptime(strategy_config['start_moment_if_backtest'],my_datetime.data_buffer_date_format)
self._backtest_end=datetime.strptime(strategy_config['end_moment_if_backtest'],my_datetime.data_buffer_date_format)
pass
def _data_panel_initialisation(self, date_back_time=None):
        '''
        Initialize the look-back data panel of an instrument from the given moment onwards; to be implemented by subclasses.
        '''
pass
pass | [
"datetime.datetime.strptime",
"program_top.utilities.my_datetime.get_timedelta_from_string"
] | [((712, 776), 'program_top.utilities.my_datetime.get_timedelta_from_string', 'my_datetime.get_timedelta_from_string', (["strategy_config['period']"], {}), "(strategy_config['period'])\n", (749, 776), False, 'from program_top.utilities import my_datetime\n'), ((951, 1055), 'datetime.datetime.strptime', 'datetime.strptime', (["strategy_config['start_moment_if_backtest']", 'my_datetime.data_buffer_date_format'], {}), "(strategy_config['start_moment_if_backtest'], my_datetime.\n data_buffer_date_format)\n", (968, 1055), False, 'from datetime import datetime\n'), ((1072, 1174), 'datetime.datetime.strptime', 'datetime.strptime', (["strategy_config['end_moment_if_backtest']", 'my_datetime.data_buffer_date_format'], {}), "(strategy_config['end_moment_if_backtest'], my_datetime.\n data_buffer_date_format)\n", (1089, 1174), False, 'from datetime import datetime\n')] |
from flask import Blueprint, request, render_template
welcome_bp = Blueprint('welcome', __name__, url_prefix='/welcome', template_folder='templates', static_folder='static')
@welcome_bp.route("/")
def welcome():
first_name_or_title = request.args.get('name', '')
return render_template("welcome/index.html", name=first_name_or_title)
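# Usage sketch (hypothetical application module, not part of this blueprint):
#   from flask import Flask
#   app = Flask(__name__)
#   app.register_blueprint(welcome_bp)  # serves GET /welcome/?name=<title>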
| [
"flask.render_template",
"flask.request.args.get",
"flask.Blueprint"
] | [((68, 179), 'flask.Blueprint', 'Blueprint', (['"""welcome"""', '__name__'], {'url_prefix': '"""/welcome"""', 'template_folder': '"""templates"""', 'static_folder': '"""static"""'}), "('welcome', __name__, url_prefix='/welcome', template_folder=\n 'templates', static_folder='static')\n", (77, 179), False, 'from flask import Blueprint, request, render_template\n'), ((241, 269), 'flask.request.args.get', 'request.args.get', (['"""name"""', '""""""'], {}), "('name', '')\n", (257, 269), False, 'from flask import Blueprint, request, render_template\n'), ((281, 344), 'flask.render_template', 'render_template', (['"""welcome/index.html"""'], {'name': 'first_name_or_title'}), "('welcome/index.html', name=first_name_or_title)\n", (296, 344), False, 'from flask import Blueprint, request, render_template\n')] |
import codecademylib
import pandas as pd
orders = pd.read_csv('orders.csv')
print(orders.head(10))
most_expensive = orders.price.max()
num_colors = orders.shoe_color.nunique() | [
"pandas.read_csv"
] | [((51, 76), 'pandas.read_csv', 'pd.read_csv', (['"""orders.csv"""'], {}), "('orders.csv')\n", (62, 76), True, 'import pandas as pd\n')] |
# vim: sw=4:ts=4:et
import unittest
from exchangelib.errors import DoesNotExist
from saq.email import (
normalize_email_address,
decode_rfc2822,
normalize_message_id,
get_messages_from_exchangelib_folder,
get_exchange_build,
EWSApi,
)
from saq.test import *
class TestCase(ACEBasicTestCase):
def test_normalize_email_address(self):
self.assertEquals(normalize_email_address('<EMAIL>'), '<EMAIL>')
self.assertEquals(normalize_email_address('<<EMAIL>>'), '<EMAIL>')
self.assertEquals(normalize_email_address('<<EMAIL>>'), '<EMAIL>')
self.assertEquals(normalize_email_address('"user name" <<EMAIL>>'), '<EMAIL>')
self.assertEquals(normalize_email_address('user name <<EMAIL>>'), '<EMAIL>')
def test_decode_rfc2822(self):
self.assertEquals(decode_rfc2822('=?utf-8?B?UmU6IFVyZ2VudA==?='), 'Re: Urgent')
self.assertEquals(decode_rfc2822('=?UTF-8?B?RklOQUwgREFZIC0gRU1BSUwgRVhDTFVTSVZFIC0gJDMyLjk5IEp1?= =?UTF-8?B?c3QgQmFzaWNz4oSiIDEwLVJlYW0gQ2FzZSBQYXBlcg==?='),
'FINAL DAY - EMAIL EXCLUSIVE - $32.99 Just Basics™ 10-Ream Case Paper')
self.assertEquals(decode_rfc2822('=?US-ASCII?Q?CSMS#_19-000228_-_ACE_CERTIFICATION_Scheduled_Ma?= =?US-ASCII?Q?intenance,_Wed._May_1,_2019_@_1700_ET_to_2000_ET?='),
'CSMS# 19-000228 - ACE CERTIFICATION Scheduled Maintenance, Wed. May 1, 2019 @ 1700 ET to 2000 ET')
self.assertEquals(decode_rfc2822('=?Windows-1252?Q?Money_Talk_=96_Profit=99_Performance_Monitor_(Honeywell_?= =?Windows-1252?Q?Webinar)?='),
'Money Talk – Profit™ Performance Monitor (Honeywell Webinar)')
self.assertEquals(decode_rfc2822('=?ISO-8859-1?Q?Puede_que_algunos_contribuyentes_tengan_?= =?ISO-8859-1?Q?que_enmendar_su_declaraci=F3n_de_impuestos?='),
'Puede que algunos contribuyentes tengan que enmendar su declaración de impuestos')
self.assertEquals(decode_rfc2822('=?GBK?B?UmU6gYbKssC8tcTNxo9Wst/C1A==?='),
'Re:亞什兰的推廣策略')
class TestMessageIdFormatter(unittest.TestCase):
def setUp(self):
self.expected_message_id = '<<EMAIL>>'
# TODO - move check_message_id to shared location as well as the tests for it.
def test_normalize_message_id_no_brackets(self):
message_id = '<EMAIL>'
self.assertEqual(self.expected_message_id, normalize_message_id(message_id))
def test_normalize_message_id_prepended_bracket_only(self):
message_id = '<<EMAIL>'
self.assertEqual(self.expected_message_id, normalize_message_id(message_id))
def test_normalize_message_id_appended_bracket_only(self):
message_id = '<EMAIL>>'
self.assertEqual(self.expected_message_id, normalize_message_id(message_id))
def test_normalize_message_id_already_proper_format(self):
self.assertEqual(self.expected_message_id, normalize_message_id(self.expected_message_id))
def test_normalize_message_id_with_stripable_string(self):
message_id = ' <EMAIL>>\n'
self.assertEqual(self.expected_message_id, normalize_message_id(message_id))
class TestGettingMessagesFromExchangelibFolder(unittest.TestCase):
def test_get_messages_from_exchangelib_folder_doesnt_exist(self):
class TestFolder:
absolute = 'placeholder'
def filter(*args, **kwargs):
raise DoesNotExist("doesnt exist testing")
folder = TestFolder
result = get_messages_from_exchangelib_folder(folder, '<<EMAIL>>')
self.assertEqual([], result)
def test_get_messages_from_exchangelib_folder_success(self):
class TestFolder:
absolute = 'placeholder'
def filter(*args, **kwargs):
return ['expected1', 'expected2']
folder = TestFolder
result = get_messages_from_exchangelib_folder(folder, '<<EMAIL>>')
self.assertEqual(['expected1', 'expected2'], result)
class TestExchangeBuild(unittest.TestCase):
def test_get_exchange_build_value_error_invalid_version(self):
class FakeModule:
pass
self.assertRaises(ValueError, get_exchange_build, version='NotExchange', version_module=FakeModule)
def test_get_exchange_build_value_attribute_error(self):
class FakeModule:
pass
self.assertRaises(AttributeError, get_exchange_build, version='Exchange2016', version_module=FakeModule)
def test_get_exchange_build_value_success(self):
class FakeModule:
EXCHANGE_2010_SP2 = 'expected'
r = get_exchange_build(version="Exchange2010_SP2", version_module=FakeModule)
self.assertEqual('expected', r)
class TestEWSApi(unittest.TestCase):
def setUp(self):
class AccountFake:
def __init__(self, email, access_type=None, credentials=None, config=None):
self.email = email
self.access_type = access_type
self.credentials = credentials
self.config = config
self.primary_smtp_address = self.email
self.account_class = AccountFake
def test_api_init_custom_adapter(self):
class FakeAdapter:
def __init__(self):
pass
adapter = FakeAdapter()
import exchangelib
_ = EWSApi('user1', '<PASSWORD>', adapter=adapter)
self.assertIsInstance(exchangelib.protocol.BaseProtocol.HTTP_ADAPTER_CLS, FakeAdapter)
def test_initialize_no_password_raise_value_error(self):
ews_api = EWSApi('user1', '')
self.assertRaises(ValueError, ews_api.initialize)
def test_initialize_password_is_good(self):
ews_api = EWSApi('user1', '<PASSWORD>')
try:
ews_api.initialize()
except Exception as e:
self.fail(f"Should not have raised exception but raised {e.__class__}: '{e}'")
def test_load_account_new_account(self):
ews_api = EWSApi('user1', '<PASSWORD>')
ews_api.load_account('<EMAIL>', account_class=self.account_class)
self.assertEqual('<EMAIL>', ews_api._account.primary_smtp_address)
self.assertIsInstance(ews_api._account, self.account_class)
def test_load_account_existing_account_same_smtp_address(self):
ews_api = EWSApi('user1', '<PASSWORD>')
ews_api._account = self.account_class(
'<EMAIL>',
access_type=ews_api.access_type,
credentials=ews_api.credentials,
config=ews_api.config,
)
class NotExpected:
def __init__(self, *args, **kwargs):
pass
ews_api.load_account(' <EMAIL> ', account_class=NotExpected)
self.assertEqual('<EMAIL>', ews_api._account.primary_smtp_address)
self.assertIsInstance(ews_api._account, self.account_class)
self.assertNotIsInstance(ews_api._account, NotExpected)
def test_load_account_existing_account_but_requesting_new_email(self):
ews_api = EWSApi('user1', '<PASSWORD>')
ews_api._account = self.account_class(
'<EMAIL>',
access_type=ews_api.access_type,
credentials=ews_api.credentials,
config=ews_api.config,
)
class Expected(self.account_class):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
ews_api.load_account('<EMAIL>', account_class=Expected)
self.assertEqual('<EMAIL>', ews_api._account.primary_smtp_address)
self.assertIsInstance(ews_api._account, Expected)
def test_get_account(self):
ews_api = EWSApi('user1', '<PASSWORD>')
ews_api._account = 'expected'
def stub(*args, **kwargs):
pass
result = ews_api.get_account('<EMAIL>', load=stub)
self.assertEqual('expected', result)
def test_account_property(self):
ews_api = EWSApi('user1', '<PASSWORD>')
ews_api._account = 'expected'
self.assertEqual('expected', ews_api.account)
| [
"saq.email.get_exchange_build",
"exchangelib.errors.DoesNotExist",
"saq.email.decode_rfc2822",
"saq.email.normalize_email_address",
"saq.email.get_messages_from_exchangelib_folder",
"saq.email.normalize_message_id",
"saq.email.EWSApi"
] | [((3519, 3576), 'saq.email.get_messages_from_exchangelib_folder', 'get_messages_from_exchangelib_folder', (['folder', '"""<<EMAIL>>"""'], {}), "(folder, '<<EMAIL>>')\n", (3555, 3576), False, 'from saq.email import normalize_email_address, decode_rfc2822, normalize_message_id, get_messages_from_exchangelib_folder, get_exchange_build, EWSApi\n'), ((3879, 3936), 'saq.email.get_messages_from_exchangelib_folder', 'get_messages_from_exchangelib_folder', (['folder', '"""<<EMAIL>>"""'], {}), "(folder, '<<EMAIL>>')\n", (3915, 3936), False, 'from saq.email import normalize_email_address, decode_rfc2822, normalize_message_id, get_messages_from_exchangelib_folder, get_exchange_build, EWSApi\n'), ((4615, 4688), 'saq.email.get_exchange_build', 'get_exchange_build', ([], {'version': '"""Exchange2010_SP2"""', 'version_module': 'FakeModule'}), "(version='Exchange2010_SP2', version_module=FakeModule)\n", (4633, 4688), False, 'from saq.email import normalize_email_address, decode_rfc2822, normalize_message_id, get_messages_from_exchangelib_folder, get_exchange_build, EWSApi\n'), ((5365, 5411), 'saq.email.EWSApi', 'EWSApi', (['"""user1"""', '"""<PASSWORD>"""'], {'adapter': 'adapter'}), "('user1', '<PASSWORD>', adapter=adapter)\n", (5371, 5411), False, 'from saq.email import normalize_email_address, decode_rfc2822, normalize_message_id, get_messages_from_exchangelib_folder, get_exchange_build, EWSApi\n'), ((5587, 5606), 'saq.email.EWSApi', 'EWSApi', (['"""user1"""', '""""""'], {}), "('user1', '')\n", (5593, 5606), False, 'from saq.email import normalize_email_address, decode_rfc2822, normalize_message_id, get_messages_from_exchangelib_folder, get_exchange_build, EWSApi\n'), ((5732, 5761), 'saq.email.EWSApi', 'EWSApi', (['"""user1"""', '"""<PASSWORD>"""'], {}), "('user1', '<PASSWORD>')\n", (5738, 5761), False, 'from saq.email import normalize_email_address, decode_rfc2822, normalize_message_id, get_messages_from_exchangelib_folder, get_exchange_build, EWSApi\n'), ((5994, 6023), 'saq.email.EWSApi', 'EWSApi', (['"""user1"""', '"""<PASSWORD>"""'], {}), "('user1', '<PASSWORD>')\n", (6000, 6023), False, 'from saq.email import normalize_email_address, decode_rfc2822, normalize_message_id, get_messages_from_exchangelib_folder, get_exchange_build, EWSApi\n'), ((6328, 6357), 'saq.email.EWSApi', 'EWSApi', (['"""user1"""', '"""<PASSWORD>"""'], {}), "('user1', '<PASSWORD>')\n", (6334, 6357), False, 'from saq.email import normalize_email_address, decode_rfc2822, normalize_message_id, get_messages_from_exchangelib_folder, get_exchange_build, EWSApi\n'), ((7030, 7059), 'saq.email.EWSApi', 'EWSApi', (['"""user1"""', '"""<PASSWORD>"""'], {}), "('user1', '<PASSWORD>')\n", (7036, 7059), False, 'from saq.email import normalize_email_address, decode_rfc2822, normalize_message_id, get_messages_from_exchangelib_folder, get_exchange_build, EWSApi\n'), ((7656, 7685), 'saq.email.EWSApi', 'EWSApi', (['"""user1"""', '"""<PASSWORD>"""'], {}), "('user1', '<PASSWORD>')\n", (7662, 7685), False, 'from saq.email import normalize_email_address, decode_rfc2822, normalize_message_id, get_messages_from_exchangelib_folder, get_exchange_build, EWSApi\n'), ((7936, 7965), 'saq.email.EWSApi', 'EWSApi', (['"""user1"""', '"""<PASSWORD>"""'], {}), "('user1', '<PASSWORD>')\n", (7942, 7965), False, 'from saq.email import normalize_email_address, decode_rfc2822, normalize_message_id, get_messages_from_exchangelib_folder, get_exchange_build, EWSApi\n'), ((391, 425), 'saq.email.normalize_email_address', 'normalize_email_address', (['"""<EMAIL>"""'], {}), 
"('<EMAIL>')\n", (414, 425), False, 'from saq.email import normalize_email_address, decode_rfc2822, normalize_message_id, get_messages_from_exchangelib_folder, get_exchange_build, EWSApi\n'), ((464, 500), 'saq.email.normalize_email_address', 'normalize_email_address', (['"""<<EMAIL>>"""'], {}), "('<<EMAIL>>')\n", (487, 500), False, 'from saq.email import normalize_email_address, decode_rfc2822, normalize_message_id, get_messages_from_exchangelib_folder, get_exchange_build, EWSApi\n'), ((539, 575), 'saq.email.normalize_email_address', 'normalize_email_address', (['"""<<EMAIL>>"""'], {}), "('<<EMAIL>>')\n", (562, 575), False, 'from saq.email import normalize_email_address, decode_rfc2822, normalize_message_id, get_messages_from_exchangelib_folder, get_exchange_build, EWSApi\n'), ((614, 662), 'saq.email.normalize_email_address', 'normalize_email_address', (['""""user name" <<EMAIL>>"""'], {}), '(\'"user name" <<EMAIL>>\')\n', (637, 662), False, 'from saq.email import normalize_email_address, decode_rfc2822, normalize_message_id, get_messages_from_exchangelib_folder, get_exchange_build, EWSApi\n'), ((701, 747), 'saq.email.normalize_email_address', 'normalize_email_address', (['"""user name <<EMAIL>>"""'], {}), "('user name <<EMAIL>>')\n", (724, 747), False, 'from saq.email import normalize_email_address, decode_rfc2822, normalize_message_id, get_messages_from_exchangelib_folder, get_exchange_build, EWSApi\n'), ((822, 868), 'saq.email.decode_rfc2822', 'decode_rfc2822', (['"""=?utf-8?B?UmU6IFVyZ2VudA==?="""'], {}), "('=?utf-8?B?UmU6IFVyZ2VudA==?=')\n", (836, 868), False, 'from saq.email import normalize_email_address, decode_rfc2822, normalize_message_id, get_messages_from_exchangelib_folder, get_exchange_build, EWSApi\n'), ((910, 1059), 'saq.email.decode_rfc2822', 'decode_rfc2822', (['"""=?UTF-8?B?RklOQUwgREFZIC0gRU1BSUwgRVhDTFVTSVZFIC0gJDMyLjk5IEp1?= =?UTF-8?B?c3QgQmFzaWNz4oSiIDEwLVJlYW0gQ2FzZSBQYXBlcg==?="""'], {}), "(\n '=?UTF-8?B?RklOQUwgREFZIC0gRU1BSUwgRVhDTFVTSVZFIC0gJDMyLjk5IEp1?= =?UTF-8?B?c3QgQmFzaWNz4oSiIDEwLVJlYW0gQ2FzZSBQYXBlcg==?='\n )\n", (924, 1059), False, 'from saq.email import normalize_email_address, decode_rfc2822, normalize_message_id, get_messages_from_exchangelib_folder, get_exchange_build, EWSApi\n'), ((1176, 1331), 'saq.email.decode_rfc2822', 'decode_rfc2822', (['"""=?US-ASCII?Q?CSMS#_19-000228_-_ACE_CERTIFICATION_Scheduled_Ma?= =?US-ASCII?Q?intenance,_Wed._May_1,_2019_@_1700_ET_to_2000_ET?="""'], {}), "(\n '=?US-ASCII?Q?CSMS#_19-000228_-_ACE_CERTIFICATION_Scheduled_Ma?= =?US-ASCII?Q?intenance,_Wed._May_1,_2019_@_1700_ET_to_2000_ET?='\n )\n", (1190, 1331), False, 'from saq.email import normalize_email_address, decode_rfc2822, normalize_message_id, get_messages_from_exchangelib_folder, get_exchange_build, EWSApi\n'), ((1476, 1607), 'saq.email.decode_rfc2822', 'decode_rfc2822', (['"""=?Windows-1252?Q?Money_Talk_=96_Profit=99_Performance_Monitor_(Honeywell_?= =?Windows-1252?Q?Webinar)?="""'], {}), "(\n '=?Windows-1252?Q?Money_Talk_=96_Profit=99_Performance_Monitor_(Honeywell_?= =?Windows-1252?Q?Webinar)?='\n )\n", (1490, 1607), False, 'from saq.email import normalize_email_address, decode_rfc2822, normalize_message_id, get_messages_from_exchangelib_folder, get_exchange_build, EWSApi\n'), ((1716, 1861), 'saq.email.decode_rfc2822', 'decode_rfc2822', (['"""=?ISO-8859-1?Q?Puede_que_algunos_contribuyentes_tengan_?= =?ISO-8859-1?Q?que_enmendar_su_declaraci=F3n_de_impuestos?="""'], {}), "(\n '=?ISO-8859-1?Q?Puede_que_algunos_contribuyentes_tengan_?= 
=?ISO-8859-1?Q?que_enmendar_su_declaraci=F3n_de_impuestos?='\n )\n", (1730, 1861), False, 'from saq.email import normalize_email_address, decode_rfc2822, normalize_message_id, get_messages_from_exchangelib_folder, get_exchange_build, EWSApi\n'), ((1990, 2046), 'saq.email.decode_rfc2822', 'decode_rfc2822', (['"""=?GBK?B?UmU6gYbKssC8tcTNxo9Wst/C1A==?="""'], {}), "('=?GBK?B?UmU6gYbKssC8tcTNxo9Wst/C1A==?=')\n", (2004, 2046), False, 'from saq.email import normalize_email_address, decode_rfc2822, normalize_message_id, get_messages_from_exchangelib_folder, get_exchange_build, EWSApi\n'), ((2428, 2460), 'saq.email.normalize_message_id', 'normalize_message_id', (['message_id'], {}), '(message_id)\n', (2448, 2460), False, 'from saq.email import normalize_email_address, decode_rfc2822, normalize_message_id, get_messages_from_exchangelib_folder, get_exchange_build, EWSApi\n'), ((2610, 2642), 'saq.email.normalize_message_id', 'normalize_message_id', (['message_id'], {}), '(message_id)\n', (2630, 2642), False, 'from saq.email import normalize_email_address, decode_rfc2822, normalize_message_id, get_messages_from_exchangelib_folder, get_exchange_build, EWSApi\n'), ((2791, 2823), 'saq.email.normalize_message_id', 'normalize_message_id', (['message_id'], {}), '(message_id)\n', (2811, 2823), False, 'from saq.email import normalize_email_address, decode_rfc2822, normalize_message_id, get_messages_from_exchangelib_folder, get_exchange_build, EWSApi\n'), ((2940, 2986), 'saq.email.normalize_message_id', 'normalize_message_id', (['self.expected_message_id'], {}), '(self.expected_message_id)\n', (2960, 2986), False, 'from saq.email import normalize_email_address, decode_rfc2822, normalize_message_id, get_messages_from_exchangelib_folder, get_exchange_build, EWSApi\n'), ((3138, 3170), 'saq.email.normalize_message_id', 'normalize_message_id', (['message_id'], {}), '(message_id)\n', (3158, 3170), False, 'from saq.email import normalize_email_address, decode_rfc2822, normalize_message_id, get_messages_from_exchangelib_folder, get_exchange_build, EWSApi\n'), ((3437, 3473), 'exchangelib.errors.DoesNotExist', 'DoesNotExist', (['"""doesnt exist testing"""'], {}), "('doesnt exist testing')\n", (3449, 3473), False, 'from exchangelib.errors import DoesNotExist\n')] |
#!/usr/bin/env python
"""
Module implementing the Data class that manages data for
its associated PandasTable.
Created Jan 2014
Copyright (C) <NAME>
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
from types import *
import operator
import os, string, types, copy
import pickle
import numpy as np
import pandas as pd
from . import util
class TableModel(object):
"""A data model for the Table class that uses pandas
Args:
dataframe: pandas dataframe
rows: number of rows if empty table
columns: number of columns if empty table
"""
keywords = {'colors':'colors'}
def __init__(self, dataframe=None, rows=20, columns=5):
"""Constructor for table model. """
self.initialiseFields()
self.setup(dataframe, rows, columns)
return
def setup(self, dataframe, rows=20, columns=5):
"""Create table model"""
if not dataframe is None:
self.df = dataframe
else:
colnames = list(string.ascii_lowercase[:columns])
self.df = pd.DataFrame(index=range(rows),columns=colnames)
#self.df = self.getSampleData()
#self.reclist = self.df.index # not needed now?
return
@classmethod
def getSampleData(self, rows=400, cols=5, n=2):
"""Generate sample data
Args:
rows: no. of rows
cols: columns
n: length of column names
"""
import random
s = string.ascii_lowercase
def genstr(n=2):
return ''.join(random.choice(s) for i in range(n))
maxrows = 5e6
if rows>maxrows:
rows=maxrows
if cols>1e5:
cols=int(1e5)
n=2
if cols>100: n=3
colnames = [genstr(n) for i in range(cols)]
coldata = [np.random.normal(x,1,rows) for x in np.random.normal(5,3,cols)]
n = np.array(coldata).T
df = pd.DataFrame(n, columns=colnames)
col1 = colnames[0]
col2 = colnames[1]
df[col2] = df[col1]*np.random.normal(.8, .2, len(df))
df = np.round(df, 3)
cats = ['low','medium','high','very high']
df['label'] = pd.cut(df[col1], bins=4, labels=cats).astype(str)
#df['label'] = df.label.cat.as_ordered()
#don't add date if rows too large
if rows<2e6:
df['date'] = pd.date_range('1/1/2016', periods=rows, freq='H')
return df
@classmethod
def getIrisData(self):
"""Get iris dataset"""
path = os.path.dirname(__file__)
cols = ['sepal length','sepal width','petal length','petal width','class']
df = pd.read_csv(os.path.join(path,'datasets','iris.data'),names=cols)
return df
@classmethod
def getStackedData(self):
"""Get a dataframe to pivot test"""
import pandas.util.testing as tm; tm.N = 4
frame = tm.makeTimeDataFrame()
N, K = frame.shape
data = {'value' : frame.values.ravel('F'),
'variable' : np.asarray(frame.columns).repeat(N),
'date' : np.tile(np.asarray(frame.index), K)}
return pd.DataFrame(data, columns=['date', 'variable', 'value'])
def initialiseFields(self):
"""Create meta data fields"""
self.meta = {}
self.columnwidths = {} #used to store col widths
return
def save(self, filename):
"""Save dataframe"""
ftype = os.path.splitext(filename)[1]
if ftype == '.mpk':
self.df.to_msgpack(filename)
elif ftype == '.pickle':
self.df.to_pickle(filename)
elif ftype == '.xls':
self.df.to_excel(filename)
elif ftype == '.csv':
self.df.to_csv(filename)
#elif ftype == '.html':
# self.df.to_html(filename)
return
def load(self, filename, filetype=None):
"""Load file, if no filetype given assume it's msgpack format"""
if filetype == '.pickle':
self.df = pd.read_pickle(filename)
else:
self.df = pd.read_msgpack(filename)
#print (len(self.df))
return
def getlongestEntry(self, colindex, n=500):
"""Get the longest string in the column for determining width. Just uses the first
n rows for speed"""
df = self.df
col = df.columns[colindex]
try:
if df.dtypes[col] == 'float64':
c = df[col][:n].round(3)
else:
c = df[col][:n]
except:
return 1
longest = c.astype('object').astype('str').str.len().max()
if np.isnan(longest):
return 1
return longest
def getRecordAtRow(self, rowIndex):
"""Get the entire record at the specifed row"""
name = self.getRecName(rowIndex)
record = self.df.ix[name]
return record
def moveColumn(self, oldindex, newindex):
"""Changes the order of columns"""
df = self.df
cols = list(df.columns)
name = cols[oldindex]
del cols[oldindex]
cols.insert(newindex, name)
self.df = df[cols]
return
def autoAddRows(self, num):
"""Add n rows to end of dataframe. Will create rows with index starting
from highest previous row count"""
df = self.df
if len(df) == 0:
self.df = pd.DataFrame(pd.Series(range(num)))
print (df)
return
try:
ind = self.df.index.max()+1
except:
ind = len(df)+1
new = pd.DataFrame(np.nan, index=range(ind,ind+num), columns=df.columns)
self.df = pd.concat([df, new])
return
def addRow(self, rowindex):
"""Inserts a row at the required index by append/concat"""
df = self.df
a, b = df[:rowindex], df[rowindex:]
a = a.append(pd.Series(), ignore_index=1)
self.df = pd.concat([a,b])
return
def deleteRow(self, row, unique=True):
"""Delete a row"""
self.deleteRows([row], unique)
return
def deleteRows(self, rowlist=None, unique=True):
"""Delete multiple or all rows"""
df = self.df
if unique == True:
rows = list(set(range(len(df))) - set(rowlist))
self.df = df.iloc[rows]
else:
df.drop(df.index[rowlist],inplace=True)
return
def addColumn(self, colname=None, dtype=None, data=None):
"""Add a column"""
if data is None:
data = pd.Series(dtype=dtype)
self.df[colname] = data
return
def deleteColumn(self, colindex):
"""delete a column"""
df = self.df
colname = df.columns[colindex]
df.drop([colname], axis=1, inplace=True)
return
def deleteColumns(self, cols=None):
"""Remove all cols or list provided"""
df = self.df
colnames = df.columns[cols]
df.drop(colnames, axis=1, inplace=True)
return
def deleteCells(self, rows, cols):
self.df.iloc[rows,cols] = np.nan
return
def resetIndex(self):
"""Reset index behaviour"""
df = self.df
if df.index.name != None or df.index.names[0] != None:
drop = False
else:
drop = True
df.reset_index(drop=drop,inplace=True)
return
def setindex(self, colindex):
"""Index setting behaviour"""
df = self.df
colnames = list(df.columns[colindex])
indnames = df.index.names
if indnames[0] != None:
df.reset_index(inplace=True)
df.set_index(colnames, inplace=True)
return
def copyIndex(self):
"""Copy index to a column"""
df = self.df
name = df.index.name
if name == None: name='index'
df[name] = df.index#.astype('object')
return
def groupby(self, cols):
"""Group by cols"""
df = self.df
colnames = df.columns[cols]
grps = df.groupby(colnames)
return grps
def getColumnType(self, columnIndex):
"""Get the column type"""
coltype = self.df.dtypes[columnIndex]
return coltype
def getColumnCount(self):
"""Returns the number of columns in the data model"""
return len(self.df.columns)
def getColumnName(self, columnIndex):
"""Returns the name of the given column by columnIndex"""
return str(self.df.columns[columnIndex])
def getColumnData(self, columnIndex=None, columnName=None,
filters=None):
"""Return the data in a list for this col,
filters is a tuple of the form (key,value,operator,bool)"""
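        # Note: still relies on the legacy Filtering helper and the self.data /
        # self.columnNames attributes, which this pandas-backed model does not define.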
if columnIndex != None and columnIndex < len(self.columnNames):
columnName = self.getColumnName(columnIndex)
names = Filtering.doFiltering(searchfunc=self.filterBy,
filters=filters)
coldata = [self.data[n][columnName] for n in names]
return coldata
def getColumns(self, colnames, filters=None, allowempty=True):
"""Get column data for multiple cols, with given filter options,
filterby: list of tuples of the form (key,value,operator,bool)
allowempty: boolean if false means rows with empty vals for any
required fields are not returned
returns: lists of column data"""
def evaluate(l):
for i in l:
if i == '' or i == None:
return False
return True
coldata=[]
for c in colnames:
vals = self.getColumnData(columnName=c, filters=filters)
coldata.append(vals)
if allowempty == False:
result = [i for i in zip(*coldata) if evaluate(i) == True]
coldata = list(zip(*result))
return coldata
def getRowCount(self):
"""Returns the number of rows in the table model."""
return len(self.df)
def getValueAt(self, rowindex, colindex):
"""Returns the cell value at location specified
by columnIndex and rowIndex."""
df = self.df
value = self.df.iloc[rowindex,colindex]
if type(value) is float and np.isnan(value):
return ''
return value
def setValueAt(self, value, rowindex, colindex):
"""Changed the dictionary when cell is updated by user"""
if value == '':
value = np.nan
dtype = self.df.dtypes[colindex]
#try to cast to column type
try:
if dtype == 'float64':
value = float(value)
elif dtype == 'int':
value = int(value)
elif dtype == 'datetime64[ns]':
value = pd.to_datetime(value)
except Exception as e:
print (e)
self.df.iloc[rowindex,colindex] = value
return
def transpose(self):
"""Transpose dataframe"""
df = self.df
rows = df.index
df = df.transpose()
df.reset_index()
if util.check_multiindex(df.columns) != 1:
try:
df.columns = df.columns.astype(str)
except:
pass
self.df = df.convert_objects()
self.columnwidths = {}
return
def query(self):
return
def filterby(self):
import filtering
funcs = filtering.operatornames
floatops = ['=','>','<']
func = funcs[op]
return
def __repr__(self):
return 'Table Model with %s rows' %len(self.df)
| [
"numpy.random.normal",
"pandas.read_pickle",
"pandas.util.testing.makeTimeDataFrame",
"pandas.Series",
"random.choice",
"pandas.to_datetime",
"os.path.join",
"os.path.splitext",
"pandas.read_msgpack",
"pandas.cut",
"numpy.asarray",
"os.path.dirname",
"numpy.array",
"numpy.isnan",
"pandas.date_range",
"pandas.DataFrame",
"pandas.concat",
"numpy.round"
] | [((2617, 2650), 'pandas.DataFrame', 'pd.DataFrame', (['n'], {'columns': 'colnames'}), '(n, columns=colnames)\n', (2629, 2650), True, 'import pandas as pd\n'), ((2780, 2795), 'numpy.round', 'np.round', (['df', '(3)'], {}), '(df, 3)\n', (2788, 2795), True, 'import numpy as np\n'), ((3216, 3241), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (3231, 3241), False, 'import os, string, types, copy\n'), ((3582, 3604), 'pandas.util.testing.makeTimeDataFrame', 'tm.makeTimeDataFrame', ([], {}), '()\n', (3602, 3604), True, 'import pandas.util.testing as tm\n'), ((3826, 3883), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {'columns': "['date', 'variable', 'value']"}), "(data, columns=['date', 'variable', 'value'])\n", (3838, 3883), True, 'import pandas as pd\n'), ((5322, 5339), 'numpy.isnan', 'np.isnan', (['longest'], {}), '(longest)\n', (5330, 5339), True, 'import numpy as np\n'), ((6361, 6381), 'pandas.concat', 'pd.concat', (['[df, new]'], {}), '([df, new])\n', (6370, 6381), True, 'import pandas as pd\n'), ((6631, 6648), 'pandas.concat', 'pd.concat', (['[a, b]'], {}), '([a, b])\n', (6640, 6648), True, 'import pandas as pd\n'), ((2508, 2536), 'numpy.random.normal', 'np.random.normal', (['x', '(1)', 'rows'], {}), '(x, 1, rows)\n', (2524, 2536), True, 'import numpy as np\n'), ((2584, 2601), 'numpy.array', 'np.array', (['coldata'], {}), '(coldata)\n', (2592, 2601), True, 'import numpy as np\n'), ((3056, 3105), 'pandas.date_range', 'pd.date_range', (['"""1/1/2016"""'], {'periods': 'rows', 'freq': '"""H"""'}), "('1/1/2016', periods=rows, freq='H')\n", (3069, 3105), True, 'import pandas as pd\n'), ((3350, 3393), 'os.path.join', 'os.path.join', (['path', '"""datasets"""', '"""iris.data"""'], {}), "(path, 'datasets', 'iris.data')\n", (3362, 3393), False, 'import os, string, types, copy\n'), ((4127, 4153), 'os.path.splitext', 'os.path.splitext', (['filename'], {}), '(filename)\n', (4143, 4153), False, 'import os, string, types, copy\n'), ((4697, 4721), 'pandas.read_pickle', 'pd.read_pickle', (['filename'], {}), '(filename)\n', (4711, 4721), True, 'import pandas as pd\n'), ((4758, 4783), 'pandas.read_msgpack', 'pd.read_msgpack', (['filename'], {}), '(filename)\n', (4773, 4783), True, 'import pandas as pd\n'), ((6584, 6595), 'pandas.Series', 'pd.Series', ([], {}), '()\n', (6593, 6595), True, 'import pandas as pd\n'), ((7246, 7268), 'pandas.Series', 'pd.Series', ([], {'dtype': 'dtype'}), '(dtype=dtype)\n', (7255, 7268), True, 'import pandas as pd\n'), ((11000, 11015), 'numpy.isnan', 'np.isnan', (['value'], {}), '(value)\n', (11008, 11015), True, 'import numpy as np\n'), ((2544, 2572), 'numpy.random.normal', 'np.random.normal', (['(5)', '(3)', 'cols'], {}), '(5, 3, cols)\n', (2560, 2572), True, 'import numpy as np\n'), ((2869, 2906), 'pandas.cut', 'pd.cut', (['df[col1]'], {'bins': '(4)', 'labels': 'cats'}), '(df[col1], bins=4, labels=cats)\n', (2875, 2906), True, 'import pandas as pd\n'), ((3782, 3805), 'numpy.asarray', 'np.asarray', (['frame.index'], {}), '(frame.index)\n', (3792, 3805), True, 'import numpy as np\n'), ((2245, 2261), 'random.choice', 'random.choice', (['s'], {}), '(s)\n', (2258, 2261), False, 'import random\n'), ((3712, 3737), 'numpy.asarray', 'np.asarray', (['frame.columns'], {}), '(frame.columns)\n', (3722, 3737), True, 'import numpy as np\n'), ((11531, 11552), 'pandas.to_datetime', 'pd.to_datetime', (['value'], {}), '(value)\n', (11545, 11552), True, 'import pandas as pd\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
coolisf REST API for Django site
"""
# This code is a part of coolisf library: https://github.com/letuananh/intsem.fx
# :copyright: (c) 2014 <NAME> <<EMAIL>>
# :license: MIT, see LICENSE for more details.
import json
import logging
import django
from django.http import HttpResponse, Http404
from texttaglib.chirptext import ttl
import coolisf
from coolisf import GrammarHub
from coolisf.model import Reading
# ---------------------------------------------------------------------
# CONFIGURATION
# ---------------------------------------------------------------------
RESULTS = (1, 5, 10, 20, 30, 40, 50, 100, 500)
TAGGERS = {ttl.Tag.LELESK: "LeLesk", ttl.Tag.MFS: "MFS", ttl.Tag.DEFAULT: "None"}
ghub = GrammarHub()
# ---------------------------------------------------------------------
# VIEWS
# ---------------------------------------------------------------------
def jsonp(func):
''' JSON/JSONP decorator '''
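    # Returns plain JSON, or wraps the payload as 'callback(<json>);' when a
    # `callback` parameter is present in the GET or POST data (JSONP).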
def decorator(request, *args, **kwargs):
objects = func(request, *args, **kwargs)
# ignore HttpResponse
if isinstance(objects, HttpResponse):
return objects
# JSON/JSONP response
data = json.dumps(objects)
if 'callback' in request.GET:
callback = request.GET['callback']
elif 'callback' in request.POST:
callback = request.POST['callback']
else:
return HttpResponse(data, "application/json")
# is JSONP
# logging.debug("A jsonp response")
data = '{c}({d});'.format(c=callback, d=data)
return HttpResponse(data, "application/javascript")
return decorator
def index(request):
return HttpResponse('coolisf-REST is up and running - coolisf-{v}/Django-{dv}'.format(v=coolisf.__version__, dv=django.get_version()), 'text/html')
@jsonp
def generate(request):
grammar = request.POST.get('grammar', '')
# parse_count = request.GET['parse_count']
mrs = request.POST.get('mrs', '')
print("Grammar: {}".format(grammar))
print("MRS: {}".format(mrs))
if grammar not in ghub.names:
raise Http404('Unknown grammar')
sents = [s.text for s in ghub[grammar].generate(Reading(mrs))]
print("Generated: {}".format(sents))
return sents
@jsonp
def parse(request):
''' Parse a sentence using ISF
Mapping: /restisf/parse/ '''
# inputs
sentence_text = request.GET['sent']
parse_count = request.GET['parse_count']
tagger = request.GET['tagger']
grammar = request.GET['grammar']
# validation
if not sentence_text:
raise Http404('Sentence cannot be empty')
elif int(parse_count) < 0:
raise Http404('Invalid parse count: ' + parse_count)
elif tagger not in TAGGERS:
raise Http404('Unknown tagger: ' + tagger)
elif grammar not in ghub.names:
raise Http404('Unknown grammar')
# Parse sentence
logging.getLogger(__name__).info("Parsing sentence: ... " + sentence_text)
sent = ghub.parse_json(sentence_text, grammar, parse_count, tagger)
logging.getLogger(__name__).debug("Shallow: {}".format(sent['shallow']))
logging.getLogger(__name__).debug("Parses: {}".format(len(sent)))
logging.getLogger(__name__).info("Done parsing")
return sent
@jsonp
def version(request):
return {'product': 'djangoisf',
'server': 'coolisf-{}/Django-{}'.format(coolisf.__version__, django.get_version())}
| [
"coolisf.GrammarHub",
"logging.getLogger",
"coolisf.model.Reading",
"django.get_version",
"django.http.HttpResponse",
"json.dumps",
"django.http.Http404"
] | [((762, 774), 'coolisf.GrammarHub', 'GrammarHub', ([], {}), '()\n', (772, 774), False, 'from coolisf import GrammarHub\n'), ((1222, 1241), 'json.dumps', 'json.dumps', (['objects'], {}), '(objects)\n', (1232, 1241), False, 'import json\n'), ((1620, 1664), 'django.http.HttpResponse', 'HttpResponse', (['data', '"""application/javascript"""'], {}), "(data, 'application/javascript')\n", (1632, 1664), False, 'from django.http import HttpResponse, Http404\n'), ((2145, 2171), 'django.http.Http404', 'Http404', (['"""Unknown grammar"""'], {}), "('Unknown grammar')\n", (2152, 2171), False, 'from django.http import HttpResponse, Http404\n'), ((2622, 2657), 'django.http.Http404', 'Http404', (['"""Sentence cannot be empty"""'], {}), "('Sentence cannot be empty')\n", (2629, 2657), False, 'from django.http import HttpResponse, Http404\n'), ((2703, 2749), 'django.http.Http404', 'Http404', (["('Invalid parse count: ' + parse_count)"], {}), "('Invalid parse count: ' + parse_count)\n", (2710, 2749), False, 'from django.http import HttpResponse, Http404\n'), ((2936, 2963), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (2953, 2963), False, 'import logging\n'), ((3087, 3114), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (3104, 3114), False, 'import logging\n'), ((3164, 3191), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (3181, 3191), False, 'import logging\n'), ((3234, 3261), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (3251, 3261), False, 'import logging\n'), ((3439, 3459), 'django.get_version', 'django.get_version', ([], {}), '()\n', (3457, 3459), False, 'import django\n'), ((1449, 1487), 'django.http.HttpResponse', 'HttpResponse', (['data', '"""application/json"""'], {}), "(data, 'application/json')\n", (1461, 1487), False, 'from django.http import HttpResponse, Http404\n'), ((1824, 1844), 'django.get_version', 'django.get_version', ([], {}), '()\n', (1842, 1844), False, 'import django\n'), ((2224, 2236), 'coolisf.model.Reading', 'Reading', (['mrs'], {}), '(mrs)\n', (2231, 2236), False, 'from coolisf.model import Reading\n'), ((2796, 2832), 'django.http.Http404', 'Http404', (["('Unknown tagger: ' + tagger)"], {}), "('Unknown tagger: ' + tagger)\n", (2803, 2832), False, 'from django.http import HttpResponse, Http404\n'), ((2883, 2909), 'django.http.Http404', 'Http404', (['"""Unknown grammar"""'], {}), "('Unknown grammar')\n", (2890, 2909), False, 'from django.http import HttpResponse, Http404\n')] |
# -*- coding: utf-8 -*-
# Visigoth: A lightweight Python3 library for rendering data visualizations in SVG
# Copyright (C) 2020-2021 Visigoth Developers
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software
# and associated documentation files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or
# substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import datetime
from visigoth.charts import ChartElement
from visigoth.internal.svg import rectangle, text, line
from visigoth.common.axis import ContinuousAxis, DiscreteAxis
from visigoth.internal.utils.data import Dataset
from visigoth.utils import DiscreteHueManager, ContinuousHueManager
class Gantt(ChartElement):
"""
Create a Gantt Chart
Arguments:
data (dict): A relational data set (for example, list of dicts/lists/tuples describing each row)
Keyword Arguments:
x (str or int): Identify the column to yield a continuous value (typically datetimes)
y (str or int): Identify the column to yield discrete values identifying different entities in the data
hue (str or int): Identify the column to define the hue(colour)
size (float or int): Control the size (the bar width in this case)
width (int): the width of the plot in pixels
height (int): the height of the plot in pixels
        hue_manager (DiscreteHueManager or ContinuousHueManager): hue manager object (one is created automatically if not supplied)
stroke (str): stroke color for bars
stroke_width (int): stroke width for bars
font_height (int): the height of the font for text labels
spacing_fraction (float) : ratio of bar width to spacing
text_attributes (dict): SVG attribute name value pairs to apply to labels
value_formatter(visigoth.utils.ValueFormatter): control the way values are represented
Note:
x column must be continuous (not str)
y column must be discrete (integer or str)
parameters x, y and hue are all required
"""
def __init__(self, data, x=0, y=1, hue=None, size=None, width=512, height=512, hue_manager=None, stroke=None,
stroke_width=1, font_height=12, spacing_fraction=0.4, text_attributes={}, value_formatter=None):
super().__init__(width=width, height=height, stroke=stroke, stroke_width=stroke_width, font_height=font_height,
text_attributes=text_attributes, value_formatter=value_formatter)
self.dataset = Dataset(data)
self.dataset.checkColumn("x",x, [float, int, datetime.timedelta, datetime.datetime],required=True)
self.dataset.checkColumn("y",y, [str, int],required=True)
self.dataset.checkColumn("hue", hue, [float, str, int],required=True)
self.dataset.checkColumn("size", size, [float, int], required=False)
self.setDrawGrid(True)
self.x = x
self.y = y
self.hue = hue
self.size = size
self.spacing_fraction = spacing_fraction
if not hue_manager:
if self.hue is None or self.dataset.isDiscrete(self.hue):
hue_manager = DiscreteHueManager()
else:
hue_manager = ContinuousHueManager()
self.setHueManager(hue_manager)
self.huevals = []
for v in self.dataset.query([self.hue], unique=True, flatten=True):
if v is not None:
self.getHueManager().allocateHue(v)
self.huevals.append(v)
self.yvals = []
y_min_x = self.dataset.query([self.y], aggregations=[Dataset.min(self.x)])
y_min_x.sort(key=lambda t:t[1])
for (y,_) in y_min_x:
self.yvals.append(y)
(self.xmin,self.xmax) = self.dataset.query([],aggregations=[Dataset.min(self.x),Dataset.max(self.x)],flatten=True,raw=True)
y_axis = DiscreteAxis(self.height, "vertical", discrete_values=self.yvals,
value_formatter=self.getValueFormatter())
x_axis = ContinuousAxis(self.height, "horizontal", min_value=self.xmin, max_value=self.xmax,
value_formatter=self.getValueFormatter())
if isinstance(self.x, str):
x_axis.setLabel(self.x)
if isinstance(self.y, str):
y_axis.setLabel(self.y)
self.setAxes(x_axis, y_axis)
self.smax = None
if self.size is not None:
(self.smax,) = self.dataset.query([], aggregations=[Dataset.max(self.size)],
flatten=True, raw=True)
self.setTooltipFunction(lambda cat,val: self.getValueFormatter().toText(cat[0]) + " - " + self.getValueFormatter().toText(cat[1]) + " : " + str(val))
def build(self, fmt):
super().build(fmt)
self.hue_manager.addEventConsumer(self, "hue")
self.hue_manager.addEventProducer(self, "hue")
def drawChart(self, doc, cx, cy, width, height):
barcount = len(self.yvals)
barwidth = (1-self.spacing_fraction) * (height / barcount)
hm = self.getHueManager()
categories = {}
for index in range(barcount):
yval = self.yvals[index]
x_hues = self.dataset.query([self.x,self.hue,self.size],filters=[Dataset.filter(self.y,"=",yval)],raw=True)
print(x_hues)
x_hues.sort(key=lambda x:x[0])
by = self.computeY(yval)
pbx = None
px = None
psz = None
phue = None
for (x,hue,sz) in x_hues:
bx = self.computeX(x)
if pbx is not None and phue is not None:
width = barwidth
fill_hue = hm.getHue(phue)
stroke_hue = self.stroke if self.stroke is not None else fill_hue
if psz is not None:
width *= psz/self.smax
r = rectangle(pbx, by-width*0.5, bx-pbx, width, fill_hue, stroke=stroke_hue,
stroke_width=self.stroke_width, tooltip=self.getTooltip((px,x), phue))
rid = r.getId()
if phue not in categories:
categories[phue] = [rid]
else:
categories[phue].append(rid)
doc.add(r)
phue = hue
pbx = bx
px = x
psz = sz
return {"categories": categories} | [
"visigoth.utils.ContinuousHueManager",
"visigoth.utils.DiscreteHueManager",
"visigoth.internal.utils.data.Dataset.max",
"visigoth.internal.utils.data.Dataset.min",
"visigoth.internal.utils.data.Dataset.filter",
"visigoth.internal.utils.data.Dataset"
] | [((3305, 3318), 'visigoth.internal.utils.data.Dataset', 'Dataset', (['data'], {}), '(data)\n', (3312, 3318), False, 'from visigoth.internal.utils.data import Dataset\n'), ((3944, 3964), 'visigoth.utils.DiscreteHueManager', 'DiscreteHueManager', ([], {}), '()\n', (3962, 3964), False, 'from visigoth.utils import DiscreteHueManager, ContinuousHueManager\n'), ((4013, 4035), 'visigoth.utils.ContinuousHueManager', 'ContinuousHueManager', ([], {}), '()\n', (4033, 4035), False, 'from visigoth.utils import DiscreteHueManager, ContinuousHueManager\n'), ((4386, 4405), 'visigoth.internal.utils.data.Dataset.min', 'Dataset.min', (['self.x'], {}), '(self.x)\n', (4397, 4405), False, 'from visigoth.internal.utils.data import Dataset\n'), ((4580, 4599), 'visigoth.internal.utils.data.Dataset.min', 'Dataset.min', (['self.x'], {}), '(self.x)\n', (4591, 4599), False, 'from visigoth.internal.utils.data import Dataset\n'), ((4600, 4619), 'visigoth.internal.utils.data.Dataset.max', 'Dataset.max', (['self.x'], {}), '(self.x)\n', (4611, 4619), False, 'from visigoth.internal.utils.data import Dataset\n'), ((5281, 5303), 'visigoth.internal.utils.data.Dataset.max', 'Dataset.max', (['self.size'], {}), '(self.size)\n', (5292, 5303), False, 'from visigoth.internal.utils.data import Dataset\n'), ((6079, 6112), 'visigoth.internal.utils.data.Dataset.filter', 'Dataset.filter', (['self.y', '"""="""', 'yval'], {}), "(self.y, '=', yval)\n", (6093, 6112), False, 'from visigoth.internal.utils.data import Dataset\n')] |
# coding: utf-8
from __future__ import unicode_literals
from ansible.parsing.dataloader import DataLoader
from ansible.template import Templar
import pytest
import os
import testinfra.utils.ansible_runner
import pprint
pp = pprint.PrettyPrinter()
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')
def base_directory():
""" ... """
cwd = os.getcwd()
if('group_vars' in os.listdir(cwd)):
directory = "../.."
molecule_directory = "."
else:
directory = "."
molecule_directory = "molecule/{}".format(os.environ.get('MOLECULE_SCENARIO_NAME'))
return directory, molecule_directory
@pytest.fixture()
def get_vars(host):
"""
parse ansible variables
- defaults/main.yml
- vars/main.yml
- molecule/${MOLECULE_SCENARIO_NAME}/group_vars/all/vars.yml
"""
base_dir, molecule_dir = base_directory()
file_defaults = "file={}/defaults/main.yml name=role_defaults".format(base_dir)
file_vars = "file={}/vars/main.yml name=role_vars".format(base_dir)
file_molecule = "file={}/group_vars/all/vars.yml name=test_vars".format(molecule_dir)
defaults_vars = host.ansible("include_vars", file_defaults).get("ansible_facts").get("role_defaults")
vars_vars = host.ansible("include_vars", file_vars).get("ansible_facts").get("role_vars")
molecule_vars = host.ansible("include_vars", file_molecule).get("ansible_facts").get("test_vars")
ansible_vars = defaults_vars
ansible_vars.update(vars_vars)
ansible_vars.update(molecule_vars)
templar = Templar(loader=DataLoader(), variables=ansible_vars)
result = templar.template(ansible_vars, fail_on_undefined=False)
return result
def test_directories(host):
"""
test existing directories
"""
distribution = host.system_info.distribution
directories = []
if(distribution in ['debian', 'ubuntu']):
directories.append("/usr/lib/chromium")
elif(distribution in ['centos', 'redhat']):
directories.append("/usr/lib64/chromium-browser")
for dirs in directories:
d = host.file(dirs)
assert d.is_directory
assert d.exists
def test_files(host):
"""
test existing files
"""
distribution = host.system_info.distribution
files = []
files.append("/etc/profile.d/chromedriver.sh")
if(distribution in ['debian', 'ubuntu']):
files.append("/usr/bin/chromedriver")
files.append("/usr/lib/chromium/chrome-sandbox")
elif(distribution in ['centos', 'redhat']):
files.append("/usr/lib64/chromium-browser/chromedriver")
for _file in files:
f = host.file(_file)
assert f.exists
assert f.is_file
def test_profile(host):
config_file = "/etc/profile.d/chromedriver.sh"
content = host.file(config_file).content_string
assert 'DISPLAY=":20.0"' in content
assert 'SCREEN_GEOMETRY="1440x900x24"' in content
assert 'CHROMEDRIVER_PORT=4444' in content
assert 'CHROMEDRIVER_WHITELISTED_IPS="127.0.0.1"' in content
assert 'CHROMEDRIVER_URL_BASE=""' in content
assert 'CHROMEDRIVER_EXTRA_ARGS=""' in content
| [
"os.listdir",
"os.environ.get",
"os.getcwd",
"pprint.PrettyPrinter",
"pytest.fixture",
"ansible.parsing.dataloader.DataLoader"
] | [((228, 250), 'pprint.PrettyPrinter', 'pprint.PrettyPrinter', ([], {}), '()\n', (248, 250), False, 'import pprint\n'), ((712, 728), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (726, 728), False, 'import pytest\n'), ((426, 437), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (435, 437), False, 'import os\n'), ((462, 477), 'os.listdir', 'os.listdir', (['cwd'], {}), '(cwd)\n', (472, 477), False, 'import os\n'), ((625, 665), 'os.environ.get', 'os.environ.get', (['"""MOLECULE_SCENARIO_NAME"""'], {}), "('MOLECULE_SCENARIO_NAME')\n", (639, 665), False, 'import os\n'), ((1652, 1664), 'ansible.parsing.dataloader.DataLoader', 'DataLoader', ([], {}), '()\n', (1662, 1664), False, 'from ansible.parsing.dataloader import DataLoader\n')] |
import struct
import vertex_builder
def test_subset_write():
builder = vertex_builder.builder('2f 2i', 10)
assert builder.read() == b''
chunk_1 = struct.pack('2f', 1.0, 2.0)
chunk_2 = struct.pack('2i', 3, 4)
chunk_3 = struct.pack('2f', -1.0, 0.0)
chunk_4 = struct.pack('2i', 1024, -256)
subset_1 = builder.subset(0)
subset_2 = builder.subset(1)
subset_1.write(chunk_1 + chunk_3)
subset_2.write(chunk_2 + chunk_4)
assert builder.read() == chunk_1 + chunk_2 + chunk_3 + chunk_4
| [
"vertex_builder.builder",
"struct.pack"
] | [((77, 112), 'vertex_builder.builder', 'vertex_builder.builder', (['"""2f 2i"""', '(10)'], {}), "('2f 2i', 10)\n", (99, 112), False, 'import vertex_builder\n'), ((161, 188), 'struct.pack', 'struct.pack', (['"""2f"""', '(1.0)', '(2.0)'], {}), "('2f', 1.0, 2.0)\n", (172, 188), False, 'import struct\n'), ((203, 226), 'struct.pack', 'struct.pack', (['"""2i"""', '(3)', '(4)'], {}), "('2i', 3, 4)\n", (214, 226), False, 'import struct\n'), ((241, 269), 'struct.pack', 'struct.pack', (['"""2f"""', '(-1.0)', '(0.0)'], {}), "('2f', -1.0, 0.0)\n", (252, 269), False, 'import struct\n'), ((284, 313), 'struct.pack', 'struct.pack', (['"""2i"""', '(1024)', '(-256)'], {}), "('2i', 1024, -256)\n", (295, 313), False, 'import struct\n')] |
"""
Notation 3 (N3) RDF graph serializer for RDFLib.
"""
from rdflib.graph import Graph
from rdflib.namespace import OWL, Namespace
from rdflib.plugins.serializers.turtle import OBJECT, SUBJECT, TurtleSerializer
__all__ = ["N3Serializer"]
SWAP_LOG = Namespace("http://www.w3.org/2000/10/swap/log#")
class N3Serializer(TurtleSerializer):
short_name = "n3"
def __init__(self, store: Graph, parent=None):
super(N3Serializer, self).__init__(store)
self.keywords.update({OWL.sameAs: "=", SWAP_LOG.implies: "=>"})
self.parent = parent
def reset(self):
super(N3Serializer, self).reset()
self._stores = {}
def endDocument(self):
if not self.parent:
super(N3Serializer, self).endDocument()
def indent(self, modifier=0):
indent = super(N3Serializer, self).indent(modifier)
if self.parent is not None:
indent += self.parent.indent() # modifier)
return indent
def preprocessTriple(self, triple):
super(N3Serializer, self).preprocessTriple(triple)
if isinstance(triple[0], Graph):
for t in triple[0]:
self.preprocessTriple(t)
if isinstance(triple[1], Graph):
for t in triple[1]:
self.preprocessTriple(t)
if isinstance(triple[2], Graph):
for t in triple[2]:
self.preprocessTriple(t)
def getQName(self, uri, gen_prefix=True):
qname = None
if self.parent is not None:
qname = self.parent.getQName(uri, gen_prefix)
if qname is None:
qname = super(N3Serializer, self).getQName(uri, gen_prefix)
return qname
def statement(self, subject):
self.subjectDone(subject)
properties = self.buildPredicateHash(subject)
if len(properties) == 0:
return False
return self.s_clause(subject) or super(N3Serializer, self).statement(subject)
def path(self, node, position, newline=False):
if not self.p_clause(node, position):
super(N3Serializer, self).path(node, position, newline)
def s_clause(self, subject):
if isinstance(subject, Graph):
self.write("\n" + self.indent())
self.p_clause(subject, SUBJECT)
self.predicateList(subject)
self.write(" .")
return True
else:
return False
def p_clause(self, node, position):
if isinstance(node, Graph):
self.subjectDone(node)
if position is OBJECT:
self.write(" ")
self.write("{")
self.depth += 1
serializer = N3Serializer(node, parent=self)
serializer.serialize(self.stream)
self.depth -= 1
self.write(self.indent() + "}")
return True
else:
return False
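# Illustrative usage (not part of the original module): this serializer is
# registered under the plugin name "n3", so a graph is normally serialized
# through rdflib's plugin machinery rather than by instantiating the class:
#
#     from rdflib import Graph
#     g = Graph()
#     g.parse(data="<http://example.org/a> <http://example.org/b> <http://example.org/c> .", format="turtle")
#     print(g.serialize(format="n3"))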
| [
"rdflib.namespace.Namespace"
] | [((252, 300), 'rdflib.namespace.Namespace', 'Namespace', (['"""http://www.w3.org/2000/10/swap/log#"""'], {}), "('http://www.w3.org/2000/10/swap/log#')\n", (261, 300), False, 'from rdflib.namespace import OWL, Namespace\n')] |
from .test_adjective import make_beag
from .test_noun import make_ainm
from pygramadan.noun_phrase import NP, example_xml
from pygramadan.attributes import Gender
import io
FEAR_POIST_XML = example_xml()
def test_read_xml():
sio = io.StringIO(FEAR_POIST_XML)
fear_poist = NP(source=sio)
assert fear_poist.get_lemma() == 'fear poist'
assert fear_poist.get_gender() == Gender.Masc
assert fear_poist.pl_gen_art[0].value == 'na bhfear poist'
def test_noun_adj():
beag = make_beag()
ainm_beag = NP(noun=make_ainm(), adjective=beag)
assert len(ainm_beag.sg_nom) == 1
assert ainm_beag.sg_gen_art[0].value == 'an ainm bhig'
assert ainm_beag.get_lemma() == 'ainm beag'
def test_get_all_forms():
sio = io.StringIO(FEAR_POIST_XML)
fear_poist = NP(source=sio)
fp_list = fear_poist.get_all_forms()
exp = [('sg_nom', 'fear poist'), ('sg_gen', 'fir phoist'),
('sg_gen_art', 'an fhir phoist'), ('sg_nom_art', 'an fear poist'),
('pl_gen', 'fear poist'), ('pl_nom_art', 'na fir phoist'),
('pl_gen_art', 'na bhfear poist'), ('pl_nom', 'fir phoist')]
fp_list.sort()
exp.sort()
assert fp_list == exp
| [
"io.StringIO",
"pygramadan.noun_phrase.example_xml",
"pygramadan.noun_phrase.NP"
] | [((192, 205), 'pygramadan.noun_phrase.example_xml', 'example_xml', ([], {}), '()\n', (203, 205), False, 'from pygramadan.noun_phrase import NP, example_xml\n'), ((239, 266), 'io.StringIO', 'io.StringIO', (['FEAR_POIST_XML'], {}), '(FEAR_POIST_XML)\n', (250, 266), False, 'import io\n'), ((284, 298), 'pygramadan.noun_phrase.NP', 'NP', ([], {'source': 'sio'}), '(source=sio)\n', (286, 298), False, 'from pygramadan.noun_phrase import NP, example_xml\n'), ((744, 771), 'io.StringIO', 'io.StringIO', (['FEAR_POIST_XML'], {}), '(FEAR_POIST_XML)\n', (755, 771), False, 'import io\n'), ((789, 803), 'pygramadan.noun_phrase.NP', 'NP', ([], {'source': 'sio'}), '(source=sio)\n', (791, 803), False, 'from pygramadan.noun_phrase import NP, example_xml\n')] |
from sklearn.utils import class_weight
import src.util as util
from sklearn.ensemble import RandomForestClassifier
from time import time
from src.metrics import print_metrics, get_metrics
import sys
def random_forest(path: str, max_depth, n_trees, type):
st = time()
if type == "bernoulli":
X_train, X_test, y_train, y_test = util.load_data_bow(path, True, 1, 1)
elif type == "multinomial":
X_train, X_test, y_train, y_test = util.load_data_bow(path, False, 1, 1)
elif type == "word2vec":
X_train, X_test, y_train, y_test = util.load_data_word2vec_sentence_tfidf(path)
else:
raise RuntimeError("third argument must be `bernoulli`, 'multinomial' or `word2vec`")
model = RandomForestClassifier(
n_estimators=n_trees, max_depth=max_depth, verbose=1, n_jobs=-1
)
print("Fitting model")
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
metrics = get_metrics(y_pred, y_test)
print_metrics(metrics)
util.save_model(model, f"random_forest_{max_depth}_{n_trees}_{type}.sav")
print(f"Time: {time()-st}s")
def main():
args = sys.argv
max_depth = int(args[1])
n_trees = int(args[2])
type = args[3]
path = args[4]
random_forest(path, max_depth, n_trees, type)
if __name__ == "__main__":
main()
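# Illustrative invocation (argument order follows main(): max_depth, n_trees, type, path;
# the script and data file names below are placeholders):
#
#     python random_forest.py 20 200 multinomial data/reviews.csv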
| [
"src.util.load_data_bow",
"sklearn.ensemble.RandomForestClassifier",
"src.util.save_model",
"src.metrics.get_metrics",
"src.util.load_data_word2vec_sentence_tfidf",
"time.time",
"src.metrics.print_metrics"
] | [((266, 272), 'time.time', 'time', ([], {}), '()\n', (270, 272), False, 'from time import time\n'), ((728, 819), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'n_estimators': 'n_trees', 'max_depth': 'max_depth', 'verbose': '(1)', 'n_jobs': '(-1)'}), '(n_estimators=n_trees, max_depth=max_depth, verbose=1,\n n_jobs=-1)\n', (750, 819), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((939, 966), 'src.metrics.get_metrics', 'get_metrics', (['y_pred', 'y_test'], {}), '(y_pred, y_test)\n', (950, 966), False, 'from src.metrics import print_metrics, get_metrics\n'), ((971, 993), 'src.metrics.print_metrics', 'print_metrics', (['metrics'], {}), '(metrics)\n', (984, 993), False, 'from src.metrics import print_metrics, get_metrics\n'), ((998, 1071), 'src.util.save_model', 'util.save_model', (['model', 'f"""random_forest_{max_depth}_{n_trees}_{type}.sav"""'], {}), "(model, f'random_forest_{max_depth}_{n_trees}_{type}.sav')\n", (1013, 1071), True, 'import src.util as util\n'), ((344, 380), 'src.util.load_data_bow', 'util.load_data_bow', (['path', '(True)', '(1)', '(1)'], {}), '(path, True, 1, 1)\n', (362, 380), True, 'import src.util as util\n'), ((456, 493), 'src.util.load_data_bow', 'util.load_data_bow', (['path', '(False)', '(1)', '(1)'], {}), '(path, False, 1, 1)\n', (474, 493), True, 'import src.util as util\n'), ((566, 610), 'src.util.load_data_word2vec_sentence_tfidf', 'util.load_data_word2vec_sentence_tfidf', (['path'], {}), '(path)\n', (604, 610), True, 'import src.util as util\n'), ((1092, 1098), 'time.time', 'time', ([], {}), '()\n', (1096, 1098), False, 'from time import time\n')] |
import pymongo
from bson.objectid import ObjectId
from getpass import getpass
from sys import argv
import csv
if __name__ == '__main__':
passwd = getpass ("Password: ")
mongo = pymongo.Connection ('localhost', 27017)['db_skapes']
mongo.authenticate('skapes', passwd)
results = mongo.orig_events.find ()
for item in results:
if item['Path_BNGenus'] == 'Unspecified' and item['Path_BNSpp'] == 'Unspecified':
name = item['Path_CName']
else:
name = item['Path_BNGenus'] + ' ' + item['Path_BNSpp']
mongo.events.update ({
'ref': item['_id'],
}, {
'$set': {
'event_name': ' - '.join ([item['PathEmerge_Date_Yr'], item['PathEmerge_Location'], name])
}
});
| [
"pymongo.Connection",
"getpass.getpass"
] | [((153, 174), 'getpass.getpass', 'getpass', (['"""Password: """'], {}), "('Password: ')\n", (160, 174), False, 'from getpass import getpass\n'), ((188, 226), 'pymongo.Connection', 'pymongo.Connection', (['"""localhost"""', '(27017)'], {}), "('localhost', 27017)\n", (206, 226), False, 'import pymongo\n')] |
import warnings
def saveit(func):
"""A decorator that caches the return value of a function"""
name = '_' + func.__name__
def _wrapper(self, *args, **kwds):
if not hasattr(self, name):
setattr(self, name, func(self, *args, **kwds))
return getattr(self, name)
return _wrapper
cacheit = saveit
def prevent_recursion(default):
"""A decorator that returns the return value of `default` in recursions"""
def decorator(func):
name = '_calling_%s_' % func.__name__
def newfunc(self, *args, **kwds):
if getattr(self, name, False):
return default()
setattr(self, name, True)
try:
return func(self, *args, **kwds)
finally:
setattr(self, name, False)
return newfunc
return decorator
def ignore_exception(exception_class):
"""A decorator that ignores `exception_class` exceptions"""
def _decorator(func):
def newfunc(*args, **kwds):
try:
return func(*args, **kwds)
except exception_class:
pass
return newfunc
return _decorator
def deprecated(message=None):
"""A decorator for deprecated functions"""
def _decorator(func, message=message):
if message is None:
message = '%s is deprecated' % func.__name__
def newfunc(*args, **kwds):
warnings.warn(message, DeprecationWarning, stacklevel=2)
return func(*args, **kwds)
return newfunc
return _decorator
def cached(count):
"""A caching decorator based on parameter objects"""
def decorator(func):
return _Cached(func, count)
return decorator
class _Cached(object):
def __init__(self, func, count):
self.func = func
self.cache = []
self.count = count
def __call__(self, *args, **kwds):
key = (args, kwds)
for cached_key, cached_result in self.cache:
if cached_key == key:
return cached_result
result = self.func(*args, **kwds)
self.cache.append((key, result))
if len(self.cache) > self.count:
del self.cache[0]
return result
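# Illustrative usage of the decorators above (not part of the original module):
if __name__ == "__main__":
    class _Example(object):
        @saveit
        def answer(self):
            print("computing")
            return 42
    e = _Example()
    # The method body runs once; the second call returns the cached attribute.
    print(e.answer(), e.answer())
    @cached(2)
    def add(a, b):
        return a + b
    # Results are memoized per argument tuple, keeping at most 2 entries.
    print(add(1, 2), add(1, 2), add(3, 4))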
| [
"warnings.warn"
] | [((1441, 1497), 'warnings.warn', 'warnings.warn', (['message', 'DeprecationWarning'], {'stacklevel': '(2)'}), '(message, DeprecationWarning, stacklevel=2)\n', (1454, 1497), False, 'import warnings\n')] |
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for initializers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import tensorflow as tf
gaussian_conjugate_posteriors = tf.contrib.distributions.gaussian_conjugate_posteriors # pylint: disable=line-too-long
class GaussianTest(tf.test.TestCase):
def testGaussianConjugateKnownSigmaPosterior(self):
with tf.Session():
mu0 = tf.constant(3.0)
sigma0 = tf.constant(math.sqrt(1/0.1))
sigma = tf.constant(math.sqrt(1/0.5))
x = tf.constant([-2.5, 2.5, 4.0, 0.0, -1.0, 2.0])
s = tf.reduce_sum(x)
n = tf.size(x)
prior = tf.contrib.distributions.Gaussian(mu=mu0, sigma=sigma0)
posterior = gaussian_conjugate_posteriors.known_sigma_posterior(
prior=prior, sigma=sigma, s=s, n=n)
# Smoke test
self.assertTrue(isinstance(posterior, tf.contrib.distributions.Gaussian))
posterior_log_pdf = posterior.log_pdf(x).eval()
self.assertEqual(posterior_log_pdf.shape, (6,))
def testGaussianConjugateKnownSigmaPredictive(self):
with tf.Session():
mu0 = tf.constant(3.0)
sigma0 = tf.constant(math.sqrt(1/0.1))
sigma = tf.constant(math.sqrt(1/0.5))
x = tf.constant([-2.5, 2.5, 4.0, 0.0, -1.0, 2.0])
s = tf.reduce_sum(x)
n = tf.size(x)
prior = tf.contrib.distributions.Gaussian(mu=mu0, sigma=sigma0)
predictive = gaussian_conjugate_posteriors.known_sigma_predictive(
prior=prior, sigma=sigma, s=s, n=n)
# Smoke test
self.assertTrue(isinstance(predictive, tf.contrib.distributions.Gaussian))
predictive_log_pdf = predictive.log_pdf(x).eval()
self.assertEqual(predictive_log_pdf.shape, (6,))
if __name__ == '__main__':
tf.test.main()
| [
"tensorflow.reduce_sum",
"tensorflow.Session",
"math.sqrt",
"tensorflow.test.main",
"tensorflow.constant",
"tensorflow.size",
"tensorflow.contrib.distributions.Gaussian"
] | [((2443, 2457), 'tensorflow.test.main', 'tf.test.main', ([], {}), '()\n', (2455, 2457), True, 'import tensorflow as tf\n'), ((1080, 1092), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (1090, 1092), True, 'import tensorflow as tf\n'), ((1106, 1122), 'tensorflow.constant', 'tf.constant', (['(3.0)'], {}), '(3.0)\n', (1117, 1122), True, 'import tensorflow as tf\n'), ((1222, 1267), 'tensorflow.constant', 'tf.constant', (['[-2.5, 2.5, 4.0, 0.0, -1.0, 2.0]'], {}), '([-2.5, 2.5, 4.0, 0.0, -1.0, 2.0])\n', (1233, 1267), True, 'import tensorflow as tf\n'), ((1278, 1294), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['x'], {}), '(x)\n', (1291, 1294), True, 'import tensorflow as tf\n'), ((1305, 1315), 'tensorflow.size', 'tf.size', (['x'], {}), '(x)\n', (1312, 1315), True, 'import tensorflow as tf\n'), ((1330, 1385), 'tensorflow.contrib.distributions.Gaussian', 'tf.contrib.distributions.Gaussian', ([], {'mu': 'mu0', 'sigma': 'sigma0'}), '(mu=mu0, sigma=sigma0)\n', (1363, 1385), True, 'import tensorflow as tf\n'), ((1776, 1788), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (1786, 1788), True, 'import tensorflow as tf\n'), ((1802, 1818), 'tensorflow.constant', 'tf.constant', (['(3.0)'], {}), '(3.0)\n', (1813, 1818), True, 'import tensorflow as tf\n'), ((1918, 1963), 'tensorflow.constant', 'tf.constant', (['[-2.5, 2.5, 4.0, 0.0, -1.0, 2.0]'], {}), '([-2.5, 2.5, 4.0, 0.0, -1.0, 2.0])\n', (1929, 1963), True, 'import tensorflow as tf\n'), ((1974, 1990), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['x'], {}), '(x)\n', (1987, 1990), True, 'import tensorflow as tf\n'), ((2001, 2011), 'tensorflow.size', 'tf.size', (['x'], {}), '(x)\n', (2008, 2011), True, 'import tensorflow as tf\n'), ((2026, 2081), 'tensorflow.contrib.distributions.Gaussian', 'tf.contrib.distributions.Gaussian', ([], {'mu': 'mu0', 'sigma': 'sigma0'}), '(mu=mu0, sigma=sigma0)\n', (2059, 2081), True, 'import tensorflow as tf\n'), ((1150, 1168), 'math.sqrt', 'math.sqrt', (['(1 / 0.1)'], {}), '(1 / 0.1)\n', (1159, 1168), False, 'import math\n'), ((1194, 1212), 'math.sqrt', 'math.sqrt', (['(1 / 0.5)'], {}), '(1 / 0.5)\n', (1203, 1212), False, 'import math\n'), ((1846, 1864), 'math.sqrt', 'math.sqrt', (['(1 / 0.1)'], {}), '(1 / 0.1)\n', (1855, 1864), False, 'import math\n'), ((1890, 1908), 'math.sqrt', 'math.sqrt', (['(1 / 0.5)'], {}), '(1 / 0.5)\n', (1899, 1908), False, 'import math\n')] |
from typing import Any, Dict, List
import pytest
from morecontext import itemrollback
def test_itemrollback_nop() -> None:
d = {"foo": 42}
with itemrollback(d, "foo"):
assert d["foo"] == 42
assert d["foo"] == 42
def test_itemrollback_nop_error() -> None:
d = {"foo": 42}
with pytest.raises(RuntimeError, match="Catch this!"):
with itemrollback(d, "foo"):
assert d["foo"] == 42
raise RuntimeError("Catch this!")
assert d["foo"] == 42
def test_itemrollback_modify() -> None:
d: Dict[str, Any] = {"foo": 42}
with itemrollback(d, "foo"):
assert d["foo"] == 42
d["foo"] = [3.14]
assert d["foo"] == 42
def test_itemrollback_modify_error() -> None:
d: Dict[str, Any] = {"foo": 42}
with pytest.raises(RuntimeError, match="Catch this!"):
with itemrollback(d, "foo"):
assert d["foo"] == 42
d["foo"] = [3.14]
raise RuntimeError("Catch this!")
assert d["foo"] == 42
def test_itemrollback_del() -> None:
d = {"foo": 42}
with itemrollback(d, "foo"):
assert d["foo"] == 42
del d["foo"]
assert d["foo"] == 42
def test_itemrollback_del_error() -> None:
d = {"foo": 42}
with pytest.raises(RuntimeError, match="Catch this!"):
with itemrollback(d, "foo"):
assert d["foo"] == 42
del d["foo"]
raise RuntimeError("Catch this!")
assert d["foo"] == 42
def test_itemrollback_unset() -> None:
d = {"foo": 42}
with itemrollback(d, "bar"):
assert "bar" not in d
assert "bar" not in d
def test_itemrollback_unset_error() -> None:
d = {"foo": 42}
with pytest.raises(RuntimeError, match="Catch this!"):
with itemrollback(d, "bar"):
assert "bar" not in d
raise RuntimeError("Catch this!")
assert "bar" not in d
def test_itemrollback_unset_modify() -> None:
d: Dict[str, Any] = {"foo": 42}
with itemrollback(d, "bar"):
assert "bar" not in d
d["bar"] = [3.14]
assert "bar" not in d
def test_itemrollback_unset_modify_error() -> None:
d: Dict[str, Any] = {"foo": 42}
with pytest.raises(RuntimeError, match="Catch this!"):
with itemrollback(d, "bar"):
assert "bar" not in d
d["bar"] = [3.14]
raise RuntimeError("Catch this!")
assert "bar" not in d
def test_itemrollback_no_copy() -> None:
d: Dict[str, Dict[str, List[Any]]] = {
"foo": {"bar": [1, 2, 3], "quux": ["a", "b", "c"]}
}
with itemrollback(d, "foo"):
d["foo"]["bar"].append(4)
d["foo"]["quux"] = ["x", "y", "z"]
assert d["foo"] == {"bar": [1, 2, 3, 4], "quux": ["x", "y", "z"]}
def test_itemrollback_no_copy_error() -> None:
d: Dict[str, Dict[str, List[Any]]] = {
"foo": {"bar": [1, 2, 3], "quux": ["a", "b", "c"]}
}
with pytest.raises(RuntimeError, match="Catch this!"):
with itemrollback(d, "foo"):
d["foo"]["bar"].append(4)
d["foo"]["quux"] = ["x", "y", "z"]
raise RuntimeError("Catch this!")
assert d["foo"] == {"bar": [1, 2, 3, 4], "quux": ["x", "y", "z"]}
def test_itemrollback_copy() -> None:
d: Dict[str, Dict[str, List[Any]]] = {
"foo": {"bar": [1, 2, 3], "quux": ["a", "b", "c"]}
}
with itemrollback(d, "foo", copy=True):
d["foo"]["bar"].append(4)
d["foo"]["quux"] = ["x", "y", "z"]
assert d["foo"] == {"bar": [1, 2, 3, 4], "quux": ["a", "b", "c"]}
def test_itemrollback_copy_error() -> None:
d: Dict[str, Dict[str, List[Any]]] = {
"foo": {"bar": [1, 2, 3], "quux": ["a", "b", "c"]}
}
with pytest.raises(RuntimeError, match="Catch this!"):
with itemrollback(d, "foo", copy=True):
d["foo"]["bar"].append(4)
d["foo"]["quux"] = ["x", "y", "z"]
raise RuntimeError("Catch this!")
assert d["foo"] == {"bar": [1, 2, 3, 4], "quux": ["a", "b", "c"]}
@pytest.mark.parametrize("copy", [False, True])
def test_itemrollback_deepcopy(copy: bool) -> None:
d: Dict[str, Dict[str, List[Any]]] = {
"foo": {"bar": [1, 2, 3], "quux": ["a", "b", "c"]}
}
with itemrollback(d, "foo", copy=copy, deepcopy=True):
d["foo"]["bar"].append(4)
d["foo"]["quux"] = ["x", "y", "z"]
assert d["foo"] == {"bar": [1, 2, 3], "quux": ["a", "b", "c"]}
@pytest.mark.parametrize("copy", [False, True])
def test_itemrollback_deepcopy_error(copy: bool) -> None:
d: Dict[str, Dict[str, List[Any]]] = {
"foo": {"bar": [1, 2, 3], "quux": ["a", "b", "c"]}
}
with pytest.raises(RuntimeError, match="Catch this!"):
with itemrollback(d, "foo", copy=copy, deepcopy=True):
d["foo"]["bar"].append(4)
d["foo"]["quux"] = ["x", "y", "z"]
raise RuntimeError("Catch this!")
assert d["foo"] == {"bar": [1, 2, 3], "quux": ["a", "b", "c"]}
| [
"pytest.mark.parametrize",
"pytest.raises",
"morecontext.itemrollback"
] | [((4000, 4046), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""copy"""', '[False, True]'], {}), "('copy', [False, True])\n", (4023, 4046), False, 'import pytest\n'), ((4413, 4459), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""copy"""', '[False, True]'], {}), "('copy', [False, True])\n", (4436, 4459), False, 'import pytest\n'), ((154, 176), 'morecontext.itemrollback', 'itemrollback', (['d', '"""foo"""'], {}), "(d, 'foo')\n", (166, 176), False, 'from morecontext import itemrollback\n'), ((308, 356), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {'match': '"""Catch this!"""'}), "(RuntimeError, match='Catch this!')\n", (321, 356), False, 'import pytest\n'), ((588, 610), 'morecontext.itemrollback', 'itemrollback', (['d', '"""foo"""'], {}), "(d, 'foo')\n", (600, 610), False, 'from morecontext import itemrollback\n'), ((787, 835), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {'match': '"""Catch this!"""'}), "(RuntimeError, match='Catch this!')\n", (800, 835), False, 'import pytest\n'), ((1078, 1100), 'morecontext.itemrollback', 'itemrollback', (['d', '"""foo"""'], {}), "(d, 'foo')\n", (1090, 1100), False, 'from morecontext import itemrollback\n'), ((1253, 1301), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {'match': '"""Catch this!"""'}), "(RuntimeError, match='Catch this!')\n", (1266, 1301), False, 'import pytest\n'), ((1541, 1563), 'morecontext.itemrollback', 'itemrollback', (['d', '"""bar"""'], {}), "(d, 'bar')\n", (1553, 1563), False, 'from morecontext import itemrollback\n'), ((1697, 1745), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {'match': '"""Catch this!"""'}), "(RuntimeError, match='Catch this!')\n", (1710, 1745), False, 'import pytest\n'), ((1983, 2005), 'morecontext.itemrollback', 'itemrollback', (['d', '"""bar"""'], {}), "(d, 'bar')\n", (1995, 2005), False, 'from morecontext import itemrollback\n'), ((2188, 2236), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {'match': '"""Catch this!"""'}), "(RuntimeError, match='Catch this!')\n", (2201, 2236), False, 'import pytest\n'), ((2571, 2593), 'morecontext.itemrollback', 'itemrollback', (['d', '"""foo"""'], {}), "(d, 'foo')\n", (2583, 2593), False, 'from morecontext import itemrollback\n'), ((2908, 2956), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {'match': '"""Catch this!"""'}), "(RuntimeError, match='Catch this!')\n", (2921, 2956), False, 'import pytest\n'), ((3353, 3386), 'morecontext.itemrollback', 'itemrollback', (['d', '"""foo"""'], {'copy': '(True)'}), "(d, 'foo', copy=True)\n", (3365, 3386), False, 'from morecontext import itemrollback\n'), ((3698, 3746), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {'match': '"""Catch this!"""'}), "(RuntimeError, match='Catch this!')\n", (3711, 3746), False, 'import pytest\n'), ((4216, 4264), 'morecontext.itemrollback', 'itemrollback', (['d', '"""foo"""'], {'copy': 'copy', 'deepcopy': '(True)'}), "(d, 'foo', copy=copy, deepcopy=True)\n", (4228, 4264), False, 'from morecontext import itemrollback\n'), ((4635, 4683), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {'match': '"""Catch this!"""'}), "(RuntimeError, match='Catch this!')\n", (4648, 4683), False, 'import pytest\n'), ((371, 393), 'morecontext.itemrollback', 'itemrollback', (['d', '"""foo"""'], {}), "(d, 'foo')\n", (383, 393), False, 'from morecontext import itemrollback\n'), ((850, 872), 'morecontext.itemrollback', 'itemrollback', (['d', '"""foo"""'], {}), "(d, 'foo')\n", (862, 872), False, 'from morecontext import itemrollback\n'), ((1316, 1338), 
'morecontext.itemrollback', 'itemrollback', (['d', '"""foo"""'], {}), "(d, 'foo')\n", (1328, 1338), False, 'from morecontext import itemrollback\n'), ((1760, 1782), 'morecontext.itemrollback', 'itemrollback', (['d', '"""bar"""'], {}), "(d, 'bar')\n", (1772, 1782), False, 'from morecontext import itemrollback\n'), ((2251, 2273), 'morecontext.itemrollback', 'itemrollback', (['d', '"""bar"""'], {}), "(d, 'bar')\n", (2263, 2273), False, 'from morecontext import itemrollback\n'), ((2971, 2993), 'morecontext.itemrollback', 'itemrollback', (['d', '"""foo"""'], {}), "(d, 'foo')\n", (2983, 2993), False, 'from morecontext import itemrollback\n'), ((3761, 3794), 'morecontext.itemrollback', 'itemrollback', (['d', '"""foo"""'], {'copy': '(True)'}), "(d, 'foo', copy=True)\n", (3773, 3794), False, 'from morecontext import itemrollback\n'), ((4698, 4746), 'morecontext.itemrollback', 'itemrollback', (['d', '"""foo"""'], {'copy': 'copy', 'deepcopy': '(True)'}), "(d, 'foo', copy=copy, deepcopy=True)\n", (4710, 4746), False, 'from morecontext import itemrollback\n')] |
#! /usr/bin/env python
#
# GOAL
# - load in lnL data
# - fit peak to quadratic (standard), GP, etc.
# - evaluate, based on some parameter grid
#
# FORMAT
# - pankow simplification of standard format
#
# COMPARE TO
# util_NRQuadraticFit.py
# postprocess_1d_cumulative
# util_QuadraticMassPosterior.py
#
import RIFT.interpolators.BayesianLeastSquares as BayesianLeastSquares
import argparse
import sys
import numpy as np
import numpy.lib.recfunctions
import scipy
import scipy.optimize
import RIFT.lalsimutils as lalsimutils
import lalsimulation as lalsim
import lalframe
import lal
import functools
import itertools
no_plots = True
try:
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.lines as mlines
import corner
no_plots=False
except ImportError:
print(" - no matplotlib - ")
from sklearn.preprocessing import PolynomialFeatures
import RIFT.misc.ModifiedScikitFit as msf # alternative PolynomialFeatures
from sklearn import linear_model
from glue.ligolw import lsctables, utils, ligolw
lsctables.use_in(ligolw.LIGOLWContentHandler)
import RIFT.integrators.mcsampler as mcsampler
def render_coord(x):
if x in lalsimutils.tex_dictionary.keys():
return tex_dictionary[x]
if 'product(' in x:
a=x.replace(' ', '') # drop spaces
a = a[:len(a)-1] # drop last
a = a[8:]
terms = a.split(',')
exprs =map(render_coord, terms)
exprs = map( lambda x: x.replace('$', ''), exprs)
my_label = ' '.join(exprs)
return '$'+my_label+'$'
else:
return x
def render_coordinates(coord_names):
return map(render_coord, coord_names)
parser = argparse.ArgumentParser()
parser.add_argument("--maximize-mass",action='store_true', help="If true, maximize the likelihood for each value of the total mass. Ignore any grid placement in total mass")
parser.add_argument("--fname",help="filename of *.dat file [standard ILE output]")
parser.add_argument("--input-tides",action='store_true',help="Use input format with tidal fields included.")
parser.add_argument("--fname-xml-base",help="filename of xml file to use as base (e.g., to specify m1,m2, chi1, chi2, ... to minimize burden on ascii file")
parser.add_argument("--fname-parameter-grid",help="filename of ascii parameters to use to evaluate the fit")
parser.add_argument("--fname-out",default="eval.dat")
parser.add_argument("--fref",default=20,type=float, help="Reference frequency used for spins in the ILE output. (Since I usually use SEOBNRv3, the best choice is 20Hz)")
parser.add_argument("--fmin",type=float,default=20)
parser.add_argument("--fname-rom-samples",default=None,help="*.rom_composite output. Treated identically to set of posterior samples produced by mcsampler after constructing fit.")
parser.add_argument("--parameter", action='append', help="Parameters used as fitting parameters AND varied at a low level to make a posterior")
parser.add_argument("--parameter-implied", action='append', help="Parameter used in fit, but not independently varied for Monte Carlo")
parser.add_argument("--mc-range",default=None,help="Chirp mass range [mc1,mc2]. Important if we have a low-mass object, to avoid wasting time sampling elsewhere.")
parser.add_argument("--eta-range",default=None,help="Eta range. Important if we have a BNS or other item that has a strong constraint.")
parser.add_argument("--mtot-range",default=None,help="Chirp mass range [mc1,mc2]. Important if we have a low-mass object, to avoid wasting time sampling elsewhere.")
parser.add_argument("--trust-sample-parameter-box",action='store_true', help="If used, sets the prior range to the SAMPLE range for any parameters. NOT IMPLEMENTED. This should be automatically done for mc!")
parser.add_argument("--plots-do-not-force-large-range",action='store_true', help = "If used, the plots do NOT automatically set the chieff range to [-1,1], the eta range to [0,1/4], etc")
parser.add_argument("--downselect-parameter",action='append', help='Name of parameter to be used to eliminate grid points ')
parser.add_argument("--downselect-parameter-range",action='append',type=str)
parser.add_argument("--chi-max", default=1,type=float,help="Maximum range of 'a' allowed. Use when comparing to models that aren't calibrated to go to the Kerr limit.")
parser.add_argument("--parameter-nofit", action='append', help="Parameter used to initialize the implied parameters, and varied at a low level, but NOT the fitting parameters")
parser.add_argument("--use-precessing",action='store_true')
parser.add_argument("--lnL-offset",type=float,default=10,help="lnL offset")
parser.add_argument("--lnL-cut",type=float,default=None,help="lnL cut [MANUAL]")
parser.add_argument("--M-max-cut",type=float,default=1e5,help="Maximum mass to consider (e.g., if there is a cut on distance, this matters)")
parser.add_argument("--sigma-cut",type=float,default=0.6,help="Eliminate points with large error from the fit.")
parser.add_argument("--ignore-errors-in-data",action='store_true',help='Ignore reported error in lnL. Helpful for testing purposes (i.e., if the error is zero)')
parser.add_argument("--lnL-peak-insane-cut",type=float,default=np.inf,help="Throw away lnL greater than this value. Should not be necessary")
parser.add_argument("--verbose", action="store_true",default=False, help="Required to build post-frame-generating sanity-test plots")
parser.add_argument("--save-plots",default=False,action='store_true', help="Write plots to file (only useful for OSX, where interactive is default")
parser.add_argument("--inj-file", help="Name of injection file")
parser.add_argument("--event-num", type=int, default=0,help="Zero index of event in inj_file")
parser.add_argument("--report-best-point",action='store_true')
parser.add_argument("--adapt",action='store_true')
parser.add_argument("--fit-uses-reported-error",action='store_true')
parser.add_argument("--fit-uses-reported-error-factor",type=float,default=1,help="Factor to add to standard deviation of fit, before adding to lnL. Multiplies number fitting dimensions")
parser.add_argument("--n-max",default=3e5,type=float)
parser.add_argument("--n-eff",default=3e3,type=int)
parser.add_argument("--fit-method",default="quadratic",help="quadratic|polynomial|gp|gp_hyper")
parser.add_argument("--fit-order",type=int,default=2,help="Fit order (polynomial case: degree)")
parser.add_argument("--fit-uncertainty-added",default=False, action='store_true', help="Reported likelihood is lnL+(fit error). Use for placement and use of systematic errors.")
parser.add_argument("--no-plots",action='store_true')
opts= parser.parse_args()
no_plots = no_plots | opts.no_plots
with open('args.txt','w') as fp:
import sys
fp.write(' '.join(sys.argv))
if opts.fit_method == "quadratic":
opts.fit_order = 2 # overrride
###
### Comparison data (from LI)
###
remap_ILE_2_LI = {
"s1z":"a1z", "s2z":"a2z",
"s1x":"a1x", "s1y":"a1y",
"s2x":"a2x", "s2y":"a2y",
"chi1_perp":"chi1_perp",
"chi2_perp":"chi2_perp",
"chi1":'a1',
"chi2":'a2',
"cos_phiJL": 'cos_phiJL',
"sin_phiJL": 'sin_phiJL',
"cos_theta1":'costilt1',
"cos_theta2":'costilt2',
"theta1":"tilt1",
"theta2":"tilt2",
"xi":"chi_eff",
"chiMinus":"chi_minus",
"delta":"delta",
"mtot":'mtotal', "mc":"mc", "eta":"eta","m1":"m1","m2":"m2",
"cos_beta":"cosbeta",
"beta":"beta",
"LambdaTilde":"lambdat",
"DeltaLambdaTilde": "dlambdat"}
downselect_dict = {}
dlist = []
dlist_ranges=[]
if opts.downselect_parameter:
dlist = opts.downselect_parameter
dlist_ranges = map(eval,opts.downselect_parameter_range)
else:
dlist = []
dlist_ranges = []
if len(dlist) != len(dlist_ranges):
print(" downselect parameters inconsistent", dlist, dlist_ranges)
for indx in np.arange(len(dlist_ranges)):
downselect_dict[dlist[indx]] = dlist_ranges[indx]
chi_max = opts.chi_max
downselect_dict['chi1'] = [0,chi_max]
downselect_dict['chi2'] = [0,chi_max]
for param in ['s1z', 's2z', 's1x','s2x', 's1y', 's2y']:
downselect_dict[param] = [-chi_max,chi_max]
# Enforce definition of eta
downselect_dict['eta'] = [0,0.25]
test_converged={}
#test_converged['neff'] = functools.partial(mcsampler.convergence_test_MostSignificantPoint,0.01) # most significant point less than 1/neff of probability. Exactly equivalent to usual neff threshold.
#test_converged["normal_integral"] = functools.partial(mcsampler.convergence_test_NormalSubIntegrals, 25, 0.01, 0.1) # 20 sub-integrals are gaussian distributed [weakly; mainly to rule out outliers] *and* relative error < 10%, based on sub-integrals . Should use # of intervals << neff target from above. Note this sets our target error tolerance on lnLmarg. Note the specific test requires >= 20 sub-intervals, which demands *very many* samples (each subintegral needs to be converged).
prior_range_map = {"mtot": [1, 300], "q":[0.01,1], "s1z":[-0.999*chi_max,0.999*chi_max], "s2z":[-0.999*chi_max,0.999*chi_max], "mc":[0.9,250], "eta":[0.01,0.2499999], 'xi':[-chi_max,chi_max],'chi_eff':[-chi_max,chi_max],'delta':[-1,1],
's1x':[-chi_max,chi_max],
's2x':[-chi_max,chi_max],
's1y':[-chi_max,chi_max],
's2y':[-chi_max,chi_max],
'm1':[0.9,1e3],
'm2':[0.9,1e3],
'lambda1':[0.01,4000],
'lambda2':[0.01,4000],
# strongly recommend you do NOT use these as parameters! Only to insure backward compatibility with LI results
'LambdaTilde':[0.01,5000],
'DeltaLambdaTilde':[-500,500],
}
if not (opts.eta_range is None):
print(" Warning: Overriding default eta range. USE WITH CARE")
prior_range_map['eta'] = eval(opts.eta_range) # really only useful if eta is a coordinate. USE WITH CARE
# TeX dictionary
tex_dictionary = lalsimutils.tex_dictionary
###
### Linear fits. Resampling a quadratic. (Export me)
###
def fit_quadratic_alt(x,y,y_err=None,x0=None,symmetry_list=None,verbose=False):
gamma_x = None
if not (y_err is None):
gamma_x =1./np.power(y_err,2)
the_quadratic_results = BayesianLeastSquares.fit_quadratic( x, y,gamma_x=gamma_x,verbose=verbose)#x0=None)#x0_val_here)
peak_val_est, best_val_est, my_fisher_est, linear_term_est,fn_estimate = the_quadratic_results
np.savetxt("lnL_peakval.dat",[peak_val_est]) # generally not very useful
np.savetxt("lnL_bestpt.dat",best_val_est)
np.savetxt("lnL_gamma.dat",my_fisher_est,header=' '.join(coord_names))
bic =-2*( -0.5*np.sum(np.power((y - fn_estimate(x)),2))/2 - 0.5* len(y)*np.log(len(x[0])) )
print(" Fit: std :" , np.std( y-fn_estimate(x)))
print(" Fit: BIC :" , bic)
return fn_estimate
# https://github.com/scikit-learn/scikit-learn/blob/14031f6/sklearn/preprocessing/data.py#L1139
def fit_polynomial(x,y,x0=None,symmetry_list=None,y_errors=None):
"""
x = array so x[0] , x[1], x[2] are points.
"""
clf_list = []
bic_list = []
for indx in np.arange(opts.fit_order+1):
poly = msf.PolynomialFeatures(degree=indx,symmetry_list=symmetry_list)
X_ = poly.fit_transform(x)
if opts.verbose:
print(" Fit : poly: RAW :", poly.get_feature_names())
print(" Fit : ", poly.powers_)
# Strip things with inappropriate symmetry: IMPOSSIBLE
# powers_new = []
# if not(symmetry_list is None):
# for line in poly.powers_:
# signature = np.prod(np.power( np.array(symmetry_list), line))
# if signature >0:
# powers_new.append(line)
# poly.powers_ = powers_new
# X_ = poly.fit_transform(x) # refit, with symmetry-constrained structure
# print " Fit : poly: After symmetry constraint :", poly.get_feature_names()
# print " Fit : ", poly.powers_
clf = linear_model.LinearRegression()
if y_errors is None or opts.ignore_errors_in_data:
clf.fit(X_,y)
else:
assert len(y_errors) == len(y)
clf.fit(X_,y,sample_weight=1./y_errors**2) # fit with usual weights
clf_list.append(clf)
print(" Fit: Testing order ", indx)
print(" Fit: std: ", np.std(y - clf.predict(X_)), "using number of features ", len(y)) # should NOT be perfect
if not (y_errors is None):
print(" Fit: weighted error ", np.std( (y - clf.predict(X_))/y_errors))
bic = -2*( -0.5*np.sum(np.power(y - clf.predict(X_),2)) - 0.5*len(y)*np.log(len(x[0])))
print(" Fit: BIC:", bic)
bic_list.append(bic)
clf = clf_list[np.argmin(np.array(bic_list) )]
return lambda x: clf.predict(poly.fit_transform(x))
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF, WhiteKernel, ConstantKernel as C
def adderr(y):
val,err = y
return val+error_factor*err
def fit_gp(x,y,x0=None,symmetry_list=None,y_errors=None,hypercube_rescale=False):
"""
x = array so x[0] , x[1], x[2] are points.
"""
# Amplitude:
# - We are fitting lnL.
# - We know the scale more or less: more than 2 in the log is bad
# Scale
# - because of strong correlations with chirp mass, the length scales can be very short
# - they are rarely very long, but at high mass can be long
# - I need to allow for a RANGE
length_scale_est = []
length_scale_bounds_est = []
for indx in np.arange(len(x[0])):
        # These length scales have been tuned by experience
length_scale_est.append( 2*np.std(x[:,indx]) ) # auto-select range based on sampling retained
length_scale_min_here= np.max([1e-3,0.2*np.std(x[:,indx]/np.sqrt(len(x)))])
if indx == mc_index:
length_scale_min_here= 0.2*np.std(x[:,indx]/np.sqrt(len(x)))
print(" Setting mc range: retained point range is ", np.std(x[:,indx]), " and target min is ", length_scale_min_here)
length_scale_bounds_est.append( (length_scale_min_here , 5*np.std(x[:,indx]) ) ) # auto-select range based on sampling *RETAINED* (i.e., passing cut). Note that for the coordinates I usually use, it would be nonsensical to make the range in coordinate too small, as can occasionally happens
print(" GP: Estimated length scales ")
print(length_scale_est)
print(length_scale_bounds_est)
if not (hypercube_rescale):
# These parameters have been hand-tuned by experience to try to set to levels comparable to typical lnL Monte Carlo error
kernel = WhiteKernel(noise_level=0.1,noise_level_bounds=(1e-2,1))+C(0.5, (1e-3,1e1))*RBF(length_scale=length_scale_est, length_scale_bounds=length_scale_bounds_est)
gp = GaussianProcessRegressor(kernel=kernel, n_restarts_optimizer=8)
gp.fit(x,y)
print(" Fit: std: ", np.std(y - gp.predict(x)), "using number of features ", len(y))
if not (opts.fit_uncertainty_added):
return lambda x: gp.predict(x)
else:
return lambda x: adderr(gp.predict(x,return_std=True))
else:
x_scaled = np.zeros(x.shape)
x_center = np.zeros(len(length_scale_est))
x_center = np.mean(x)
print(" Scaling data to central point ", x_center)
for indx in np.arange(len(x)):
x_scaled[indx] = (x[indx] - x_center)/length_scale_est # resize
kernel = WhiteKernel(noise_level=0.1,noise_level_bounds=(1e-2,1))+C(0.5, (1e-3,1e1))*RBF( len(x_center), (1e-3,1e1))
gp = GaussianProcessRegressor(kernel=kernel, n_restarts_optimizer=8)
gp.fit(x_scaled,y)
print(" Fit: std: ", np.std(y - gp.predict(x_scaled)), "using number of features ", len(y)) # should NOT be perfect
return lambda x,x0=x_center,scl=length_scale_est: gp.predict( (x-x0 )/scl)
coord_names = opts.parameter # Used in fit
if coord_names is None:
coord_names = []
low_level_coord_names = coord_names # Used for Monte Carlo
if opts.parameter_implied:
coord_names = coord_names+opts.parameter_implied
if opts.parameter_nofit:
if opts.parameter is None:
low_level_coord_names = opts.parameter_nofit # Used for Monte Carlo
else:
low_level_coord_names = opts.parameter+opts.parameter_nofit # Used for Monte Carlo
error_factor = len(coord_names)
if opts.fit_uses_reported_error:
error_factor=len(coord_names)*opts.fit_uses_reported_error_factor
print(" Coordinate names for fit :, ", coord_names)
print(" Rendering coordinate names : ", render_coordinates(coord_names)) # map(lambda x: tex_dictionary[x], coord_names)
print(" Symmetry for these fitting coordinates :", lalsimutils.symmetry_sign_exchange(coord_names))
print(" Coordinate names for Monte Carlo :, ", low_level_coord_names)
print(" Rendering coordinate names : ", map(lambda x: tex_dictionary[x], low_level_coord_names))
# initialize
dat_mass = []
weights = []
n_params = -1
###
### Retrieve data
###
# id m1 m2 lnL sigma/L neff
col_lnL = 9
if opts.input_tides:
print(" Tides input")
col_lnL +=2
dat_orig = dat = np.loadtxt(opts.fname)
dat_orig = dat[dat[:,col_lnL].argsort()] # sort http://stackoverflow.com/questions/2828059/sorting-arrays-in-numpy-by-column
print(" Original data size = ", len(dat), dat.shape)
###
### Convert data. Use lalsimutils for flexibility
###
P_list = []
dat_out =[]
symmetry_list =lalsimutils.symmetry_sign_exchange(coord_names) # identify symmetry due to exchange
mc_min = 1e10
mc_max = -1
mc_index = -1 # index of mchirp in parameter index. To help with nonstandard GP
mc_cut_range = [-np.inf, np.inf]
if opts.mc_range:
mc_cut_range = eval(opts.mc_range) # throw out samples outside this range
print(" Stripping samples outside of ", mc_cut_range, " in mc")
P= lalsimutils.ChooseWaveformParams()
for line in dat:
# Skip precessing binaries unless explicitly requested not to!
if not opts.use_precessing and (line[3]**2 + line[4]**2 + line[6]**2 + line[7]**2)>0.01:
print(" Skipping precessing binaries ")
continue
if line[1]+line[2] > opts.M_max_cut:
if opts.verbose:
print(" Skipping ", line, " as too massive, with mass ", line[1]+line[2])
continue
if line[col_lnL+1] > opts.sigma_cut:
# if opts.verbose:
# print " Skipping ", line
continue
    if (opts.lnL_cut is not None) and (line[col_lnL] < opts.lnL_cut):
continue # strip worthless points. DANGEROUS
mc_here = lalsimutils.mchirp(line[1],line[2])
if mc_here < mc_cut_range[0] or mc_here > mc_cut_range[1]:
if opts.verbose:
print("Stripping because sample outside of target mc range ", line)
continue
if line[col_lnL] < opts.lnL_peak_insane_cut:
P.fref = opts.fref # IMPORTANT if you are using a quantity that depends on J
P.fmin = opts.fmin
P.m1 = line[1]*lal.MSUN_SI
P.m2 = line[2]*lal.MSUN_SI
P.s1x = line[3]
P.s1y = line[4]
P.s1z = line[5]
P.s2x = line[6]
P.s2y = line[7]
P.s2z = line[8]
if opts.input_tides:
P.lambda1 = line[9]
P.lambda2 = line[10]
# INPUT GRID: Evaluate binary parameters on fitting coordinates
line_out = np.zeros(len(coord_names)+2)
for x in np.arange(len(coord_names)):
line_out[x] = P.extract_param(coord_names[x])
# line_out[x] = getattr(P, coord_names[x])
line_out[-2] = line[col_lnL]
line_out[-1] = line[col_lnL+1] # adjoin error estimate
dat_out.append(line_out)
# results using sampling coordinates (low_level_coord_names)
line_out = np.zeros(len(low_level_coord_names))
for x in np.arange(len(line_out)):
fac = 1
if low_level_coord_names[x] in ['mc','m1','m2','mtot']:
fac = lal.MSUN_SI
line_out[x] = P.extract_param(low_level_coord_names[x])/fac
if low_level_coord_names[x] in ['mc','mtot']: # only use one overall mass index
mc_index = x
# Update mc range
mc_here = lalsimutils.mchirp(line[1],line[2])
if mc_here < mc_min:
mc_min = mc_here
if mc_here > mc_max:
mc_max = mc_here
Pref_default = P.copy() # keep this around to fix the masses, if we don't have an inj
dat_out = np.array(dat_out)
print(" Stripped size = ", dat_out.shape)
# scale out mass units
for p in ['mc', 'm1', 'm2', 'mtot']:
if p in coord_names:
indx = coord_names.index(p)
dat_out[:,indx] /= lal.MSUN_SI
# Repack data
X =dat_out[:,0:len(coord_names)]
Y = dat_out[:,-2]
Y_err = dat_out[:,-1]
# Eliminate values with Y too small
max_lnL = np.max(Y)
indx_ok = Y>np.max(Y)-opts.lnL_offset
print(" Points used in fit : ", sum(indx_ok), " given max lnL ", max_lnL)
if max_lnL < 10:
# nothing matters, we will reject it anyways
indx_ok = np.ones(len(Y),dtype=bool)
elif sum(indx_ok) < 10: # and max_lnL > 30:
# mark the top 10 elements and use them for fits
# this may be VERY VERY DANGEROUS if the peak is high and poorly sampled
idx_sorted_index = np.lexsort((np.arange(len(Y)), Y)) # Sort the array of Y, recovering index values
indx_list = np.array( [[k, Y[k]] for k in idx_sorted_index]) # pair up with the weights again
indx_list = indx_list[::-1] # reverse, so most significant are first
indx_ok = map(int,indx_list[:10,0])
print(" Revised number of points for fit: ", sum(indx_ok), indx_ok, indx_list[:10])
X_raw = X.copy()
my_fit= None
if opts.fit_method == "quadratic":
print(" FIT METHOD ", opts.fit_method, " IS QUADRATIC")
X=X[indx_ok]
Y=Y[indx_ok]
Y_err = Y_err[indx_ok]
my_fit = fit_quadratic_alt(X,Y,symmetry_list=symmetry_list,verbose=opts.verbose)
elif opts.fit_method == "polynomial":
print(" FIT METHOD ", opts.fit_method, " IS POLYNOMIAL")
X=X[indx_ok]
Y=Y[indx_ok]
Y_err = Y_err[indx_ok]
my_fit = fit_polynomial(X,Y,symmetry_list=symmetry_list,y_errors=Y_err)
elif opts.fit_method == 'gp_hyper':
print(" FIT METHOD ", opts.fit_method, " IS GP with hypercube rescaling")
# some data truncation IS used for the GP, but beware
print(" Truncating data set used for GP, to reduce memory usage needed in matrix operations")
X=X[indx_ok]
Y=Y[indx_ok]
Y_err = Y_err[indx_ok]
my_fit = fit_gp(X,Y,y_errors=Y_err,hypercube_rescale=True)
elif opts.fit_method == 'gp':
print(" FIT METHOD ", opts.fit_method, " IS GP")
# some data truncation IS used for the GP, but beware
print(" Truncating data set used for GP, to reduce memory usage needed in matrix operations")
X=X[indx_ok]
Y=Y[indx_ok]
Y_err = Y_err[indx_ok]
my_fit = fit_gp(X,Y,y_errors=Y_err)
# Sort for later convenience (scatterplots, etc)
indx = Y.argsort()#[::-1]
X=X[indx]
Y=Y[indx]
###
### Coordinate conversion tool
###
def convert_coords(x_in):
return lalsimutils.convert_waveform_coordinates(x_in, coord_names=coord_names,low_level_coord_names=low_level_coord_names)
likelihood_function = None
if len(low_level_coord_names) ==1:
def likelihood_function(x):
if isinstance(x,float):
return np.exp(my_fit([x]))
else:
return np.exp(my_fit(convert_coords(np.array([x]).T) ))
if len(low_level_coord_names) ==2:
def likelihood_function(x,y):
if isinstance(x,float):
return np.exp(my_fit([x,y]))
else:
return np.exp(my_fit(convert_coords(np.array([x,y]).T)))
if len(low_level_coord_names) ==3:
def likelihood_function(x,y,z):
if isinstance(x,float):
return np.exp(my_fit([x,y,z]))
else:
return np.exp(my_fit(convert_coords(np.array([x,y,z]).T)))
if len(low_level_coord_names) ==4:
def likelihood_function(x,y,z,a):
if isinstance(x,float):
return np.exp(my_fit([x,y,z,a]))
else:
return np.exp(my_fit(convert_coords(np.array([x,y,z,a]).T)))
if len(low_level_coord_names) ==5:
def likelihood_function(x,y,z,a,b):
if isinstance(x,float):
return np.exp(my_fit([x,y,z,a,b]))
else:
return np.exp(my_fit(convert_coords(np.array([x,y,z,a,b]).T)))
if len(low_level_coord_names) ==6:
def likelihood_function(x,y,z,a,b,c):
if isinstance(x,float):
return np.exp(my_fit([x,y,z,a,b,c]))
else:
return np.exp(my_fit(convert_coords(np.array([x,y,z,a,b,c]).T)))
if len(low_level_coord_names) ==7:
def likelihood_function(x,y,z,a,b,c,d):
if isinstance(x,float):
return np.exp(my_fit([x,y,z,a,b,c,d]))
else:
return np.exp(my_fit(convert_coords(np.array([x,y,z,a,b,c,d]).T)))
if len(low_level_coord_names) ==8:
def likelihood_function(x,y,z,a,b,c,d,e):
if isinstance(x,float):
return np.exp(my_fit([x,y,z,a,b,c,d,e]))
else:
return np.exp(my_fit(convert_coords(np.array([x,y,z,a,b,c,d,e]).T)))
if len(low_level_coord_names) ==9:
def likelihood_function(x,y,z,a,b,c,d,e,f):
if isinstance(x,float):
return np.exp(my_fit([x,y,z,a,b,c,d,e,f]))
else:
return np.exp(my_fit(convert_coords(np.array([x,y,z,a,b,c,d,e,f]).T)))
if len(low_level_coord_names) ==10:
def likelihood_function(x,y,z,a,b,c,d,e,f,g):
if isinstance(x,float):
return np.exp(my_fit([x,y,z,a,b,c,d,e,f,g]))
else:
return np.exp(my_fit(convert_coords(np.array([x,y,z,a,b,c,d,e,f,g]).T)))
# PROCEDURE
# - Identify grid of desired parameters (e.g., ascii table)
# - Create
# Base
P_base = lalsimutils.xml_to_ChooseWaveformParams_array(opts.fname_xml_base)[0]
# Grid
samples_rec = np.genfromtxt(opts.fname_parameter_grid,names=True)
params_rec = samples_rec.dtype.names
# Conversion
P_list =[]
grid_list = []
lnL_list = []
for indx in np.arange(len(samples_rec[params_rec[0]])):
P = P_base.manual_copy()
for param in params_rec:
val = samples_rec[param][indx]
fac=1
if param in ['mc','m1','m2','mtot']:
fac = lal.MSUN_SI
P.assign_param(param,fac*val)
if opts.verbose:
P.print_params()
line_out = np.zeros(len(coord_names))
for x in np.arange(len(line_out)):
fac = 1
if coord_names[x] in ['mc','m1','m2','mtot']:
fac = lal.MSUN_SI
line_out[x] = P.extract_param(coord_names[x])/fac
# If opts.maximize_mass, we are reporting the likelihood maximized in total mass (all other parameters held fixed)
# Remember, mc_index tells us the variable we need to scale
arg=-1
if (not opts.maximize_mass) or mc_index <0:
arg = my_fit(line_out)[0]
else:
scalevec = np.ones(len(coord_names));
def scaledfunc(x):
scalevec[mc_index] = x
val = -my_fit(line_out*scalevec)
return -my_fit(line_out*scalevec)[0]
res= scipy.optimize.minimize(scaledfunc,1,bounds=[(0.01,100)],options={'maxiter':50}) # unlikely to have mass range scale of a factor of 10^4
arg = -scaledfunc(res.x)
grid_list.append(line_out)
lnL_list.append(arg)
print(line_out, arg)
n_params = len(grid_list[0])
dat_out = np.zeros( (len(grid_list), n_params+1))
dat_out[:,:n_params] = np.array(grid_list)
dat_out[:,-1] = np.array(lnL_list)
np.savetxt(opts.fname_out, dat_out)
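# Illustrative invocation (the script and file names are placeholders; the
# options are taken from the argparse definitions above):
#
#     python evaluate_fit_on_grid.py --fname ile_points.dat --fname-xml-base base.xml.gz \
#         --fname-parameter-grid grid.dat --parameter mc --parameter eta \
#         --fit-method gp --fname-out eval.dat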
| [
"RIFT.misc.ModifiedScikitFit.PolynomialFeatures",
"numpy.array",
"sklearn.gaussian_process.kernels.WhiteKernel",
"numpy.genfromtxt",
"RIFT.lalsimutils.tex_dictionary.keys",
"numpy.arange",
"sklearn.gaussian_process.GaussianProcessRegressor",
"RIFT.lalsimutils.mchirp",
"numpy.mean",
"glue.ligolw.lsctables.use_in",
"argparse.ArgumentParser",
"sklearn.gaussian_process.kernels.ConstantKernel",
"numpy.max",
"RIFT.lalsimutils.ChooseWaveformParams",
"sklearn.gaussian_process.kernels.RBF",
"scipy.optimize.minimize",
"numpy.savetxt",
"numpy.std",
"RIFT.lalsimutils.xml_to_ChooseWaveformParams_array",
"RIFT.lalsimutils.symmetry_sign_exchange",
"sklearn.linear_model.LinearRegression",
"RIFT.lalsimutils.convert_waveform_coordinates",
"numpy.power",
"numpy.zeros",
"numpy.loadtxt",
"RIFT.interpolators.BayesianLeastSquares.fit_quadratic"
] | [((1108, 1153), 'glue.ligolw.lsctables.use_in', 'lsctables.use_in', (['ligolw.LIGOLWContentHandler'], {}), '(ligolw.LIGOLWContentHandler)\n', (1124, 1153), False, 'from glue.ligolw import lsctables, utils, ligolw\n'), ((1763, 1788), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1786, 1788), False, 'import argparse\n'), ((17341, 17363), 'numpy.loadtxt', 'np.loadtxt', (['opts.fname'], {}), '(opts.fname)\n', (17351, 17363), True, 'import numpy as np\n'), ((17659, 17706), 'RIFT.lalsimutils.symmetry_sign_exchange', 'lalsimutils.symmetry_sign_exchange', (['coord_names'], {}), '(coord_names)\n', (17693, 17706), True, 'import RIFT.lalsimutils as lalsimutils\n'), ((18058, 18092), 'RIFT.lalsimutils.ChooseWaveformParams', 'lalsimutils.ChooseWaveformParams', ([], {}), '()\n', (18090, 18092), True, 'import RIFT.lalsimutils as lalsimutils\n'), ((20506, 20523), 'numpy.array', 'np.array', (['dat_out'], {}), '(dat_out)\n', (20514, 20523), True, 'import numpy as np\n'), ((20893, 20902), 'numpy.max', 'np.max', (['Y'], {}), '(Y)\n', (20899, 20902), True, 'import numpy as np\n'), ((26093, 26145), 'numpy.genfromtxt', 'np.genfromtxt', (['opts.fname_parameter_grid'], {'names': '(True)'}), '(opts.fname_parameter_grid, names=True)\n', (26106, 26145), True, 'import numpy as np\n'), ((27706, 27725), 'numpy.array', 'np.array', (['grid_list'], {}), '(grid_list)\n', (27714, 27725), True, 'import numpy as np\n'), ((27743, 27761), 'numpy.array', 'np.array', (['lnL_list'], {}), '(lnL_list)\n', (27751, 27761), True, 'import numpy as np\n'), ((27763, 27798), 'numpy.savetxt', 'np.savetxt', (['opts.fname_out', 'dat_out'], {}), '(opts.fname_out, dat_out)\n', (27773, 27798), True, 'import numpy as np\n'), ((10204, 10278), 'RIFT.interpolators.BayesianLeastSquares.fit_quadratic', 'BayesianLeastSquares.fit_quadratic', (['x', 'y'], {'gamma_x': 'gamma_x', 'verbose': 'verbose'}), '(x, y, gamma_x=gamma_x, verbose=verbose)\n', (10238, 10278), True, 'import RIFT.interpolators.BayesianLeastSquares as BayesianLeastSquares\n'), ((10407, 10452), 'numpy.savetxt', 'np.savetxt', (['"""lnL_peakval.dat"""', '[peak_val_est]'], {}), "('lnL_peakval.dat', [peak_val_est])\n", (10417, 10452), True, 'import numpy as np\n'), ((10487, 10529), 'numpy.savetxt', 'np.savetxt', (['"""lnL_bestpt.dat"""', 'best_val_est'], {}), "('lnL_bestpt.dat', best_val_est)\n", (10497, 10529), True, 'import numpy as np\n'), ((11124, 11153), 'numpy.arange', 'np.arange', (['(opts.fit_order + 1)'], {}), '(opts.fit_order + 1)\n', (11133, 11153), True, 'import numpy as np\n'), ((16901, 16948), 'RIFT.lalsimutils.symmetry_sign_exchange', 'lalsimutils.symmetry_sign_exchange', (['coord_names'], {}), '(coord_names)\n', (16935, 16948), True, 'import RIFT.lalsimutils as lalsimutils\n'), ((18718, 18754), 'RIFT.lalsimutils.mchirp', 'lalsimutils.mchirp', (['line[1]', 'line[2]'], {}), '(line[1], line[2])\n', (18736, 18754), True, 'import RIFT.lalsimutils as lalsimutils\n'), ((23186, 23306), 'RIFT.lalsimutils.convert_waveform_coordinates', 'lalsimutils.convert_waveform_coordinates', (['x_in'], {'coord_names': 'coord_names', 'low_level_coord_names': 'low_level_coord_names'}), '(x_in, coord_names=coord_names,\n low_level_coord_names=low_level_coord_names)\n', (23226, 23306), True, 'import RIFT.lalsimutils as lalsimutils\n'), ((25998, 26064), 'RIFT.lalsimutils.xml_to_ChooseWaveformParams_array', 'lalsimutils.xml_to_ChooseWaveformParams_array', (['opts.fname_xml_base'], {}), '(opts.fname_xml_base)\n', (26043, 26064), True, 'import RIFT.lalsimutils as 
lalsimutils\n'), ((1243, 1276), 'RIFT.lalsimutils.tex_dictionary.keys', 'lalsimutils.tex_dictionary.keys', ([], {}), '()\n', (1274, 1276), True, 'import RIFT.lalsimutils as lalsimutils\n'), ((11169, 11233), 'RIFT.misc.ModifiedScikitFit.PolynomialFeatures', 'msf.PolynomialFeatures', ([], {'degree': 'indx', 'symmetry_list': 'symmetry_list'}), '(degree=indx, symmetry_list=symmetry_list)\n', (11191, 11233), True, 'import RIFT.misc.ModifiedScikitFit as msf\n'), ((12009, 12040), 'sklearn.linear_model.LinearRegression', 'linear_model.LinearRegression', ([], {}), '()\n', (12038, 12040), False, 'from sklearn import linear_model\n'), ((14935, 14998), 'sklearn.gaussian_process.GaussianProcessRegressor', 'GaussianProcessRegressor', ([], {'kernel': 'kernel', 'n_restarts_optimizer': '(8)'}), '(kernel=kernel, n_restarts_optimizer=8)\n', (14959, 14998), False, 'from sklearn.gaussian_process import GaussianProcessRegressor\n'), ((15325, 15342), 'numpy.zeros', 'np.zeros', (['x.shape'], {}), '(x.shape)\n', (15333, 15342), True, 'import numpy as np\n'), ((15415, 15425), 'numpy.mean', 'np.mean', (['x'], {}), '(x)\n', (15422, 15425), True, 'import numpy as np\n'), ((15745, 15808), 'sklearn.gaussian_process.GaussianProcessRegressor', 'GaussianProcessRegressor', ([], {'kernel': 'kernel', 'n_restarts_optimizer': '(8)'}), '(kernel=kernel, n_restarts_optimizer=8)\n', (15769, 15808), False, 'from sklearn.gaussian_process import GaussianProcessRegressor\n'), ((20263, 20299), 'RIFT.lalsimutils.mchirp', 'lalsimutils.mchirp', (['line[1]', 'line[2]'], {}), '(line[1], line[2])\n', (20281, 20299), True, 'import RIFT.lalsimutils as lalsimutils\n'), ((20916, 20925), 'numpy.max', 'np.max', (['Y'], {}), '(Y)\n', (20922, 20925), True, 'import numpy as np\n'), ((21428, 21475), 'numpy.array', 'np.array', (['[[k, Y[k]] for k in idx_sorted_index]'], {}), '([[k, Y[k]] for k in idx_sorted_index])\n', (21436, 21475), True, 'import numpy as np\n'), ((27341, 27431), 'scipy.optimize.minimize', 'scipy.optimize.minimize', (['scaledfunc', '(1)'], {'bounds': '[(0.01, 100)]', 'options': "{'maxiter': 50}"}), "(scaledfunc, 1, bounds=[(0.01, 100)], options={\n 'maxiter': 50})\n", (27364, 27431), False, 'import scipy\n'), ((10157, 10175), 'numpy.power', 'np.power', (['y_err', '(2)'], {}), '(y_err, 2)\n', (10165, 10175), True, 'import numpy as np\n'), ((12785, 12803), 'numpy.array', 'np.array', (['bic_list'], {}), '(bic_list)\n', (12793, 12803), True, 'import numpy as np\n'), ((14765, 14823), 'sklearn.gaussian_process.kernels.WhiteKernel', 'WhiteKernel', ([], {'noise_level': '(0.1)', 'noise_level_bounds': '(0.01, 1)'}), '(noise_level=0.1, noise_level_bounds=(0.01, 1))\n', (14776, 14823), False, 'from sklearn.gaussian_process.kernels import RBF, WhiteKernel, ConstantKernel as C\n'), ((15623, 15681), 'sklearn.gaussian_process.kernels.WhiteKernel', 'WhiteKernel', ([], {'noise_level': '(0.1)', 'noise_level_bounds': '(0.01, 1)'}), '(noise_level=0.1, noise_level_bounds=(0.01, 1))\n', (15634, 15681), False, 'from sklearn.gaussian_process.kernels import RBF, WhiteKernel, ConstantKernel as C\n'), ((13777, 13795), 'numpy.std', 'np.std', (['x[:, indx]'], {}), '(x[:, indx])\n', (13783, 13795), True, 'import numpy as np\n'), ((14101, 14119), 'numpy.std', 'np.std', (['x[:, indx]'], {}), '(x[:, indx])\n', (14107, 14119), True, 'import numpy as np\n'), ((14822, 14843), 'sklearn.gaussian_process.kernels.ConstantKernel', 'C', (['(0.5)', '(0.001, 10.0)'], {}), '(0.5, (0.001, 10.0))\n', (14823, 14843), True, 'from sklearn.gaussian_process.kernels import RBF, 
WhiteKernel, ConstantKernel as C\n'), ((14841, 14920), 'sklearn.gaussian_process.kernels.RBF', 'RBF', ([], {'length_scale': 'length_scale_est', 'length_scale_bounds': 'length_scale_bounds_est'}), '(length_scale=length_scale_est, length_scale_bounds=length_scale_bounds_est)\n', (14844, 14920), False, 'from sklearn.gaussian_process.kernels import RBF, WhiteKernel, ConstantKernel as C\n'), ((15680, 15701), 'sklearn.gaussian_process.kernels.ConstantKernel', 'C', (['(0.5)', '(0.001, 10.0)'], {}), '(0.5, (0.001, 10.0))\n', (15681, 15701), True, 'from sklearn.gaussian_process.kernels import RBF, WhiteKernel, ConstantKernel as C\n'), ((14234, 14252), 'numpy.std', 'np.std', (['x[:, indx]'], {}), '(x[:, indx])\n', (14240, 14252), True, 'import numpy as np\n'), ((23544, 23557), 'numpy.array', 'np.array', (['[x]'], {}), '([x])\n', (23552, 23557), True, 'import numpy as np\n'), ((23777, 23793), 'numpy.array', 'np.array', (['[x, y]'], {}), '([x, y])\n', (23785, 23793), True, 'import numpy as np\n'), ((24014, 24033), 'numpy.array', 'np.array', (['[x, y, z]'], {}), '([x, y, z])\n', (24022, 24033), True, 'import numpy as np\n'), ((24257, 24279), 'numpy.array', 'np.array', (['[x, y, z, a]'], {}), '([x, y, z, a])\n', (24265, 24279), True, 'import numpy as np\n'), ((24506, 24531), 'numpy.array', 'np.array', (['[x, y, z, a, b]'], {}), '([x, y, z, a, b])\n', (24514, 24531), True, 'import numpy as np\n'), ((24761, 24789), 'numpy.array', 'np.array', (['[x, y, z, a, b, c]'], {}), '([x, y, z, a, b, c])\n', (24769, 24789), True, 'import numpy as np\n'), ((25022, 25053), 'numpy.array', 'np.array', (['[x, y, z, a, b, c, d]'], {}), '([x, y, z, a, b, c, d])\n', (25030, 25053), True, 'import numpy as np\n'), ((25289, 25323), 'numpy.array', 'np.array', (['[x, y, z, a, b, c, d, e]'], {}), '([x, y, z, a, b, c, d, e])\n', (25297, 25323), True, 'import numpy as np\n'), ((25562, 25599), 'numpy.array', 'np.array', (['[x, y, z, a, b, c, d, e, f]'], {}), '([x, y, z, a, b, c, d, e, f])\n', (25570, 25599), True, 'import numpy as np\n'), ((25842, 25882), 'numpy.array', 'np.array', (['[x, y, z, a, b, c, d, e, f, g]'], {}), '([x, y, z, a, b, c, d, e, f, g])\n', (25850, 25882), True, 'import numpy as np\n')] |