index
int64 0
100k
| blob_id
stringlengths 40
40
| code
stringlengths 7
7.27M
| steps
listlengths 1
1.25k
| error
bool 2
classes |
---|---|---|---|---|
98,300 |
dcce2a88b5c518aa5f11dbf7b01bea2a9faa6805
|
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
bl_info = {
"name": "Cell Fracture",
"author": "ideasman42, phymec, Sergey Sharybin",
"version": (0, 1),
"blender": (2, 70, 0),
"location": "Edit panel of Tools tab, in Object mode, 3D View tools",
"description": "Fractured Object, Bomb, Projectile, Recorder",
"warning": "",
"wiki_url": "http://wiki.blender.org/index.php/Extensions:2.6/Py/"
"Scripts/Object/CellFracture",
"category": "Object"}
#if "bpy" in locals():
# import importlib
# importlib.reload(fracture_cell_setup)
import bpy
from bpy.props import (
StringProperty,
BoolProperty,
IntProperty,
FloatProperty,
FloatVectorProperty,
EnumProperty,
)
from bpy.types import Operator
def main_object(context, obj, level, **kw):
import random
# pull out some args
kw_copy = kw.copy()
use_recenter = kw_copy.pop("use_recenter")
use_remove_original = kw_copy.pop("use_remove_original")
recursion = kw_copy.pop("recursion")
recursion_source_limit = kw_copy.pop("recursion_source_limit")
recursion_clamp = kw_copy.pop("recursion_clamp")
recursion_chance = kw_copy.pop("recursion_chance")
recursion_chance_select = kw_copy.pop("recursion_chance_select")
use_layer_next = kw_copy.pop("use_layer_next")
use_layer_index = kw_copy.pop("use_layer_index")
group_name = kw_copy.pop("group_name")
use_island_split = kw_copy.pop("use_island_split")
use_debug_bool = kw_copy.pop("use_debug_bool")
use_interior_vgroup = kw_copy.pop("use_interior_vgroup")
use_sharp_edges = kw_copy.pop("use_sharp_edges")
use_sharp_edges_apply = kw_copy.pop("use_sharp_edges_apply")
collection = context.collection
if level != 0:
kw_copy["source_limit"] = recursion_source_limit
from . import fracture_cell_setup
# not essential but selection is visual distraction.
obj.select_set(False)
if kw_copy["use_debug_redraw"]:
obj_display_type_prev = obj.display_type
obj.display_type = 'WIRE'
objects = fracture_cell_setup.cell_fracture_objects(context, obj, **kw_copy)
objects = fracture_cell_setup.cell_fracture_boolean(context, obj, objects,
use_island_split=use_island_split,
use_interior_hide=(use_interior_vgroup or use_sharp_edges),
use_debug_bool=use_debug_bool,
use_debug_redraw=kw_copy["use_debug_redraw"],
level=level,
)
# must apply after boolean.
if use_recenter:
bpy.ops.object.origin_set({"selected_editable_objects": objects},
type='ORIGIN_GEOMETRY', center='MEDIAN')
if level == 0:
for level_sub in range(1, recursion + 1):
objects_recurse_input = [(i, o) for i, o in enumerate(objects)]
if recursion_chance != 1.0:
from mathutils import Vector
if recursion_chance_select == 'RANDOM':
random.shuffle(objects_recurse_input)
elif recursion_chance_select in {'SIZE_MIN', 'SIZE_MAX'}:
objects_recurse_input.sort(key=lambda ob_pair:
(Vector(ob_pair[1].bound_box[0]) -
Vector(ob_pair[1].bound_box[6])).length_squared)
if recursion_chance_select == 'SIZE_MAX':
objects_recurse_input.reverse()
elif recursion_chance_select in {'CURSOR_MIN', 'CURSOR_MAX'}:
c = scene.cursor_location.copy()
objects_recurse_input.sort(key=lambda ob_pair:
(ob_pair[1].location - c).length_squared)
if recursion_chance_select == 'CURSOR_MAX':
objects_recurse_input.reverse()
objects_recurse_input[int(recursion_chance * len(objects_recurse_input)):] = []
objects_recurse_input.sort()
# reverse index values so we can remove from original list.
objects_recurse_input.reverse()
objects_recursive = []
for i, obj_cell in objects_recurse_input:
assert(objects[i] is obj_cell)
objects_recursive += main_object(context, obj_cell, level_sub, **kw)
if use_remove_original:
collection.objects.unlink(obj_cell)
del objects[i]
if recursion_clamp and len(objects) + len(objects_recursive) >= recursion_clamp:
break
objects.extend(objects_recursive)
if recursion_clamp and len(objects) > recursion_clamp:
break
#--------------
# Level Options
if level == 0:
# import pdb; pdb.set_trace()
if use_interior_vgroup or use_sharp_edges:
fracture_cell_setup.cell_fracture_interior_handle(objects,
use_interior_vgroup=use_interior_vgroup,
use_sharp_edges=use_sharp_edges,
use_sharp_edges_apply=use_sharp_edges_apply,
)
#--------------
# Scene Options
# layer
layers_new = None
if use_layer_index != 0:
layers_new = [False] * 20
layers_new[use_layer_index - 1] = True
elif use_layer_next:
layers_new = [False] * 20
layers_new[(obj.layers[:].index(True) + 1) % 20] = True
if layers_new is not None:
for obj_cell in objects:
obj_cell.layers = layers_new
# group
if group_name:
group = bpy.data.collections.get(group_name)
if group is None:
group = bpy.data.collections.new(group_name)
group_objects = group.objects[:]
for obj_cell in objects:
if obj_cell not in group_objects:
group.objects.link(obj_cell)
if kw_copy["use_debug_redraw"]:
obj.display_type = obj_display_type_prev
# testing only!
# obj.hide = True
return objects
def main(context, **kw):
import time
t = time.time()
objects_context = context.selected_editable_objects
kw_copy = kw.copy()
# mass
mass_mode = kw_copy.pop("mass_mode")
mass = kw_copy.pop("mass")
objects = []
for obj in objects_context:
if obj.type == 'MESH':
objects += main_object(context, obj, 0, **kw_copy)
bpy.ops.object.select_all(action='DESELECT')
for obj_cell in objects:
obj_cell.select_set(True)
if mass_mode == 'UNIFORM':
for obj_cell in objects:
obj_cell.game.mass = mass
elif mass_mode == 'VOLUME':
from mathutils import Vector
def _get_volume(obj_cell):
def _getObjectBBMinMax():
min_co = Vector((1000000.0, 1000000.0, 1000000.0))
max_co = -min_co
matrix = obj_cell.matrix_world
for i in range(0, 8):
bb_vec = obj_cell.matrix_world * Vector(obj_cell.bound_box[i])
min_co[0] = min(bb_vec[0], min_co[0])
min_co[1] = min(bb_vec[1], min_co[1])
min_co[2] = min(bb_vec[2], min_co[2])
max_co[0] = max(bb_vec[0], max_co[0])
max_co[1] = max(bb_vec[1], max_co[1])
max_co[2] = max(bb_vec[2], max_co[2])
return (min_co, max_co)
def _getObjectVolume():
min_co, max_co = _getObjectBBMinMax()
x = max_co[0] - min_co[0]
y = max_co[1] - min_co[1]
z = max_co[2] - min_co[2]
volume = x * y * z
return volume
return _getObjectVolume()
obj_volume_ls = [_get_volume(obj_cell) for obj_cell in objects]
obj_volume_tot = sum(obj_volume_ls)
if obj_volume_tot > 0.0:
mass_fac = mass / obj_volume_tot
for i, obj_cell in enumerate(objects):
obj_cell.game.mass = obj_volume_ls[i] * mass_fac
else:
assert(0)
print("Done! %d objects in %.4f sec" % (len(objects), time.time() - t))
class FractureCell(Operator):
bl_idname = "object.add_fracture_cell_objects"
bl_label = "Cell fracture selected mesh objects"
bl_options = {'PRESET'}
# -------------------------------------------------------------------------
# Source Options
source: EnumProperty(
name="Source",
items=(('VERT_OWN', "Own Verts", "Use own vertices"),
('VERT_CHILD', "Child Verts", "Use child object vertices"),
('PARTICLE_OWN', "Own Particles", ("All particle systems of the "
"source object")),
('PARTICLE_CHILD', "Child Particles", ("All particle systems of the "
"child objects")),
('PENCIL', "Grease Pencil", "This object's grease pencil"),
),
options={'ENUM_FLAG'},
default={'PARTICLE_OWN'},
)
source_limit: IntProperty(
name="Source Limit",
description="Limit the number of input points, 0 for unlimited",
min=0, max=5000,
default=100,
)
source_noise: FloatProperty(
name="Noise",
description="Randomize point distribution",
min=0.0, max=1.0,
default=0.0,
)
cell_scale: FloatVectorProperty(
name="Scale",
description="Scale Cell Shape",
size=3,
min=0.0, max=1.0,
default=(1.0, 1.0, 1.0),
)
# -------------------------------------------------------------------------
# Recursion
recursion: IntProperty(
name="Recursion",
description="Break shards recursively",
min=0, max=5000,
default=0,
)
recursion_source_limit: IntProperty(
name="Source Limit",
description="Limit the number of input points, 0 for unlimited (applies to recursion only)",
min=0, max=5000,
default=8,
)
recursion_clamp: IntProperty(
name="Clamp Recursion",
description="Finish recursion when this number of objects is reached (prevents recursing for extended periods of time), zero disables",
min=0, max=10000,
default=250,
)
recursion_chance: FloatProperty(
name="Random Factor",
description="Likelihood of recursion",
min=0.0, max=1.0,
default=0.25,
)
recursion_chance_select: EnumProperty(
name="Recurse Over",
items=(('RANDOM', "Random", ""),
('SIZE_MIN', "Small", "Recursively subdivide smaller objects"),
('SIZE_MAX', "Big", "Recursively subdivide bigger objects"),
('CURSOR_MIN', "Cursor Close", "Recursively subdivide objects closer to the cursor"),
('CURSOR_MAX', "Cursor Far", "Recursively subdivide objects farther from the cursor"),
),
default='SIZE_MIN',
)
# -------------------------------------------------------------------------
# Mesh Data Options
use_smooth_faces: BoolProperty(
name="Smooth Faces",
default=False,
)
use_sharp_edges: BoolProperty(
name="Sharp Edges",
description="Set sharp edges when disabled",
default=True,
)
use_sharp_edges_apply: BoolProperty(
name="Apply Split Edge",
description="Split sharp hard edges",
default=True,
)
use_data_match: BoolProperty(
name="Match Data",
description="Match original mesh materials and data layers",
default=True,
)
use_island_split: BoolProperty(
name="Split Islands",
description="Split disconnected meshes",
default=True,
)
margin: FloatProperty(
name="Margin",
description="Gaps for the fracture (gives more stable physics)",
min=0.0, max=1.0,
default=0.001,
)
material_index: IntProperty(
name="Material",
description="Material index for interior faces",
default=0,
)
use_interior_vgroup: BoolProperty(
name="Interior VGroup",
description="Create a vertex group for interior verts",
default=False,
)
# -------------------------------------------------------------------------
# Physics Options
mass_mode: EnumProperty(
name="Mass Mode",
items=(('VOLUME', "Volume", "Objects get part of specified mass based on their volume"),
('UNIFORM', "Uniform", "All objects get the specified mass"),
),
default='VOLUME',
)
mass: FloatProperty(
name="Mass",
description="Mass to give created objects",
min=0.001, max=1000.0,
default=1.0,
)
# -------------------------------------------------------------------------
# Object Options
use_recenter: BoolProperty(
name="Recenter",
description="Recalculate the center points after splitting",
default=True,
)
use_remove_original: BoolProperty(
name="Remove Original",
description="Removes the parents used to create the shatter",
default=True,
)
# -------------------------------------------------------------------------
# Scene Options
#
# .. different from object options in that this controls how the objects
# are setup in the scene.
use_layer_index: IntProperty(
name="Layer Index",
description="Layer to add the objects into or 0 for existing",
default=0,
min=0, max=20,
)
use_layer_next: BoolProperty(
name="Next Layer",
description="At the object into the next layer (layer index overrides)",
default=True,
)
group_name: StringProperty(
name="Group",
description="Create objects int a group "
"(use existing or create new)",
)
# -------------------------------------------------------------------------
# Debug
use_debug_points: BoolProperty(
name="Debug Points",
description="Create mesh data showing the points used for fracture",
default=False,
)
use_debug_redraw: BoolProperty(
name="Show Progress Realtime",
description="Redraw as fracture is done",
default=True,
)
use_debug_bool: BoolProperty(
name="Debug Boolean",
description="Skip applying the boolean modifier",
default=False,
)
def execute(self, context):
keywords = self.as_keywords() # ignore=("blah",)
main(context, **keywords)
return {'FINISHED'}
def invoke(self, context, event):
print(self.recursion_chance_select)
wm = context.window_manager
return wm.invoke_props_dialog(self, width=600)
def draw(self, context):
layout = self.layout
box = layout.box()
col = box.column()
col.label(text="Point Source")
rowsub = col.row()
rowsub.prop(self, "source")
rowsub = col.row()
rowsub.prop(self, "source_limit")
rowsub.prop(self, "source_noise")
rowsub = col.row()
rowsub.prop(self, "cell_scale")
box = layout.box()
col = box.column()
col.label(text="Recursive Shatter")
rowsub = col.row(align=True)
rowsub.prop(self, "recursion")
rowsub.prop(self, "recursion_source_limit")
rowsub.prop(self, "recursion_clamp")
rowsub = col.row()
rowsub.prop(self, "recursion_chance")
rowsub.prop(self, "recursion_chance_select", expand=True)
box = layout.box()
col = box.column()
col.label(text="Mesh Data")
rowsub = col.row()
rowsub.prop(self, "use_smooth_faces")
rowsub.prop(self, "use_sharp_edges")
rowsub.prop(self, "use_sharp_edges_apply")
rowsub.prop(self, "use_data_match")
rowsub = col.row()
# on same row for even layout but infact are not all that related
rowsub.prop(self, "material_index")
rowsub.prop(self, "use_interior_vgroup")
# could be own section, control how we subdiv
rowsub.prop(self, "margin")
rowsub.prop(self, "use_island_split")
box = layout.box()
col = box.column()
col.label(text="Physics")
rowsub = col.row(align=True)
rowsub.prop(self, "mass_mode")
rowsub.prop(self, "mass")
box = layout.box()
col = box.column()
col.label(text="Object")
rowsub = col.row(align=True)
rowsub.prop(self, "use_recenter")
box = layout.box()
col = box.column()
col.label(text="Scene")
rowsub = col.row(align=True)
rowsub.prop(self, "use_layer_index")
rowsub.prop(self, "use_layer_next")
rowsub.prop(self, "group_name")
box = layout.box()
col = box.column()
col.label(text="Debug")
rowsub = col.row(align=True)
rowsub.prop(self, "use_debug_redraw")
rowsub.prop(self, "use_debug_points")
rowsub.prop(self, "use_debug_bool")
def menu_func(self, context):
layout = self.layout
layout.label(text="Cell Fracture:")
layout.operator("object.add_fracture_cell_objects",
text="Cell Fracture")
def register():
bpy.utils.register_class(FractureCell)
bpy.types.VIEW3D_PT_tools_object.append(menu_func)
def unregister():
bpy.utils.unregister_class(FractureCell)
bpy.types.VIEW3D_PT_tools_object.remove(menu_func)
if __name__ == "__main__":
register()
|
[
"# ##### BEGIN GPL LICENSE BLOCK #####\n#\n# This program is free software; you can redistribute it and/or\n# modify it under the terms of the GNU General Public License\n# as published by the Free Software Foundation; either version 2\n# of the License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software Foundation,\n# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.\n#\n# ##### END GPL LICENSE BLOCK #####\n\nbl_info = {\n \"name\": \"Cell Fracture\",\n \"author\": \"ideasman42, phymec, Sergey Sharybin\",\n \"version\": (0, 1),\n \"blender\": (2, 70, 0),\n \"location\": \"Edit panel of Tools tab, in Object mode, 3D View tools\",\n \"description\": \"Fractured Object, Bomb, Projectile, Recorder\",\n \"warning\": \"\",\n \"wiki_url\": \"http://wiki.blender.org/index.php/Extensions:2.6/Py/\"\n \"Scripts/Object/CellFracture\",\n \"category\": \"Object\"}\n\n\n#if \"bpy\" in locals():\n# import importlib\n# importlib.reload(fracture_cell_setup)\n\nimport bpy\nfrom bpy.props import (\n StringProperty,\n BoolProperty,\n IntProperty,\n FloatProperty,\n FloatVectorProperty,\n EnumProperty,\n )\n\nfrom bpy.types import Operator\n\ndef main_object(context, obj, level, **kw):\n import random\n\n # pull out some args\n kw_copy = kw.copy()\n use_recenter = kw_copy.pop(\"use_recenter\")\n use_remove_original = kw_copy.pop(\"use_remove_original\")\n recursion = kw_copy.pop(\"recursion\")\n recursion_source_limit = kw_copy.pop(\"recursion_source_limit\")\n recursion_clamp = kw_copy.pop(\"recursion_clamp\")\n recursion_chance = kw_copy.pop(\"recursion_chance\")\n recursion_chance_select = kw_copy.pop(\"recursion_chance_select\")\n use_layer_next = kw_copy.pop(\"use_layer_next\")\n use_layer_index = kw_copy.pop(\"use_layer_index\")\n group_name = kw_copy.pop(\"group_name\")\n use_island_split = kw_copy.pop(\"use_island_split\")\n use_debug_bool = kw_copy.pop(\"use_debug_bool\")\n use_interior_vgroup = kw_copy.pop(\"use_interior_vgroup\")\n use_sharp_edges = kw_copy.pop(\"use_sharp_edges\")\n use_sharp_edges_apply = kw_copy.pop(\"use_sharp_edges_apply\")\n\n collection = context.collection\n\n if level != 0:\n kw_copy[\"source_limit\"] = recursion_source_limit\n\n from . import fracture_cell_setup\n\n # not essential but selection is visual distraction.\n obj.select_set(False)\n\n if kw_copy[\"use_debug_redraw\"]:\n obj_display_type_prev = obj.display_type\n obj.display_type = 'WIRE'\n\n objects = fracture_cell_setup.cell_fracture_objects(context, obj, **kw_copy)\n objects = fracture_cell_setup.cell_fracture_boolean(context, obj, objects,\n use_island_split=use_island_split,\n use_interior_hide=(use_interior_vgroup or use_sharp_edges),\n use_debug_bool=use_debug_bool,\n use_debug_redraw=kw_copy[\"use_debug_redraw\"],\n level=level,\n )\n\n # must apply after boolean.\n if use_recenter:\n bpy.ops.object.origin_set({\"selected_editable_objects\": objects},\n type='ORIGIN_GEOMETRY', center='MEDIAN')\n\n if level == 0:\n for level_sub in range(1, recursion + 1):\n\n objects_recurse_input = [(i, o) for i, o in enumerate(objects)]\n\n if recursion_chance != 1.0:\n from mathutils import Vector\n if recursion_chance_select == 'RANDOM':\n random.shuffle(objects_recurse_input)\n elif recursion_chance_select in {'SIZE_MIN', 'SIZE_MAX'}:\n objects_recurse_input.sort(key=lambda ob_pair:\n (Vector(ob_pair[1].bound_box[0]) -\n Vector(ob_pair[1].bound_box[6])).length_squared)\n if recursion_chance_select == 'SIZE_MAX':\n objects_recurse_input.reverse()\n elif recursion_chance_select in {'CURSOR_MIN', 'CURSOR_MAX'}:\n c = scene.cursor_location.copy()\n objects_recurse_input.sort(key=lambda ob_pair:\n (ob_pair[1].location - c).length_squared)\n if recursion_chance_select == 'CURSOR_MAX':\n objects_recurse_input.reverse()\n\n objects_recurse_input[int(recursion_chance * len(objects_recurse_input)):] = []\n objects_recurse_input.sort()\n\n # reverse index values so we can remove from original list.\n objects_recurse_input.reverse()\n\n objects_recursive = []\n for i, obj_cell in objects_recurse_input:\n assert(objects[i] is obj_cell)\n objects_recursive += main_object(context, obj_cell, level_sub, **kw)\n if use_remove_original:\n collection.objects.unlink(obj_cell)\n del objects[i]\n if recursion_clamp and len(objects) + len(objects_recursive) >= recursion_clamp:\n break\n objects.extend(objects_recursive)\n\n if recursion_clamp and len(objects) > recursion_clamp:\n break\n\n #--------------\n # Level Options\n if level == 0:\n # import pdb; pdb.set_trace()\n if use_interior_vgroup or use_sharp_edges:\n fracture_cell_setup.cell_fracture_interior_handle(objects,\n use_interior_vgroup=use_interior_vgroup,\n use_sharp_edges=use_sharp_edges,\n use_sharp_edges_apply=use_sharp_edges_apply,\n )\n\n #--------------\n # Scene Options\n\n # layer\n layers_new = None\n if use_layer_index != 0:\n layers_new = [False] * 20\n layers_new[use_layer_index - 1] = True\n elif use_layer_next:\n layers_new = [False] * 20\n layers_new[(obj.layers[:].index(True) + 1) % 20] = True\n\n if layers_new is not None:\n for obj_cell in objects:\n obj_cell.layers = layers_new\n\n # group\n if group_name:\n group = bpy.data.collections.get(group_name)\n if group is None:\n group = bpy.data.collections.new(group_name)\n group_objects = group.objects[:]\n for obj_cell in objects:\n if obj_cell not in group_objects:\n group.objects.link(obj_cell)\n\n if kw_copy[\"use_debug_redraw\"]:\n obj.display_type = obj_display_type_prev\n\n # testing only!\n # obj.hide = True\n return objects\n\n\ndef main(context, **kw):\n import time\n t = time.time()\n objects_context = context.selected_editable_objects\n\n kw_copy = kw.copy()\n\n # mass\n mass_mode = kw_copy.pop(\"mass_mode\")\n mass = kw_copy.pop(\"mass\")\n\n objects = []\n for obj in objects_context:\n if obj.type == 'MESH':\n objects += main_object(context, obj, 0, **kw_copy)\n\n bpy.ops.object.select_all(action='DESELECT')\n for obj_cell in objects:\n obj_cell.select_set(True)\n\n if mass_mode == 'UNIFORM':\n for obj_cell in objects:\n obj_cell.game.mass = mass\n elif mass_mode == 'VOLUME':\n from mathutils import Vector\n def _get_volume(obj_cell):\n def _getObjectBBMinMax():\n min_co = Vector((1000000.0, 1000000.0, 1000000.0))\n max_co = -min_co\n matrix = obj_cell.matrix_world\n for i in range(0, 8):\n bb_vec = obj_cell.matrix_world * Vector(obj_cell.bound_box[i])\n min_co[0] = min(bb_vec[0], min_co[0])\n min_co[1] = min(bb_vec[1], min_co[1])\n min_co[2] = min(bb_vec[2], min_co[2])\n max_co[0] = max(bb_vec[0], max_co[0])\n max_co[1] = max(bb_vec[1], max_co[1])\n max_co[2] = max(bb_vec[2], max_co[2])\n return (min_co, max_co)\n\n def _getObjectVolume():\n min_co, max_co = _getObjectBBMinMax()\n x = max_co[0] - min_co[0]\n y = max_co[1] - min_co[1]\n z = max_co[2] - min_co[2]\n volume = x * y * z\n return volume\n\n return _getObjectVolume()\n\n\n obj_volume_ls = [_get_volume(obj_cell) for obj_cell in objects]\n obj_volume_tot = sum(obj_volume_ls)\n if obj_volume_tot > 0.0:\n mass_fac = mass / obj_volume_tot\n for i, obj_cell in enumerate(objects):\n obj_cell.game.mass = obj_volume_ls[i] * mass_fac\n else:\n assert(0)\n\n print(\"Done! %d objects in %.4f sec\" % (len(objects), time.time() - t))\n\n\nclass FractureCell(Operator):\n bl_idname = \"object.add_fracture_cell_objects\"\n bl_label = \"Cell fracture selected mesh objects\"\n bl_options = {'PRESET'}\n\n # -------------------------------------------------------------------------\n # Source Options\n source: EnumProperty(\n name=\"Source\",\n items=(('VERT_OWN', \"Own Verts\", \"Use own vertices\"),\n ('VERT_CHILD', \"Child Verts\", \"Use child object vertices\"),\n ('PARTICLE_OWN', \"Own Particles\", (\"All particle systems of the \"\n \"source object\")),\n ('PARTICLE_CHILD', \"Child Particles\", (\"All particle systems of the \"\n \"child objects\")),\n ('PENCIL', \"Grease Pencil\", \"This object's grease pencil\"),\n ),\n options={'ENUM_FLAG'},\n default={'PARTICLE_OWN'},\n )\n\n source_limit: IntProperty(\n name=\"Source Limit\",\n description=\"Limit the number of input points, 0 for unlimited\",\n min=0, max=5000,\n default=100,\n )\n\n source_noise: FloatProperty(\n name=\"Noise\",\n description=\"Randomize point distribution\",\n min=0.0, max=1.0,\n default=0.0,\n )\n\n cell_scale: FloatVectorProperty(\n name=\"Scale\",\n description=\"Scale Cell Shape\",\n size=3,\n min=0.0, max=1.0,\n default=(1.0, 1.0, 1.0),\n )\n\n # -------------------------------------------------------------------------\n # Recursion\n\n recursion: IntProperty(\n name=\"Recursion\",\n description=\"Break shards recursively\",\n min=0, max=5000,\n default=0,\n )\n\n recursion_source_limit: IntProperty(\n name=\"Source Limit\",\n description=\"Limit the number of input points, 0 for unlimited (applies to recursion only)\",\n min=0, max=5000,\n default=8,\n )\n\n recursion_clamp: IntProperty(\n name=\"Clamp Recursion\",\n description=\"Finish recursion when this number of objects is reached (prevents recursing for extended periods of time), zero disables\",\n min=0, max=10000,\n default=250,\n )\n\n recursion_chance: FloatProperty(\n name=\"Random Factor\",\n description=\"Likelihood of recursion\",\n min=0.0, max=1.0,\n default=0.25,\n )\n\n recursion_chance_select: EnumProperty(\n name=\"Recurse Over\",\n items=(('RANDOM', \"Random\", \"\"),\n ('SIZE_MIN', \"Small\", \"Recursively subdivide smaller objects\"),\n ('SIZE_MAX', \"Big\", \"Recursively subdivide bigger objects\"),\n ('CURSOR_MIN', \"Cursor Close\", \"Recursively subdivide objects closer to the cursor\"),\n ('CURSOR_MAX', \"Cursor Far\", \"Recursively subdivide objects farther from the cursor\"),\n ),\n default='SIZE_MIN',\n )\n\n # -------------------------------------------------------------------------\n # Mesh Data Options\n\n use_smooth_faces: BoolProperty(\n name=\"Smooth Faces\",\n default=False,\n )\n\n use_sharp_edges: BoolProperty(\n name=\"Sharp Edges\",\n description=\"Set sharp edges when disabled\",\n default=True,\n )\n\n use_sharp_edges_apply: BoolProperty(\n name=\"Apply Split Edge\",\n description=\"Split sharp hard edges\",\n default=True,\n )\n\n use_data_match: BoolProperty(\n name=\"Match Data\",\n description=\"Match original mesh materials and data layers\",\n default=True,\n )\n\n use_island_split: BoolProperty(\n name=\"Split Islands\",\n description=\"Split disconnected meshes\",\n default=True,\n )\n\n margin: FloatProperty(\n name=\"Margin\",\n description=\"Gaps for the fracture (gives more stable physics)\",\n min=0.0, max=1.0,\n default=0.001,\n )\n\n material_index: IntProperty(\n name=\"Material\",\n description=\"Material index for interior faces\",\n default=0,\n )\n\n use_interior_vgroup: BoolProperty(\n name=\"Interior VGroup\",\n description=\"Create a vertex group for interior verts\",\n default=False,\n )\n\n # -------------------------------------------------------------------------\n # Physics Options\n\n mass_mode: EnumProperty(\n name=\"Mass Mode\",\n items=(('VOLUME', \"Volume\", \"Objects get part of specified mass based on their volume\"),\n ('UNIFORM', \"Uniform\", \"All objects get the specified mass\"),\n ),\n default='VOLUME',\n )\n\n mass: FloatProperty(\n name=\"Mass\",\n description=\"Mass to give created objects\",\n min=0.001, max=1000.0,\n default=1.0,\n )\n\n\n # -------------------------------------------------------------------------\n # Object Options\n\n use_recenter: BoolProperty(\n name=\"Recenter\",\n description=\"Recalculate the center points after splitting\",\n default=True,\n )\n\n use_remove_original: BoolProperty(\n name=\"Remove Original\",\n description=\"Removes the parents used to create the shatter\",\n default=True,\n )\n\n # -------------------------------------------------------------------------\n # Scene Options\n #\n # .. different from object options in that this controls how the objects\n # are setup in the scene.\n\n use_layer_index: IntProperty(\n name=\"Layer Index\",\n description=\"Layer to add the objects into or 0 for existing\",\n default=0,\n min=0, max=20,\n )\n\n use_layer_next: BoolProperty(\n name=\"Next Layer\",\n description=\"At the object into the next layer (layer index overrides)\",\n default=True,\n )\n\n group_name: StringProperty(\n name=\"Group\",\n description=\"Create objects int a group \"\n \"(use existing or create new)\",\n )\n\n # -------------------------------------------------------------------------\n # Debug\n use_debug_points: BoolProperty(\n name=\"Debug Points\",\n description=\"Create mesh data showing the points used for fracture\",\n default=False,\n )\n\n use_debug_redraw: BoolProperty(\n name=\"Show Progress Realtime\",\n description=\"Redraw as fracture is done\",\n default=True,\n )\n\n use_debug_bool: BoolProperty(\n name=\"Debug Boolean\",\n description=\"Skip applying the boolean modifier\",\n default=False,\n )\n\n def execute(self, context):\n keywords = self.as_keywords() # ignore=(\"blah\",)\n\n main(context, **keywords)\n\n return {'FINISHED'}\n\n\n def invoke(self, context, event):\n print(self.recursion_chance_select)\n wm = context.window_manager\n return wm.invoke_props_dialog(self, width=600)\n\n def draw(self, context):\n layout = self.layout\n box = layout.box()\n col = box.column()\n col.label(text=\"Point Source\")\n rowsub = col.row()\n rowsub.prop(self, \"source\")\n rowsub = col.row()\n rowsub.prop(self, \"source_limit\")\n rowsub.prop(self, \"source_noise\")\n rowsub = col.row()\n rowsub.prop(self, \"cell_scale\")\n\n box = layout.box()\n col = box.column()\n col.label(text=\"Recursive Shatter\")\n rowsub = col.row(align=True)\n rowsub.prop(self, \"recursion\")\n rowsub.prop(self, \"recursion_source_limit\")\n rowsub.prop(self, \"recursion_clamp\")\n rowsub = col.row()\n rowsub.prop(self, \"recursion_chance\")\n rowsub.prop(self, \"recursion_chance_select\", expand=True)\n\n box = layout.box()\n col = box.column()\n col.label(text=\"Mesh Data\")\n rowsub = col.row()\n rowsub.prop(self, \"use_smooth_faces\")\n rowsub.prop(self, \"use_sharp_edges\")\n rowsub.prop(self, \"use_sharp_edges_apply\")\n rowsub.prop(self, \"use_data_match\")\n rowsub = col.row()\n\n # on same row for even layout but infact are not all that related\n rowsub.prop(self, \"material_index\")\n rowsub.prop(self, \"use_interior_vgroup\")\n\n # could be own section, control how we subdiv\n rowsub.prop(self, \"margin\")\n rowsub.prop(self, \"use_island_split\")\n\n\n box = layout.box()\n col = box.column()\n col.label(text=\"Physics\")\n rowsub = col.row(align=True)\n rowsub.prop(self, \"mass_mode\")\n rowsub.prop(self, \"mass\")\n\n\n box = layout.box()\n col = box.column()\n col.label(text=\"Object\")\n rowsub = col.row(align=True)\n rowsub.prop(self, \"use_recenter\")\n\n\n box = layout.box()\n col = box.column()\n col.label(text=\"Scene\")\n rowsub = col.row(align=True)\n rowsub.prop(self, \"use_layer_index\")\n rowsub.prop(self, \"use_layer_next\")\n rowsub.prop(self, \"group_name\")\n\n box = layout.box()\n col = box.column()\n col.label(text=\"Debug\")\n rowsub = col.row(align=True)\n rowsub.prop(self, \"use_debug_redraw\")\n rowsub.prop(self, \"use_debug_points\")\n rowsub.prop(self, \"use_debug_bool\")\n\n\ndef menu_func(self, context):\n layout = self.layout\n layout.label(text=\"Cell Fracture:\")\n layout.operator(\"object.add_fracture_cell_objects\",\n text=\"Cell Fracture\")\n\n\ndef register():\n bpy.utils.register_class(FractureCell)\n bpy.types.VIEW3D_PT_tools_object.append(menu_func)\n\n\ndef unregister():\n bpy.utils.unregister_class(FractureCell)\n bpy.types.VIEW3D_PT_tools_object.remove(menu_func)\n\n\nif __name__ == \"__main__\":\n register()\n",
"bl_info = {'name': 'Cell Fracture', 'author':\n 'ideasman42, phymec, Sergey Sharybin', 'version': (0, 1), 'blender': (2,\n 70, 0), 'location':\n 'Edit panel of Tools tab, in Object mode, 3D View tools', 'description':\n 'Fractured Object, Bomb, Projectile, Recorder', 'warning': '',\n 'wiki_url':\n 'http://wiki.blender.org/index.php/Extensions:2.6/Py/Scripts/Object/CellFracture'\n , 'category': 'Object'}\nimport bpy\nfrom bpy.props import StringProperty, BoolProperty, IntProperty, FloatProperty, FloatVectorProperty, EnumProperty\nfrom bpy.types import Operator\n\n\ndef main_object(context, obj, level, **kw):\n import random\n kw_copy = kw.copy()\n use_recenter = kw_copy.pop('use_recenter')\n use_remove_original = kw_copy.pop('use_remove_original')\n recursion = kw_copy.pop('recursion')\n recursion_source_limit = kw_copy.pop('recursion_source_limit')\n recursion_clamp = kw_copy.pop('recursion_clamp')\n recursion_chance = kw_copy.pop('recursion_chance')\n recursion_chance_select = kw_copy.pop('recursion_chance_select')\n use_layer_next = kw_copy.pop('use_layer_next')\n use_layer_index = kw_copy.pop('use_layer_index')\n group_name = kw_copy.pop('group_name')\n use_island_split = kw_copy.pop('use_island_split')\n use_debug_bool = kw_copy.pop('use_debug_bool')\n use_interior_vgroup = kw_copy.pop('use_interior_vgroup')\n use_sharp_edges = kw_copy.pop('use_sharp_edges')\n use_sharp_edges_apply = kw_copy.pop('use_sharp_edges_apply')\n collection = context.collection\n if level != 0:\n kw_copy['source_limit'] = recursion_source_limit\n from . import fracture_cell_setup\n obj.select_set(False)\n if kw_copy['use_debug_redraw']:\n obj_display_type_prev = obj.display_type\n obj.display_type = 'WIRE'\n objects = fracture_cell_setup.cell_fracture_objects(context, obj, **kw_copy\n )\n objects = fracture_cell_setup.cell_fracture_boolean(context, obj,\n objects, use_island_split=use_island_split, use_interior_hide=\n use_interior_vgroup or use_sharp_edges, use_debug_bool=\n use_debug_bool, use_debug_redraw=kw_copy['use_debug_redraw'], level\n =level)\n if use_recenter:\n bpy.ops.object.origin_set({'selected_editable_objects': objects},\n type='ORIGIN_GEOMETRY', center='MEDIAN')\n if level == 0:\n for level_sub in range(1, recursion + 1):\n objects_recurse_input = [(i, o) for i, o in enumerate(objects)]\n if recursion_chance != 1.0:\n from mathutils import Vector\n if recursion_chance_select == 'RANDOM':\n random.shuffle(objects_recurse_input)\n elif recursion_chance_select in {'SIZE_MIN', 'SIZE_MAX'}:\n objects_recurse_input.sort(key=lambda ob_pair: (Vector(\n ob_pair[1].bound_box[0]) - Vector(ob_pair[1].\n bound_box[6])).length_squared)\n if recursion_chance_select == 'SIZE_MAX':\n objects_recurse_input.reverse()\n elif recursion_chance_select in {'CURSOR_MIN', 'CURSOR_MAX'}:\n c = scene.cursor_location.copy()\n objects_recurse_input.sort(key=lambda ob_pair: (ob_pair\n [1].location - c).length_squared)\n if recursion_chance_select == 'CURSOR_MAX':\n objects_recurse_input.reverse()\n objects_recurse_input[int(recursion_chance * len(\n objects_recurse_input)):] = []\n objects_recurse_input.sort()\n objects_recurse_input.reverse()\n objects_recursive = []\n for i, obj_cell in objects_recurse_input:\n assert objects[i] is obj_cell\n objects_recursive += main_object(context, obj_cell,\n level_sub, **kw)\n if use_remove_original:\n collection.objects.unlink(obj_cell)\n del objects[i]\n if recursion_clamp and len(objects) + len(objects_recursive\n ) >= recursion_clamp:\n break\n objects.extend(objects_recursive)\n if recursion_clamp and len(objects) > recursion_clamp:\n break\n if level == 0:\n if use_interior_vgroup or use_sharp_edges:\n fracture_cell_setup.cell_fracture_interior_handle(objects,\n use_interior_vgroup=use_interior_vgroup, use_sharp_edges=\n use_sharp_edges, use_sharp_edges_apply=use_sharp_edges_apply)\n layers_new = None\n if use_layer_index != 0:\n layers_new = [False] * 20\n layers_new[use_layer_index - 1] = True\n elif use_layer_next:\n layers_new = [False] * 20\n layers_new[(obj.layers[:].index(True) + 1) % 20] = True\n if layers_new is not None:\n for obj_cell in objects:\n obj_cell.layers = layers_new\n if group_name:\n group = bpy.data.collections.get(group_name)\n if group is None:\n group = bpy.data.collections.new(group_name)\n group_objects = group.objects[:]\n for obj_cell in objects:\n if obj_cell not in group_objects:\n group.objects.link(obj_cell)\n if kw_copy['use_debug_redraw']:\n obj.display_type = obj_display_type_prev\n return objects\n\n\ndef main(context, **kw):\n import time\n t = time.time()\n objects_context = context.selected_editable_objects\n kw_copy = kw.copy()\n mass_mode = kw_copy.pop('mass_mode')\n mass = kw_copy.pop('mass')\n objects = []\n for obj in objects_context:\n if obj.type == 'MESH':\n objects += main_object(context, obj, 0, **kw_copy)\n bpy.ops.object.select_all(action='DESELECT')\n for obj_cell in objects:\n obj_cell.select_set(True)\n if mass_mode == 'UNIFORM':\n for obj_cell in objects:\n obj_cell.game.mass = mass\n elif mass_mode == 'VOLUME':\n from mathutils import Vector\n\n def _get_volume(obj_cell):\n\n def _getObjectBBMinMax():\n min_co = Vector((1000000.0, 1000000.0, 1000000.0))\n max_co = -min_co\n matrix = obj_cell.matrix_world\n for i in range(0, 8):\n bb_vec = obj_cell.matrix_world * Vector(obj_cell.\n bound_box[i])\n min_co[0] = min(bb_vec[0], min_co[0])\n min_co[1] = min(bb_vec[1], min_co[1])\n min_co[2] = min(bb_vec[2], min_co[2])\n max_co[0] = max(bb_vec[0], max_co[0])\n max_co[1] = max(bb_vec[1], max_co[1])\n max_co[2] = max(bb_vec[2], max_co[2])\n return min_co, max_co\n\n def _getObjectVolume():\n min_co, max_co = _getObjectBBMinMax()\n x = max_co[0] - min_co[0]\n y = max_co[1] - min_co[1]\n z = max_co[2] - min_co[2]\n volume = x * y * z\n return volume\n return _getObjectVolume()\n obj_volume_ls = [_get_volume(obj_cell) for obj_cell in objects]\n obj_volume_tot = sum(obj_volume_ls)\n if obj_volume_tot > 0.0:\n mass_fac = mass / obj_volume_tot\n for i, obj_cell in enumerate(objects):\n obj_cell.game.mass = obj_volume_ls[i] * mass_fac\n else:\n assert 0\n print('Done! %d objects in %.4f sec' % (len(objects), time.time() - t))\n\n\nclass FractureCell(Operator):\n bl_idname = 'object.add_fracture_cell_objects'\n bl_label = 'Cell fracture selected mesh objects'\n bl_options = {'PRESET'}\n source: EnumProperty(name='Source', items=(('VERT_OWN', 'Own Verts',\n 'Use own vertices'), ('VERT_CHILD', 'Child Verts',\n 'Use child object vertices'), ('PARTICLE_OWN', 'Own Particles',\n 'All particle systems of the source object'), ('PARTICLE_CHILD',\n 'Child Particles', 'All particle systems of the child objects'), (\n 'PENCIL', 'Grease Pencil', \"This object's grease pencil\")), options\n ={'ENUM_FLAG'}, default={'PARTICLE_OWN'})\n source_limit: IntProperty(name='Source Limit', description=\n 'Limit the number of input points, 0 for unlimited', min=0, max=\n 5000, default=100)\n source_noise: FloatProperty(name='Noise', description=\n 'Randomize point distribution', min=0.0, max=1.0, default=0.0)\n cell_scale: FloatVectorProperty(name='Scale', description=\n 'Scale Cell Shape', size=3, min=0.0, max=1.0, default=(1.0, 1.0, 1.0))\n recursion: IntProperty(name='Recursion', description=\n 'Break shards recursively', min=0, max=5000, default=0)\n recursion_source_limit: IntProperty(name='Source Limit', description=\n 'Limit the number of input points, 0 for unlimited (applies to recursion only)'\n , min=0, max=5000, default=8)\n recursion_clamp: IntProperty(name='Clamp Recursion', description=\n 'Finish recursion when this number of objects is reached (prevents recursing for extended periods of time), zero disables'\n , min=0, max=10000, default=250)\n recursion_chance: FloatProperty(name='Random Factor', description=\n 'Likelihood of recursion', min=0.0, max=1.0, default=0.25)\n recursion_chance_select: EnumProperty(name='Recurse Over', items=((\n 'RANDOM', 'Random', ''), ('SIZE_MIN', 'Small',\n 'Recursively subdivide smaller objects'), ('SIZE_MAX', 'Big',\n 'Recursively subdivide bigger objects'), ('CURSOR_MIN',\n 'Cursor Close',\n 'Recursively subdivide objects closer to the cursor'), (\n 'CURSOR_MAX', 'Cursor Far',\n 'Recursively subdivide objects farther from the cursor')), default=\n 'SIZE_MIN')\n use_smooth_faces: BoolProperty(name='Smooth Faces', default=False)\n use_sharp_edges: BoolProperty(name='Sharp Edges', description=\n 'Set sharp edges when disabled', default=True)\n use_sharp_edges_apply: BoolProperty(name='Apply Split Edge',\n description='Split sharp hard edges', default=True)\n use_data_match: BoolProperty(name='Match Data', description=\n 'Match original mesh materials and data layers', default=True)\n use_island_split: BoolProperty(name='Split Islands', description=\n 'Split disconnected meshes', default=True)\n margin: FloatProperty(name='Margin', description=\n 'Gaps for the fracture (gives more stable physics)', min=0.0, max=\n 1.0, default=0.001)\n material_index: IntProperty(name='Material', description=\n 'Material index for interior faces', default=0)\n use_interior_vgroup: BoolProperty(name='Interior VGroup', description=\n 'Create a vertex group for interior verts', default=False)\n mass_mode: EnumProperty(name='Mass Mode', items=(('VOLUME', 'Volume',\n 'Objects get part of specified mass based on their volume'), (\n 'UNIFORM', 'Uniform', 'All objects get the specified mass')),\n default='VOLUME')\n mass: FloatProperty(name='Mass', description=\n 'Mass to give created objects', min=0.001, max=1000.0, default=1.0)\n use_recenter: BoolProperty(name='Recenter', description=\n 'Recalculate the center points after splitting', default=True)\n use_remove_original: BoolProperty(name='Remove Original', description=\n 'Removes the parents used to create the shatter', default=True)\n use_layer_index: IntProperty(name='Layer Index', description=\n 'Layer to add the objects into or 0 for existing', default=0, min=0,\n max=20)\n use_layer_next: BoolProperty(name='Next Layer', description=\n 'At the object into the next layer (layer index overrides)',\n default=True)\n group_name: StringProperty(name='Group', description=\n 'Create objects int a group (use existing or create new)')\n use_debug_points: BoolProperty(name='Debug Points', description=\n 'Create mesh data showing the points used for fracture', default=False)\n use_debug_redraw: BoolProperty(name='Show Progress Realtime',\n description='Redraw as fracture is done', default=True)\n use_debug_bool: BoolProperty(name='Debug Boolean', description=\n 'Skip applying the boolean modifier', default=False)\n\n def execute(self, context):\n keywords = self.as_keywords()\n main(context, **keywords)\n return {'FINISHED'}\n\n def invoke(self, context, event):\n print(self.recursion_chance_select)\n wm = context.window_manager\n return wm.invoke_props_dialog(self, width=600)\n\n def draw(self, context):\n layout = self.layout\n box = layout.box()\n col = box.column()\n col.label(text='Point Source')\n rowsub = col.row()\n rowsub.prop(self, 'source')\n rowsub = col.row()\n rowsub.prop(self, 'source_limit')\n rowsub.prop(self, 'source_noise')\n rowsub = col.row()\n rowsub.prop(self, 'cell_scale')\n box = layout.box()\n col = box.column()\n col.label(text='Recursive Shatter')\n rowsub = col.row(align=True)\n rowsub.prop(self, 'recursion')\n rowsub.prop(self, 'recursion_source_limit')\n rowsub.prop(self, 'recursion_clamp')\n rowsub = col.row()\n rowsub.prop(self, 'recursion_chance')\n rowsub.prop(self, 'recursion_chance_select', expand=True)\n box = layout.box()\n col = box.column()\n col.label(text='Mesh Data')\n rowsub = col.row()\n rowsub.prop(self, 'use_smooth_faces')\n rowsub.prop(self, 'use_sharp_edges')\n rowsub.prop(self, 'use_sharp_edges_apply')\n rowsub.prop(self, 'use_data_match')\n rowsub = col.row()\n rowsub.prop(self, 'material_index')\n rowsub.prop(self, 'use_interior_vgroup')\n rowsub.prop(self, 'margin')\n rowsub.prop(self, 'use_island_split')\n box = layout.box()\n col = box.column()\n col.label(text='Physics')\n rowsub = col.row(align=True)\n rowsub.prop(self, 'mass_mode')\n rowsub.prop(self, 'mass')\n box = layout.box()\n col = box.column()\n col.label(text='Object')\n rowsub = col.row(align=True)\n rowsub.prop(self, 'use_recenter')\n box = layout.box()\n col = box.column()\n col.label(text='Scene')\n rowsub = col.row(align=True)\n rowsub.prop(self, 'use_layer_index')\n rowsub.prop(self, 'use_layer_next')\n rowsub.prop(self, 'group_name')\n box = layout.box()\n col = box.column()\n col.label(text='Debug')\n rowsub = col.row(align=True)\n rowsub.prop(self, 'use_debug_redraw')\n rowsub.prop(self, 'use_debug_points')\n rowsub.prop(self, 'use_debug_bool')\n\n\ndef menu_func(self, context):\n layout = self.layout\n layout.label(text='Cell Fracture:')\n layout.operator('object.add_fracture_cell_objects', text='Cell Fracture')\n\n\ndef register():\n bpy.utils.register_class(FractureCell)\n bpy.types.VIEW3D_PT_tools_object.append(menu_func)\n\n\ndef unregister():\n bpy.utils.unregister_class(FractureCell)\n bpy.types.VIEW3D_PT_tools_object.remove(menu_func)\n\n\nif __name__ == '__main__':\n register()\n",
"bl_info = {'name': 'Cell Fracture', 'author':\n 'ideasman42, phymec, Sergey Sharybin', 'version': (0, 1), 'blender': (2,\n 70, 0), 'location':\n 'Edit panel of Tools tab, in Object mode, 3D View tools', 'description':\n 'Fractured Object, Bomb, Projectile, Recorder', 'warning': '',\n 'wiki_url':\n 'http://wiki.blender.org/index.php/Extensions:2.6/Py/Scripts/Object/CellFracture'\n , 'category': 'Object'}\n<import token>\n\n\ndef main_object(context, obj, level, **kw):\n import random\n kw_copy = kw.copy()\n use_recenter = kw_copy.pop('use_recenter')\n use_remove_original = kw_copy.pop('use_remove_original')\n recursion = kw_copy.pop('recursion')\n recursion_source_limit = kw_copy.pop('recursion_source_limit')\n recursion_clamp = kw_copy.pop('recursion_clamp')\n recursion_chance = kw_copy.pop('recursion_chance')\n recursion_chance_select = kw_copy.pop('recursion_chance_select')\n use_layer_next = kw_copy.pop('use_layer_next')\n use_layer_index = kw_copy.pop('use_layer_index')\n group_name = kw_copy.pop('group_name')\n use_island_split = kw_copy.pop('use_island_split')\n use_debug_bool = kw_copy.pop('use_debug_bool')\n use_interior_vgroup = kw_copy.pop('use_interior_vgroup')\n use_sharp_edges = kw_copy.pop('use_sharp_edges')\n use_sharp_edges_apply = kw_copy.pop('use_sharp_edges_apply')\n collection = context.collection\n if level != 0:\n kw_copy['source_limit'] = recursion_source_limit\n from . import fracture_cell_setup\n obj.select_set(False)\n if kw_copy['use_debug_redraw']:\n obj_display_type_prev = obj.display_type\n obj.display_type = 'WIRE'\n objects = fracture_cell_setup.cell_fracture_objects(context, obj, **kw_copy\n )\n objects = fracture_cell_setup.cell_fracture_boolean(context, obj,\n objects, use_island_split=use_island_split, use_interior_hide=\n use_interior_vgroup or use_sharp_edges, use_debug_bool=\n use_debug_bool, use_debug_redraw=kw_copy['use_debug_redraw'], level\n =level)\n if use_recenter:\n bpy.ops.object.origin_set({'selected_editable_objects': objects},\n type='ORIGIN_GEOMETRY', center='MEDIAN')\n if level == 0:\n for level_sub in range(1, recursion + 1):\n objects_recurse_input = [(i, o) for i, o in enumerate(objects)]\n if recursion_chance != 1.0:\n from mathutils import Vector\n if recursion_chance_select == 'RANDOM':\n random.shuffle(objects_recurse_input)\n elif recursion_chance_select in {'SIZE_MIN', 'SIZE_MAX'}:\n objects_recurse_input.sort(key=lambda ob_pair: (Vector(\n ob_pair[1].bound_box[0]) - Vector(ob_pair[1].\n bound_box[6])).length_squared)\n if recursion_chance_select == 'SIZE_MAX':\n objects_recurse_input.reverse()\n elif recursion_chance_select in {'CURSOR_MIN', 'CURSOR_MAX'}:\n c = scene.cursor_location.copy()\n objects_recurse_input.sort(key=lambda ob_pair: (ob_pair\n [1].location - c).length_squared)\n if recursion_chance_select == 'CURSOR_MAX':\n objects_recurse_input.reverse()\n objects_recurse_input[int(recursion_chance * len(\n objects_recurse_input)):] = []\n objects_recurse_input.sort()\n objects_recurse_input.reverse()\n objects_recursive = []\n for i, obj_cell in objects_recurse_input:\n assert objects[i] is obj_cell\n objects_recursive += main_object(context, obj_cell,\n level_sub, **kw)\n if use_remove_original:\n collection.objects.unlink(obj_cell)\n del objects[i]\n if recursion_clamp and len(objects) + len(objects_recursive\n ) >= recursion_clamp:\n break\n objects.extend(objects_recursive)\n if recursion_clamp and len(objects) > recursion_clamp:\n break\n if level == 0:\n if use_interior_vgroup or use_sharp_edges:\n fracture_cell_setup.cell_fracture_interior_handle(objects,\n use_interior_vgroup=use_interior_vgroup, use_sharp_edges=\n use_sharp_edges, use_sharp_edges_apply=use_sharp_edges_apply)\n layers_new = None\n if use_layer_index != 0:\n layers_new = [False] * 20\n layers_new[use_layer_index - 1] = True\n elif use_layer_next:\n layers_new = [False] * 20\n layers_new[(obj.layers[:].index(True) + 1) % 20] = True\n if layers_new is not None:\n for obj_cell in objects:\n obj_cell.layers = layers_new\n if group_name:\n group = bpy.data.collections.get(group_name)\n if group is None:\n group = bpy.data.collections.new(group_name)\n group_objects = group.objects[:]\n for obj_cell in objects:\n if obj_cell not in group_objects:\n group.objects.link(obj_cell)\n if kw_copy['use_debug_redraw']:\n obj.display_type = obj_display_type_prev\n return objects\n\n\ndef main(context, **kw):\n import time\n t = time.time()\n objects_context = context.selected_editable_objects\n kw_copy = kw.copy()\n mass_mode = kw_copy.pop('mass_mode')\n mass = kw_copy.pop('mass')\n objects = []\n for obj in objects_context:\n if obj.type == 'MESH':\n objects += main_object(context, obj, 0, **kw_copy)\n bpy.ops.object.select_all(action='DESELECT')\n for obj_cell in objects:\n obj_cell.select_set(True)\n if mass_mode == 'UNIFORM':\n for obj_cell in objects:\n obj_cell.game.mass = mass\n elif mass_mode == 'VOLUME':\n from mathutils import Vector\n\n def _get_volume(obj_cell):\n\n def _getObjectBBMinMax():\n min_co = Vector((1000000.0, 1000000.0, 1000000.0))\n max_co = -min_co\n matrix = obj_cell.matrix_world\n for i in range(0, 8):\n bb_vec = obj_cell.matrix_world * Vector(obj_cell.\n bound_box[i])\n min_co[0] = min(bb_vec[0], min_co[0])\n min_co[1] = min(bb_vec[1], min_co[1])\n min_co[2] = min(bb_vec[2], min_co[2])\n max_co[0] = max(bb_vec[0], max_co[0])\n max_co[1] = max(bb_vec[1], max_co[1])\n max_co[2] = max(bb_vec[2], max_co[2])\n return min_co, max_co\n\n def _getObjectVolume():\n min_co, max_co = _getObjectBBMinMax()\n x = max_co[0] - min_co[0]\n y = max_co[1] - min_co[1]\n z = max_co[2] - min_co[2]\n volume = x * y * z\n return volume\n return _getObjectVolume()\n obj_volume_ls = [_get_volume(obj_cell) for obj_cell in objects]\n obj_volume_tot = sum(obj_volume_ls)\n if obj_volume_tot > 0.0:\n mass_fac = mass / obj_volume_tot\n for i, obj_cell in enumerate(objects):\n obj_cell.game.mass = obj_volume_ls[i] * mass_fac\n else:\n assert 0\n print('Done! %d objects in %.4f sec' % (len(objects), time.time() - t))\n\n\nclass FractureCell(Operator):\n bl_idname = 'object.add_fracture_cell_objects'\n bl_label = 'Cell fracture selected mesh objects'\n bl_options = {'PRESET'}\n source: EnumProperty(name='Source', items=(('VERT_OWN', 'Own Verts',\n 'Use own vertices'), ('VERT_CHILD', 'Child Verts',\n 'Use child object vertices'), ('PARTICLE_OWN', 'Own Particles',\n 'All particle systems of the source object'), ('PARTICLE_CHILD',\n 'Child Particles', 'All particle systems of the child objects'), (\n 'PENCIL', 'Grease Pencil', \"This object's grease pencil\")), options\n ={'ENUM_FLAG'}, default={'PARTICLE_OWN'})\n source_limit: IntProperty(name='Source Limit', description=\n 'Limit the number of input points, 0 for unlimited', min=0, max=\n 5000, default=100)\n source_noise: FloatProperty(name='Noise', description=\n 'Randomize point distribution', min=0.0, max=1.0, default=0.0)\n cell_scale: FloatVectorProperty(name='Scale', description=\n 'Scale Cell Shape', size=3, min=0.0, max=1.0, default=(1.0, 1.0, 1.0))\n recursion: IntProperty(name='Recursion', description=\n 'Break shards recursively', min=0, max=5000, default=0)\n recursion_source_limit: IntProperty(name='Source Limit', description=\n 'Limit the number of input points, 0 for unlimited (applies to recursion only)'\n , min=0, max=5000, default=8)\n recursion_clamp: IntProperty(name='Clamp Recursion', description=\n 'Finish recursion when this number of objects is reached (prevents recursing for extended periods of time), zero disables'\n , min=0, max=10000, default=250)\n recursion_chance: FloatProperty(name='Random Factor', description=\n 'Likelihood of recursion', min=0.0, max=1.0, default=0.25)\n recursion_chance_select: EnumProperty(name='Recurse Over', items=((\n 'RANDOM', 'Random', ''), ('SIZE_MIN', 'Small',\n 'Recursively subdivide smaller objects'), ('SIZE_MAX', 'Big',\n 'Recursively subdivide bigger objects'), ('CURSOR_MIN',\n 'Cursor Close',\n 'Recursively subdivide objects closer to the cursor'), (\n 'CURSOR_MAX', 'Cursor Far',\n 'Recursively subdivide objects farther from the cursor')), default=\n 'SIZE_MIN')\n use_smooth_faces: BoolProperty(name='Smooth Faces', default=False)\n use_sharp_edges: BoolProperty(name='Sharp Edges', description=\n 'Set sharp edges when disabled', default=True)\n use_sharp_edges_apply: BoolProperty(name='Apply Split Edge',\n description='Split sharp hard edges', default=True)\n use_data_match: BoolProperty(name='Match Data', description=\n 'Match original mesh materials and data layers', default=True)\n use_island_split: BoolProperty(name='Split Islands', description=\n 'Split disconnected meshes', default=True)\n margin: FloatProperty(name='Margin', description=\n 'Gaps for the fracture (gives more stable physics)', min=0.0, max=\n 1.0, default=0.001)\n material_index: IntProperty(name='Material', description=\n 'Material index for interior faces', default=0)\n use_interior_vgroup: BoolProperty(name='Interior VGroup', description=\n 'Create a vertex group for interior verts', default=False)\n mass_mode: EnumProperty(name='Mass Mode', items=(('VOLUME', 'Volume',\n 'Objects get part of specified mass based on their volume'), (\n 'UNIFORM', 'Uniform', 'All objects get the specified mass')),\n default='VOLUME')\n mass: FloatProperty(name='Mass', description=\n 'Mass to give created objects', min=0.001, max=1000.0, default=1.0)\n use_recenter: BoolProperty(name='Recenter', description=\n 'Recalculate the center points after splitting', default=True)\n use_remove_original: BoolProperty(name='Remove Original', description=\n 'Removes the parents used to create the shatter', default=True)\n use_layer_index: IntProperty(name='Layer Index', description=\n 'Layer to add the objects into or 0 for existing', default=0, min=0,\n max=20)\n use_layer_next: BoolProperty(name='Next Layer', description=\n 'At the object into the next layer (layer index overrides)',\n default=True)\n group_name: StringProperty(name='Group', description=\n 'Create objects int a group (use existing or create new)')\n use_debug_points: BoolProperty(name='Debug Points', description=\n 'Create mesh data showing the points used for fracture', default=False)\n use_debug_redraw: BoolProperty(name='Show Progress Realtime',\n description='Redraw as fracture is done', default=True)\n use_debug_bool: BoolProperty(name='Debug Boolean', description=\n 'Skip applying the boolean modifier', default=False)\n\n def execute(self, context):\n keywords = self.as_keywords()\n main(context, **keywords)\n return {'FINISHED'}\n\n def invoke(self, context, event):\n print(self.recursion_chance_select)\n wm = context.window_manager\n return wm.invoke_props_dialog(self, width=600)\n\n def draw(self, context):\n layout = self.layout\n box = layout.box()\n col = box.column()\n col.label(text='Point Source')\n rowsub = col.row()\n rowsub.prop(self, 'source')\n rowsub = col.row()\n rowsub.prop(self, 'source_limit')\n rowsub.prop(self, 'source_noise')\n rowsub = col.row()\n rowsub.prop(self, 'cell_scale')\n box = layout.box()\n col = box.column()\n col.label(text='Recursive Shatter')\n rowsub = col.row(align=True)\n rowsub.prop(self, 'recursion')\n rowsub.prop(self, 'recursion_source_limit')\n rowsub.prop(self, 'recursion_clamp')\n rowsub = col.row()\n rowsub.prop(self, 'recursion_chance')\n rowsub.prop(self, 'recursion_chance_select', expand=True)\n box = layout.box()\n col = box.column()\n col.label(text='Mesh Data')\n rowsub = col.row()\n rowsub.prop(self, 'use_smooth_faces')\n rowsub.prop(self, 'use_sharp_edges')\n rowsub.prop(self, 'use_sharp_edges_apply')\n rowsub.prop(self, 'use_data_match')\n rowsub = col.row()\n rowsub.prop(self, 'material_index')\n rowsub.prop(self, 'use_interior_vgroup')\n rowsub.prop(self, 'margin')\n rowsub.prop(self, 'use_island_split')\n box = layout.box()\n col = box.column()\n col.label(text='Physics')\n rowsub = col.row(align=True)\n rowsub.prop(self, 'mass_mode')\n rowsub.prop(self, 'mass')\n box = layout.box()\n col = box.column()\n col.label(text='Object')\n rowsub = col.row(align=True)\n rowsub.prop(self, 'use_recenter')\n box = layout.box()\n col = box.column()\n col.label(text='Scene')\n rowsub = col.row(align=True)\n rowsub.prop(self, 'use_layer_index')\n rowsub.prop(self, 'use_layer_next')\n rowsub.prop(self, 'group_name')\n box = layout.box()\n col = box.column()\n col.label(text='Debug')\n rowsub = col.row(align=True)\n rowsub.prop(self, 'use_debug_redraw')\n rowsub.prop(self, 'use_debug_points')\n rowsub.prop(self, 'use_debug_bool')\n\n\ndef menu_func(self, context):\n layout = self.layout\n layout.label(text='Cell Fracture:')\n layout.operator('object.add_fracture_cell_objects', text='Cell Fracture')\n\n\ndef register():\n bpy.utils.register_class(FractureCell)\n bpy.types.VIEW3D_PT_tools_object.append(menu_func)\n\n\ndef unregister():\n bpy.utils.unregister_class(FractureCell)\n bpy.types.VIEW3D_PT_tools_object.remove(menu_func)\n\n\nif __name__ == '__main__':\n register()\n",
"<assignment token>\n<import token>\n\n\ndef main_object(context, obj, level, **kw):\n import random\n kw_copy = kw.copy()\n use_recenter = kw_copy.pop('use_recenter')\n use_remove_original = kw_copy.pop('use_remove_original')\n recursion = kw_copy.pop('recursion')\n recursion_source_limit = kw_copy.pop('recursion_source_limit')\n recursion_clamp = kw_copy.pop('recursion_clamp')\n recursion_chance = kw_copy.pop('recursion_chance')\n recursion_chance_select = kw_copy.pop('recursion_chance_select')\n use_layer_next = kw_copy.pop('use_layer_next')\n use_layer_index = kw_copy.pop('use_layer_index')\n group_name = kw_copy.pop('group_name')\n use_island_split = kw_copy.pop('use_island_split')\n use_debug_bool = kw_copy.pop('use_debug_bool')\n use_interior_vgroup = kw_copy.pop('use_interior_vgroup')\n use_sharp_edges = kw_copy.pop('use_sharp_edges')\n use_sharp_edges_apply = kw_copy.pop('use_sharp_edges_apply')\n collection = context.collection\n if level != 0:\n kw_copy['source_limit'] = recursion_source_limit\n from . import fracture_cell_setup\n obj.select_set(False)\n if kw_copy['use_debug_redraw']:\n obj_display_type_prev = obj.display_type\n obj.display_type = 'WIRE'\n objects = fracture_cell_setup.cell_fracture_objects(context, obj, **kw_copy\n )\n objects = fracture_cell_setup.cell_fracture_boolean(context, obj,\n objects, use_island_split=use_island_split, use_interior_hide=\n use_interior_vgroup or use_sharp_edges, use_debug_bool=\n use_debug_bool, use_debug_redraw=kw_copy['use_debug_redraw'], level\n =level)\n if use_recenter:\n bpy.ops.object.origin_set({'selected_editable_objects': objects},\n type='ORIGIN_GEOMETRY', center='MEDIAN')\n if level == 0:\n for level_sub in range(1, recursion + 1):\n objects_recurse_input = [(i, o) for i, o in enumerate(objects)]\n if recursion_chance != 1.0:\n from mathutils import Vector\n if recursion_chance_select == 'RANDOM':\n random.shuffle(objects_recurse_input)\n elif recursion_chance_select in {'SIZE_MIN', 'SIZE_MAX'}:\n objects_recurse_input.sort(key=lambda ob_pair: (Vector(\n ob_pair[1].bound_box[0]) - Vector(ob_pair[1].\n bound_box[6])).length_squared)\n if recursion_chance_select == 'SIZE_MAX':\n objects_recurse_input.reverse()\n elif recursion_chance_select in {'CURSOR_MIN', 'CURSOR_MAX'}:\n c = scene.cursor_location.copy()\n objects_recurse_input.sort(key=lambda ob_pair: (ob_pair\n [1].location - c).length_squared)\n if recursion_chance_select == 'CURSOR_MAX':\n objects_recurse_input.reverse()\n objects_recurse_input[int(recursion_chance * len(\n objects_recurse_input)):] = []\n objects_recurse_input.sort()\n objects_recurse_input.reverse()\n objects_recursive = []\n for i, obj_cell in objects_recurse_input:\n assert objects[i] is obj_cell\n objects_recursive += main_object(context, obj_cell,\n level_sub, **kw)\n if use_remove_original:\n collection.objects.unlink(obj_cell)\n del objects[i]\n if recursion_clamp and len(objects) + len(objects_recursive\n ) >= recursion_clamp:\n break\n objects.extend(objects_recursive)\n if recursion_clamp and len(objects) > recursion_clamp:\n break\n if level == 0:\n if use_interior_vgroup or use_sharp_edges:\n fracture_cell_setup.cell_fracture_interior_handle(objects,\n use_interior_vgroup=use_interior_vgroup, use_sharp_edges=\n use_sharp_edges, use_sharp_edges_apply=use_sharp_edges_apply)\n layers_new = None\n if use_layer_index != 0:\n layers_new = [False] * 20\n layers_new[use_layer_index - 1] = True\n elif use_layer_next:\n layers_new = [False] * 20\n layers_new[(obj.layers[:].index(True) + 1) % 20] = True\n if layers_new is not None:\n for obj_cell in objects:\n obj_cell.layers = layers_new\n if group_name:\n group = bpy.data.collections.get(group_name)\n if group is None:\n group = bpy.data.collections.new(group_name)\n group_objects = group.objects[:]\n for obj_cell in objects:\n if obj_cell not in group_objects:\n group.objects.link(obj_cell)\n if kw_copy['use_debug_redraw']:\n obj.display_type = obj_display_type_prev\n return objects\n\n\ndef main(context, **kw):\n import time\n t = time.time()\n objects_context = context.selected_editable_objects\n kw_copy = kw.copy()\n mass_mode = kw_copy.pop('mass_mode')\n mass = kw_copy.pop('mass')\n objects = []\n for obj in objects_context:\n if obj.type == 'MESH':\n objects += main_object(context, obj, 0, **kw_copy)\n bpy.ops.object.select_all(action='DESELECT')\n for obj_cell in objects:\n obj_cell.select_set(True)\n if mass_mode == 'UNIFORM':\n for obj_cell in objects:\n obj_cell.game.mass = mass\n elif mass_mode == 'VOLUME':\n from mathutils import Vector\n\n def _get_volume(obj_cell):\n\n def _getObjectBBMinMax():\n min_co = Vector((1000000.0, 1000000.0, 1000000.0))\n max_co = -min_co\n matrix = obj_cell.matrix_world\n for i in range(0, 8):\n bb_vec = obj_cell.matrix_world * Vector(obj_cell.\n bound_box[i])\n min_co[0] = min(bb_vec[0], min_co[0])\n min_co[1] = min(bb_vec[1], min_co[1])\n min_co[2] = min(bb_vec[2], min_co[2])\n max_co[0] = max(bb_vec[0], max_co[0])\n max_co[1] = max(bb_vec[1], max_co[1])\n max_co[2] = max(bb_vec[2], max_co[2])\n return min_co, max_co\n\n def _getObjectVolume():\n min_co, max_co = _getObjectBBMinMax()\n x = max_co[0] - min_co[0]\n y = max_co[1] - min_co[1]\n z = max_co[2] - min_co[2]\n volume = x * y * z\n return volume\n return _getObjectVolume()\n obj_volume_ls = [_get_volume(obj_cell) for obj_cell in objects]\n obj_volume_tot = sum(obj_volume_ls)\n if obj_volume_tot > 0.0:\n mass_fac = mass / obj_volume_tot\n for i, obj_cell in enumerate(objects):\n obj_cell.game.mass = obj_volume_ls[i] * mass_fac\n else:\n assert 0\n print('Done! %d objects in %.4f sec' % (len(objects), time.time() - t))\n\n\nclass FractureCell(Operator):\n bl_idname = 'object.add_fracture_cell_objects'\n bl_label = 'Cell fracture selected mesh objects'\n bl_options = {'PRESET'}\n source: EnumProperty(name='Source', items=(('VERT_OWN', 'Own Verts',\n 'Use own vertices'), ('VERT_CHILD', 'Child Verts',\n 'Use child object vertices'), ('PARTICLE_OWN', 'Own Particles',\n 'All particle systems of the source object'), ('PARTICLE_CHILD',\n 'Child Particles', 'All particle systems of the child objects'), (\n 'PENCIL', 'Grease Pencil', \"This object's grease pencil\")), options\n ={'ENUM_FLAG'}, default={'PARTICLE_OWN'})\n source_limit: IntProperty(name='Source Limit', description=\n 'Limit the number of input points, 0 for unlimited', min=0, max=\n 5000, default=100)\n source_noise: FloatProperty(name='Noise', description=\n 'Randomize point distribution', min=0.0, max=1.0, default=0.0)\n cell_scale: FloatVectorProperty(name='Scale', description=\n 'Scale Cell Shape', size=3, min=0.0, max=1.0, default=(1.0, 1.0, 1.0))\n recursion: IntProperty(name='Recursion', description=\n 'Break shards recursively', min=0, max=5000, default=0)\n recursion_source_limit: IntProperty(name='Source Limit', description=\n 'Limit the number of input points, 0 for unlimited (applies to recursion only)'\n , min=0, max=5000, default=8)\n recursion_clamp: IntProperty(name='Clamp Recursion', description=\n 'Finish recursion when this number of objects is reached (prevents recursing for extended periods of time), zero disables'\n , min=0, max=10000, default=250)\n recursion_chance: FloatProperty(name='Random Factor', description=\n 'Likelihood of recursion', min=0.0, max=1.0, default=0.25)\n recursion_chance_select: EnumProperty(name='Recurse Over', items=((\n 'RANDOM', 'Random', ''), ('SIZE_MIN', 'Small',\n 'Recursively subdivide smaller objects'), ('SIZE_MAX', 'Big',\n 'Recursively subdivide bigger objects'), ('CURSOR_MIN',\n 'Cursor Close',\n 'Recursively subdivide objects closer to the cursor'), (\n 'CURSOR_MAX', 'Cursor Far',\n 'Recursively subdivide objects farther from the cursor')), default=\n 'SIZE_MIN')\n use_smooth_faces: BoolProperty(name='Smooth Faces', default=False)\n use_sharp_edges: BoolProperty(name='Sharp Edges', description=\n 'Set sharp edges when disabled', default=True)\n use_sharp_edges_apply: BoolProperty(name='Apply Split Edge',\n description='Split sharp hard edges', default=True)\n use_data_match: BoolProperty(name='Match Data', description=\n 'Match original mesh materials and data layers', default=True)\n use_island_split: BoolProperty(name='Split Islands', description=\n 'Split disconnected meshes', default=True)\n margin: FloatProperty(name='Margin', description=\n 'Gaps for the fracture (gives more stable physics)', min=0.0, max=\n 1.0, default=0.001)\n material_index: IntProperty(name='Material', description=\n 'Material index for interior faces', default=0)\n use_interior_vgroup: BoolProperty(name='Interior VGroup', description=\n 'Create a vertex group for interior verts', default=False)\n mass_mode: EnumProperty(name='Mass Mode', items=(('VOLUME', 'Volume',\n 'Objects get part of specified mass based on their volume'), (\n 'UNIFORM', 'Uniform', 'All objects get the specified mass')),\n default='VOLUME')\n mass: FloatProperty(name='Mass', description=\n 'Mass to give created objects', min=0.001, max=1000.0, default=1.0)\n use_recenter: BoolProperty(name='Recenter', description=\n 'Recalculate the center points after splitting', default=True)\n use_remove_original: BoolProperty(name='Remove Original', description=\n 'Removes the parents used to create the shatter', default=True)\n use_layer_index: IntProperty(name='Layer Index', description=\n 'Layer to add the objects into or 0 for existing', default=0, min=0,\n max=20)\n use_layer_next: BoolProperty(name='Next Layer', description=\n 'At the object into the next layer (layer index overrides)',\n default=True)\n group_name: StringProperty(name='Group', description=\n 'Create objects int a group (use existing or create new)')\n use_debug_points: BoolProperty(name='Debug Points', description=\n 'Create mesh data showing the points used for fracture', default=False)\n use_debug_redraw: BoolProperty(name='Show Progress Realtime',\n description='Redraw as fracture is done', default=True)\n use_debug_bool: BoolProperty(name='Debug Boolean', description=\n 'Skip applying the boolean modifier', default=False)\n\n def execute(self, context):\n keywords = self.as_keywords()\n main(context, **keywords)\n return {'FINISHED'}\n\n def invoke(self, context, event):\n print(self.recursion_chance_select)\n wm = context.window_manager\n return wm.invoke_props_dialog(self, width=600)\n\n def draw(self, context):\n layout = self.layout\n box = layout.box()\n col = box.column()\n col.label(text='Point Source')\n rowsub = col.row()\n rowsub.prop(self, 'source')\n rowsub = col.row()\n rowsub.prop(self, 'source_limit')\n rowsub.prop(self, 'source_noise')\n rowsub = col.row()\n rowsub.prop(self, 'cell_scale')\n box = layout.box()\n col = box.column()\n col.label(text='Recursive Shatter')\n rowsub = col.row(align=True)\n rowsub.prop(self, 'recursion')\n rowsub.prop(self, 'recursion_source_limit')\n rowsub.prop(self, 'recursion_clamp')\n rowsub = col.row()\n rowsub.prop(self, 'recursion_chance')\n rowsub.prop(self, 'recursion_chance_select', expand=True)\n box = layout.box()\n col = box.column()\n col.label(text='Mesh Data')\n rowsub = col.row()\n rowsub.prop(self, 'use_smooth_faces')\n rowsub.prop(self, 'use_sharp_edges')\n rowsub.prop(self, 'use_sharp_edges_apply')\n rowsub.prop(self, 'use_data_match')\n rowsub = col.row()\n rowsub.prop(self, 'material_index')\n rowsub.prop(self, 'use_interior_vgroup')\n rowsub.prop(self, 'margin')\n rowsub.prop(self, 'use_island_split')\n box = layout.box()\n col = box.column()\n col.label(text='Physics')\n rowsub = col.row(align=True)\n rowsub.prop(self, 'mass_mode')\n rowsub.prop(self, 'mass')\n box = layout.box()\n col = box.column()\n col.label(text='Object')\n rowsub = col.row(align=True)\n rowsub.prop(self, 'use_recenter')\n box = layout.box()\n col = box.column()\n col.label(text='Scene')\n rowsub = col.row(align=True)\n rowsub.prop(self, 'use_layer_index')\n rowsub.prop(self, 'use_layer_next')\n rowsub.prop(self, 'group_name')\n box = layout.box()\n col = box.column()\n col.label(text='Debug')\n rowsub = col.row(align=True)\n rowsub.prop(self, 'use_debug_redraw')\n rowsub.prop(self, 'use_debug_points')\n rowsub.prop(self, 'use_debug_bool')\n\n\ndef menu_func(self, context):\n layout = self.layout\n layout.label(text='Cell Fracture:')\n layout.operator('object.add_fracture_cell_objects', text='Cell Fracture')\n\n\ndef register():\n bpy.utils.register_class(FractureCell)\n bpy.types.VIEW3D_PT_tools_object.append(menu_func)\n\n\ndef unregister():\n bpy.utils.unregister_class(FractureCell)\n bpy.types.VIEW3D_PT_tools_object.remove(menu_func)\n\n\nif __name__ == '__main__':\n register()\n",
"<assignment token>\n<import token>\n\n\ndef main_object(context, obj, level, **kw):\n import random\n kw_copy = kw.copy()\n use_recenter = kw_copy.pop('use_recenter')\n use_remove_original = kw_copy.pop('use_remove_original')\n recursion = kw_copy.pop('recursion')\n recursion_source_limit = kw_copy.pop('recursion_source_limit')\n recursion_clamp = kw_copy.pop('recursion_clamp')\n recursion_chance = kw_copy.pop('recursion_chance')\n recursion_chance_select = kw_copy.pop('recursion_chance_select')\n use_layer_next = kw_copy.pop('use_layer_next')\n use_layer_index = kw_copy.pop('use_layer_index')\n group_name = kw_copy.pop('group_name')\n use_island_split = kw_copy.pop('use_island_split')\n use_debug_bool = kw_copy.pop('use_debug_bool')\n use_interior_vgroup = kw_copy.pop('use_interior_vgroup')\n use_sharp_edges = kw_copy.pop('use_sharp_edges')\n use_sharp_edges_apply = kw_copy.pop('use_sharp_edges_apply')\n collection = context.collection\n if level != 0:\n kw_copy['source_limit'] = recursion_source_limit\n from . import fracture_cell_setup\n obj.select_set(False)\n if kw_copy['use_debug_redraw']:\n obj_display_type_prev = obj.display_type\n obj.display_type = 'WIRE'\n objects = fracture_cell_setup.cell_fracture_objects(context, obj, **kw_copy\n )\n objects = fracture_cell_setup.cell_fracture_boolean(context, obj,\n objects, use_island_split=use_island_split, use_interior_hide=\n use_interior_vgroup or use_sharp_edges, use_debug_bool=\n use_debug_bool, use_debug_redraw=kw_copy['use_debug_redraw'], level\n =level)\n if use_recenter:\n bpy.ops.object.origin_set({'selected_editable_objects': objects},\n type='ORIGIN_GEOMETRY', center='MEDIAN')\n if level == 0:\n for level_sub in range(1, recursion + 1):\n objects_recurse_input = [(i, o) for i, o in enumerate(objects)]\n if recursion_chance != 1.0:\n from mathutils import Vector\n if recursion_chance_select == 'RANDOM':\n random.shuffle(objects_recurse_input)\n elif recursion_chance_select in {'SIZE_MIN', 'SIZE_MAX'}:\n objects_recurse_input.sort(key=lambda ob_pair: (Vector(\n ob_pair[1].bound_box[0]) - Vector(ob_pair[1].\n bound_box[6])).length_squared)\n if recursion_chance_select == 'SIZE_MAX':\n objects_recurse_input.reverse()\n elif recursion_chance_select in {'CURSOR_MIN', 'CURSOR_MAX'}:\n c = scene.cursor_location.copy()\n objects_recurse_input.sort(key=lambda ob_pair: (ob_pair\n [1].location - c).length_squared)\n if recursion_chance_select == 'CURSOR_MAX':\n objects_recurse_input.reverse()\n objects_recurse_input[int(recursion_chance * len(\n objects_recurse_input)):] = []\n objects_recurse_input.sort()\n objects_recurse_input.reverse()\n objects_recursive = []\n for i, obj_cell in objects_recurse_input:\n assert objects[i] is obj_cell\n objects_recursive += main_object(context, obj_cell,\n level_sub, **kw)\n if use_remove_original:\n collection.objects.unlink(obj_cell)\n del objects[i]\n if recursion_clamp and len(objects) + len(objects_recursive\n ) >= recursion_clamp:\n break\n objects.extend(objects_recursive)\n if recursion_clamp and len(objects) > recursion_clamp:\n break\n if level == 0:\n if use_interior_vgroup or use_sharp_edges:\n fracture_cell_setup.cell_fracture_interior_handle(objects,\n use_interior_vgroup=use_interior_vgroup, use_sharp_edges=\n use_sharp_edges, use_sharp_edges_apply=use_sharp_edges_apply)\n layers_new = None\n if use_layer_index != 0:\n layers_new = [False] * 20\n layers_new[use_layer_index - 1] = True\n elif use_layer_next:\n layers_new = [False] * 20\n layers_new[(obj.layers[:].index(True) + 1) % 20] = True\n if layers_new is not None:\n for obj_cell in objects:\n obj_cell.layers = layers_new\n if group_name:\n group = bpy.data.collections.get(group_name)\n if group is None:\n group = bpy.data.collections.new(group_name)\n group_objects = group.objects[:]\n for obj_cell in objects:\n if obj_cell not in group_objects:\n group.objects.link(obj_cell)\n if kw_copy['use_debug_redraw']:\n obj.display_type = obj_display_type_prev\n return objects\n\n\ndef main(context, **kw):\n import time\n t = time.time()\n objects_context = context.selected_editable_objects\n kw_copy = kw.copy()\n mass_mode = kw_copy.pop('mass_mode')\n mass = kw_copy.pop('mass')\n objects = []\n for obj in objects_context:\n if obj.type == 'MESH':\n objects += main_object(context, obj, 0, **kw_copy)\n bpy.ops.object.select_all(action='DESELECT')\n for obj_cell in objects:\n obj_cell.select_set(True)\n if mass_mode == 'UNIFORM':\n for obj_cell in objects:\n obj_cell.game.mass = mass\n elif mass_mode == 'VOLUME':\n from mathutils import Vector\n\n def _get_volume(obj_cell):\n\n def _getObjectBBMinMax():\n min_co = Vector((1000000.0, 1000000.0, 1000000.0))\n max_co = -min_co\n matrix = obj_cell.matrix_world\n for i in range(0, 8):\n bb_vec = obj_cell.matrix_world * Vector(obj_cell.\n bound_box[i])\n min_co[0] = min(bb_vec[0], min_co[0])\n min_co[1] = min(bb_vec[1], min_co[1])\n min_co[2] = min(bb_vec[2], min_co[2])\n max_co[0] = max(bb_vec[0], max_co[0])\n max_co[1] = max(bb_vec[1], max_co[1])\n max_co[2] = max(bb_vec[2], max_co[2])\n return min_co, max_co\n\n def _getObjectVolume():\n min_co, max_co = _getObjectBBMinMax()\n x = max_co[0] - min_co[0]\n y = max_co[1] - min_co[1]\n z = max_co[2] - min_co[2]\n volume = x * y * z\n return volume\n return _getObjectVolume()\n obj_volume_ls = [_get_volume(obj_cell) for obj_cell in objects]\n obj_volume_tot = sum(obj_volume_ls)\n if obj_volume_tot > 0.0:\n mass_fac = mass / obj_volume_tot\n for i, obj_cell in enumerate(objects):\n obj_cell.game.mass = obj_volume_ls[i] * mass_fac\n else:\n assert 0\n print('Done! %d objects in %.4f sec' % (len(objects), time.time() - t))\n\n\nclass FractureCell(Operator):\n bl_idname = 'object.add_fracture_cell_objects'\n bl_label = 'Cell fracture selected mesh objects'\n bl_options = {'PRESET'}\n source: EnumProperty(name='Source', items=(('VERT_OWN', 'Own Verts',\n 'Use own vertices'), ('VERT_CHILD', 'Child Verts',\n 'Use child object vertices'), ('PARTICLE_OWN', 'Own Particles',\n 'All particle systems of the source object'), ('PARTICLE_CHILD',\n 'Child Particles', 'All particle systems of the child objects'), (\n 'PENCIL', 'Grease Pencil', \"This object's grease pencil\")), options\n ={'ENUM_FLAG'}, default={'PARTICLE_OWN'})\n source_limit: IntProperty(name='Source Limit', description=\n 'Limit the number of input points, 0 for unlimited', min=0, max=\n 5000, default=100)\n source_noise: FloatProperty(name='Noise', description=\n 'Randomize point distribution', min=0.0, max=1.0, default=0.0)\n cell_scale: FloatVectorProperty(name='Scale', description=\n 'Scale Cell Shape', size=3, min=0.0, max=1.0, default=(1.0, 1.0, 1.0))\n recursion: IntProperty(name='Recursion', description=\n 'Break shards recursively', min=0, max=5000, default=0)\n recursion_source_limit: IntProperty(name='Source Limit', description=\n 'Limit the number of input points, 0 for unlimited (applies to recursion only)'\n , min=0, max=5000, default=8)\n recursion_clamp: IntProperty(name='Clamp Recursion', description=\n 'Finish recursion when this number of objects is reached (prevents recursing for extended periods of time), zero disables'\n , min=0, max=10000, default=250)\n recursion_chance: FloatProperty(name='Random Factor', description=\n 'Likelihood of recursion', min=0.0, max=1.0, default=0.25)\n recursion_chance_select: EnumProperty(name='Recurse Over', items=((\n 'RANDOM', 'Random', ''), ('SIZE_MIN', 'Small',\n 'Recursively subdivide smaller objects'), ('SIZE_MAX', 'Big',\n 'Recursively subdivide bigger objects'), ('CURSOR_MIN',\n 'Cursor Close',\n 'Recursively subdivide objects closer to the cursor'), (\n 'CURSOR_MAX', 'Cursor Far',\n 'Recursively subdivide objects farther from the cursor')), default=\n 'SIZE_MIN')\n use_smooth_faces: BoolProperty(name='Smooth Faces', default=False)\n use_sharp_edges: BoolProperty(name='Sharp Edges', description=\n 'Set sharp edges when disabled', default=True)\n use_sharp_edges_apply: BoolProperty(name='Apply Split Edge',\n description='Split sharp hard edges', default=True)\n use_data_match: BoolProperty(name='Match Data', description=\n 'Match original mesh materials and data layers', default=True)\n use_island_split: BoolProperty(name='Split Islands', description=\n 'Split disconnected meshes', default=True)\n margin: FloatProperty(name='Margin', description=\n 'Gaps for the fracture (gives more stable physics)', min=0.0, max=\n 1.0, default=0.001)\n material_index: IntProperty(name='Material', description=\n 'Material index for interior faces', default=0)\n use_interior_vgroup: BoolProperty(name='Interior VGroup', description=\n 'Create a vertex group for interior verts', default=False)\n mass_mode: EnumProperty(name='Mass Mode', items=(('VOLUME', 'Volume',\n 'Objects get part of specified mass based on their volume'), (\n 'UNIFORM', 'Uniform', 'All objects get the specified mass')),\n default='VOLUME')\n mass: FloatProperty(name='Mass', description=\n 'Mass to give created objects', min=0.001, max=1000.0, default=1.0)\n use_recenter: BoolProperty(name='Recenter', description=\n 'Recalculate the center points after splitting', default=True)\n use_remove_original: BoolProperty(name='Remove Original', description=\n 'Removes the parents used to create the shatter', default=True)\n use_layer_index: IntProperty(name='Layer Index', description=\n 'Layer to add the objects into or 0 for existing', default=0, min=0,\n max=20)\n use_layer_next: BoolProperty(name='Next Layer', description=\n 'At the object into the next layer (layer index overrides)',\n default=True)\n group_name: StringProperty(name='Group', description=\n 'Create objects int a group (use existing or create new)')\n use_debug_points: BoolProperty(name='Debug Points', description=\n 'Create mesh data showing the points used for fracture', default=False)\n use_debug_redraw: BoolProperty(name='Show Progress Realtime',\n description='Redraw as fracture is done', default=True)\n use_debug_bool: BoolProperty(name='Debug Boolean', description=\n 'Skip applying the boolean modifier', default=False)\n\n def execute(self, context):\n keywords = self.as_keywords()\n main(context, **keywords)\n return {'FINISHED'}\n\n def invoke(self, context, event):\n print(self.recursion_chance_select)\n wm = context.window_manager\n return wm.invoke_props_dialog(self, width=600)\n\n def draw(self, context):\n layout = self.layout\n box = layout.box()\n col = box.column()\n col.label(text='Point Source')\n rowsub = col.row()\n rowsub.prop(self, 'source')\n rowsub = col.row()\n rowsub.prop(self, 'source_limit')\n rowsub.prop(self, 'source_noise')\n rowsub = col.row()\n rowsub.prop(self, 'cell_scale')\n box = layout.box()\n col = box.column()\n col.label(text='Recursive Shatter')\n rowsub = col.row(align=True)\n rowsub.prop(self, 'recursion')\n rowsub.prop(self, 'recursion_source_limit')\n rowsub.prop(self, 'recursion_clamp')\n rowsub = col.row()\n rowsub.prop(self, 'recursion_chance')\n rowsub.prop(self, 'recursion_chance_select', expand=True)\n box = layout.box()\n col = box.column()\n col.label(text='Mesh Data')\n rowsub = col.row()\n rowsub.prop(self, 'use_smooth_faces')\n rowsub.prop(self, 'use_sharp_edges')\n rowsub.prop(self, 'use_sharp_edges_apply')\n rowsub.prop(self, 'use_data_match')\n rowsub = col.row()\n rowsub.prop(self, 'material_index')\n rowsub.prop(self, 'use_interior_vgroup')\n rowsub.prop(self, 'margin')\n rowsub.prop(self, 'use_island_split')\n box = layout.box()\n col = box.column()\n col.label(text='Physics')\n rowsub = col.row(align=True)\n rowsub.prop(self, 'mass_mode')\n rowsub.prop(self, 'mass')\n box = layout.box()\n col = box.column()\n col.label(text='Object')\n rowsub = col.row(align=True)\n rowsub.prop(self, 'use_recenter')\n box = layout.box()\n col = box.column()\n col.label(text='Scene')\n rowsub = col.row(align=True)\n rowsub.prop(self, 'use_layer_index')\n rowsub.prop(self, 'use_layer_next')\n rowsub.prop(self, 'group_name')\n box = layout.box()\n col = box.column()\n col.label(text='Debug')\n rowsub = col.row(align=True)\n rowsub.prop(self, 'use_debug_redraw')\n rowsub.prop(self, 'use_debug_points')\n rowsub.prop(self, 'use_debug_bool')\n\n\ndef menu_func(self, context):\n layout = self.layout\n layout.label(text='Cell Fracture:')\n layout.operator('object.add_fracture_cell_objects', text='Cell Fracture')\n\n\ndef register():\n bpy.utils.register_class(FractureCell)\n bpy.types.VIEW3D_PT_tools_object.append(menu_func)\n\n\ndef unregister():\n bpy.utils.unregister_class(FractureCell)\n bpy.types.VIEW3D_PT_tools_object.remove(menu_func)\n\n\n<code token>\n",
"<assignment token>\n<import token>\n\n\ndef main_object(context, obj, level, **kw):\n import random\n kw_copy = kw.copy()\n use_recenter = kw_copy.pop('use_recenter')\n use_remove_original = kw_copy.pop('use_remove_original')\n recursion = kw_copy.pop('recursion')\n recursion_source_limit = kw_copy.pop('recursion_source_limit')\n recursion_clamp = kw_copy.pop('recursion_clamp')\n recursion_chance = kw_copy.pop('recursion_chance')\n recursion_chance_select = kw_copy.pop('recursion_chance_select')\n use_layer_next = kw_copy.pop('use_layer_next')\n use_layer_index = kw_copy.pop('use_layer_index')\n group_name = kw_copy.pop('group_name')\n use_island_split = kw_copy.pop('use_island_split')\n use_debug_bool = kw_copy.pop('use_debug_bool')\n use_interior_vgroup = kw_copy.pop('use_interior_vgroup')\n use_sharp_edges = kw_copy.pop('use_sharp_edges')\n use_sharp_edges_apply = kw_copy.pop('use_sharp_edges_apply')\n collection = context.collection\n if level != 0:\n kw_copy['source_limit'] = recursion_source_limit\n from . import fracture_cell_setup\n obj.select_set(False)\n if kw_copy['use_debug_redraw']:\n obj_display_type_prev = obj.display_type\n obj.display_type = 'WIRE'\n objects = fracture_cell_setup.cell_fracture_objects(context, obj, **kw_copy\n )\n objects = fracture_cell_setup.cell_fracture_boolean(context, obj,\n objects, use_island_split=use_island_split, use_interior_hide=\n use_interior_vgroup or use_sharp_edges, use_debug_bool=\n use_debug_bool, use_debug_redraw=kw_copy['use_debug_redraw'], level\n =level)\n if use_recenter:\n bpy.ops.object.origin_set({'selected_editable_objects': objects},\n type='ORIGIN_GEOMETRY', center='MEDIAN')\n if level == 0:\n for level_sub in range(1, recursion + 1):\n objects_recurse_input = [(i, o) for i, o in enumerate(objects)]\n if recursion_chance != 1.0:\n from mathutils import Vector\n if recursion_chance_select == 'RANDOM':\n random.shuffle(objects_recurse_input)\n elif recursion_chance_select in {'SIZE_MIN', 'SIZE_MAX'}:\n objects_recurse_input.sort(key=lambda ob_pair: (Vector(\n ob_pair[1].bound_box[0]) - Vector(ob_pair[1].\n bound_box[6])).length_squared)\n if recursion_chance_select == 'SIZE_MAX':\n objects_recurse_input.reverse()\n elif recursion_chance_select in {'CURSOR_MIN', 'CURSOR_MAX'}:\n c = scene.cursor_location.copy()\n objects_recurse_input.sort(key=lambda ob_pair: (ob_pair\n [1].location - c).length_squared)\n if recursion_chance_select == 'CURSOR_MAX':\n objects_recurse_input.reverse()\n objects_recurse_input[int(recursion_chance * len(\n objects_recurse_input)):] = []\n objects_recurse_input.sort()\n objects_recurse_input.reverse()\n objects_recursive = []\n for i, obj_cell in objects_recurse_input:\n assert objects[i] is obj_cell\n objects_recursive += main_object(context, obj_cell,\n level_sub, **kw)\n if use_remove_original:\n collection.objects.unlink(obj_cell)\n del objects[i]\n if recursion_clamp and len(objects) + len(objects_recursive\n ) >= recursion_clamp:\n break\n objects.extend(objects_recursive)\n if recursion_clamp and len(objects) > recursion_clamp:\n break\n if level == 0:\n if use_interior_vgroup or use_sharp_edges:\n fracture_cell_setup.cell_fracture_interior_handle(objects,\n use_interior_vgroup=use_interior_vgroup, use_sharp_edges=\n use_sharp_edges, use_sharp_edges_apply=use_sharp_edges_apply)\n layers_new = None\n if use_layer_index != 0:\n layers_new = [False] * 20\n layers_new[use_layer_index - 1] = True\n elif use_layer_next:\n layers_new = [False] * 20\n layers_new[(obj.layers[:].index(True) + 1) % 20] = True\n if layers_new is not None:\n for obj_cell in objects:\n obj_cell.layers = layers_new\n if group_name:\n group = bpy.data.collections.get(group_name)\n if group is None:\n group = bpy.data.collections.new(group_name)\n group_objects = group.objects[:]\n for obj_cell in objects:\n if obj_cell not in group_objects:\n group.objects.link(obj_cell)\n if kw_copy['use_debug_redraw']:\n obj.display_type = obj_display_type_prev\n return objects\n\n\ndef main(context, **kw):\n import time\n t = time.time()\n objects_context = context.selected_editable_objects\n kw_copy = kw.copy()\n mass_mode = kw_copy.pop('mass_mode')\n mass = kw_copy.pop('mass')\n objects = []\n for obj in objects_context:\n if obj.type == 'MESH':\n objects += main_object(context, obj, 0, **kw_copy)\n bpy.ops.object.select_all(action='DESELECT')\n for obj_cell in objects:\n obj_cell.select_set(True)\n if mass_mode == 'UNIFORM':\n for obj_cell in objects:\n obj_cell.game.mass = mass\n elif mass_mode == 'VOLUME':\n from mathutils import Vector\n\n def _get_volume(obj_cell):\n\n def _getObjectBBMinMax():\n min_co = Vector((1000000.0, 1000000.0, 1000000.0))\n max_co = -min_co\n matrix = obj_cell.matrix_world\n for i in range(0, 8):\n bb_vec = obj_cell.matrix_world * Vector(obj_cell.\n bound_box[i])\n min_co[0] = min(bb_vec[0], min_co[0])\n min_co[1] = min(bb_vec[1], min_co[1])\n min_co[2] = min(bb_vec[2], min_co[2])\n max_co[0] = max(bb_vec[0], max_co[0])\n max_co[1] = max(bb_vec[1], max_co[1])\n max_co[2] = max(bb_vec[2], max_co[2])\n return min_co, max_co\n\n def _getObjectVolume():\n min_co, max_co = _getObjectBBMinMax()\n x = max_co[0] - min_co[0]\n y = max_co[1] - min_co[1]\n z = max_co[2] - min_co[2]\n volume = x * y * z\n return volume\n return _getObjectVolume()\n obj_volume_ls = [_get_volume(obj_cell) for obj_cell in objects]\n obj_volume_tot = sum(obj_volume_ls)\n if obj_volume_tot > 0.0:\n mass_fac = mass / obj_volume_tot\n for i, obj_cell in enumerate(objects):\n obj_cell.game.mass = obj_volume_ls[i] * mass_fac\n else:\n assert 0\n print('Done! %d objects in %.4f sec' % (len(objects), time.time() - t))\n\n\nclass FractureCell(Operator):\n bl_idname = 'object.add_fracture_cell_objects'\n bl_label = 'Cell fracture selected mesh objects'\n bl_options = {'PRESET'}\n source: EnumProperty(name='Source', items=(('VERT_OWN', 'Own Verts',\n 'Use own vertices'), ('VERT_CHILD', 'Child Verts',\n 'Use child object vertices'), ('PARTICLE_OWN', 'Own Particles',\n 'All particle systems of the source object'), ('PARTICLE_CHILD',\n 'Child Particles', 'All particle systems of the child objects'), (\n 'PENCIL', 'Grease Pencil', \"This object's grease pencil\")), options\n ={'ENUM_FLAG'}, default={'PARTICLE_OWN'})\n source_limit: IntProperty(name='Source Limit', description=\n 'Limit the number of input points, 0 for unlimited', min=0, max=\n 5000, default=100)\n source_noise: FloatProperty(name='Noise', description=\n 'Randomize point distribution', min=0.0, max=1.0, default=0.0)\n cell_scale: FloatVectorProperty(name='Scale', description=\n 'Scale Cell Shape', size=3, min=0.0, max=1.0, default=(1.0, 1.0, 1.0))\n recursion: IntProperty(name='Recursion', description=\n 'Break shards recursively', min=0, max=5000, default=0)\n recursion_source_limit: IntProperty(name='Source Limit', description=\n 'Limit the number of input points, 0 for unlimited (applies to recursion only)'\n , min=0, max=5000, default=8)\n recursion_clamp: IntProperty(name='Clamp Recursion', description=\n 'Finish recursion when this number of objects is reached (prevents recursing for extended periods of time), zero disables'\n , min=0, max=10000, default=250)\n recursion_chance: FloatProperty(name='Random Factor', description=\n 'Likelihood of recursion', min=0.0, max=1.0, default=0.25)\n recursion_chance_select: EnumProperty(name='Recurse Over', items=((\n 'RANDOM', 'Random', ''), ('SIZE_MIN', 'Small',\n 'Recursively subdivide smaller objects'), ('SIZE_MAX', 'Big',\n 'Recursively subdivide bigger objects'), ('CURSOR_MIN',\n 'Cursor Close',\n 'Recursively subdivide objects closer to the cursor'), (\n 'CURSOR_MAX', 'Cursor Far',\n 'Recursively subdivide objects farther from the cursor')), default=\n 'SIZE_MIN')\n use_smooth_faces: BoolProperty(name='Smooth Faces', default=False)\n use_sharp_edges: BoolProperty(name='Sharp Edges', description=\n 'Set sharp edges when disabled', default=True)\n use_sharp_edges_apply: BoolProperty(name='Apply Split Edge',\n description='Split sharp hard edges', default=True)\n use_data_match: BoolProperty(name='Match Data', description=\n 'Match original mesh materials and data layers', default=True)\n use_island_split: BoolProperty(name='Split Islands', description=\n 'Split disconnected meshes', default=True)\n margin: FloatProperty(name='Margin', description=\n 'Gaps for the fracture (gives more stable physics)', min=0.0, max=\n 1.0, default=0.001)\n material_index: IntProperty(name='Material', description=\n 'Material index for interior faces', default=0)\n use_interior_vgroup: BoolProperty(name='Interior VGroup', description=\n 'Create a vertex group for interior verts', default=False)\n mass_mode: EnumProperty(name='Mass Mode', items=(('VOLUME', 'Volume',\n 'Objects get part of specified mass based on their volume'), (\n 'UNIFORM', 'Uniform', 'All objects get the specified mass')),\n default='VOLUME')\n mass: FloatProperty(name='Mass', description=\n 'Mass to give created objects', min=0.001, max=1000.0, default=1.0)\n use_recenter: BoolProperty(name='Recenter', description=\n 'Recalculate the center points after splitting', default=True)\n use_remove_original: BoolProperty(name='Remove Original', description=\n 'Removes the parents used to create the shatter', default=True)\n use_layer_index: IntProperty(name='Layer Index', description=\n 'Layer to add the objects into or 0 for existing', default=0, min=0,\n max=20)\n use_layer_next: BoolProperty(name='Next Layer', description=\n 'At the object into the next layer (layer index overrides)',\n default=True)\n group_name: StringProperty(name='Group', description=\n 'Create objects int a group (use existing or create new)')\n use_debug_points: BoolProperty(name='Debug Points', description=\n 'Create mesh data showing the points used for fracture', default=False)\n use_debug_redraw: BoolProperty(name='Show Progress Realtime',\n description='Redraw as fracture is done', default=True)\n use_debug_bool: BoolProperty(name='Debug Boolean', description=\n 'Skip applying the boolean modifier', default=False)\n\n def execute(self, context):\n keywords = self.as_keywords()\n main(context, **keywords)\n return {'FINISHED'}\n\n def invoke(self, context, event):\n print(self.recursion_chance_select)\n wm = context.window_manager\n return wm.invoke_props_dialog(self, width=600)\n\n def draw(self, context):\n layout = self.layout\n box = layout.box()\n col = box.column()\n col.label(text='Point Source')\n rowsub = col.row()\n rowsub.prop(self, 'source')\n rowsub = col.row()\n rowsub.prop(self, 'source_limit')\n rowsub.prop(self, 'source_noise')\n rowsub = col.row()\n rowsub.prop(self, 'cell_scale')\n box = layout.box()\n col = box.column()\n col.label(text='Recursive Shatter')\n rowsub = col.row(align=True)\n rowsub.prop(self, 'recursion')\n rowsub.prop(self, 'recursion_source_limit')\n rowsub.prop(self, 'recursion_clamp')\n rowsub = col.row()\n rowsub.prop(self, 'recursion_chance')\n rowsub.prop(self, 'recursion_chance_select', expand=True)\n box = layout.box()\n col = box.column()\n col.label(text='Mesh Data')\n rowsub = col.row()\n rowsub.prop(self, 'use_smooth_faces')\n rowsub.prop(self, 'use_sharp_edges')\n rowsub.prop(self, 'use_sharp_edges_apply')\n rowsub.prop(self, 'use_data_match')\n rowsub = col.row()\n rowsub.prop(self, 'material_index')\n rowsub.prop(self, 'use_interior_vgroup')\n rowsub.prop(self, 'margin')\n rowsub.prop(self, 'use_island_split')\n box = layout.box()\n col = box.column()\n col.label(text='Physics')\n rowsub = col.row(align=True)\n rowsub.prop(self, 'mass_mode')\n rowsub.prop(self, 'mass')\n box = layout.box()\n col = box.column()\n col.label(text='Object')\n rowsub = col.row(align=True)\n rowsub.prop(self, 'use_recenter')\n box = layout.box()\n col = box.column()\n col.label(text='Scene')\n rowsub = col.row(align=True)\n rowsub.prop(self, 'use_layer_index')\n rowsub.prop(self, 'use_layer_next')\n rowsub.prop(self, 'group_name')\n box = layout.box()\n col = box.column()\n col.label(text='Debug')\n rowsub = col.row(align=True)\n rowsub.prop(self, 'use_debug_redraw')\n rowsub.prop(self, 'use_debug_points')\n rowsub.prop(self, 'use_debug_bool')\n\n\n<function token>\n\n\ndef register():\n bpy.utils.register_class(FractureCell)\n bpy.types.VIEW3D_PT_tools_object.append(menu_func)\n\n\ndef unregister():\n bpy.utils.unregister_class(FractureCell)\n bpy.types.VIEW3D_PT_tools_object.remove(menu_func)\n\n\n<code token>\n",
"<assignment token>\n<import token>\n\n\ndef main_object(context, obj, level, **kw):\n import random\n kw_copy = kw.copy()\n use_recenter = kw_copy.pop('use_recenter')\n use_remove_original = kw_copy.pop('use_remove_original')\n recursion = kw_copy.pop('recursion')\n recursion_source_limit = kw_copy.pop('recursion_source_limit')\n recursion_clamp = kw_copy.pop('recursion_clamp')\n recursion_chance = kw_copy.pop('recursion_chance')\n recursion_chance_select = kw_copy.pop('recursion_chance_select')\n use_layer_next = kw_copy.pop('use_layer_next')\n use_layer_index = kw_copy.pop('use_layer_index')\n group_name = kw_copy.pop('group_name')\n use_island_split = kw_copy.pop('use_island_split')\n use_debug_bool = kw_copy.pop('use_debug_bool')\n use_interior_vgroup = kw_copy.pop('use_interior_vgroup')\n use_sharp_edges = kw_copy.pop('use_sharp_edges')\n use_sharp_edges_apply = kw_copy.pop('use_sharp_edges_apply')\n collection = context.collection\n if level != 0:\n kw_copy['source_limit'] = recursion_source_limit\n from . import fracture_cell_setup\n obj.select_set(False)\n if kw_copy['use_debug_redraw']:\n obj_display_type_prev = obj.display_type\n obj.display_type = 'WIRE'\n objects = fracture_cell_setup.cell_fracture_objects(context, obj, **kw_copy\n )\n objects = fracture_cell_setup.cell_fracture_boolean(context, obj,\n objects, use_island_split=use_island_split, use_interior_hide=\n use_interior_vgroup or use_sharp_edges, use_debug_bool=\n use_debug_bool, use_debug_redraw=kw_copy['use_debug_redraw'], level\n =level)\n if use_recenter:\n bpy.ops.object.origin_set({'selected_editable_objects': objects},\n type='ORIGIN_GEOMETRY', center='MEDIAN')\n if level == 0:\n for level_sub in range(1, recursion + 1):\n objects_recurse_input = [(i, o) for i, o in enumerate(objects)]\n if recursion_chance != 1.0:\n from mathutils import Vector\n if recursion_chance_select == 'RANDOM':\n random.shuffle(objects_recurse_input)\n elif recursion_chance_select in {'SIZE_MIN', 'SIZE_MAX'}:\n objects_recurse_input.sort(key=lambda ob_pair: (Vector(\n ob_pair[1].bound_box[0]) - Vector(ob_pair[1].\n bound_box[6])).length_squared)\n if recursion_chance_select == 'SIZE_MAX':\n objects_recurse_input.reverse()\n elif recursion_chance_select in {'CURSOR_MIN', 'CURSOR_MAX'}:\n c = scene.cursor_location.copy()\n objects_recurse_input.sort(key=lambda ob_pair: (ob_pair\n [1].location - c).length_squared)\n if recursion_chance_select == 'CURSOR_MAX':\n objects_recurse_input.reverse()\n objects_recurse_input[int(recursion_chance * len(\n objects_recurse_input)):] = []\n objects_recurse_input.sort()\n objects_recurse_input.reverse()\n objects_recursive = []\n for i, obj_cell in objects_recurse_input:\n assert objects[i] is obj_cell\n objects_recursive += main_object(context, obj_cell,\n level_sub, **kw)\n if use_remove_original:\n collection.objects.unlink(obj_cell)\n del objects[i]\n if recursion_clamp and len(objects) + len(objects_recursive\n ) >= recursion_clamp:\n break\n objects.extend(objects_recursive)\n if recursion_clamp and len(objects) > recursion_clamp:\n break\n if level == 0:\n if use_interior_vgroup or use_sharp_edges:\n fracture_cell_setup.cell_fracture_interior_handle(objects,\n use_interior_vgroup=use_interior_vgroup, use_sharp_edges=\n use_sharp_edges, use_sharp_edges_apply=use_sharp_edges_apply)\n layers_new = None\n if use_layer_index != 0:\n layers_new = [False] * 20\n layers_new[use_layer_index - 1] = True\n elif use_layer_next:\n layers_new = [False] * 20\n layers_new[(obj.layers[:].index(True) + 1) % 20] = True\n if layers_new is not None:\n for obj_cell in objects:\n obj_cell.layers = layers_new\n if group_name:\n group = bpy.data.collections.get(group_name)\n if group is None:\n group = bpy.data.collections.new(group_name)\n group_objects = group.objects[:]\n for obj_cell in objects:\n if obj_cell not in group_objects:\n group.objects.link(obj_cell)\n if kw_copy['use_debug_redraw']:\n obj.display_type = obj_display_type_prev\n return objects\n\n\ndef main(context, **kw):\n import time\n t = time.time()\n objects_context = context.selected_editable_objects\n kw_copy = kw.copy()\n mass_mode = kw_copy.pop('mass_mode')\n mass = kw_copy.pop('mass')\n objects = []\n for obj in objects_context:\n if obj.type == 'MESH':\n objects += main_object(context, obj, 0, **kw_copy)\n bpy.ops.object.select_all(action='DESELECT')\n for obj_cell in objects:\n obj_cell.select_set(True)\n if mass_mode == 'UNIFORM':\n for obj_cell in objects:\n obj_cell.game.mass = mass\n elif mass_mode == 'VOLUME':\n from mathutils import Vector\n\n def _get_volume(obj_cell):\n\n def _getObjectBBMinMax():\n min_co = Vector((1000000.0, 1000000.0, 1000000.0))\n max_co = -min_co\n matrix = obj_cell.matrix_world\n for i in range(0, 8):\n bb_vec = obj_cell.matrix_world * Vector(obj_cell.\n bound_box[i])\n min_co[0] = min(bb_vec[0], min_co[0])\n min_co[1] = min(bb_vec[1], min_co[1])\n min_co[2] = min(bb_vec[2], min_co[2])\n max_co[0] = max(bb_vec[0], max_co[0])\n max_co[1] = max(bb_vec[1], max_co[1])\n max_co[2] = max(bb_vec[2], max_co[2])\n return min_co, max_co\n\n def _getObjectVolume():\n min_co, max_co = _getObjectBBMinMax()\n x = max_co[0] - min_co[0]\n y = max_co[1] - min_co[1]\n z = max_co[2] - min_co[2]\n volume = x * y * z\n return volume\n return _getObjectVolume()\n obj_volume_ls = [_get_volume(obj_cell) for obj_cell in objects]\n obj_volume_tot = sum(obj_volume_ls)\n if obj_volume_tot > 0.0:\n mass_fac = mass / obj_volume_tot\n for i, obj_cell in enumerate(objects):\n obj_cell.game.mass = obj_volume_ls[i] * mass_fac\n else:\n assert 0\n print('Done! %d objects in %.4f sec' % (len(objects), time.time() - t))\n\n\nclass FractureCell(Operator):\n bl_idname = 'object.add_fracture_cell_objects'\n bl_label = 'Cell fracture selected mesh objects'\n bl_options = {'PRESET'}\n source: EnumProperty(name='Source', items=(('VERT_OWN', 'Own Verts',\n 'Use own vertices'), ('VERT_CHILD', 'Child Verts',\n 'Use child object vertices'), ('PARTICLE_OWN', 'Own Particles',\n 'All particle systems of the source object'), ('PARTICLE_CHILD',\n 'Child Particles', 'All particle systems of the child objects'), (\n 'PENCIL', 'Grease Pencil', \"This object's grease pencil\")), options\n ={'ENUM_FLAG'}, default={'PARTICLE_OWN'})\n source_limit: IntProperty(name='Source Limit', description=\n 'Limit the number of input points, 0 for unlimited', min=0, max=\n 5000, default=100)\n source_noise: FloatProperty(name='Noise', description=\n 'Randomize point distribution', min=0.0, max=1.0, default=0.0)\n cell_scale: FloatVectorProperty(name='Scale', description=\n 'Scale Cell Shape', size=3, min=0.0, max=1.0, default=(1.0, 1.0, 1.0))\n recursion: IntProperty(name='Recursion', description=\n 'Break shards recursively', min=0, max=5000, default=0)\n recursion_source_limit: IntProperty(name='Source Limit', description=\n 'Limit the number of input points, 0 for unlimited (applies to recursion only)'\n , min=0, max=5000, default=8)\n recursion_clamp: IntProperty(name='Clamp Recursion', description=\n 'Finish recursion when this number of objects is reached (prevents recursing for extended periods of time), zero disables'\n , min=0, max=10000, default=250)\n recursion_chance: FloatProperty(name='Random Factor', description=\n 'Likelihood of recursion', min=0.0, max=1.0, default=0.25)\n recursion_chance_select: EnumProperty(name='Recurse Over', items=((\n 'RANDOM', 'Random', ''), ('SIZE_MIN', 'Small',\n 'Recursively subdivide smaller objects'), ('SIZE_MAX', 'Big',\n 'Recursively subdivide bigger objects'), ('CURSOR_MIN',\n 'Cursor Close',\n 'Recursively subdivide objects closer to the cursor'), (\n 'CURSOR_MAX', 'Cursor Far',\n 'Recursively subdivide objects farther from the cursor')), default=\n 'SIZE_MIN')\n use_smooth_faces: BoolProperty(name='Smooth Faces', default=False)\n use_sharp_edges: BoolProperty(name='Sharp Edges', description=\n 'Set sharp edges when disabled', default=True)\n use_sharp_edges_apply: BoolProperty(name='Apply Split Edge',\n description='Split sharp hard edges', default=True)\n use_data_match: BoolProperty(name='Match Data', description=\n 'Match original mesh materials and data layers', default=True)\n use_island_split: BoolProperty(name='Split Islands', description=\n 'Split disconnected meshes', default=True)\n margin: FloatProperty(name='Margin', description=\n 'Gaps for the fracture (gives more stable physics)', min=0.0, max=\n 1.0, default=0.001)\n material_index: IntProperty(name='Material', description=\n 'Material index for interior faces', default=0)\n use_interior_vgroup: BoolProperty(name='Interior VGroup', description=\n 'Create a vertex group for interior verts', default=False)\n mass_mode: EnumProperty(name='Mass Mode', items=(('VOLUME', 'Volume',\n 'Objects get part of specified mass based on their volume'), (\n 'UNIFORM', 'Uniform', 'All objects get the specified mass')),\n default='VOLUME')\n mass: FloatProperty(name='Mass', description=\n 'Mass to give created objects', min=0.001, max=1000.0, default=1.0)\n use_recenter: BoolProperty(name='Recenter', description=\n 'Recalculate the center points after splitting', default=True)\n use_remove_original: BoolProperty(name='Remove Original', description=\n 'Removes the parents used to create the shatter', default=True)\n use_layer_index: IntProperty(name='Layer Index', description=\n 'Layer to add the objects into or 0 for existing', default=0, min=0,\n max=20)\n use_layer_next: BoolProperty(name='Next Layer', description=\n 'At the object into the next layer (layer index overrides)',\n default=True)\n group_name: StringProperty(name='Group', description=\n 'Create objects int a group (use existing or create new)')\n use_debug_points: BoolProperty(name='Debug Points', description=\n 'Create mesh data showing the points used for fracture', default=False)\n use_debug_redraw: BoolProperty(name='Show Progress Realtime',\n description='Redraw as fracture is done', default=True)\n use_debug_bool: BoolProperty(name='Debug Boolean', description=\n 'Skip applying the boolean modifier', default=False)\n\n def execute(self, context):\n keywords = self.as_keywords()\n main(context, **keywords)\n return {'FINISHED'}\n\n def invoke(self, context, event):\n print(self.recursion_chance_select)\n wm = context.window_manager\n return wm.invoke_props_dialog(self, width=600)\n\n def draw(self, context):\n layout = self.layout\n box = layout.box()\n col = box.column()\n col.label(text='Point Source')\n rowsub = col.row()\n rowsub.prop(self, 'source')\n rowsub = col.row()\n rowsub.prop(self, 'source_limit')\n rowsub.prop(self, 'source_noise')\n rowsub = col.row()\n rowsub.prop(self, 'cell_scale')\n box = layout.box()\n col = box.column()\n col.label(text='Recursive Shatter')\n rowsub = col.row(align=True)\n rowsub.prop(self, 'recursion')\n rowsub.prop(self, 'recursion_source_limit')\n rowsub.prop(self, 'recursion_clamp')\n rowsub = col.row()\n rowsub.prop(self, 'recursion_chance')\n rowsub.prop(self, 'recursion_chance_select', expand=True)\n box = layout.box()\n col = box.column()\n col.label(text='Mesh Data')\n rowsub = col.row()\n rowsub.prop(self, 'use_smooth_faces')\n rowsub.prop(self, 'use_sharp_edges')\n rowsub.prop(self, 'use_sharp_edges_apply')\n rowsub.prop(self, 'use_data_match')\n rowsub = col.row()\n rowsub.prop(self, 'material_index')\n rowsub.prop(self, 'use_interior_vgroup')\n rowsub.prop(self, 'margin')\n rowsub.prop(self, 'use_island_split')\n box = layout.box()\n col = box.column()\n col.label(text='Physics')\n rowsub = col.row(align=True)\n rowsub.prop(self, 'mass_mode')\n rowsub.prop(self, 'mass')\n box = layout.box()\n col = box.column()\n col.label(text='Object')\n rowsub = col.row(align=True)\n rowsub.prop(self, 'use_recenter')\n box = layout.box()\n col = box.column()\n col.label(text='Scene')\n rowsub = col.row(align=True)\n rowsub.prop(self, 'use_layer_index')\n rowsub.prop(self, 'use_layer_next')\n rowsub.prop(self, 'group_name')\n box = layout.box()\n col = box.column()\n col.label(text='Debug')\n rowsub = col.row(align=True)\n rowsub.prop(self, 'use_debug_redraw')\n rowsub.prop(self, 'use_debug_points')\n rowsub.prop(self, 'use_debug_bool')\n\n\n<function token>\n\n\ndef register():\n bpy.utils.register_class(FractureCell)\n bpy.types.VIEW3D_PT_tools_object.append(menu_func)\n\n\n<function token>\n<code token>\n",
"<assignment token>\n<import token>\n\n\ndef main_object(context, obj, level, **kw):\n import random\n kw_copy = kw.copy()\n use_recenter = kw_copy.pop('use_recenter')\n use_remove_original = kw_copy.pop('use_remove_original')\n recursion = kw_copy.pop('recursion')\n recursion_source_limit = kw_copy.pop('recursion_source_limit')\n recursion_clamp = kw_copy.pop('recursion_clamp')\n recursion_chance = kw_copy.pop('recursion_chance')\n recursion_chance_select = kw_copy.pop('recursion_chance_select')\n use_layer_next = kw_copy.pop('use_layer_next')\n use_layer_index = kw_copy.pop('use_layer_index')\n group_name = kw_copy.pop('group_name')\n use_island_split = kw_copy.pop('use_island_split')\n use_debug_bool = kw_copy.pop('use_debug_bool')\n use_interior_vgroup = kw_copy.pop('use_interior_vgroup')\n use_sharp_edges = kw_copy.pop('use_sharp_edges')\n use_sharp_edges_apply = kw_copy.pop('use_sharp_edges_apply')\n collection = context.collection\n if level != 0:\n kw_copy['source_limit'] = recursion_source_limit\n from . import fracture_cell_setup\n obj.select_set(False)\n if kw_copy['use_debug_redraw']:\n obj_display_type_prev = obj.display_type\n obj.display_type = 'WIRE'\n objects = fracture_cell_setup.cell_fracture_objects(context, obj, **kw_copy\n )\n objects = fracture_cell_setup.cell_fracture_boolean(context, obj,\n objects, use_island_split=use_island_split, use_interior_hide=\n use_interior_vgroup or use_sharp_edges, use_debug_bool=\n use_debug_bool, use_debug_redraw=kw_copy['use_debug_redraw'], level\n =level)\n if use_recenter:\n bpy.ops.object.origin_set({'selected_editable_objects': objects},\n type='ORIGIN_GEOMETRY', center='MEDIAN')\n if level == 0:\n for level_sub in range(1, recursion + 1):\n objects_recurse_input = [(i, o) for i, o in enumerate(objects)]\n if recursion_chance != 1.0:\n from mathutils import Vector\n if recursion_chance_select == 'RANDOM':\n random.shuffle(objects_recurse_input)\n elif recursion_chance_select in {'SIZE_MIN', 'SIZE_MAX'}:\n objects_recurse_input.sort(key=lambda ob_pair: (Vector(\n ob_pair[1].bound_box[0]) - Vector(ob_pair[1].\n bound_box[6])).length_squared)\n if recursion_chance_select == 'SIZE_MAX':\n objects_recurse_input.reverse()\n elif recursion_chance_select in {'CURSOR_MIN', 'CURSOR_MAX'}:\n c = scene.cursor_location.copy()\n objects_recurse_input.sort(key=lambda ob_pair: (ob_pair\n [1].location - c).length_squared)\n if recursion_chance_select == 'CURSOR_MAX':\n objects_recurse_input.reverse()\n objects_recurse_input[int(recursion_chance * len(\n objects_recurse_input)):] = []\n objects_recurse_input.sort()\n objects_recurse_input.reverse()\n objects_recursive = []\n for i, obj_cell in objects_recurse_input:\n assert objects[i] is obj_cell\n objects_recursive += main_object(context, obj_cell,\n level_sub, **kw)\n if use_remove_original:\n collection.objects.unlink(obj_cell)\n del objects[i]\n if recursion_clamp and len(objects) + len(objects_recursive\n ) >= recursion_clamp:\n break\n objects.extend(objects_recursive)\n if recursion_clamp and len(objects) > recursion_clamp:\n break\n if level == 0:\n if use_interior_vgroup or use_sharp_edges:\n fracture_cell_setup.cell_fracture_interior_handle(objects,\n use_interior_vgroup=use_interior_vgroup, use_sharp_edges=\n use_sharp_edges, use_sharp_edges_apply=use_sharp_edges_apply)\n layers_new = None\n if use_layer_index != 0:\n layers_new = [False] * 20\n layers_new[use_layer_index - 1] = True\n elif use_layer_next:\n layers_new = [False] * 20\n layers_new[(obj.layers[:].index(True) + 1) % 20] = True\n if layers_new is not None:\n for obj_cell in objects:\n obj_cell.layers = layers_new\n if group_name:\n group = bpy.data.collections.get(group_name)\n if group is None:\n group = bpy.data.collections.new(group_name)\n group_objects = group.objects[:]\n for obj_cell in objects:\n if obj_cell not in group_objects:\n group.objects.link(obj_cell)\n if kw_copy['use_debug_redraw']:\n obj.display_type = obj_display_type_prev\n return objects\n\n\ndef main(context, **kw):\n import time\n t = time.time()\n objects_context = context.selected_editable_objects\n kw_copy = kw.copy()\n mass_mode = kw_copy.pop('mass_mode')\n mass = kw_copy.pop('mass')\n objects = []\n for obj in objects_context:\n if obj.type == 'MESH':\n objects += main_object(context, obj, 0, **kw_copy)\n bpy.ops.object.select_all(action='DESELECT')\n for obj_cell in objects:\n obj_cell.select_set(True)\n if mass_mode == 'UNIFORM':\n for obj_cell in objects:\n obj_cell.game.mass = mass\n elif mass_mode == 'VOLUME':\n from mathutils import Vector\n\n def _get_volume(obj_cell):\n\n def _getObjectBBMinMax():\n min_co = Vector((1000000.0, 1000000.0, 1000000.0))\n max_co = -min_co\n matrix = obj_cell.matrix_world\n for i in range(0, 8):\n bb_vec = obj_cell.matrix_world * Vector(obj_cell.\n bound_box[i])\n min_co[0] = min(bb_vec[0], min_co[0])\n min_co[1] = min(bb_vec[1], min_co[1])\n min_co[2] = min(bb_vec[2], min_co[2])\n max_co[0] = max(bb_vec[0], max_co[0])\n max_co[1] = max(bb_vec[1], max_co[1])\n max_co[2] = max(bb_vec[2], max_co[2])\n return min_co, max_co\n\n def _getObjectVolume():\n min_co, max_co = _getObjectBBMinMax()\n x = max_co[0] - min_co[0]\n y = max_co[1] - min_co[1]\n z = max_co[2] - min_co[2]\n volume = x * y * z\n return volume\n return _getObjectVolume()\n obj_volume_ls = [_get_volume(obj_cell) for obj_cell in objects]\n obj_volume_tot = sum(obj_volume_ls)\n if obj_volume_tot > 0.0:\n mass_fac = mass / obj_volume_tot\n for i, obj_cell in enumerate(objects):\n obj_cell.game.mass = obj_volume_ls[i] * mass_fac\n else:\n assert 0\n print('Done! %d objects in %.4f sec' % (len(objects), time.time() - t))\n\n\nclass FractureCell(Operator):\n bl_idname = 'object.add_fracture_cell_objects'\n bl_label = 'Cell fracture selected mesh objects'\n bl_options = {'PRESET'}\n source: EnumProperty(name='Source', items=(('VERT_OWN', 'Own Verts',\n 'Use own vertices'), ('VERT_CHILD', 'Child Verts',\n 'Use child object vertices'), ('PARTICLE_OWN', 'Own Particles',\n 'All particle systems of the source object'), ('PARTICLE_CHILD',\n 'Child Particles', 'All particle systems of the child objects'), (\n 'PENCIL', 'Grease Pencil', \"This object's grease pencil\")), options\n ={'ENUM_FLAG'}, default={'PARTICLE_OWN'})\n source_limit: IntProperty(name='Source Limit', description=\n 'Limit the number of input points, 0 for unlimited', min=0, max=\n 5000, default=100)\n source_noise: FloatProperty(name='Noise', description=\n 'Randomize point distribution', min=0.0, max=1.0, default=0.0)\n cell_scale: FloatVectorProperty(name='Scale', description=\n 'Scale Cell Shape', size=3, min=0.0, max=1.0, default=(1.0, 1.0, 1.0))\n recursion: IntProperty(name='Recursion', description=\n 'Break shards recursively', min=0, max=5000, default=0)\n recursion_source_limit: IntProperty(name='Source Limit', description=\n 'Limit the number of input points, 0 for unlimited (applies to recursion only)'\n , min=0, max=5000, default=8)\n recursion_clamp: IntProperty(name='Clamp Recursion', description=\n 'Finish recursion when this number of objects is reached (prevents recursing for extended periods of time), zero disables'\n , min=0, max=10000, default=250)\n recursion_chance: FloatProperty(name='Random Factor', description=\n 'Likelihood of recursion', min=0.0, max=1.0, default=0.25)\n recursion_chance_select: EnumProperty(name='Recurse Over', items=((\n 'RANDOM', 'Random', ''), ('SIZE_MIN', 'Small',\n 'Recursively subdivide smaller objects'), ('SIZE_MAX', 'Big',\n 'Recursively subdivide bigger objects'), ('CURSOR_MIN',\n 'Cursor Close',\n 'Recursively subdivide objects closer to the cursor'), (\n 'CURSOR_MAX', 'Cursor Far',\n 'Recursively subdivide objects farther from the cursor')), default=\n 'SIZE_MIN')\n use_smooth_faces: BoolProperty(name='Smooth Faces', default=False)\n use_sharp_edges: BoolProperty(name='Sharp Edges', description=\n 'Set sharp edges when disabled', default=True)\n use_sharp_edges_apply: BoolProperty(name='Apply Split Edge',\n description='Split sharp hard edges', default=True)\n use_data_match: BoolProperty(name='Match Data', description=\n 'Match original mesh materials and data layers', default=True)\n use_island_split: BoolProperty(name='Split Islands', description=\n 'Split disconnected meshes', default=True)\n margin: FloatProperty(name='Margin', description=\n 'Gaps for the fracture (gives more stable physics)', min=0.0, max=\n 1.0, default=0.001)\n material_index: IntProperty(name='Material', description=\n 'Material index for interior faces', default=0)\n use_interior_vgroup: BoolProperty(name='Interior VGroup', description=\n 'Create a vertex group for interior verts', default=False)\n mass_mode: EnumProperty(name='Mass Mode', items=(('VOLUME', 'Volume',\n 'Objects get part of specified mass based on their volume'), (\n 'UNIFORM', 'Uniform', 'All objects get the specified mass')),\n default='VOLUME')\n mass: FloatProperty(name='Mass', description=\n 'Mass to give created objects', min=0.001, max=1000.0, default=1.0)\n use_recenter: BoolProperty(name='Recenter', description=\n 'Recalculate the center points after splitting', default=True)\n use_remove_original: BoolProperty(name='Remove Original', description=\n 'Removes the parents used to create the shatter', default=True)\n use_layer_index: IntProperty(name='Layer Index', description=\n 'Layer to add the objects into or 0 for existing', default=0, min=0,\n max=20)\n use_layer_next: BoolProperty(name='Next Layer', description=\n 'At the object into the next layer (layer index overrides)',\n default=True)\n group_name: StringProperty(name='Group', description=\n 'Create objects int a group (use existing or create new)')\n use_debug_points: BoolProperty(name='Debug Points', description=\n 'Create mesh data showing the points used for fracture', default=False)\n use_debug_redraw: BoolProperty(name='Show Progress Realtime',\n description='Redraw as fracture is done', default=True)\n use_debug_bool: BoolProperty(name='Debug Boolean', description=\n 'Skip applying the boolean modifier', default=False)\n\n def execute(self, context):\n keywords = self.as_keywords()\n main(context, **keywords)\n return {'FINISHED'}\n\n def invoke(self, context, event):\n print(self.recursion_chance_select)\n wm = context.window_manager\n return wm.invoke_props_dialog(self, width=600)\n\n def draw(self, context):\n layout = self.layout\n box = layout.box()\n col = box.column()\n col.label(text='Point Source')\n rowsub = col.row()\n rowsub.prop(self, 'source')\n rowsub = col.row()\n rowsub.prop(self, 'source_limit')\n rowsub.prop(self, 'source_noise')\n rowsub = col.row()\n rowsub.prop(self, 'cell_scale')\n box = layout.box()\n col = box.column()\n col.label(text='Recursive Shatter')\n rowsub = col.row(align=True)\n rowsub.prop(self, 'recursion')\n rowsub.prop(self, 'recursion_source_limit')\n rowsub.prop(self, 'recursion_clamp')\n rowsub = col.row()\n rowsub.prop(self, 'recursion_chance')\n rowsub.prop(self, 'recursion_chance_select', expand=True)\n box = layout.box()\n col = box.column()\n col.label(text='Mesh Data')\n rowsub = col.row()\n rowsub.prop(self, 'use_smooth_faces')\n rowsub.prop(self, 'use_sharp_edges')\n rowsub.prop(self, 'use_sharp_edges_apply')\n rowsub.prop(self, 'use_data_match')\n rowsub = col.row()\n rowsub.prop(self, 'material_index')\n rowsub.prop(self, 'use_interior_vgroup')\n rowsub.prop(self, 'margin')\n rowsub.prop(self, 'use_island_split')\n box = layout.box()\n col = box.column()\n col.label(text='Physics')\n rowsub = col.row(align=True)\n rowsub.prop(self, 'mass_mode')\n rowsub.prop(self, 'mass')\n box = layout.box()\n col = box.column()\n col.label(text='Object')\n rowsub = col.row(align=True)\n rowsub.prop(self, 'use_recenter')\n box = layout.box()\n col = box.column()\n col.label(text='Scene')\n rowsub = col.row(align=True)\n rowsub.prop(self, 'use_layer_index')\n rowsub.prop(self, 'use_layer_next')\n rowsub.prop(self, 'group_name')\n box = layout.box()\n col = box.column()\n col.label(text='Debug')\n rowsub = col.row(align=True)\n rowsub.prop(self, 'use_debug_redraw')\n rowsub.prop(self, 'use_debug_points')\n rowsub.prop(self, 'use_debug_bool')\n\n\n<function token>\n<function token>\n<function token>\n<code token>\n",
"<assignment token>\n<import token>\n<function token>\n\n\ndef main(context, **kw):\n import time\n t = time.time()\n objects_context = context.selected_editable_objects\n kw_copy = kw.copy()\n mass_mode = kw_copy.pop('mass_mode')\n mass = kw_copy.pop('mass')\n objects = []\n for obj in objects_context:\n if obj.type == 'MESH':\n objects += main_object(context, obj, 0, **kw_copy)\n bpy.ops.object.select_all(action='DESELECT')\n for obj_cell in objects:\n obj_cell.select_set(True)\n if mass_mode == 'UNIFORM':\n for obj_cell in objects:\n obj_cell.game.mass = mass\n elif mass_mode == 'VOLUME':\n from mathutils import Vector\n\n def _get_volume(obj_cell):\n\n def _getObjectBBMinMax():\n min_co = Vector((1000000.0, 1000000.0, 1000000.0))\n max_co = -min_co\n matrix = obj_cell.matrix_world\n for i in range(0, 8):\n bb_vec = obj_cell.matrix_world * Vector(obj_cell.\n bound_box[i])\n min_co[0] = min(bb_vec[0], min_co[0])\n min_co[1] = min(bb_vec[1], min_co[1])\n min_co[2] = min(bb_vec[2], min_co[2])\n max_co[0] = max(bb_vec[0], max_co[0])\n max_co[1] = max(bb_vec[1], max_co[1])\n max_co[2] = max(bb_vec[2], max_co[2])\n return min_co, max_co\n\n def _getObjectVolume():\n min_co, max_co = _getObjectBBMinMax()\n x = max_co[0] - min_co[0]\n y = max_co[1] - min_co[1]\n z = max_co[2] - min_co[2]\n volume = x * y * z\n return volume\n return _getObjectVolume()\n obj_volume_ls = [_get_volume(obj_cell) for obj_cell in objects]\n obj_volume_tot = sum(obj_volume_ls)\n if obj_volume_tot > 0.0:\n mass_fac = mass / obj_volume_tot\n for i, obj_cell in enumerate(objects):\n obj_cell.game.mass = obj_volume_ls[i] * mass_fac\n else:\n assert 0\n print('Done! %d objects in %.4f sec' % (len(objects), time.time() - t))\n\n\nclass FractureCell(Operator):\n bl_idname = 'object.add_fracture_cell_objects'\n bl_label = 'Cell fracture selected mesh objects'\n bl_options = {'PRESET'}\n source: EnumProperty(name='Source', items=(('VERT_OWN', 'Own Verts',\n 'Use own vertices'), ('VERT_CHILD', 'Child Verts',\n 'Use child object vertices'), ('PARTICLE_OWN', 'Own Particles',\n 'All particle systems of the source object'), ('PARTICLE_CHILD',\n 'Child Particles', 'All particle systems of the child objects'), (\n 'PENCIL', 'Grease Pencil', \"This object's grease pencil\")), options\n ={'ENUM_FLAG'}, default={'PARTICLE_OWN'})\n source_limit: IntProperty(name='Source Limit', description=\n 'Limit the number of input points, 0 for unlimited', min=0, max=\n 5000, default=100)\n source_noise: FloatProperty(name='Noise', description=\n 'Randomize point distribution', min=0.0, max=1.0, default=0.0)\n cell_scale: FloatVectorProperty(name='Scale', description=\n 'Scale Cell Shape', size=3, min=0.0, max=1.0, default=(1.0, 1.0, 1.0))\n recursion: IntProperty(name='Recursion', description=\n 'Break shards recursively', min=0, max=5000, default=0)\n recursion_source_limit: IntProperty(name='Source Limit', description=\n 'Limit the number of input points, 0 for unlimited (applies to recursion only)'\n , min=0, max=5000, default=8)\n recursion_clamp: IntProperty(name='Clamp Recursion', description=\n 'Finish recursion when this number of objects is reached (prevents recursing for extended periods of time), zero disables'\n , min=0, max=10000, default=250)\n recursion_chance: FloatProperty(name='Random Factor', description=\n 'Likelihood of recursion', min=0.0, max=1.0, default=0.25)\n recursion_chance_select: EnumProperty(name='Recurse Over', items=((\n 'RANDOM', 'Random', ''), ('SIZE_MIN', 'Small',\n 'Recursively subdivide smaller objects'), ('SIZE_MAX', 'Big',\n 'Recursively subdivide bigger objects'), ('CURSOR_MIN',\n 'Cursor Close',\n 'Recursively subdivide objects closer to the cursor'), (\n 'CURSOR_MAX', 'Cursor Far',\n 'Recursively subdivide objects farther from the cursor')), default=\n 'SIZE_MIN')\n use_smooth_faces: BoolProperty(name='Smooth Faces', default=False)\n use_sharp_edges: BoolProperty(name='Sharp Edges', description=\n 'Set sharp edges when disabled', default=True)\n use_sharp_edges_apply: BoolProperty(name='Apply Split Edge',\n description='Split sharp hard edges', default=True)\n use_data_match: BoolProperty(name='Match Data', description=\n 'Match original mesh materials and data layers', default=True)\n use_island_split: BoolProperty(name='Split Islands', description=\n 'Split disconnected meshes', default=True)\n margin: FloatProperty(name='Margin', description=\n 'Gaps for the fracture (gives more stable physics)', min=0.0, max=\n 1.0, default=0.001)\n material_index: IntProperty(name='Material', description=\n 'Material index for interior faces', default=0)\n use_interior_vgroup: BoolProperty(name='Interior VGroup', description=\n 'Create a vertex group for interior verts', default=False)\n mass_mode: EnumProperty(name='Mass Mode', items=(('VOLUME', 'Volume',\n 'Objects get part of specified mass based on their volume'), (\n 'UNIFORM', 'Uniform', 'All objects get the specified mass')),\n default='VOLUME')\n mass: FloatProperty(name='Mass', description=\n 'Mass to give created objects', min=0.001, max=1000.0, default=1.0)\n use_recenter: BoolProperty(name='Recenter', description=\n 'Recalculate the center points after splitting', default=True)\n use_remove_original: BoolProperty(name='Remove Original', description=\n 'Removes the parents used to create the shatter', default=True)\n use_layer_index: IntProperty(name='Layer Index', description=\n 'Layer to add the objects into or 0 for existing', default=0, min=0,\n max=20)\n use_layer_next: BoolProperty(name='Next Layer', description=\n 'At the object into the next layer (layer index overrides)',\n default=True)\n group_name: StringProperty(name='Group', description=\n 'Create objects int a group (use existing or create new)')\n use_debug_points: BoolProperty(name='Debug Points', description=\n 'Create mesh data showing the points used for fracture', default=False)\n use_debug_redraw: BoolProperty(name='Show Progress Realtime',\n description='Redraw as fracture is done', default=True)\n use_debug_bool: BoolProperty(name='Debug Boolean', description=\n 'Skip applying the boolean modifier', default=False)\n\n def execute(self, context):\n keywords = self.as_keywords()\n main(context, **keywords)\n return {'FINISHED'}\n\n def invoke(self, context, event):\n print(self.recursion_chance_select)\n wm = context.window_manager\n return wm.invoke_props_dialog(self, width=600)\n\n def draw(self, context):\n layout = self.layout\n box = layout.box()\n col = box.column()\n col.label(text='Point Source')\n rowsub = col.row()\n rowsub.prop(self, 'source')\n rowsub = col.row()\n rowsub.prop(self, 'source_limit')\n rowsub.prop(self, 'source_noise')\n rowsub = col.row()\n rowsub.prop(self, 'cell_scale')\n box = layout.box()\n col = box.column()\n col.label(text='Recursive Shatter')\n rowsub = col.row(align=True)\n rowsub.prop(self, 'recursion')\n rowsub.prop(self, 'recursion_source_limit')\n rowsub.prop(self, 'recursion_clamp')\n rowsub = col.row()\n rowsub.prop(self, 'recursion_chance')\n rowsub.prop(self, 'recursion_chance_select', expand=True)\n box = layout.box()\n col = box.column()\n col.label(text='Mesh Data')\n rowsub = col.row()\n rowsub.prop(self, 'use_smooth_faces')\n rowsub.prop(self, 'use_sharp_edges')\n rowsub.prop(self, 'use_sharp_edges_apply')\n rowsub.prop(self, 'use_data_match')\n rowsub = col.row()\n rowsub.prop(self, 'material_index')\n rowsub.prop(self, 'use_interior_vgroup')\n rowsub.prop(self, 'margin')\n rowsub.prop(self, 'use_island_split')\n box = layout.box()\n col = box.column()\n col.label(text='Physics')\n rowsub = col.row(align=True)\n rowsub.prop(self, 'mass_mode')\n rowsub.prop(self, 'mass')\n box = layout.box()\n col = box.column()\n col.label(text='Object')\n rowsub = col.row(align=True)\n rowsub.prop(self, 'use_recenter')\n box = layout.box()\n col = box.column()\n col.label(text='Scene')\n rowsub = col.row(align=True)\n rowsub.prop(self, 'use_layer_index')\n rowsub.prop(self, 'use_layer_next')\n rowsub.prop(self, 'group_name')\n box = layout.box()\n col = box.column()\n col.label(text='Debug')\n rowsub = col.row(align=True)\n rowsub.prop(self, 'use_debug_redraw')\n rowsub.prop(self, 'use_debug_points')\n rowsub.prop(self, 'use_debug_bool')\n\n\n<function token>\n<function token>\n<function token>\n<code token>\n",
"<assignment token>\n<import token>\n<function token>\n<function token>\n\n\nclass FractureCell(Operator):\n bl_idname = 'object.add_fracture_cell_objects'\n bl_label = 'Cell fracture selected mesh objects'\n bl_options = {'PRESET'}\n source: EnumProperty(name='Source', items=(('VERT_OWN', 'Own Verts',\n 'Use own vertices'), ('VERT_CHILD', 'Child Verts',\n 'Use child object vertices'), ('PARTICLE_OWN', 'Own Particles',\n 'All particle systems of the source object'), ('PARTICLE_CHILD',\n 'Child Particles', 'All particle systems of the child objects'), (\n 'PENCIL', 'Grease Pencil', \"This object's grease pencil\")), options\n ={'ENUM_FLAG'}, default={'PARTICLE_OWN'})\n source_limit: IntProperty(name='Source Limit', description=\n 'Limit the number of input points, 0 for unlimited', min=0, max=\n 5000, default=100)\n source_noise: FloatProperty(name='Noise', description=\n 'Randomize point distribution', min=0.0, max=1.0, default=0.0)\n cell_scale: FloatVectorProperty(name='Scale', description=\n 'Scale Cell Shape', size=3, min=0.0, max=1.0, default=(1.0, 1.0, 1.0))\n recursion: IntProperty(name='Recursion', description=\n 'Break shards recursively', min=0, max=5000, default=0)\n recursion_source_limit: IntProperty(name='Source Limit', description=\n 'Limit the number of input points, 0 for unlimited (applies to recursion only)'\n , min=0, max=5000, default=8)\n recursion_clamp: IntProperty(name='Clamp Recursion', description=\n 'Finish recursion when this number of objects is reached (prevents recursing for extended periods of time), zero disables'\n , min=0, max=10000, default=250)\n recursion_chance: FloatProperty(name='Random Factor', description=\n 'Likelihood of recursion', min=0.0, max=1.0, default=0.25)\n recursion_chance_select: EnumProperty(name='Recurse Over', items=((\n 'RANDOM', 'Random', ''), ('SIZE_MIN', 'Small',\n 'Recursively subdivide smaller objects'), ('SIZE_MAX', 'Big',\n 'Recursively subdivide bigger objects'), ('CURSOR_MIN',\n 'Cursor Close',\n 'Recursively subdivide objects closer to the cursor'), (\n 'CURSOR_MAX', 'Cursor Far',\n 'Recursively subdivide objects farther from the cursor')), default=\n 'SIZE_MIN')\n use_smooth_faces: BoolProperty(name='Smooth Faces', default=False)\n use_sharp_edges: BoolProperty(name='Sharp Edges', description=\n 'Set sharp edges when disabled', default=True)\n use_sharp_edges_apply: BoolProperty(name='Apply Split Edge',\n description='Split sharp hard edges', default=True)\n use_data_match: BoolProperty(name='Match Data', description=\n 'Match original mesh materials and data layers', default=True)\n use_island_split: BoolProperty(name='Split Islands', description=\n 'Split disconnected meshes', default=True)\n margin: FloatProperty(name='Margin', description=\n 'Gaps for the fracture (gives more stable physics)', min=0.0, max=\n 1.0, default=0.001)\n material_index: IntProperty(name='Material', description=\n 'Material index for interior faces', default=0)\n use_interior_vgroup: BoolProperty(name='Interior VGroup', description=\n 'Create a vertex group for interior verts', default=False)\n mass_mode: EnumProperty(name='Mass Mode', items=(('VOLUME', 'Volume',\n 'Objects get part of specified mass based on their volume'), (\n 'UNIFORM', 'Uniform', 'All objects get the specified mass')),\n default='VOLUME')\n mass: FloatProperty(name='Mass', description=\n 'Mass to give created objects', min=0.001, max=1000.0, default=1.0)\n use_recenter: BoolProperty(name='Recenter', description=\n 'Recalculate the center points after splitting', default=True)\n use_remove_original: BoolProperty(name='Remove Original', description=\n 'Removes the parents used to create the shatter', default=True)\n use_layer_index: IntProperty(name='Layer Index', description=\n 'Layer to add the objects into or 0 for existing', default=0, min=0,\n max=20)\n use_layer_next: BoolProperty(name='Next Layer', description=\n 'At the object into the next layer (layer index overrides)',\n default=True)\n group_name: StringProperty(name='Group', description=\n 'Create objects int a group (use existing or create new)')\n use_debug_points: BoolProperty(name='Debug Points', description=\n 'Create mesh data showing the points used for fracture', default=False)\n use_debug_redraw: BoolProperty(name='Show Progress Realtime',\n description='Redraw as fracture is done', default=True)\n use_debug_bool: BoolProperty(name='Debug Boolean', description=\n 'Skip applying the boolean modifier', default=False)\n\n def execute(self, context):\n keywords = self.as_keywords()\n main(context, **keywords)\n return {'FINISHED'}\n\n def invoke(self, context, event):\n print(self.recursion_chance_select)\n wm = context.window_manager\n return wm.invoke_props_dialog(self, width=600)\n\n def draw(self, context):\n layout = self.layout\n box = layout.box()\n col = box.column()\n col.label(text='Point Source')\n rowsub = col.row()\n rowsub.prop(self, 'source')\n rowsub = col.row()\n rowsub.prop(self, 'source_limit')\n rowsub.prop(self, 'source_noise')\n rowsub = col.row()\n rowsub.prop(self, 'cell_scale')\n box = layout.box()\n col = box.column()\n col.label(text='Recursive Shatter')\n rowsub = col.row(align=True)\n rowsub.prop(self, 'recursion')\n rowsub.prop(self, 'recursion_source_limit')\n rowsub.prop(self, 'recursion_clamp')\n rowsub = col.row()\n rowsub.prop(self, 'recursion_chance')\n rowsub.prop(self, 'recursion_chance_select', expand=True)\n box = layout.box()\n col = box.column()\n col.label(text='Mesh Data')\n rowsub = col.row()\n rowsub.prop(self, 'use_smooth_faces')\n rowsub.prop(self, 'use_sharp_edges')\n rowsub.prop(self, 'use_sharp_edges_apply')\n rowsub.prop(self, 'use_data_match')\n rowsub = col.row()\n rowsub.prop(self, 'material_index')\n rowsub.prop(self, 'use_interior_vgroup')\n rowsub.prop(self, 'margin')\n rowsub.prop(self, 'use_island_split')\n box = layout.box()\n col = box.column()\n col.label(text='Physics')\n rowsub = col.row(align=True)\n rowsub.prop(self, 'mass_mode')\n rowsub.prop(self, 'mass')\n box = layout.box()\n col = box.column()\n col.label(text='Object')\n rowsub = col.row(align=True)\n rowsub.prop(self, 'use_recenter')\n box = layout.box()\n col = box.column()\n col.label(text='Scene')\n rowsub = col.row(align=True)\n rowsub.prop(self, 'use_layer_index')\n rowsub.prop(self, 'use_layer_next')\n rowsub.prop(self, 'group_name')\n box = layout.box()\n col = box.column()\n col.label(text='Debug')\n rowsub = col.row(align=True)\n rowsub.prop(self, 'use_debug_redraw')\n rowsub.prop(self, 'use_debug_points')\n rowsub.prop(self, 'use_debug_bool')\n\n\n<function token>\n<function token>\n<function token>\n<code token>\n",
"<assignment token>\n<import token>\n<function token>\n<function token>\n\n\nclass FractureCell(Operator):\n <assignment token>\n <assignment token>\n <assignment token>\n source: EnumProperty(name='Source', items=(('VERT_OWN', 'Own Verts',\n 'Use own vertices'), ('VERT_CHILD', 'Child Verts',\n 'Use child object vertices'), ('PARTICLE_OWN', 'Own Particles',\n 'All particle systems of the source object'), ('PARTICLE_CHILD',\n 'Child Particles', 'All particle systems of the child objects'), (\n 'PENCIL', 'Grease Pencil', \"This object's grease pencil\")), options\n ={'ENUM_FLAG'}, default={'PARTICLE_OWN'})\n source_limit: IntProperty(name='Source Limit', description=\n 'Limit the number of input points, 0 for unlimited', min=0, max=\n 5000, default=100)\n source_noise: FloatProperty(name='Noise', description=\n 'Randomize point distribution', min=0.0, max=1.0, default=0.0)\n cell_scale: FloatVectorProperty(name='Scale', description=\n 'Scale Cell Shape', size=3, min=0.0, max=1.0, default=(1.0, 1.0, 1.0))\n recursion: IntProperty(name='Recursion', description=\n 'Break shards recursively', min=0, max=5000, default=0)\n recursion_source_limit: IntProperty(name='Source Limit', description=\n 'Limit the number of input points, 0 for unlimited (applies to recursion only)'\n , min=0, max=5000, default=8)\n recursion_clamp: IntProperty(name='Clamp Recursion', description=\n 'Finish recursion when this number of objects is reached (prevents recursing for extended periods of time), zero disables'\n , min=0, max=10000, default=250)\n recursion_chance: FloatProperty(name='Random Factor', description=\n 'Likelihood of recursion', min=0.0, max=1.0, default=0.25)\n recursion_chance_select: EnumProperty(name='Recurse Over', items=((\n 'RANDOM', 'Random', ''), ('SIZE_MIN', 'Small',\n 'Recursively subdivide smaller objects'), ('SIZE_MAX', 'Big',\n 'Recursively subdivide bigger objects'), ('CURSOR_MIN',\n 'Cursor Close',\n 'Recursively subdivide objects closer to the cursor'), (\n 'CURSOR_MAX', 'Cursor Far',\n 'Recursively subdivide objects farther from the cursor')), default=\n 'SIZE_MIN')\n use_smooth_faces: BoolProperty(name='Smooth Faces', default=False)\n use_sharp_edges: BoolProperty(name='Sharp Edges', description=\n 'Set sharp edges when disabled', default=True)\n use_sharp_edges_apply: BoolProperty(name='Apply Split Edge',\n description='Split sharp hard edges', default=True)\n use_data_match: BoolProperty(name='Match Data', description=\n 'Match original mesh materials and data layers', default=True)\n use_island_split: BoolProperty(name='Split Islands', description=\n 'Split disconnected meshes', default=True)\n margin: FloatProperty(name='Margin', description=\n 'Gaps for the fracture (gives more stable physics)', min=0.0, max=\n 1.0, default=0.001)\n material_index: IntProperty(name='Material', description=\n 'Material index for interior faces', default=0)\n use_interior_vgroup: BoolProperty(name='Interior VGroup', description=\n 'Create a vertex group for interior verts', default=False)\n mass_mode: EnumProperty(name='Mass Mode', items=(('VOLUME', 'Volume',\n 'Objects get part of specified mass based on their volume'), (\n 'UNIFORM', 'Uniform', 'All objects get the specified mass')),\n default='VOLUME')\n mass: FloatProperty(name='Mass', description=\n 'Mass to give created objects', min=0.001, max=1000.0, default=1.0)\n use_recenter: BoolProperty(name='Recenter', description=\n 'Recalculate the center points after splitting', default=True)\n use_remove_original: BoolProperty(name='Remove Original', description=\n 'Removes the parents used to create the shatter', default=True)\n use_layer_index: IntProperty(name='Layer Index', description=\n 'Layer to add the objects into or 0 for existing', default=0, min=0,\n max=20)\n use_layer_next: BoolProperty(name='Next Layer', description=\n 'At the object into the next layer (layer index overrides)',\n default=True)\n group_name: StringProperty(name='Group', description=\n 'Create objects int a group (use existing or create new)')\n use_debug_points: BoolProperty(name='Debug Points', description=\n 'Create mesh data showing the points used for fracture', default=False)\n use_debug_redraw: BoolProperty(name='Show Progress Realtime',\n description='Redraw as fracture is done', default=True)\n use_debug_bool: BoolProperty(name='Debug Boolean', description=\n 'Skip applying the boolean modifier', default=False)\n\n def execute(self, context):\n keywords = self.as_keywords()\n main(context, **keywords)\n return {'FINISHED'}\n\n def invoke(self, context, event):\n print(self.recursion_chance_select)\n wm = context.window_manager\n return wm.invoke_props_dialog(self, width=600)\n\n def draw(self, context):\n layout = self.layout\n box = layout.box()\n col = box.column()\n col.label(text='Point Source')\n rowsub = col.row()\n rowsub.prop(self, 'source')\n rowsub = col.row()\n rowsub.prop(self, 'source_limit')\n rowsub.prop(self, 'source_noise')\n rowsub = col.row()\n rowsub.prop(self, 'cell_scale')\n box = layout.box()\n col = box.column()\n col.label(text='Recursive Shatter')\n rowsub = col.row(align=True)\n rowsub.prop(self, 'recursion')\n rowsub.prop(self, 'recursion_source_limit')\n rowsub.prop(self, 'recursion_clamp')\n rowsub = col.row()\n rowsub.prop(self, 'recursion_chance')\n rowsub.prop(self, 'recursion_chance_select', expand=True)\n box = layout.box()\n col = box.column()\n col.label(text='Mesh Data')\n rowsub = col.row()\n rowsub.prop(self, 'use_smooth_faces')\n rowsub.prop(self, 'use_sharp_edges')\n rowsub.prop(self, 'use_sharp_edges_apply')\n rowsub.prop(self, 'use_data_match')\n rowsub = col.row()\n rowsub.prop(self, 'material_index')\n rowsub.prop(self, 'use_interior_vgroup')\n rowsub.prop(self, 'margin')\n rowsub.prop(self, 'use_island_split')\n box = layout.box()\n col = box.column()\n col.label(text='Physics')\n rowsub = col.row(align=True)\n rowsub.prop(self, 'mass_mode')\n rowsub.prop(self, 'mass')\n box = layout.box()\n col = box.column()\n col.label(text='Object')\n rowsub = col.row(align=True)\n rowsub.prop(self, 'use_recenter')\n box = layout.box()\n col = box.column()\n col.label(text='Scene')\n rowsub = col.row(align=True)\n rowsub.prop(self, 'use_layer_index')\n rowsub.prop(self, 'use_layer_next')\n rowsub.prop(self, 'group_name')\n box = layout.box()\n col = box.column()\n col.label(text='Debug')\n rowsub = col.row(align=True)\n rowsub.prop(self, 'use_debug_redraw')\n rowsub.prop(self, 'use_debug_points')\n rowsub.prop(self, 'use_debug_bool')\n\n\n<function token>\n<function token>\n<function token>\n<code token>\n",
"<assignment token>\n<import token>\n<function token>\n<function token>\n\n\nclass FractureCell(Operator):\n <assignment token>\n <assignment token>\n <assignment token>\n source: EnumProperty(name='Source', items=(('VERT_OWN', 'Own Verts',\n 'Use own vertices'), ('VERT_CHILD', 'Child Verts',\n 'Use child object vertices'), ('PARTICLE_OWN', 'Own Particles',\n 'All particle systems of the source object'), ('PARTICLE_CHILD',\n 'Child Particles', 'All particle systems of the child objects'), (\n 'PENCIL', 'Grease Pencil', \"This object's grease pencil\")), options\n ={'ENUM_FLAG'}, default={'PARTICLE_OWN'})\n source_limit: IntProperty(name='Source Limit', description=\n 'Limit the number of input points, 0 for unlimited', min=0, max=\n 5000, default=100)\n source_noise: FloatProperty(name='Noise', description=\n 'Randomize point distribution', min=0.0, max=1.0, default=0.0)\n cell_scale: FloatVectorProperty(name='Scale', description=\n 'Scale Cell Shape', size=3, min=0.0, max=1.0, default=(1.0, 1.0, 1.0))\n recursion: IntProperty(name='Recursion', description=\n 'Break shards recursively', min=0, max=5000, default=0)\n recursion_source_limit: IntProperty(name='Source Limit', description=\n 'Limit the number of input points, 0 for unlimited (applies to recursion only)'\n , min=0, max=5000, default=8)\n recursion_clamp: IntProperty(name='Clamp Recursion', description=\n 'Finish recursion when this number of objects is reached (prevents recursing for extended periods of time), zero disables'\n , min=0, max=10000, default=250)\n recursion_chance: FloatProperty(name='Random Factor', description=\n 'Likelihood of recursion', min=0.0, max=1.0, default=0.25)\n recursion_chance_select: EnumProperty(name='Recurse Over', items=((\n 'RANDOM', 'Random', ''), ('SIZE_MIN', 'Small',\n 'Recursively subdivide smaller objects'), ('SIZE_MAX', 'Big',\n 'Recursively subdivide bigger objects'), ('CURSOR_MIN',\n 'Cursor Close',\n 'Recursively subdivide objects closer to the cursor'), (\n 'CURSOR_MAX', 'Cursor Far',\n 'Recursively subdivide objects farther from the cursor')), default=\n 'SIZE_MIN')\n use_smooth_faces: BoolProperty(name='Smooth Faces', default=False)\n use_sharp_edges: BoolProperty(name='Sharp Edges', description=\n 'Set sharp edges when disabled', default=True)\n use_sharp_edges_apply: BoolProperty(name='Apply Split Edge',\n description='Split sharp hard edges', default=True)\n use_data_match: BoolProperty(name='Match Data', description=\n 'Match original mesh materials and data layers', default=True)\n use_island_split: BoolProperty(name='Split Islands', description=\n 'Split disconnected meshes', default=True)\n margin: FloatProperty(name='Margin', description=\n 'Gaps for the fracture (gives more stable physics)', min=0.0, max=\n 1.0, default=0.001)\n material_index: IntProperty(name='Material', description=\n 'Material index for interior faces', default=0)\n use_interior_vgroup: BoolProperty(name='Interior VGroup', description=\n 'Create a vertex group for interior verts', default=False)\n mass_mode: EnumProperty(name='Mass Mode', items=(('VOLUME', 'Volume',\n 'Objects get part of specified mass based on their volume'), (\n 'UNIFORM', 'Uniform', 'All objects get the specified mass')),\n default='VOLUME')\n mass: FloatProperty(name='Mass', description=\n 'Mass to give created objects', min=0.001, max=1000.0, default=1.0)\n use_recenter: BoolProperty(name='Recenter', description=\n 'Recalculate the center points after splitting', default=True)\n use_remove_original: BoolProperty(name='Remove Original', description=\n 'Removes the parents used to create the shatter', default=True)\n use_layer_index: IntProperty(name='Layer Index', description=\n 'Layer to add the objects into or 0 for existing', default=0, min=0,\n max=20)\n use_layer_next: BoolProperty(name='Next Layer', description=\n 'At the object into the next layer (layer index overrides)',\n default=True)\n group_name: StringProperty(name='Group', description=\n 'Create objects int a group (use existing or create new)')\n use_debug_points: BoolProperty(name='Debug Points', description=\n 'Create mesh data showing the points used for fracture', default=False)\n use_debug_redraw: BoolProperty(name='Show Progress Realtime',\n description='Redraw as fracture is done', default=True)\n use_debug_bool: BoolProperty(name='Debug Boolean', description=\n 'Skip applying the boolean modifier', default=False)\n\n def execute(self, context):\n keywords = self.as_keywords()\n main(context, **keywords)\n return {'FINISHED'}\n <function token>\n\n def draw(self, context):\n layout = self.layout\n box = layout.box()\n col = box.column()\n col.label(text='Point Source')\n rowsub = col.row()\n rowsub.prop(self, 'source')\n rowsub = col.row()\n rowsub.prop(self, 'source_limit')\n rowsub.prop(self, 'source_noise')\n rowsub = col.row()\n rowsub.prop(self, 'cell_scale')\n box = layout.box()\n col = box.column()\n col.label(text='Recursive Shatter')\n rowsub = col.row(align=True)\n rowsub.prop(self, 'recursion')\n rowsub.prop(self, 'recursion_source_limit')\n rowsub.prop(self, 'recursion_clamp')\n rowsub = col.row()\n rowsub.prop(self, 'recursion_chance')\n rowsub.prop(self, 'recursion_chance_select', expand=True)\n box = layout.box()\n col = box.column()\n col.label(text='Mesh Data')\n rowsub = col.row()\n rowsub.prop(self, 'use_smooth_faces')\n rowsub.prop(self, 'use_sharp_edges')\n rowsub.prop(self, 'use_sharp_edges_apply')\n rowsub.prop(self, 'use_data_match')\n rowsub = col.row()\n rowsub.prop(self, 'material_index')\n rowsub.prop(self, 'use_interior_vgroup')\n rowsub.prop(self, 'margin')\n rowsub.prop(self, 'use_island_split')\n box = layout.box()\n col = box.column()\n col.label(text='Physics')\n rowsub = col.row(align=True)\n rowsub.prop(self, 'mass_mode')\n rowsub.prop(self, 'mass')\n box = layout.box()\n col = box.column()\n col.label(text='Object')\n rowsub = col.row(align=True)\n rowsub.prop(self, 'use_recenter')\n box = layout.box()\n col = box.column()\n col.label(text='Scene')\n rowsub = col.row(align=True)\n rowsub.prop(self, 'use_layer_index')\n rowsub.prop(self, 'use_layer_next')\n rowsub.prop(self, 'group_name')\n box = layout.box()\n col = box.column()\n col.label(text='Debug')\n rowsub = col.row(align=True)\n rowsub.prop(self, 'use_debug_redraw')\n rowsub.prop(self, 'use_debug_points')\n rowsub.prop(self, 'use_debug_bool')\n\n\n<function token>\n<function token>\n<function token>\n<code token>\n",
"<assignment token>\n<import token>\n<function token>\n<function token>\n\n\nclass FractureCell(Operator):\n <assignment token>\n <assignment token>\n <assignment token>\n source: EnumProperty(name='Source', items=(('VERT_OWN', 'Own Verts',\n 'Use own vertices'), ('VERT_CHILD', 'Child Verts',\n 'Use child object vertices'), ('PARTICLE_OWN', 'Own Particles',\n 'All particle systems of the source object'), ('PARTICLE_CHILD',\n 'Child Particles', 'All particle systems of the child objects'), (\n 'PENCIL', 'Grease Pencil', \"This object's grease pencil\")), options\n ={'ENUM_FLAG'}, default={'PARTICLE_OWN'})\n source_limit: IntProperty(name='Source Limit', description=\n 'Limit the number of input points, 0 for unlimited', min=0, max=\n 5000, default=100)\n source_noise: FloatProperty(name='Noise', description=\n 'Randomize point distribution', min=0.0, max=1.0, default=0.0)\n cell_scale: FloatVectorProperty(name='Scale', description=\n 'Scale Cell Shape', size=3, min=0.0, max=1.0, default=(1.0, 1.0, 1.0))\n recursion: IntProperty(name='Recursion', description=\n 'Break shards recursively', min=0, max=5000, default=0)\n recursion_source_limit: IntProperty(name='Source Limit', description=\n 'Limit the number of input points, 0 for unlimited (applies to recursion only)'\n , min=0, max=5000, default=8)\n recursion_clamp: IntProperty(name='Clamp Recursion', description=\n 'Finish recursion when this number of objects is reached (prevents recursing for extended periods of time), zero disables'\n , min=0, max=10000, default=250)\n recursion_chance: FloatProperty(name='Random Factor', description=\n 'Likelihood of recursion', min=0.0, max=1.0, default=0.25)\n recursion_chance_select: EnumProperty(name='Recurse Over', items=((\n 'RANDOM', 'Random', ''), ('SIZE_MIN', 'Small',\n 'Recursively subdivide smaller objects'), ('SIZE_MAX', 'Big',\n 'Recursively subdivide bigger objects'), ('CURSOR_MIN',\n 'Cursor Close',\n 'Recursively subdivide objects closer to the cursor'), (\n 'CURSOR_MAX', 'Cursor Far',\n 'Recursively subdivide objects farther from the cursor')), default=\n 'SIZE_MIN')\n use_smooth_faces: BoolProperty(name='Smooth Faces', default=False)\n use_sharp_edges: BoolProperty(name='Sharp Edges', description=\n 'Set sharp edges when disabled', default=True)\n use_sharp_edges_apply: BoolProperty(name='Apply Split Edge',\n description='Split sharp hard edges', default=True)\n use_data_match: BoolProperty(name='Match Data', description=\n 'Match original mesh materials and data layers', default=True)\n use_island_split: BoolProperty(name='Split Islands', description=\n 'Split disconnected meshes', default=True)\n margin: FloatProperty(name='Margin', description=\n 'Gaps for the fracture (gives more stable physics)', min=0.0, max=\n 1.0, default=0.001)\n material_index: IntProperty(name='Material', description=\n 'Material index for interior faces', default=0)\n use_interior_vgroup: BoolProperty(name='Interior VGroup', description=\n 'Create a vertex group for interior verts', default=False)\n mass_mode: EnumProperty(name='Mass Mode', items=(('VOLUME', 'Volume',\n 'Objects get part of specified mass based on their volume'), (\n 'UNIFORM', 'Uniform', 'All objects get the specified mass')),\n default='VOLUME')\n mass: FloatProperty(name='Mass', description=\n 'Mass to give created objects', min=0.001, max=1000.0, default=1.0)\n use_recenter: BoolProperty(name='Recenter', description=\n 'Recalculate the center points after splitting', default=True)\n use_remove_original: BoolProperty(name='Remove Original', description=\n 'Removes the parents used to create the shatter', default=True)\n use_layer_index: IntProperty(name='Layer Index', description=\n 'Layer to add the objects into or 0 for existing', default=0, min=0,\n max=20)\n use_layer_next: BoolProperty(name='Next Layer', description=\n 'At the object into the next layer (layer index overrides)',\n default=True)\n group_name: StringProperty(name='Group', description=\n 'Create objects int a group (use existing or create new)')\n use_debug_points: BoolProperty(name='Debug Points', description=\n 'Create mesh data showing the points used for fracture', default=False)\n use_debug_redraw: BoolProperty(name='Show Progress Realtime',\n description='Redraw as fracture is done', default=True)\n use_debug_bool: BoolProperty(name='Debug Boolean', description=\n 'Skip applying the boolean modifier', default=False)\n\n def execute(self, context):\n keywords = self.as_keywords()\n main(context, **keywords)\n return {'FINISHED'}\n <function token>\n <function token>\n\n\n<function token>\n<function token>\n<function token>\n<code token>\n",
"<assignment token>\n<import token>\n<function token>\n<function token>\n\n\nclass FractureCell(Operator):\n <assignment token>\n <assignment token>\n <assignment token>\n source: EnumProperty(name='Source', items=(('VERT_OWN', 'Own Verts',\n 'Use own vertices'), ('VERT_CHILD', 'Child Verts',\n 'Use child object vertices'), ('PARTICLE_OWN', 'Own Particles',\n 'All particle systems of the source object'), ('PARTICLE_CHILD',\n 'Child Particles', 'All particle systems of the child objects'), (\n 'PENCIL', 'Grease Pencil', \"This object's grease pencil\")), options\n ={'ENUM_FLAG'}, default={'PARTICLE_OWN'})\n source_limit: IntProperty(name='Source Limit', description=\n 'Limit the number of input points, 0 for unlimited', min=0, max=\n 5000, default=100)\n source_noise: FloatProperty(name='Noise', description=\n 'Randomize point distribution', min=0.0, max=1.0, default=0.0)\n cell_scale: FloatVectorProperty(name='Scale', description=\n 'Scale Cell Shape', size=3, min=0.0, max=1.0, default=(1.0, 1.0, 1.0))\n recursion: IntProperty(name='Recursion', description=\n 'Break shards recursively', min=0, max=5000, default=0)\n recursion_source_limit: IntProperty(name='Source Limit', description=\n 'Limit the number of input points, 0 for unlimited (applies to recursion only)'\n , min=0, max=5000, default=8)\n recursion_clamp: IntProperty(name='Clamp Recursion', description=\n 'Finish recursion when this number of objects is reached (prevents recursing for extended periods of time), zero disables'\n , min=0, max=10000, default=250)\n recursion_chance: FloatProperty(name='Random Factor', description=\n 'Likelihood of recursion', min=0.0, max=1.0, default=0.25)\n recursion_chance_select: EnumProperty(name='Recurse Over', items=((\n 'RANDOM', 'Random', ''), ('SIZE_MIN', 'Small',\n 'Recursively subdivide smaller objects'), ('SIZE_MAX', 'Big',\n 'Recursively subdivide bigger objects'), ('CURSOR_MIN',\n 'Cursor Close',\n 'Recursively subdivide objects closer to the cursor'), (\n 'CURSOR_MAX', 'Cursor Far',\n 'Recursively subdivide objects farther from the cursor')), default=\n 'SIZE_MIN')\n use_smooth_faces: BoolProperty(name='Smooth Faces', default=False)\n use_sharp_edges: BoolProperty(name='Sharp Edges', description=\n 'Set sharp edges when disabled', default=True)\n use_sharp_edges_apply: BoolProperty(name='Apply Split Edge',\n description='Split sharp hard edges', default=True)\n use_data_match: BoolProperty(name='Match Data', description=\n 'Match original mesh materials and data layers', default=True)\n use_island_split: BoolProperty(name='Split Islands', description=\n 'Split disconnected meshes', default=True)\n margin: FloatProperty(name='Margin', description=\n 'Gaps for the fracture (gives more stable physics)', min=0.0, max=\n 1.0, default=0.001)\n material_index: IntProperty(name='Material', description=\n 'Material index for interior faces', default=0)\n use_interior_vgroup: BoolProperty(name='Interior VGroup', description=\n 'Create a vertex group for interior verts', default=False)\n mass_mode: EnumProperty(name='Mass Mode', items=(('VOLUME', 'Volume',\n 'Objects get part of specified mass based on their volume'), (\n 'UNIFORM', 'Uniform', 'All objects get the specified mass')),\n default='VOLUME')\n mass: FloatProperty(name='Mass', description=\n 'Mass to give created objects', min=0.001, max=1000.0, default=1.0)\n use_recenter: BoolProperty(name='Recenter', description=\n 'Recalculate the center points after splitting', default=True)\n use_remove_original: BoolProperty(name='Remove Original', description=\n 'Removes the parents used to create the shatter', default=True)\n use_layer_index: IntProperty(name='Layer Index', description=\n 'Layer to add the objects into or 0 for existing', default=0, min=0,\n max=20)\n use_layer_next: BoolProperty(name='Next Layer', description=\n 'At the object into the next layer (layer index overrides)',\n default=True)\n group_name: StringProperty(name='Group', description=\n 'Create objects int a group (use existing or create new)')\n use_debug_points: BoolProperty(name='Debug Points', description=\n 'Create mesh data showing the points used for fracture', default=False)\n use_debug_redraw: BoolProperty(name='Show Progress Realtime',\n description='Redraw as fracture is done', default=True)\n use_debug_bool: BoolProperty(name='Debug Boolean', description=\n 'Skip applying the boolean modifier', default=False)\n <function token>\n <function token>\n <function token>\n\n\n<function token>\n<function token>\n<function token>\n<code token>\n",
"<assignment token>\n<import token>\n<function token>\n<function token>\n<class token>\n<function token>\n<function token>\n<function token>\n<code token>\n"
] | false |
98,301 |
78bf2e85dfc38fa356ea0261a15ad9e475e3e629
|
p1 = '/etc/bash.bashrc'
try:
with open(p1) as bash:
# with open('a.bashrc', 'r') as bash:
file_text = bash.readlines()
uncomm_http = 'export http_proxy="http://proxy22.iitd.ac.in:3128"\n'
comm_http = '#' + uncomm_http
if comm_http in file_text:
pass
elif uncomm_http in file_text:
file_text.remove(uncomm_http)
file_text.append(comm_http)
else:
pass
with open(p1, 'w') as bash:
bash.writelines(file_text)
uncomm_https = 'export https_proxy="https://proxy22.iitd.ac.in:3128"\n'
comm_https = '#' + uncomm_https
if comm_https in file_text:
pass
elif uncomm_https in file_text:
file_text.remove(uncomm_https)
file_text.append(comm_https)
else:
pass
with open(p1, 'w') as bash:
bash.writelines(file_text)
except:
print("No file named "+p1)
##########################################################
# for apt
p1 = '/etc/apt/apt.conf'
try:
with open(p1) as bash:
# with open('a.bashrc', 'r') as bash:
file_text = bash.readlines()
uncomm_http = 'Acquire::http { Proxy "http://proxy22.iitd.ac.in:3128"; }\n'
comm_http = '#' + uncomm_http
if comm_http in file_text:
pass
elif uncomm_http in file_text:
file_text.remove(uncomm_http)
file_text.append(comm_http)
else:
pass
with open(p1, 'w') as bash:
bash.writelines(file_text)
uncomm_https = 'Acquire::https { Proxy "https://proxy22.iitd.ac.in:3128"; }\n'
comm_https = '#' + uncomm_https
if comm_https in file_text:
pass
elif uncomm_https in file_text:
file_text.remove(uncomm_https)
file_text.append(comm_https)
else:
pass
with open(p1, 'w') as bash:
bash.writelines(file_text)
except:
print("No file named "+p1)
|
[
"p1 = '/etc/bash.bashrc'\n\ntry:\n\twith open(p1) as bash:\n\t # with open('a.bashrc', 'r') as bash:\n\t file_text = bash.readlines()\n\n\tuncomm_http = 'export http_proxy=\"http://proxy22.iitd.ac.in:3128\"\\n'\n\tcomm_http = '#' + uncomm_http\n\n\tif comm_http in file_text:\n\t pass\n\telif uncomm_http in file_text:\n\t file_text.remove(uncomm_http)\n\t file_text.append(comm_http)\n\telse:\n\t pass\n\n\twith open(p1, 'w') as bash:\n\t bash.writelines(file_text)\n\n\tuncomm_https = 'export https_proxy=\"https://proxy22.iitd.ac.in:3128\"\\n'\n\tcomm_https = '#' + uncomm_https\n\n\tif comm_https in file_text:\n\t pass\n\telif uncomm_https in file_text:\n\t file_text.remove(uncomm_https)\n\t file_text.append(comm_https)\n\telse:\n\t pass\n\n\twith open(p1, 'w') as bash:\n\t bash.writelines(file_text)\nexcept:\n\tprint(\"No file named \"+p1)\n\n##########################################################\n\n# for apt\n\n\np1 = '/etc/apt/apt.conf'\ntry:\n\twith open(p1) as bash:\n\t # with open('a.bashrc', 'r') as bash:\n\t file_text = bash.readlines()\n\n\tuncomm_http = 'Acquire::http { Proxy \"http://proxy22.iitd.ac.in:3128\"; }\\n'\n\tcomm_http = '#' + uncomm_http\n\n\tif comm_http in file_text:\n\t pass\n\telif uncomm_http in file_text:\n\t file_text.remove(uncomm_http)\n\t file_text.append(comm_http)\n\telse:\n\t pass\n\n\twith open(p1, 'w') as bash:\n\t bash.writelines(file_text)\n\n\tuncomm_https = 'Acquire::https { Proxy \"https://proxy22.iitd.ac.in:3128\"; }\\n'\n\tcomm_https = '#' + uncomm_https\n\n\tif comm_https in file_text:\n\t pass\n\telif uncomm_https in file_text:\n\t file_text.remove(uncomm_https)\n\t file_text.append(comm_https)\n\telse:\n\t pass\n\n\twith open(p1, 'w') as bash:\n\t bash.writelines(file_text)\nexcept:\n\tprint(\"No file named \"+p1)\n",
"p1 = '/etc/bash.bashrc'\ntry:\n with open(p1) as bash:\n file_text = bash.readlines()\n uncomm_http = 'export http_proxy=\"http://proxy22.iitd.ac.in:3128\"\\n'\n comm_http = '#' + uncomm_http\n if comm_http in file_text:\n pass\n elif uncomm_http in file_text:\n file_text.remove(uncomm_http)\n file_text.append(comm_http)\n else:\n pass\n with open(p1, 'w') as bash:\n bash.writelines(file_text)\n uncomm_https = 'export https_proxy=\"https://proxy22.iitd.ac.in:3128\"\\n'\n comm_https = '#' + uncomm_https\n if comm_https in file_text:\n pass\n elif uncomm_https in file_text:\n file_text.remove(uncomm_https)\n file_text.append(comm_https)\n else:\n pass\n with open(p1, 'w') as bash:\n bash.writelines(file_text)\nexcept:\n print('No file named ' + p1)\np1 = '/etc/apt/apt.conf'\ntry:\n with open(p1) as bash:\n file_text = bash.readlines()\n uncomm_http = 'Acquire::http { Proxy \"http://proxy22.iitd.ac.in:3128\"; }\\n'\n comm_http = '#' + uncomm_http\n if comm_http in file_text:\n pass\n elif uncomm_http in file_text:\n file_text.remove(uncomm_http)\n file_text.append(comm_http)\n else:\n pass\n with open(p1, 'w') as bash:\n bash.writelines(file_text)\n uncomm_https = (\n 'Acquire::https { Proxy \"https://proxy22.iitd.ac.in:3128\"; }\\n')\n comm_https = '#' + uncomm_https\n if comm_https in file_text:\n pass\n elif uncomm_https in file_text:\n file_text.remove(uncomm_https)\n file_text.append(comm_https)\n else:\n pass\n with open(p1, 'w') as bash:\n bash.writelines(file_text)\nexcept:\n print('No file named ' + p1)\n",
"<assignment token>\ntry:\n with open(p1) as bash:\n file_text = bash.readlines()\n uncomm_http = 'export http_proxy=\"http://proxy22.iitd.ac.in:3128\"\\n'\n comm_http = '#' + uncomm_http\n if comm_http in file_text:\n pass\n elif uncomm_http in file_text:\n file_text.remove(uncomm_http)\n file_text.append(comm_http)\n else:\n pass\n with open(p1, 'w') as bash:\n bash.writelines(file_text)\n uncomm_https = 'export https_proxy=\"https://proxy22.iitd.ac.in:3128\"\\n'\n comm_https = '#' + uncomm_https\n if comm_https in file_text:\n pass\n elif uncomm_https in file_text:\n file_text.remove(uncomm_https)\n file_text.append(comm_https)\n else:\n pass\n with open(p1, 'w') as bash:\n bash.writelines(file_text)\nexcept:\n print('No file named ' + p1)\n<assignment token>\ntry:\n with open(p1) as bash:\n file_text = bash.readlines()\n uncomm_http = 'Acquire::http { Proxy \"http://proxy22.iitd.ac.in:3128\"; }\\n'\n comm_http = '#' + uncomm_http\n if comm_http in file_text:\n pass\n elif uncomm_http in file_text:\n file_text.remove(uncomm_http)\n file_text.append(comm_http)\n else:\n pass\n with open(p1, 'w') as bash:\n bash.writelines(file_text)\n uncomm_https = (\n 'Acquire::https { Proxy \"https://proxy22.iitd.ac.in:3128\"; }\\n')\n comm_https = '#' + uncomm_https\n if comm_https in file_text:\n pass\n elif uncomm_https in file_text:\n file_text.remove(uncomm_https)\n file_text.append(comm_https)\n else:\n pass\n with open(p1, 'w') as bash:\n bash.writelines(file_text)\nexcept:\n print('No file named ' + p1)\n",
"<assignment token>\n<code token>\n<assignment token>\n<code token>\n"
] | false |
98,302 |
462b01b0e02c704ea1481e257c89248f6786ff52
|
# BJ2549 루빅의 사각형
import sys
from pprint import pprint
sys.stdin = open('input.txt','r')
cnt = 0
# 오른쪽으로 움직이는 함수, x 행을 n번 움직인다.
def moveR(x, n):
global cnt
tempX = mat[x]
cnt += n
for i in range(4):
tempX[i] = mat[x][x+n % 4]
if x + n < 4:
tempX[i] = mat[x][x+n]
elif x + n >= 4:
tempX[i] = mat[x][x+n-4]
# 아랫쪽으로 움직이는 함수, y 열을 n번 움직인다.
def moveD(x,y,n):
global cnt
tempY = [mat[x][y],]
pass
mat = [list(map(int,input().split())) for _ in range(4)]
mat2 = [[1,2,3,4],[5,6,7,8],[9,10,11,12],[13,14,15,16]]
temp = {}
temp2 = [[(0,0)]*4 for _ in range(4)]
for x in range(4):
for y in range(4):
temp[mat2[x][y]] = (x,y)
print(temp)
for x in range(4):
for y in range(4):
if mat[x][y] != mat2[x][y]:
x2, y2 = temp[mat[x][y]]
print(mat[x][y],x,y)
dx = 0
dy = 0
if x2 > x:
dx = x2-x
elif x > x2:
dx = 4 - (x - x2)
if y2 > y:
dy = y2-y
elif y > y2:
dy = 4 - (y - y2)
temp2[x][y] = (dx,dy)
pprint(temp2)
for x in range(4):
for y in range(4):
if mat[x][y] != mat2[x][y]:
x2, y2 = temp[mat[x][y]]
# print(mat[x][y],(x,y),temp[mat[x][y]])
dx = 0
dy = 0
if x2 > x:
dx = x2-x
elif x > x2:
dx = 4 - (x - x2)
if y2 > y:
dy = y2-y
elif y > y2:
dy = 4 - (y - y2)
temp2[x][y] = (dx,dy)
pprint(temp2)
mv = []
for x in range(4):
for y in range(4):
if temp2[x][y]:
print(mv)
a,b = temp2[x][y]
if a:
pprint(temp2)
do = 1
# print(a,b)
for i in range(4):
c,d = temp2[i][y]
if a == c:
do = 1
else:
do = 0
if do:
mv.append((2, y+1, a))
for j in range(4):
e,f = temp2[j][y]
if f:
# print((a+j) % 4)
temp2[j][y] = 0
temp2[(a + j) % 4][y] = (0,f)
else:
temp2[j][y] = 0
# print(mv)
# print(temp2)
elif b:
pprint(temp2)
do = 1
for l in range(4):
x3,y3 = temp2[x][l]
if b == y3:
do = 1
else:
do = 0
if do:
mv.append((1, x+1, b))
for k in range(4):
g,h = temp2[x][k]
if g:
temp2[x][k] = 0
temp2[x][(b+k) % 4] = (g,0)
else:
temp2[x][k] = 0
# temp2[x][k] = (g,0)
print(len(mv))
for move in mv:
for m in move:
print(m,end=' ')
print()
|
[
"# BJ2549 루빅의 사각형\n\nimport sys\nfrom pprint import pprint\nsys.stdin = open('input.txt','r')\n\ncnt = 0\n# 오른쪽으로 움직이는 함수, x 행을 n번 움직인다. \ndef moveR(x, n):\n global cnt\n tempX = mat[x]\n cnt += n\n for i in range(4):\n tempX[i] = mat[x][x+n % 4]\n if x + n < 4:\n tempX[i] = mat[x][x+n]\n elif x + n >= 4:\n tempX[i] = mat[x][x+n-4]\n\n# 아랫쪽으로 움직이는 함수, y 열을 n번 움직인다. \ndef moveD(x,y,n):\n global cnt\n tempY = [mat[x][y],]\n pass\n\n\nmat = [list(map(int,input().split())) for _ in range(4)]\nmat2 = [[1,2,3,4],[5,6,7,8],[9,10,11,12],[13,14,15,16]]\ntemp = {}\ntemp2 = [[(0,0)]*4 for _ in range(4)]\nfor x in range(4):\n for y in range(4):\n temp[mat2[x][y]] = (x,y)\nprint(temp)\n\nfor x in range(4):\n for y in range(4):\n if mat[x][y] != mat2[x][y]:\n x2, y2 = temp[mat[x][y]]\n print(mat[x][y],x,y)\n dx = 0\n dy = 0\n if x2 > x:\n dx = x2-x\n elif x > x2:\n dx = 4 - (x - x2)\n if y2 > y:\n dy = y2-y\n elif y > y2:\n dy = 4 - (y - y2)\n temp2[x][y] = (dx,dy)\npprint(temp2)\n\nfor x in range(4):\n for y in range(4):\n if mat[x][y] != mat2[x][y]:\n x2, y2 = temp[mat[x][y]]\n # print(mat[x][y],(x,y),temp[mat[x][y]])\n dx = 0\n dy = 0\n if x2 > x:\n dx = x2-x\n elif x > x2:\n dx = 4 - (x - x2)\n if y2 > y:\n dy = y2-y\n elif y > y2:\n dy = 4 - (y - y2)\n temp2[x][y] = (dx,dy)\n \npprint(temp2)\nmv = []\nfor x in range(4):\n for y in range(4):\n if temp2[x][y]:\n print(mv)\n a,b = temp2[x][y]\n if a:\n pprint(temp2)\n do = 1\n # print(a,b)\n for i in range(4):\n c,d = temp2[i][y]\n if a == c:\n do = 1\n else:\n do = 0\n if do:\n mv.append((2, y+1, a))\n for j in range(4):\n e,f = temp2[j][y]\n if f:\n # print((a+j) % 4)\n temp2[j][y] = 0\n temp2[(a + j) % 4][y] = (0,f)\n else:\n temp2[j][y] = 0\n # print(mv)\n # print(temp2)\n elif b:\n pprint(temp2)\n do = 1\n for l in range(4):\n x3,y3 = temp2[x][l]\n if b == y3:\n do = 1\n else:\n do = 0\n if do:\n mv.append((1, x+1, b))\n for k in range(4):\n g,h = temp2[x][k]\n if g:\n temp2[x][k] = 0 \n temp2[x][(b+k) % 4] = (g,0)\n else:\n temp2[x][k] = 0\n # temp2[x][k] = (g,0)\n\nprint(len(mv))\nfor move in mv:\n for m in move:\n print(m,end=' ')\n print()\n",
"import sys\nfrom pprint import pprint\nsys.stdin = open('input.txt', 'r')\ncnt = 0\n\n\ndef moveR(x, n):\n global cnt\n tempX = mat[x]\n cnt += n\n for i in range(4):\n tempX[i] = mat[x][x + n % 4]\n if x + n < 4:\n tempX[i] = mat[x][x + n]\n elif x + n >= 4:\n tempX[i] = mat[x][x + n - 4]\n\n\ndef moveD(x, y, n):\n global cnt\n tempY = [mat[x][y]]\n pass\n\n\nmat = [list(map(int, input().split())) for _ in range(4)]\nmat2 = [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]]\ntemp = {}\ntemp2 = [([(0, 0)] * 4) for _ in range(4)]\nfor x in range(4):\n for y in range(4):\n temp[mat2[x][y]] = x, y\nprint(temp)\nfor x in range(4):\n for y in range(4):\n if mat[x][y] != mat2[x][y]:\n x2, y2 = temp[mat[x][y]]\n print(mat[x][y], x, y)\n dx = 0\n dy = 0\n if x2 > x:\n dx = x2 - x\n elif x > x2:\n dx = 4 - (x - x2)\n if y2 > y:\n dy = y2 - y\n elif y > y2:\n dy = 4 - (y - y2)\n temp2[x][y] = dx, dy\npprint(temp2)\nfor x in range(4):\n for y in range(4):\n if mat[x][y] != mat2[x][y]:\n x2, y2 = temp[mat[x][y]]\n dx = 0\n dy = 0\n if x2 > x:\n dx = x2 - x\n elif x > x2:\n dx = 4 - (x - x2)\n if y2 > y:\n dy = y2 - y\n elif y > y2:\n dy = 4 - (y - y2)\n temp2[x][y] = dx, dy\npprint(temp2)\nmv = []\nfor x in range(4):\n for y in range(4):\n if temp2[x][y]:\n print(mv)\n a, b = temp2[x][y]\n if a:\n pprint(temp2)\n do = 1\n for i in range(4):\n c, d = temp2[i][y]\n if a == c:\n do = 1\n else:\n do = 0\n if do:\n mv.append((2, y + 1, a))\n for j in range(4):\n e, f = temp2[j][y]\n if f:\n temp2[j][y] = 0\n temp2[(a + j) % 4][y] = 0, f\n else:\n temp2[j][y] = 0\n elif b:\n pprint(temp2)\n do = 1\n for l in range(4):\n x3, y3 = temp2[x][l]\n if b == y3:\n do = 1\n else:\n do = 0\n if do:\n mv.append((1, x + 1, b))\n for k in range(4):\n g, h = temp2[x][k]\n if g:\n temp2[x][k] = 0\n temp2[x][(b + k) % 4] = g, 0\n else:\n temp2[x][k] = 0\nprint(len(mv))\nfor move in mv:\n for m in move:\n print(m, end=' ')\n print()\n",
"<import token>\nsys.stdin = open('input.txt', 'r')\ncnt = 0\n\n\ndef moveR(x, n):\n global cnt\n tempX = mat[x]\n cnt += n\n for i in range(4):\n tempX[i] = mat[x][x + n % 4]\n if x + n < 4:\n tempX[i] = mat[x][x + n]\n elif x + n >= 4:\n tempX[i] = mat[x][x + n - 4]\n\n\ndef moveD(x, y, n):\n global cnt\n tempY = [mat[x][y]]\n pass\n\n\nmat = [list(map(int, input().split())) for _ in range(4)]\nmat2 = [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]]\ntemp = {}\ntemp2 = [([(0, 0)] * 4) for _ in range(4)]\nfor x in range(4):\n for y in range(4):\n temp[mat2[x][y]] = x, y\nprint(temp)\nfor x in range(4):\n for y in range(4):\n if mat[x][y] != mat2[x][y]:\n x2, y2 = temp[mat[x][y]]\n print(mat[x][y], x, y)\n dx = 0\n dy = 0\n if x2 > x:\n dx = x2 - x\n elif x > x2:\n dx = 4 - (x - x2)\n if y2 > y:\n dy = y2 - y\n elif y > y2:\n dy = 4 - (y - y2)\n temp2[x][y] = dx, dy\npprint(temp2)\nfor x in range(4):\n for y in range(4):\n if mat[x][y] != mat2[x][y]:\n x2, y2 = temp[mat[x][y]]\n dx = 0\n dy = 0\n if x2 > x:\n dx = x2 - x\n elif x > x2:\n dx = 4 - (x - x2)\n if y2 > y:\n dy = y2 - y\n elif y > y2:\n dy = 4 - (y - y2)\n temp2[x][y] = dx, dy\npprint(temp2)\nmv = []\nfor x in range(4):\n for y in range(4):\n if temp2[x][y]:\n print(mv)\n a, b = temp2[x][y]\n if a:\n pprint(temp2)\n do = 1\n for i in range(4):\n c, d = temp2[i][y]\n if a == c:\n do = 1\n else:\n do = 0\n if do:\n mv.append((2, y + 1, a))\n for j in range(4):\n e, f = temp2[j][y]\n if f:\n temp2[j][y] = 0\n temp2[(a + j) % 4][y] = 0, f\n else:\n temp2[j][y] = 0\n elif b:\n pprint(temp2)\n do = 1\n for l in range(4):\n x3, y3 = temp2[x][l]\n if b == y3:\n do = 1\n else:\n do = 0\n if do:\n mv.append((1, x + 1, b))\n for k in range(4):\n g, h = temp2[x][k]\n if g:\n temp2[x][k] = 0\n temp2[x][(b + k) % 4] = g, 0\n else:\n temp2[x][k] = 0\nprint(len(mv))\nfor move in mv:\n for m in move:\n print(m, end=' ')\n print()\n",
"<import token>\n<assignment token>\n\n\ndef moveR(x, n):\n global cnt\n tempX = mat[x]\n cnt += n\n for i in range(4):\n tempX[i] = mat[x][x + n % 4]\n if x + n < 4:\n tempX[i] = mat[x][x + n]\n elif x + n >= 4:\n tempX[i] = mat[x][x + n - 4]\n\n\ndef moveD(x, y, n):\n global cnt\n tempY = [mat[x][y]]\n pass\n\n\n<assignment token>\nfor x in range(4):\n for y in range(4):\n temp[mat2[x][y]] = x, y\nprint(temp)\nfor x in range(4):\n for y in range(4):\n if mat[x][y] != mat2[x][y]:\n x2, y2 = temp[mat[x][y]]\n print(mat[x][y], x, y)\n dx = 0\n dy = 0\n if x2 > x:\n dx = x2 - x\n elif x > x2:\n dx = 4 - (x - x2)\n if y2 > y:\n dy = y2 - y\n elif y > y2:\n dy = 4 - (y - y2)\n temp2[x][y] = dx, dy\npprint(temp2)\nfor x in range(4):\n for y in range(4):\n if mat[x][y] != mat2[x][y]:\n x2, y2 = temp[mat[x][y]]\n dx = 0\n dy = 0\n if x2 > x:\n dx = x2 - x\n elif x > x2:\n dx = 4 - (x - x2)\n if y2 > y:\n dy = y2 - y\n elif y > y2:\n dy = 4 - (y - y2)\n temp2[x][y] = dx, dy\npprint(temp2)\n<assignment token>\nfor x in range(4):\n for y in range(4):\n if temp2[x][y]:\n print(mv)\n a, b = temp2[x][y]\n if a:\n pprint(temp2)\n do = 1\n for i in range(4):\n c, d = temp2[i][y]\n if a == c:\n do = 1\n else:\n do = 0\n if do:\n mv.append((2, y + 1, a))\n for j in range(4):\n e, f = temp2[j][y]\n if f:\n temp2[j][y] = 0\n temp2[(a + j) % 4][y] = 0, f\n else:\n temp2[j][y] = 0\n elif b:\n pprint(temp2)\n do = 1\n for l in range(4):\n x3, y3 = temp2[x][l]\n if b == y3:\n do = 1\n else:\n do = 0\n if do:\n mv.append((1, x + 1, b))\n for k in range(4):\n g, h = temp2[x][k]\n if g:\n temp2[x][k] = 0\n temp2[x][(b + k) % 4] = g, 0\n else:\n temp2[x][k] = 0\nprint(len(mv))\nfor move in mv:\n for m in move:\n print(m, end=' ')\n print()\n",
"<import token>\n<assignment token>\n\n\ndef moveR(x, n):\n global cnt\n tempX = mat[x]\n cnt += n\n for i in range(4):\n tempX[i] = mat[x][x + n % 4]\n if x + n < 4:\n tempX[i] = mat[x][x + n]\n elif x + n >= 4:\n tempX[i] = mat[x][x + n - 4]\n\n\ndef moveD(x, y, n):\n global cnt\n tempY = [mat[x][y]]\n pass\n\n\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n",
"<import token>\n<assignment token>\n<function token>\n\n\ndef moveD(x, y, n):\n global cnt\n tempY = [mat[x][y]]\n pass\n\n\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n",
"<import token>\n<assignment token>\n<function token>\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n"
] | false |
98,303 |
a851c4501197c7029be0107c7a244c82e6c225aa
|
from program.models import Program, Picture
from operators.models import Operator
from django.contrib import admin
admin.site.register(Program)
admin.site.register(Picture)
admin.site.register(Operator)
|
[
"from program.models import Program, Picture\nfrom operators.models import Operator\nfrom django.contrib import admin\n\nadmin.site.register(Program)\nadmin.site.register(Picture)\nadmin.site.register(Operator)",
"from program.models import Program, Picture\nfrom operators.models import Operator\nfrom django.contrib import admin\nadmin.site.register(Program)\nadmin.site.register(Picture)\nadmin.site.register(Operator)\n",
"<import token>\nadmin.site.register(Program)\nadmin.site.register(Picture)\nadmin.site.register(Operator)\n",
"<import token>\n<code token>\n"
] | false |
98,304 |
d67d1dd46dfb65de5623068d77cf362299710be0
|
import cv2
import numpy as np
import random
import torch
def view_dataset(dset):
"""
:param dset:
img: torch.Size([3, 512, 640])
bboxes: torch.Size([12, 4])
labels: torch.Size([12, 1])
masks: torch.Size([12, 510, 621])
:return:
"""
cv2.namedWindow('img')
for idx in range(len(dset)):
img, bboxes, labels, masks = dset.__getitem__(idx)
img = img.numpy().transpose(1,2,0)
bboxes = bboxes.numpy()
labels = labels.numpy()
masks = masks.numpy()
for i in range(bboxes.shape[0]):
y1, x1, y2, x2 = bboxes[i,:]
cv2.rectangle(img, (x1, y1), (x2, y2), (255, 255, 255), 2, lineType=1)
# view segmentation
cur_gt_mask = masks[i, :, :]
mask = np.zeros(cur_gt_mask.shape, dtype=np.float32)
mask[cur_gt_mask == 1] = 1.
color = (random.random(), random.random(), random.random())
mask = np.repeat(mask[:, :, np.newaxis], 3, axis=2)
mskd = img * mask
clmsk = np.ones(mask.shape) * mask
clmsk[:, :, 0] = clmsk[:, :, 0] * color[0] * 256
clmsk[:, :, 1] = clmsk[:, :, 1] * color[1] * 256
clmsk[:, :, 2] = clmsk[:, :, 2] * color[2] * 256
img = img + 0.8 * clmsk - 0.8 * mskd
###########################
cv2.imshow('img', np.uint8(img))
k = cv2.waitKey(0)
if k & 0xFF == ord('q'):
cv2.destroyAllWindows()
exit()
cv2.destroyAllWindows()
def view_detections(inputs, detections):
"""
:param inputs: torch.Size([2, 3, 512, 640])
:param detections: torch.Size([2, 2, 200, 5])
:return:
"""
cv2.namedWindow('img')
for i in range(inputs.shape[0]):
img = inputs[i,:,:,:].data.cpu().numpy().transpose(1,2,0)
img = np.uint8(img).copy()
det = detections[i,1,:,:]
mask = det[:, 0].gt(0.).expand(5, det.size(0)).t()
det = torch.masked_select(det, mask).view(-1, 5)
if det.shape[0] == 0:
continue
boxes = det[:, 1:].cpu().numpy()
scores = det[:, 0].cpu().numpy()
for box, score in zip(boxes, scores):
y1, x1, y2, x2 = box
y1 = int(y1)
x1 = int(x1)
y2 = int(y2)
x2 = int(x2)
cv2.rectangle(img, (x1, y1), (x2, y2), (0, 255, 0), 2, 2)
cv2.putText(img,
"%.2f" % score,
(x1, y1 + 20),
cv2.FONT_HERSHEY_SIMPLEX,
0.6,
(255, 255, 255))
cv2.imshow('img', img)
k = cv2.waitKey(0)
if k&0xFF==ord('q'):
cv2.destroyAllWindows()
exit()
cv2.destroyAllWindows()
|
[
"import cv2\nimport numpy as np\nimport random\nimport torch\n\ndef view_dataset(dset):\n \"\"\"\n :param dset:\n img: torch.Size([3, 512, 640])\n bboxes: torch.Size([12, 4])\n labels: torch.Size([12, 1])\n masks: torch.Size([12, 510, 621])\n :return:\n \"\"\"\n cv2.namedWindow('img')\n for idx in range(len(dset)):\n img, bboxes, labels, masks = dset.__getitem__(idx)\n img = img.numpy().transpose(1,2,0)\n bboxes = bboxes.numpy()\n labels = labels.numpy()\n masks = masks.numpy()\n for i in range(bboxes.shape[0]):\n y1, x1, y2, x2 = bboxes[i,:]\n cv2.rectangle(img, (x1, y1), (x2, y2), (255, 255, 255), 2, lineType=1)\n\n # view segmentation\n cur_gt_mask = masks[i, :, :]\n mask = np.zeros(cur_gt_mask.shape, dtype=np.float32)\n mask[cur_gt_mask == 1] = 1.\n color = (random.random(), random.random(), random.random())\n mask = np.repeat(mask[:, :, np.newaxis], 3, axis=2)\n mskd = img * mask\n clmsk = np.ones(mask.shape) * mask\n clmsk[:, :, 0] = clmsk[:, :, 0] * color[0] * 256\n clmsk[:, :, 1] = clmsk[:, :, 1] * color[1] * 256\n clmsk[:, :, 2] = clmsk[:, :, 2] * color[2] * 256\n img = img + 0.8 * clmsk - 0.8 * mskd\n ###########################\n\n cv2.imshow('img', np.uint8(img))\n k = cv2.waitKey(0)\n if k & 0xFF == ord('q'):\n cv2.destroyAllWindows()\n exit()\n cv2.destroyAllWindows()\n\n\ndef view_detections(inputs, detections):\n \"\"\"\n :param inputs: torch.Size([2, 3, 512, 640])\n :param detections: torch.Size([2, 2, 200, 5])\n :return:\n \"\"\"\n cv2.namedWindow('img')\n for i in range(inputs.shape[0]):\n img = inputs[i,:,:,:].data.cpu().numpy().transpose(1,2,0)\n img = np.uint8(img).copy()\n det = detections[i,1,:,:]\n\n mask = det[:, 0].gt(0.).expand(5, det.size(0)).t()\n det = torch.masked_select(det, mask).view(-1, 5)\n if det.shape[0] == 0:\n continue\n boxes = det[:, 1:].cpu().numpy()\n scores = det[:, 0].cpu().numpy()\n for box, score in zip(boxes, scores):\n y1, x1, y2, x2 = box\n y1 = int(y1)\n x1 = int(x1)\n y2 = int(y2)\n x2 = int(x2)\n\n cv2.rectangle(img, (x1, y1), (x2, y2), (0, 255, 0), 2, 2)\n cv2.putText(img,\n \"%.2f\" % score,\n (x1, y1 + 20),\n cv2.FONT_HERSHEY_SIMPLEX,\n 0.6,\n (255, 255, 255))\n\n cv2.imshow('img', img)\n k = cv2.waitKey(0)\n if k&0xFF==ord('q'):\n cv2.destroyAllWindows()\n exit()\n cv2.destroyAllWindows()\n",
"import cv2\nimport numpy as np\nimport random\nimport torch\n\n\ndef view_dataset(dset):\n \"\"\"\n :param dset:\n img: torch.Size([3, 512, 640])\n bboxes: torch.Size([12, 4])\n labels: torch.Size([12, 1])\n masks: torch.Size([12, 510, 621])\n :return:\n \"\"\"\n cv2.namedWindow('img')\n for idx in range(len(dset)):\n img, bboxes, labels, masks = dset.__getitem__(idx)\n img = img.numpy().transpose(1, 2, 0)\n bboxes = bboxes.numpy()\n labels = labels.numpy()\n masks = masks.numpy()\n for i in range(bboxes.shape[0]):\n y1, x1, y2, x2 = bboxes[i, :]\n cv2.rectangle(img, (x1, y1), (x2, y2), (255, 255, 255), 2,\n lineType=1)\n cur_gt_mask = masks[i, :, :]\n mask = np.zeros(cur_gt_mask.shape, dtype=np.float32)\n mask[cur_gt_mask == 1] = 1.0\n color = random.random(), random.random(), random.random()\n mask = np.repeat(mask[:, :, np.newaxis], 3, axis=2)\n mskd = img * mask\n clmsk = np.ones(mask.shape) * mask\n clmsk[:, :, 0] = clmsk[:, :, 0] * color[0] * 256\n clmsk[:, :, 1] = clmsk[:, :, 1] * color[1] * 256\n clmsk[:, :, 2] = clmsk[:, :, 2] * color[2] * 256\n img = img + 0.8 * clmsk - 0.8 * mskd\n cv2.imshow('img', np.uint8(img))\n k = cv2.waitKey(0)\n if k & 255 == ord('q'):\n cv2.destroyAllWindows()\n exit()\n cv2.destroyAllWindows()\n\n\ndef view_detections(inputs, detections):\n \"\"\"\n :param inputs: torch.Size([2, 3, 512, 640])\n :param detections: torch.Size([2, 2, 200, 5])\n :return:\n \"\"\"\n cv2.namedWindow('img')\n for i in range(inputs.shape[0]):\n img = inputs[i, :, :, :].data.cpu().numpy().transpose(1, 2, 0)\n img = np.uint8(img).copy()\n det = detections[i, 1, :, :]\n mask = det[:, 0].gt(0.0).expand(5, det.size(0)).t()\n det = torch.masked_select(det, mask).view(-1, 5)\n if det.shape[0] == 0:\n continue\n boxes = det[:, 1:].cpu().numpy()\n scores = det[:, 0].cpu().numpy()\n for box, score in zip(boxes, scores):\n y1, x1, y2, x2 = box\n y1 = int(y1)\n x1 = int(x1)\n y2 = int(y2)\n x2 = int(x2)\n cv2.rectangle(img, (x1, y1), (x2, y2), (0, 255, 0), 2, 2)\n cv2.putText(img, '%.2f' % score, (x1, y1 + 20), cv2.\n FONT_HERSHEY_SIMPLEX, 0.6, (255, 255, 255))\n cv2.imshow('img', img)\n k = cv2.waitKey(0)\n if k & 255 == ord('q'):\n cv2.destroyAllWindows()\n exit()\n cv2.destroyAllWindows()\n",
"<import token>\n\n\ndef view_dataset(dset):\n \"\"\"\n :param dset:\n img: torch.Size([3, 512, 640])\n bboxes: torch.Size([12, 4])\n labels: torch.Size([12, 1])\n masks: torch.Size([12, 510, 621])\n :return:\n \"\"\"\n cv2.namedWindow('img')\n for idx in range(len(dset)):\n img, bboxes, labels, masks = dset.__getitem__(idx)\n img = img.numpy().transpose(1, 2, 0)\n bboxes = bboxes.numpy()\n labels = labels.numpy()\n masks = masks.numpy()\n for i in range(bboxes.shape[0]):\n y1, x1, y2, x2 = bboxes[i, :]\n cv2.rectangle(img, (x1, y1), (x2, y2), (255, 255, 255), 2,\n lineType=1)\n cur_gt_mask = masks[i, :, :]\n mask = np.zeros(cur_gt_mask.shape, dtype=np.float32)\n mask[cur_gt_mask == 1] = 1.0\n color = random.random(), random.random(), random.random()\n mask = np.repeat(mask[:, :, np.newaxis], 3, axis=2)\n mskd = img * mask\n clmsk = np.ones(mask.shape) * mask\n clmsk[:, :, 0] = clmsk[:, :, 0] * color[0] * 256\n clmsk[:, :, 1] = clmsk[:, :, 1] * color[1] * 256\n clmsk[:, :, 2] = clmsk[:, :, 2] * color[2] * 256\n img = img + 0.8 * clmsk - 0.8 * mskd\n cv2.imshow('img', np.uint8(img))\n k = cv2.waitKey(0)\n if k & 255 == ord('q'):\n cv2.destroyAllWindows()\n exit()\n cv2.destroyAllWindows()\n\n\ndef view_detections(inputs, detections):\n \"\"\"\n :param inputs: torch.Size([2, 3, 512, 640])\n :param detections: torch.Size([2, 2, 200, 5])\n :return:\n \"\"\"\n cv2.namedWindow('img')\n for i in range(inputs.shape[0]):\n img = inputs[i, :, :, :].data.cpu().numpy().transpose(1, 2, 0)\n img = np.uint8(img).copy()\n det = detections[i, 1, :, :]\n mask = det[:, 0].gt(0.0).expand(5, det.size(0)).t()\n det = torch.masked_select(det, mask).view(-1, 5)\n if det.shape[0] == 0:\n continue\n boxes = det[:, 1:].cpu().numpy()\n scores = det[:, 0].cpu().numpy()\n for box, score in zip(boxes, scores):\n y1, x1, y2, x2 = box\n y1 = int(y1)\n x1 = int(x1)\n y2 = int(y2)\n x2 = int(x2)\n cv2.rectangle(img, (x1, y1), (x2, y2), (0, 255, 0), 2, 2)\n cv2.putText(img, '%.2f' % score, (x1, y1 + 20), cv2.\n FONT_HERSHEY_SIMPLEX, 0.6, (255, 255, 255))\n cv2.imshow('img', img)\n k = cv2.waitKey(0)\n if k & 255 == ord('q'):\n cv2.destroyAllWindows()\n exit()\n cv2.destroyAllWindows()\n",
"<import token>\n<function token>\n\n\ndef view_detections(inputs, detections):\n \"\"\"\n :param inputs: torch.Size([2, 3, 512, 640])\n :param detections: torch.Size([2, 2, 200, 5])\n :return:\n \"\"\"\n cv2.namedWindow('img')\n for i in range(inputs.shape[0]):\n img = inputs[i, :, :, :].data.cpu().numpy().transpose(1, 2, 0)\n img = np.uint8(img).copy()\n det = detections[i, 1, :, :]\n mask = det[:, 0].gt(0.0).expand(5, det.size(0)).t()\n det = torch.masked_select(det, mask).view(-1, 5)\n if det.shape[0] == 0:\n continue\n boxes = det[:, 1:].cpu().numpy()\n scores = det[:, 0].cpu().numpy()\n for box, score in zip(boxes, scores):\n y1, x1, y2, x2 = box\n y1 = int(y1)\n x1 = int(x1)\n y2 = int(y2)\n x2 = int(x2)\n cv2.rectangle(img, (x1, y1), (x2, y2), (0, 255, 0), 2, 2)\n cv2.putText(img, '%.2f' % score, (x1, y1 + 20), cv2.\n FONT_HERSHEY_SIMPLEX, 0.6, (255, 255, 255))\n cv2.imshow('img', img)\n k = cv2.waitKey(0)\n if k & 255 == ord('q'):\n cv2.destroyAllWindows()\n exit()\n cv2.destroyAllWindows()\n",
"<import token>\n<function token>\n<function token>\n"
] | false |
98,305 |
cda59a5c677b22bfd59f53cf5a8218588adbcf77
|
# -*- coding: utf-8 -*-
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
"""Definition for tokens, languages, documents and doclists, to store
the results of extraction, and express in XML.
For the XML format see dochandler.py
"""
__author__ = """
[email protected] (Richard Sproat)
[email protected] (Kristy Hollingshead)
"""
import xml.sax.saxutils
from math import sqrt
from __init__ import BASE_
import documents
XML_HEADER_ = '<?xml version="1.0" encoding="UTF-8"?>'
LANG_INDENT_ = ' ' * 4
TOKEN_INDENT_ = ' ' * 6
def SumProd(x, y):
return sum(map(lambda x, y: x * y, x, y))
class Token:
"""A token is a term extracted from text, with attributes
count, pronunciation, morphological decomposition
"""
def __init__(self, string):
try: self.string_ = string.encode('utf-8')
except UnicodeDecodeError: self.string_ = string
self.count_ = 1
self.morphs_ = []
self.pronunciations_ = []
self.frequencies_ = []
self.langid_ = ''
def __eq__(self, other):
skey = self.EncodeForHash()
okey = other.EncodeForHash()
return skey == okey
def __repr__(self):
return '#<%s %d %s %s %s>' % (self.string_,
self.count_,
self.morphs_,
self.pronunciations_,
self.langid_)
def XmlEncode(self):
xml_string_ = '<token count="%d" morphs="%s" prons="%s">%s</token>'
morphs = ' '.join(self.morphs_)
morphs = xml.sax.saxutils.escape(morphs)
prons = ' ; '.join(self.pronunciations_)
prons = xml.sax.saxutils.escape(prons)
string_ = xml.sax.saxutils.escape(self.string_)
xml_result = xml_string_ % (self.count_, morphs, prons, string_)
return TOKEN_INDENT_ + xml_result
def EncodeForHash(self):
return '%s<%s><%s><%s>' % (self.String(),
' '.join(self.Morphs()),
' '.join(self.Pronunciations()),
self.LangId())
def String(self):
return self.string_
def SetCount(self, count):
self.count_ = count
def IncrementCount(self, increment = 1):
self.count_ += increment
def Count(self):
return self.count_
def AddPronunciation(self, pron):
if pron not in self.pronunciations_:
try: self.pronunciations_.append(pron.encode('utf-8'))
except UnicodeDecodeError: self.pronunciations_.append(pron)
def Pronunciations(self):
return self.pronunciations_
def SetMorphs(self, morphs):
self.morphs_ = []
for m in morphs:
try: self.morphs_.append(m.encode('utf-8'))
except UnicodeDecodeError: self.morphs_.append(m)
def Morphs(self):
return self.morphs_
def SetLangId(self, lang):
self.langid_ = lang
def LangId(self):
return self.langid_
class TokenFreqStats:
"""Holder for token frequency-statistics such as
relative frequency-counts and variance.
"""
def __init__(self, tok):
self.token_ = tok
self.frequencies_ = []
self.freqsum_ = 0
self.freqsumsq_ = 0
self.variance_ = 0
def __repr__(self):
return '#<%s %s %.6f %.6f %.6f>' % (self.token_,
self.frequencies_,
self.freqsum_,
self.freqsumsq_,
self.variance_)
def Token(self):
return self.token_
def Frequencies(self):
return self.frequencies_
def AddFrequency(self, f):
self.frequencies_.append(f)
def SetFrequencies(self, freq):
self.frequencies_ = []
for f in freq:
self.frequencies_.append(f)
def NormFrequencies(self):
self.frequencies_ = [float(f) for f in self.frequencies_]
sumfreqs = float(sum(self.frequencies_))
if sumfreqs != 0.0:
self.frequencies_ = [f/sumfreqs for f in self.frequencies_]
def CalcFreqStats(self):
n = len(self.frequencies_)
self.freqsum_ = float(sum(self.frequencies_))
self.freqsumsq_ = SumProd(self.frequencies_, self.frequencies_)
self.variance_ = self.freqsumsq_/n - (self.freqsum_**2)/(n**2)
def FreqSum(self):
return self.freqsum_
def FreqVariance(self):
return self.variance_
class DocTokenStats:
"""Holder for Doclist-specific token statistics, such as frequency
counts. Also allows for calculation of pairwise comparison metrics
such as Pearson's correlation.
"""
def __init__(self, doclist=None):
if doclist is None:
self.doclist_ = documents.Doclist()
else: self.doclist_ = doclist
self.n_ = len(self.doclist_.Docs())
self.tokstats_ = {}
def InitTokenStats(self, tok):
tstats = TokenFreqStats(tok)
tfreq = []
for doc in self.doclist_.Docs():
c = 0
for lang in doc.Langs():
if tok.LangId() != lang.Id(): continue
tmptok = lang.MatchToken(tok)
if tmptok is not None:
c += tmptok.Count()
tfreq.append(c)
tstats.SetFrequencies(tfreq)
tstats.NormFrequencies()
tstats.CalcFreqStats()
self.tokstats_[tok.EncodeForHash()] = tstats
return tstats
def AddTokenStats(self, tstats):
tokhash = tstats.Token().EncodeForHash()
if tokhash not in self.tokstats_:
self.tokstats_[tokhash] = tstats
def GetTokenStats(self, tok):
try: return self.tokstats_[tok.EncodeForHash()]
except KeyError: return self.InitTokenStats(tok)
def TokenStats(self):
return self.tokstats_.values()
def SetN(self, n):
self.n_ = n
def GetN(self):
return self.n_
def PearsonsCorrelation(self, token1, token2):
stats1 = self.GetTokenStats(token1)
stats2 = self.GetTokenStats(token2)
freq1 = stats1.Frequencies()
freq2 = stats2.Frequencies()
sumxy = sum(map(lambda x, y: x * y, freq1, freq2))
covxy = sumxy/float(self.n_) - \
(stats1.FreqSum()*stats2.FreqSum())/float(self.n_**2)
try:
rho = covxy/sqrt(stats1.FreqVariance()*stats2.FreqVariance())
except ZeroDivisionError:
rho = 0.0
#print x.String(),y.String(),sumx2,sumy2,varx,vary,sumxy,covxy,rho
return rho
class Lang:
"""Holder for tokens in a language.
"""
def __init__(self):
self.id_ = ''
self.tokens_ = []
def XmlEncode(self):
if len(self.tokens_) == 0: return ''
xml_string_ = '<lang id="%s">\n%s\n%s</lang>'
xml_tokens = []
for token_ in self.Tokens():
xml_tokens.append(token_.XmlEncode())
xml_result = xml_string_ % (self.id_, '\n'.join(xml_tokens),
LANG_INDENT_)
return LANG_INDENT_ + xml_result
def Id(self):
return self.id_
def SetId(self, id):
self.id_ = id.encode('utf-8')
def Tokens(self):
return self.tokens_
def SetTokens(self, tokens):
self.tokens_ = []
for t in tokens:
self.AddToken(t)
def AddToken(self, token, merge=False):
"""If an identical token already exists in dictionary,
will merge tokens and cumulate their counts. Checks to
see that morphology and pronunciations are identical,
otherwise the tokens will not be merged.
"""
token.SetLangId(self.id_)
if not merge:
self.tokens_.append(token)
else:
exists = self.MatchToken(token)
if exists is None:
self.tokens_.append(token)
else:
exists.IncrementCount(token.Count())
def MatchToken(self, token):
try:
i = self.tokens_.index(token)
return self.tokens_[i]
except ValueError:
return None
def CompactTokens(self):
"""Merge identical tokens and cumulate their counts. Checks to see
that morphology and pronunciations are identical, otherwise the
tokens will not be merged.
"""
map = {}
for token_ in self.tokens_:
hash_string = token_.EncodeForHash()
try: map[hash_string].append(token_)
except KeyError: map[hash_string] = [token_]
ntokens = []
keys = map.keys()
keys.sort()
for k in keys:
token_ = map[k][0]
for otoken in map[k][1:]:
token_.IncrementCount(otoken.Count())
ntokens.append(token_)
self.tokens_ = ntokens
|
[
"# -*- coding: utf-8 -*-\n\n## Licensed under the Apache License, Version 2.0 (the \"License\");\n## you may not use this file except in compliance with the License.\n## You may obtain a copy of the License at\n##\n## http://www.apache.org/licenses/LICENSE-2.0\n##\n## Unless required by applicable law or agreed to in writing, software\n## distributed under the License is distributed on an \"AS IS\" BASIS,\n## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n## See the License for the specific language governing permissions and\n## limitations under the License.\n\n\"\"\"Definition for tokens, languages, documents and doclists, to store\nthe results of extraction, and express in XML.\n\nFor the XML format see dochandler.py\n\"\"\"\n\n__author__ = \"\"\"\[email protected] (Richard Sproat)\[email protected] (Kristy Hollingshead)\n\"\"\"\n\nimport xml.sax.saxutils\nfrom math import sqrt\nfrom __init__ import BASE_\nimport documents\n\nXML_HEADER_ = '<?xml version=\"1.0\" encoding=\"UTF-8\"?>'\nLANG_INDENT_ = ' ' * 4\nTOKEN_INDENT_ = ' ' * 6\n\ndef SumProd(x, y):\n return sum(map(lambda x, y: x * y, x, y))\n\nclass Token:\n \"\"\"A token is a term extracted from text, with attributes\n count, pronunciation, morphological decomposition\n \"\"\"\n def __init__(self, string):\n try: self.string_ = string.encode('utf-8')\n except UnicodeDecodeError: self.string_ = string\n self.count_ = 1\n self.morphs_ = []\n self.pronunciations_ = []\n self.frequencies_ = []\n self.langid_ = ''\n\n def __eq__(self, other):\n skey = self.EncodeForHash()\n okey = other.EncodeForHash()\n return skey == okey\n\n def __repr__(self):\n return '#<%s %d %s %s %s>' % (self.string_,\n self.count_,\n self.morphs_,\n self.pronunciations_,\n self.langid_)\n\n def XmlEncode(self):\n xml_string_ = '<token count=\"%d\" morphs=\"%s\" prons=\"%s\">%s</token>'\n morphs = ' '.join(self.morphs_)\n morphs = xml.sax.saxutils.escape(morphs)\n prons = ' ; '.join(self.pronunciations_)\n prons = xml.sax.saxutils.escape(prons)\n string_ = xml.sax.saxutils.escape(self.string_)\n xml_result = xml_string_ % (self.count_, morphs, prons, string_)\n return TOKEN_INDENT_ + xml_result\n\n def EncodeForHash(self):\n return '%s<%s><%s><%s>' % (self.String(),\n ' '.join(self.Morphs()),\n ' '.join(self.Pronunciations()),\n self.LangId())\n\n def String(self):\n return self.string_\n\n def SetCount(self, count):\n self.count_ = count\n\n def IncrementCount(self, increment = 1):\n self.count_ += increment\n\n def Count(self):\n return self.count_\n\n def AddPronunciation(self, pron):\n if pron not in self.pronunciations_:\n try: self.pronunciations_.append(pron.encode('utf-8'))\n except UnicodeDecodeError: self.pronunciations_.append(pron)\n\n def Pronunciations(self):\n return self.pronunciations_\n\n def SetMorphs(self, morphs):\n self.morphs_ = []\n for m in morphs:\n try: self.morphs_.append(m.encode('utf-8'))\n except UnicodeDecodeError: self.morphs_.append(m)\n\n def Morphs(self):\n return self.morphs_\n\n def SetLangId(self, lang):\n self.langid_ = lang\n\n def LangId(self):\n return self.langid_\n\nclass TokenFreqStats:\n \"\"\"Holder for token frequency-statistics such as\n relative frequency-counts and variance.\n \"\"\"\n def __init__(self, tok):\n self.token_ = tok\n self.frequencies_ = []\n self.freqsum_ = 0\n self.freqsumsq_ = 0\n self.variance_ = 0\n\n def __repr__(self):\n return '#<%s %s %.6f %.6f %.6f>' % (self.token_,\n self.frequencies_,\n self.freqsum_,\n self.freqsumsq_,\n self.variance_)\n\n def Token(self):\n return self.token_\n\n def Frequencies(self):\n return self.frequencies_\n\n def AddFrequency(self, f):\n self.frequencies_.append(f)\n\n def SetFrequencies(self, freq):\n self.frequencies_ = []\n for f in freq:\n self.frequencies_.append(f)\n\n def NormFrequencies(self):\n self.frequencies_ = [float(f) for f in self.frequencies_]\n sumfreqs = float(sum(self.frequencies_))\n if sumfreqs != 0.0:\n self.frequencies_ = [f/sumfreqs for f in self.frequencies_]\n\n def CalcFreqStats(self):\n n = len(self.frequencies_)\n self.freqsum_ = float(sum(self.frequencies_))\n self.freqsumsq_ = SumProd(self.frequencies_, self.frequencies_)\n self.variance_ = self.freqsumsq_/n - (self.freqsum_**2)/(n**2)\n\n def FreqSum(self):\n return self.freqsum_\n \n def FreqVariance(self):\n return self.variance_\n\nclass DocTokenStats:\n \"\"\"Holder for Doclist-specific token statistics, such as frequency\n counts. Also allows for calculation of pairwise comparison metrics\n such as Pearson's correlation.\n \"\"\"\n def __init__(self, doclist=None):\n if doclist is None:\n self.doclist_ = documents.Doclist()\n else: self.doclist_ = doclist\n self.n_ = len(self.doclist_.Docs())\n self.tokstats_ = {}\n\n def InitTokenStats(self, tok):\n tstats = TokenFreqStats(tok)\n tfreq = []\n for doc in self.doclist_.Docs():\n c = 0\n for lang in doc.Langs():\n if tok.LangId() != lang.Id(): continue\n tmptok = lang.MatchToken(tok)\n if tmptok is not None:\n c += tmptok.Count()\n tfreq.append(c)\n tstats.SetFrequencies(tfreq)\n tstats.NormFrequencies()\n tstats.CalcFreqStats()\n self.tokstats_[tok.EncodeForHash()] = tstats\n return tstats\n\n def AddTokenStats(self, tstats):\n tokhash = tstats.Token().EncodeForHash()\n if tokhash not in self.tokstats_:\n self.tokstats_[tokhash] = tstats\n\n def GetTokenStats(self, tok):\n try: return self.tokstats_[tok.EncodeForHash()]\n except KeyError: return self.InitTokenStats(tok)\n\n def TokenStats(self):\n return self.tokstats_.values()\n\n def SetN(self, n):\n self.n_ = n\n\n def GetN(self):\n return self.n_\n\n def PearsonsCorrelation(self, token1, token2):\n stats1 = self.GetTokenStats(token1)\n stats2 = self.GetTokenStats(token2)\n freq1 = stats1.Frequencies()\n freq2 = stats2.Frequencies()\n sumxy = sum(map(lambda x, y: x * y, freq1, freq2))\n covxy = sumxy/float(self.n_) - \\\n (stats1.FreqSum()*stats2.FreqSum())/float(self.n_**2)\n try:\n rho = covxy/sqrt(stats1.FreqVariance()*stats2.FreqVariance())\n except ZeroDivisionError:\n rho = 0.0\n #print x.String(),y.String(),sumx2,sumy2,varx,vary,sumxy,covxy,rho\n return rho\n\nclass Lang:\n \"\"\"Holder for tokens in a language.\n \"\"\"\n def __init__(self):\n self.id_ = ''\n self.tokens_ = []\n\n def XmlEncode(self):\n if len(self.tokens_) == 0: return ''\n xml_string_ = '<lang id=\"%s\">\\n%s\\n%s</lang>'\n xml_tokens = []\n for token_ in self.Tokens():\n xml_tokens.append(token_.XmlEncode())\n xml_result = xml_string_ % (self.id_, '\\n'.join(xml_tokens),\n LANG_INDENT_)\n return LANG_INDENT_ + xml_result\n\n def Id(self):\n return self.id_\n\n def SetId(self, id):\n self.id_ = id.encode('utf-8')\n\n def Tokens(self):\n return self.tokens_\n\n def SetTokens(self, tokens):\n self.tokens_ = []\n for t in tokens:\n self.AddToken(t)\n\n def AddToken(self, token, merge=False):\n \"\"\"If an identical token already exists in dictionary,\n will merge tokens and cumulate their counts. Checks to\n see that morphology and pronunciations are identical,\n otherwise the tokens will not be merged.\n \"\"\"\n token.SetLangId(self.id_)\n if not merge:\n self.tokens_.append(token)\n else:\n exists = self.MatchToken(token)\n if exists is None:\n self.tokens_.append(token)\n else:\n exists.IncrementCount(token.Count())\n\n def MatchToken(self, token):\n try:\n i = self.tokens_.index(token)\n return self.tokens_[i]\n except ValueError:\n return None\n\n def CompactTokens(self):\n \"\"\"Merge identical tokens and cumulate their counts. Checks to see\n that morphology and pronunciations are identical, otherwise the\n tokens will not be merged.\n \"\"\"\n map = {}\n for token_ in self.tokens_:\n hash_string = token_.EncodeForHash()\n try: map[hash_string].append(token_)\n except KeyError: map[hash_string] = [token_]\n ntokens = []\n keys = map.keys()\n keys.sort()\n for k in keys:\n token_ = map[k][0]\n for otoken in map[k][1:]:\n token_.IncrementCount(otoken.Count())\n ntokens.append(token_)\n self.tokens_ = ntokens\n\n",
"<docstring token>\n__author__ = \"\"\"\[email protected] (Richard Sproat)\[email protected] (Kristy Hollingshead)\n\"\"\"\nimport xml.sax.saxutils\nfrom math import sqrt\nfrom __init__ import BASE_\nimport documents\nXML_HEADER_ = '<?xml version=\"1.0\" encoding=\"UTF-8\"?>'\nLANG_INDENT_ = ' ' * 4\nTOKEN_INDENT_ = ' ' * 6\n\n\ndef SumProd(x, y):\n return sum(map(lambda x, y: x * y, x, y))\n\n\nclass Token:\n \"\"\"A token is a term extracted from text, with attributes\n count, pronunciation, morphological decomposition\n \"\"\"\n\n def __init__(self, string):\n try:\n self.string_ = string.encode('utf-8')\n except UnicodeDecodeError:\n self.string_ = string\n self.count_ = 1\n self.morphs_ = []\n self.pronunciations_ = []\n self.frequencies_ = []\n self.langid_ = ''\n\n def __eq__(self, other):\n skey = self.EncodeForHash()\n okey = other.EncodeForHash()\n return skey == okey\n\n def __repr__(self):\n return '#<%s %d %s %s %s>' % (self.string_, self.count_, self.\n morphs_, self.pronunciations_, self.langid_)\n\n def XmlEncode(self):\n xml_string_ = '<token count=\"%d\" morphs=\"%s\" prons=\"%s\">%s</token>'\n morphs = ' '.join(self.morphs_)\n morphs = xml.sax.saxutils.escape(morphs)\n prons = ' ; '.join(self.pronunciations_)\n prons = xml.sax.saxutils.escape(prons)\n string_ = xml.sax.saxutils.escape(self.string_)\n xml_result = xml_string_ % (self.count_, morphs, prons, string_)\n return TOKEN_INDENT_ + xml_result\n\n def EncodeForHash(self):\n return '%s<%s><%s><%s>' % (self.String(), ' '.join(self.Morphs()),\n ' '.join(self.Pronunciations()), self.LangId())\n\n def String(self):\n return self.string_\n\n def SetCount(self, count):\n self.count_ = count\n\n def IncrementCount(self, increment=1):\n self.count_ += increment\n\n def Count(self):\n return self.count_\n\n def AddPronunciation(self, pron):\n if pron not in self.pronunciations_:\n try:\n self.pronunciations_.append(pron.encode('utf-8'))\n except UnicodeDecodeError:\n self.pronunciations_.append(pron)\n\n def Pronunciations(self):\n return self.pronunciations_\n\n def SetMorphs(self, morphs):\n self.morphs_ = []\n for m in morphs:\n try:\n self.morphs_.append(m.encode('utf-8'))\n except UnicodeDecodeError:\n self.morphs_.append(m)\n\n def Morphs(self):\n return self.morphs_\n\n def SetLangId(self, lang):\n self.langid_ = lang\n\n def LangId(self):\n return self.langid_\n\n\nclass TokenFreqStats:\n \"\"\"Holder for token frequency-statistics such as\n relative frequency-counts and variance.\n \"\"\"\n\n def __init__(self, tok):\n self.token_ = tok\n self.frequencies_ = []\n self.freqsum_ = 0\n self.freqsumsq_ = 0\n self.variance_ = 0\n\n def __repr__(self):\n return '#<%s %s %.6f %.6f %.6f>' % (self.token_, self.frequencies_,\n self.freqsum_, self.freqsumsq_, self.variance_)\n\n def Token(self):\n return self.token_\n\n def Frequencies(self):\n return self.frequencies_\n\n def AddFrequency(self, f):\n self.frequencies_.append(f)\n\n def SetFrequencies(self, freq):\n self.frequencies_ = []\n for f in freq:\n self.frequencies_.append(f)\n\n def NormFrequencies(self):\n self.frequencies_ = [float(f) for f in self.frequencies_]\n sumfreqs = float(sum(self.frequencies_))\n if sumfreqs != 0.0:\n self.frequencies_ = [(f / sumfreqs) for f in self.frequencies_]\n\n def CalcFreqStats(self):\n n = len(self.frequencies_)\n self.freqsum_ = float(sum(self.frequencies_))\n self.freqsumsq_ = SumProd(self.frequencies_, self.frequencies_)\n self.variance_ = self.freqsumsq_ / n - self.freqsum_ ** 2 / n ** 2\n\n def FreqSum(self):\n return self.freqsum_\n\n def FreqVariance(self):\n return self.variance_\n\n\nclass DocTokenStats:\n \"\"\"Holder for Doclist-specific token statistics, such as frequency\n counts. Also allows for calculation of pairwise comparison metrics\n such as Pearson's correlation.\n \"\"\"\n\n def __init__(self, doclist=None):\n if doclist is None:\n self.doclist_ = documents.Doclist()\n else:\n self.doclist_ = doclist\n self.n_ = len(self.doclist_.Docs())\n self.tokstats_ = {}\n\n def InitTokenStats(self, tok):\n tstats = TokenFreqStats(tok)\n tfreq = []\n for doc in self.doclist_.Docs():\n c = 0\n for lang in doc.Langs():\n if tok.LangId() != lang.Id():\n continue\n tmptok = lang.MatchToken(tok)\n if tmptok is not None:\n c += tmptok.Count()\n tfreq.append(c)\n tstats.SetFrequencies(tfreq)\n tstats.NormFrequencies()\n tstats.CalcFreqStats()\n self.tokstats_[tok.EncodeForHash()] = tstats\n return tstats\n\n def AddTokenStats(self, tstats):\n tokhash = tstats.Token().EncodeForHash()\n if tokhash not in self.tokstats_:\n self.tokstats_[tokhash] = tstats\n\n def GetTokenStats(self, tok):\n try:\n return self.tokstats_[tok.EncodeForHash()]\n except KeyError:\n return self.InitTokenStats(tok)\n\n def TokenStats(self):\n return self.tokstats_.values()\n\n def SetN(self, n):\n self.n_ = n\n\n def GetN(self):\n return self.n_\n\n def PearsonsCorrelation(self, token1, token2):\n stats1 = self.GetTokenStats(token1)\n stats2 = self.GetTokenStats(token2)\n freq1 = stats1.Frequencies()\n freq2 = stats2.Frequencies()\n sumxy = sum(map(lambda x, y: x * y, freq1, freq2))\n covxy = sumxy / float(self.n_) - stats1.FreqSum() * stats2.FreqSum(\n ) / float(self.n_ ** 2)\n try:\n rho = covxy / sqrt(stats1.FreqVariance() * stats2.FreqVariance())\n except ZeroDivisionError:\n rho = 0.0\n return rho\n\n\nclass Lang:\n \"\"\"Holder for tokens in a language.\n \"\"\"\n\n def __init__(self):\n self.id_ = ''\n self.tokens_ = []\n\n def XmlEncode(self):\n if len(self.tokens_) == 0:\n return ''\n xml_string_ = '<lang id=\"%s\">\\n%s\\n%s</lang>'\n xml_tokens = []\n for token_ in self.Tokens():\n xml_tokens.append(token_.XmlEncode())\n xml_result = xml_string_ % (self.id_, '\\n'.join(xml_tokens),\n LANG_INDENT_)\n return LANG_INDENT_ + xml_result\n\n def Id(self):\n return self.id_\n\n def SetId(self, id):\n self.id_ = id.encode('utf-8')\n\n def Tokens(self):\n return self.tokens_\n\n def SetTokens(self, tokens):\n self.tokens_ = []\n for t in tokens:\n self.AddToken(t)\n\n def AddToken(self, token, merge=False):\n \"\"\"If an identical token already exists in dictionary,\n will merge tokens and cumulate their counts. Checks to\n see that morphology and pronunciations are identical,\n otherwise the tokens will not be merged.\n \"\"\"\n token.SetLangId(self.id_)\n if not merge:\n self.tokens_.append(token)\n else:\n exists = self.MatchToken(token)\n if exists is None:\n self.tokens_.append(token)\n else:\n exists.IncrementCount(token.Count())\n\n def MatchToken(self, token):\n try:\n i = self.tokens_.index(token)\n return self.tokens_[i]\n except ValueError:\n return None\n\n def CompactTokens(self):\n \"\"\"Merge identical tokens and cumulate their counts. Checks to see\n that morphology and pronunciations are identical, otherwise the\n tokens will not be merged.\n \"\"\"\n map = {}\n for token_ in self.tokens_:\n hash_string = token_.EncodeForHash()\n try:\n map[hash_string].append(token_)\n except KeyError:\n map[hash_string] = [token_]\n ntokens = []\n keys = map.keys()\n keys.sort()\n for k in keys:\n token_ = map[k][0]\n for otoken in map[k][1:]:\n token_.IncrementCount(otoken.Count())\n ntokens.append(token_)\n self.tokens_ = ntokens\n",
"<docstring token>\n__author__ = \"\"\"\[email protected] (Richard Sproat)\[email protected] (Kristy Hollingshead)\n\"\"\"\n<import token>\nXML_HEADER_ = '<?xml version=\"1.0\" encoding=\"UTF-8\"?>'\nLANG_INDENT_ = ' ' * 4\nTOKEN_INDENT_ = ' ' * 6\n\n\ndef SumProd(x, y):\n return sum(map(lambda x, y: x * y, x, y))\n\n\nclass Token:\n \"\"\"A token is a term extracted from text, with attributes\n count, pronunciation, morphological decomposition\n \"\"\"\n\n def __init__(self, string):\n try:\n self.string_ = string.encode('utf-8')\n except UnicodeDecodeError:\n self.string_ = string\n self.count_ = 1\n self.morphs_ = []\n self.pronunciations_ = []\n self.frequencies_ = []\n self.langid_ = ''\n\n def __eq__(self, other):\n skey = self.EncodeForHash()\n okey = other.EncodeForHash()\n return skey == okey\n\n def __repr__(self):\n return '#<%s %d %s %s %s>' % (self.string_, self.count_, self.\n morphs_, self.pronunciations_, self.langid_)\n\n def XmlEncode(self):\n xml_string_ = '<token count=\"%d\" morphs=\"%s\" prons=\"%s\">%s</token>'\n morphs = ' '.join(self.morphs_)\n morphs = xml.sax.saxutils.escape(morphs)\n prons = ' ; '.join(self.pronunciations_)\n prons = xml.sax.saxutils.escape(prons)\n string_ = xml.sax.saxutils.escape(self.string_)\n xml_result = xml_string_ % (self.count_, morphs, prons, string_)\n return TOKEN_INDENT_ + xml_result\n\n def EncodeForHash(self):\n return '%s<%s><%s><%s>' % (self.String(), ' '.join(self.Morphs()),\n ' '.join(self.Pronunciations()), self.LangId())\n\n def String(self):\n return self.string_\n\n def SetCount(self, count):\n self.count_ = count\n\n def IncrementCount(self, increment=1):\n self.count_ += increment\n\n def Count(self):\n return self.count_\n\n def AddPronunciation(self, pron):\n if pron not in self.pronunciations_:\n try:\n self.pronunciations_.append(pron.encode('utf-8'))\n except UnicodeDecodeError:\n self.pronunciations_.append(pron)\n\n def Pronunciations(self):\n return self.pronunciations_\n\n def SetMorphs(self, morphs):\n self.morphs_ = []\n for m in morphs:\n try:\n self.morphs_.append(m.encode('utf-8'))\n except UnicodeDecodeError:\n self.morphs_.append(m)\n\n def Morphs(self):\n return self.morphs_\n\n def SetLangId(self, lang):\n self.langid_ = lang\n\n def LangId(self):\n return self.langid_\n\n\nclass TokenFreqStats:\n \"\"\"Holder for token frequency-statistics such as\n relative frequency-counts and variance.\n \"\"\"\n\n def __init__(self, tok):\n self.token_ = tok\n self.frequencies_ = []\n self.freqsum_ = 0\n self.freqsumsq_ = 0\n self.variance_ = 0\n\n def __repr__(self):\n return '#<%s %s %.6f %.6f %.6f>' % (self.token_, self.frequencies_,\n self.freqsum_, self.freqsumsq_, self.variance_)\n\n def Token(self):\n return self.token_\n\n def Frequencies(self):\n return self.frequencies_\n\n def AddFrequency(self, f):\n self.frequencies_.append(f)\n\n def SetFrequencies(self, freq):\n self.frequencies_ = []\n for f in freq:\n self.frequencies_.append(f)\n\n def NormFrequencies(self):\n self.frequencies_ = [float(f) for f in self.frequencies_]\n sumfreqs = float(sum(self.frequencies_))\n if sumfreqs != 0.0:\n self.frequencies_ = [(f / sumfreqs) for f in self.frequencies_]\n\n def CalcFreqStats(self):\n n = len(self.frequencies_)\n self.freqsum_ = float(sum(self.frequencies_))\n self.freqsumsq_ = SumProd(self.frequencies_, self.frequencies_)\n self.variance_ = self.freqsumsq_ / n - self.freqsum_ ** 2 / n ** 2\n\n def FreqSum(self):\n return self.freqsum_\n\n def FreqVariance(self):\n return self.variance_\n\n\nclass DocTokenStats:\n \"\"\"Holder for Doclist-specific token statistics, such as frequency\n counts. Also allows for calculation of pairwise comparison metrics\n such as Pearson's correlation.\n \"\"\"\n\n def __init__(self, doclist=None):\n if doclist is None:\n self.doclist_ = documents.Doclist()\n else:\n self.doclist_ = doclist\n self.n_ = len(self.doclist_.Docs())\n self.tokstats_ = {}\n\n def InitTokenStats(self, tok):\n tstats = TokenFreqStats(tok)\n tfreq = []\n for doc in self.doclist_.Docs():\n c = 0\n for lang in doc.Langs():\n if tok.LangId() != lang.Id():\n continue\n tmptok = lang.MatchToken(tok)\n if tmptok is not None:\n c += tmptok.Count()\n tfreq.append(c)\n tstats.SetFrequencies(tfreq)\n tstats.NormFrequencies()\n tstats.CalcFreqStats()\n self.tokstats_[tok.EncodeForHash()] = tstats\n return tstats\n\n def AddTokenStats(self, tstats):\n tokhash = tstats.Token().EncodeForHash()\n if tokhash not in self.tokstats_:\n self.tokstats_[tokhash] = tstats\n\n def GetTokenStats(self, tok):\n try:\n return self.tokstats_[tok.EncodeForHash()]\n except KeyError:\n return self.InitTokenStats(tok)\n\n def TokenStats(self):\n return self.tokstats_.values()\n\n def SetN(self, n):\n self.n_ = n\n\n def GetN(self):\n return self.n_\n\n def PearsonsCorrelation(self, token1, token2):\n stats1 = self.GetTokenStats(token1)\n stats2 = self.GetTokenStats(token2)\n freq1 = stats1.Frequencies()\n freq2 = stats2.Frequencies()\n sumxy = sum(map(lambda x, y: x * y, freq1, freq2))\n covxy = sumxy / float(self.n_) - stats1.FreqSum() * stats2.FreqSum(\n ) / float(self.n_ ** 2)\n try:\n rho = covxy / sqrt(stats1.FreqVariance() * stats2.FreqVariance())\n except ZeroDivisionError:\n rho = 0.0\n return rho\n\n\nclass Lang:\n \"\"\"Holder for tokens in a language.\n \"\"\"\n\n def __init__(self):\n self.id_ = ''\n self.tokens_ = []\n\n def XmlEncode(self):\n if len(self.tokens_) == 0:\n return ''\n xml_string_ = '<lang id=\"%s\">\\n%s\\n%s</lang>'\n xml_tokens = []\n for token_ in self.Tokens():\n xml_tokens.append(token_.XmlEncode())\n xml_result = xml_string_ % (self.id_, '\\n'.join(xml_tokens),\n LANG_INDENT_)\n return LANG_INDENT_ + xml_result\n\n def Id(self):\n return self.id_\n\n def SetId(self, id):\n self.id_ = id.encode('utf-8')\n\n def Tokens(self):\n return self.tokens_\n\n def SetTokens(self, tokens):\n self.tokens_ = []\n for t in tokens:\n self.AddToken(t)\n\n def AddToken(self, token, merge=False):\n \"\"\"If an identical token already exists in dictionary,\n will merge tokens and cumulate their counts. Checks to\n see that morphology and pronunciations are identical,\n otherwise the tokens will not be merged.\n \"\"\"\n token.SetLangId(self.id_)\n if not merge:\n self.tokens_.append(token)\n else:\n exists = self.MatchToken(token)\n if exists is None:\n self.tokens_.append(token)\n else:\n exists.IncrementCount(token.Count())\n\n def MatchToken(self, token):\n try:\n i = self.tokens_.index(token)\n return self.tokens_[i]\n except ValueError:\n return None\n\n def CompactTokens(self):\n \"\"\"Merge identical tokens and cumulate their counts. Checks to see\n that morphology and pronunciations are identical, otherwise the\n tokens will not be merged.\n \"\"\"\n map = {}\n for token_ in self.tokens_:\n hash_string = token_.EncodeForHash()\n try:\n map[hash_string].append(token_)\n except KeyError:\n map[hash_string] = [token_]\n ntokens = []\n keys = map.keys()\n keys.sort()\n for k in keys:\n token_ = map[k][0]\n for otoken in map[k][1:]:\n token_.IncrementCount(otoken.Count())\n ntokens.append(token_)\n self.tokens_ = ntokens\n",
"<docstring token>\n<assignment token>\n<import token>\n<assignment token>\n\n\ndef SumProd(x, y):\n return sum(map(lambda x, y: x * y, x, y))\n\n\nclass Token:\n \"\"\"A token is a term extracted from text, with attributes\n count, pronunciation, morphological decomposition\n \"\"\"\n\n def __init__(self, string):\n try:\n self.string_ = string.encode('utf-8')\n except UnicodeDecodeError:\n self.string_ = string\n self.count_ = 1\n self.morphs_ = []\n self.pronunciations_ = []\n self.frequencies_ = []\n self.langid_ = ''\n\n def __eq__(self, other):\n skey = self.EncodeForHash()\n okey = other.EncodeForHash()\n return skey == okey\n\n def __repr__(self):\n return '#<%s %d %s %s %s>' % (self.string_, self.count_, self.\n morphs_, self.pronunciations_, self.langid_)\n\n def XmlEncode(self):\n xml_string_ = '<token count=\"%d\" morphs=\"%s\" prons=\"%s\">%s</token>'\n morphs = ' '.join(self.morphs_)\n morphs = xml.sax.saxutils.escape(morphs)\n prons = ' ; '.join(self.pronunciations_)\n prons = xml.sax.saxutils.escape(prons)\n string_ = xml.sax.saxutils.escape(self.string_)\n xml_result = xml_string_ % (self.count_, morphs, prons, string_)\n return TOKEN_INDENT_ + xml_result\n\n def EncodeForHash(self):\n return '%s<%s><%s><%s>' % (self.String(), ' '.join(self.Morphs()),\n ' '.join(self.Pronunciations()), self.LangId())\n\n def String(self):\n return self.string_\n\n def SetCount(self, count):\n self.count_ = count\n\n def IncrementCount(self, increment=1):\n self.count_ += increment\n\n def Count(self):\n return self.count_\n\n def AddPronunciation(self, pron):\n if pron not in self.pronunciations_:\n try:\n self.pronunciations_.append(pron.encode('utf-8'))\n except UnicodeDecodeError:\n self.pronunciations_.append(pron)\n\n def Pronunciations(self):\n return self.pronunciations_\n\n def SetMorphs(self, morphs):\n self.morphs_ = []\n for m in morphs:\n try:\n self.morphs_.append(m.encode('utf-8'))\n except UnicodeDecodeError:\n self.morphs_.append(m)\n\n def Morphs(self):\n return self.morphs_\n\n def SetLangId(self, lang):\n self.langid_ = lang\n\n def LangId(self):\n return self.langid_\n\n\nclass TokenFreqStats:\n \"\"\"Holder for token frequency-statistics such as\n relative frequency-counts and variance.\n \"\"\"\n\n def __init__(self, tok):\n self.token_ = tok\n self.frequencies_ = []\n self.freqsum_ = 0\n self.freqsumsq_ = 0\n self.variance_ = 0\n\n def __repr__(self):\n return '#<%s %s %.6f %.6f %.6f>' % (self.token_, self.frequencies_,\n self.freqsum_, self.freqsumsq_, self.variance_)\n\n def Token(self):\n return self.token_\n\n def Frequencies(self):\n return self.frequencies_\n\n def AddFrequency(self, f):\n self.frequencies_.append(f)\n\n def SetFrequencies(self, freq):\n self.frequencies_ = []\n for f in freq:\n self.frequencies_.append(f)\n\n def NormFrequencies(self):\n self.frequencies_ = [float(f) for f in self.frequencies_]\n sumfreqs = float(sum(self.frequencies_))\n if sumfreqs != 0.0:\n self.frequencies_ = [(f / sumfreqs) for f in self.frequencies_]\n\n def CalcFreqStats(self):\n n = len(self.frequencies_)\n self.freqsum_ = float(sum(self.frequencies_))\n self.freqsumsq_ = SumProd(self.frequencies_, self.frequencies_)\n self.variance_ = self.freqsumsq_ / n - self.freqsum_ ** 2 / n ** 2\n\n def FreqSum(self):\n return self.freqsum_\n\n def FreqVariance(self):\n return self.variance_\n\n\nclass DocTokenStats:\n \"\"\"Holder for Doclist-specific token statistics, such as frequency\n counts. Also allows for calculation of pairwise comparison metrics\n such as Pearson's correlation.\n \"\"\"\n\n def __init__(self, doclist=None):\n if doclist is None:\n self.doclist_ = documents.Doclist()\n else:\n self.doclist_ = doclist\n self.n_ = len(self.doclist_.Docs())\n self.tokstats_ = {}\n\n def InitTokenStats(self, tok):\n tstats = TokenFreqStats(tok)\n tfreq = []\n for doc in self.doclist_.Docs():\n c = 0\n for lang in doc.Langs():\n if tok.LangId() != lang.Id():\n continue\n tmptok = lang.MatchToken(tok)\n if tmptok is not None:\n c += tmptok.Count()\n tfreq.append(c)\n tstats.SetFrequencies(tfreq)\n tstats.NormFrequencies()\n tstats.CalcFreqStats()\n self.tokstats_[tok.EncodeForHash()] = tstats\n return tstats\n\n def AddTokenStats(self, tstats):\n tokhash = tstats.Token().EncodeForHash()\n if tokhash not in self.tokstats_:\n self.tokstats_[tokhash] = tstats\n\n def GetTokenStats(self, tok):\n try:\n return self.tokstats_[tok.EncodeForHash()]\n except KeyError:\n return self.InitTokenStats(tok)\n\n def TokenStats(self):\n return self.tokstats_.values()\n\n def SetN(self, n):\n self.n_ = n\n\n def GetN(self):\n return self.n_\n\n def PearsonsCorrelation(self, token1, token2):\n stats1 = self.GetTokenStats(token1)\n stats2 = self.GetTokenStats(token2)\n freq1 = stats1.Frequencies()\n freq2 = stats2.Frequencies()\n sumxy = sum(map(lambda x, y: x * y, freq1, freq2))\n covxy = sumxy / float(self.n_) - stats1.FreqSum() * stats2.FreqSum(\n ) / float(self.n_ ** 2)\n try:\n rho = covxy / sqrt(stats1.FreqVariance() * stats2.FreqVariance())\n except ZeroDivisionError:\n rho = 0.0\n return rho\n\n\nclass Lang:\n \"\"\"Holder for tokens in a language.\n \"\"\"\n\n def __init__(self):\n self.id_ = ''\n self.tokens_ = []\n\n def XmlEncode(self):\n if len(self.tokens_) == 0:\n return ''\n xml_string_ = '<lang id=\"%s\">\\n%s\\n%s</lang>'\n xml_tokens = []\n for token_ in self.Tokens():\n xml_tokens.append(token_.XmlEncode())\n xml_result = xml_string_ % (self.id_, '\\n'.join(xml_tokens),\n LANG_INDENT_)\n return LANG_INDENT_ + xml_result\n\n def Id(self):\n return self.id_\n\n def SetId(self, id):\n self.id_ = id.encode('utf-8')\n\n def Tokens(self):\n return self.tokens_\n\n def SetTokens(self, tokens):\n self.tokens_ = []\n for t in tokens:\n self.AddToken(t)\n\n def AddToken(self, token, merge=False):\n \"\"\"If an identical token already exists in dictionary,\n will merge tokens and cumulate their counts. Checks to\n see that morphology and pronunciations are identical,\n otherwise the tokens will not be merged.\n \"\"\"\n token.SetLangId(self.id_)\n if not merge:\n self.tokens_.append(token)\n else:\n exists = self.MatchToken(token)\n if exists is None:\n self.tokens_.append(token)\n else:\n exists.IncrementCount(token.Count())\n\n def MatchToken(self, token):\n try:\n i = self.tokens_.index(token)\n return self.tokens_[i]\n except ValueError:\n return None\n\n def CompactTokens(self):\n \"\"\"Merge identical tokens and cumulate their counts. Checks to see\n that morphology and pronunciations are identical, otherwise the\n tokens will not be merged.\n \"\"\"\n map = {}\n for token_ in self.tokens_:\n hash_string = token_.EncodeForHash()\n try:\n map[hash_string].append(token_)\n except KeyError:\n map[hash_string] = [token_]\n ntokens = []\n keys = map.keys()\n keys.sort()\n for k in keys:\n token_ = map[k][0]\n for otoken in map[k][1:]:\n token_.IncrementCount(otoken.Count())\n ntokens.append(token_)\n self.tokens_ = ntokens\n",
"<docstring token>\n<assignment token>\n<import token>\n<assignment token>\n<function token>\n\n\nclass Token:\n \"\"\"A token is a term extracted from text, with attributes\n count, pronunciation, morphological decomposition\n \"\"\"\n\n def __init__(self, string):\n try:\n self.string_ = string.encode('utf-8')\n except UnicodeDecodeError:\n self.string_ = string\n self.count_ = 1\n self.morphs_ = []\n self.pronunciations_ = []\n self.frequencies_ = []\n self.langid_ = ''\n\n def __eq__(self, other):\n skey = self.EncodeForHash()\n okey = other.EncodeForHash()\n return skey == okey\n\n def __repr__(self):\n return '#<%s %d %s %s %s>' % (self.string_, self.count_, self.\n morphs_, self.pronunciations_, self.langid_)\n\n def XmlEncode(self):\n xml_string_ = '<token count=\"%d\" morphs=\"%s\" prons=\"%s\">%s</token>'\n morphs = ' '.join(self.morphs_)\n morphs = xml.sax.saxutils.escape(morphs)\n prons = ' ; '.join(self.pronunciations_)\n prons = xml.sax.saxutils.escape(prons)\n string_ = xml.sax.saxutils.escape(self.string_)\n xml_result = xml_string_ % (self.count_, morphs, prons, string_)\n return TOKEN_INDENT_ + xml_result\n\n def EncodeForHash(self):\n return '%s<%s><%s><%s>' % (self.String(), ' '.join(self.Morphs()),\n ' '.join(self.Pronunciations()), self.LangId())\n\n def String(self):\n return self.string_\n\n def SetCount(self, count):\n self.count_ = count\n\n def IncrementCount(self, increment=1):\n self.count_ += increment\n\n def Count(self):\n return self.count_\n\n def AddPronunciation(self, pron):\n if pron not in self.pronunciations_:\n try:\n self.pronunciations_.append(pron.encode('utf-8'))\n except UnicodeDecodeError:\n self.pronunciations_.append(pron)\n\n def Pronunciations(self):\n return self.pronunciations_\n\n def SetMorphs(self, morphs):\n self.morphs_ = []\n for m in morphs:\n try:\n self.morphs_.append(m.encode('utf-8'))\n except UnicodeDecodeError:\n self.morphs_.append(m)\n\n def Morphs(self):\n return self.morphs_\n\n def SetLangId(self, lang):\n self.langid_ = lang\n\n def LangId(self):\n return self.langid_\n\n\nclass TokenFreqStats:\n \"\"\"Holder for token frequency-statistics such as\n relative frequency-counts and variance.\n \"\"\"\n\n def __init__(self, tok):\n self.token_ = tok\n self.frequencies_ = []\n self.freqsum_ = 0\n self.freqsumsq_ = 0\n self.variance_ = 0\n\n def __repr__(self):\n return '#<%s %s %.6f %.6f %.6f>' % (self.token_, self.frequencies_,\n self.freqsum_, self.freqsumsq_, self.variance_)\n\n def Token(self):\n return self.token_\n\n def Frequencies(self):\n return self.frequencies_\n\n def AddFrequency(self, f):\n self.frequencies_.append(f)\n\n def SetFrequencies(self, freq):\n self.frequencies_ = []\n for f in freq:\n self.frequencies_.append(f)\n\n def NormFrequencies(self):\n self.frequencies_ = [float(f) for f in self.frequencies_]\n sumfreqs = float(sum(self.frequencies_))\n if sumfreqs != 0.0:\n self.frequencies_ = [(f / sumfreqs) for f in self.frequencies_]\n\n def CalcFreqStats(self):\n n = len(self.frequencies_)\n self.freqsum_ = float(sum(self.frequencies_))\n self.freqsumsq_ = SumProd(self.frequencies_, self.frequencies_)\n self.variance_ = self.freqsumsq_ / n - self.freqsum_ ** 2 / n ** 2\n\n def FreqSum(self):\n return self.freqsum_\n\n def FreqVariance(self):\n return self.variance_\n\n\nclass DocTokenStats:\n \"\"\"Holder for Doclist-specific token statistics, such as frequency\n counts. Also allows for calculation of pairwise comparison metrics\n such as Pearson's correlation.\n \"\"\"\n\n def __init__(self, doclist=None):\n if doclist is None:\n self.doclist_ = documents.Doclist()\n else:\n self.doclist_ = doclist\n self.n_ = len(self.doclist_.Docs())\n self.tokstats_ = {}\n\n def InitTokenStats(self, tok):\n tstats = TokenFreqStats(tok)\n tfreq = []\n for doc in self.doclist_.Docs():\n c = 0\n for lang in doc.Langs():\n if tok.LangId() != lang.Id():\n continue\n tmptok = lang.MatchToken(tok)\n if tmptok is not None:\n c += tmptok.Count()\n tfreq.append(c)\n tstats.SetFrequencies(tfreq)\n tstats.NormFrequencies()\n tstats.CalcFreqStats()\n self.tokstats_[tok.EncodeForHash()] = tstats\n return tstats\n\n def AddTokenStats(self, tstats):\n tokhash = tstats.Token().EncodeForHash()\n if tokhash not in self.tokstats_:\n self.tokstats_[tokhash] = tstats\n\n def GetTokenStats(self, tok):\n try:\n return self.tokstats_[tok.EncodeForHash()]\n except KeyError:\n return self.InitTokenStats(tok)\n\n def TokenStats(self):\n return self.tokstats_.values()\n\n def SetN(self, n):\n self.n_ = n\n\n def GetN(self):\n return self.n_\n\n def PearsonsCorrelation(self, token1, token2):\n stats1 = self.GetTokenStats(token1)\n stats2 = self.GetTokenStats(token2)\n freq1 = stats1.Frequencies()\n freq2 = stats2.Frequencies()\n sumxy = sum(map(lambda x, y: x * y, freq1, freq2))\n covxy = sumxy / float(self.n_) - stats1.FreqSum() * stats2.FreqSum(\n ) / float(self.n_ ** 2)\n try:\n rho = covxy / sqrt(stats1.FreqVariance() * stats2.FreqVariance())\n except ZeroDivisionError:\n rho = 0.0\n return rho\n\n\nclass Lang:\n \"\"\"Holder for tokens in a language.\n \"\"\"\n\n def __init__(self):\n self.id_ = ''\n self.tokens_ = []\n\n def XmlEncode(self):\n if len(self.tokens_) == 0:\n return ''\n xml_string_ = '<lang id=\"%s\">\\n%s\\n%s</lang>'\n xml_tokens = []\n for token_ in self.Tokens():\n xml_tokens.append(token_.XmlEncode())\n xml_result = xml_string_ % (self.id_, '\\n'.join(xml_tokens),\n LANG_INDENT_)\n return LANG_INDENT_ + xml_result\n\n def Id(self):\n return self.id_\n\n def SetId(self, id):\n self.id_ = id.encode('utf-8')\n\n def Tokens(self):\n return self.tokens_\n\n def SetTokens(self, tokens):\n self.tokens_ = []\n for t in tokens:\n self.AddToken(t)\n\n def AddToken(self, token, merge=False):\n \"\"\"If an identical token already exists in dictionary,\n will merge tokens and cumulate their counts. Checks to\n see that morphology and pronunciations are identical,\n otherwise the tokens will not be merged.\n \"\"\"\n token.SetLangId(self.id_)\n if not merge:\n self.tokens_.append(token)\n else:\n exists = self.MatchToken(token)\n if exists is None:\n self.tokens_.append(token)\n else:\n exists.IncrementCount(token.Count())\n\n def MatchToken(self, token):\n try:\n i = self.tokens_.index(token)\n return self.tokens_[i]\n except ValueError:\n return None\n\n def CompactTokens(self):\n \"\"\"Merge identical tokens and cumulate their counts. Checks to see\n that morphology and pronunciations are identical, otherwise the\n tokens will not be merged.\n \"\"\"\n map = {}\n for token_ in self.tokens_:\n hash_string = token_.EncodeForHash()\n try:\n map[hash_string].append(token_)\n except KeyError:\n map[hash_string] = [token_]\n ntokens = []\n keys = map.keys()\n keys.sort()\n for k in keys:\n token_ = map[k][0]\n for otoken in map[k][1:]:\n token_.IncrementCount(otoken.Count())\n ntokens.append(token_)\n self.tokens_ = ntokens\n",
"<docstring token>\n<assignment token>\n<import token>\n<assignment token>\n<function token>\n\n\nclass Token:\n <docstring token>\n\n def __init__(self, string):\n try:\n self.string_ = string.encode('utf-8')\n except UnicodeDecodeError:\n self.string_ = string\n self.count_ = 1\n self.morphs_ = []\n self.pronunciations_ = []\n self.frequencies_ = []\n self.langid_ = ''\n\n def __eq__(self, other):\n skey = self.EncodeForHash()\n okey = other.EncodeForHash()\n return skey == okey\n\n def __repr__(self):\n return '#<%s %d %s %s %s>' % (self.string_, self.count_, self.\n morphs_, self.pronunciations_, self.langid_)\n\n def XmlEncode(self):\n xml_string_ = '<token count=\"%d\" morphs=\"%s\" prons=\"%s\">%s</token>'\n morphs = ' '.join(self.morphs_)\n morphs = xml.sax.saxutils.escape(morphs)\n prons = ' ; '.join(self.pronunciations_)\n prons = xml.sax.saxutils.escape(prons)\n string_ = xml.sax.saxutils.escape(self.string_)\n xml_result = xml_string_ % (self.count_, morphs, prons, string_)\n return TOKEN_INDENT_ + xml_result\n\n def EncodeForHash(self):\n return '%s<%s><%s><%s>' % (self.String(), ' '.join(self.Morphs()),\n ' '.join(self.Pronunciations()), self.LangId())\n\n def String(self):\n return self.string_\n\n def SetCount(self, count):\n self.count_ = count\n\n def IncrementCount(self, increment=1):\n self.count_ += increment\n\n def Count(self):\n return self.count_\n\n def AddPronunciation(self, pron):\n if pron not in self.pronunciations_:\n try:\n self.pronunciations_.append(pron.encode('utf-8'))\n except UnicodeDecodeError:\n self.pronunciations_.append(pron)\n\n def Pronunciations(self):\n return self.pronunciations_\n\n def SetMorphs(self, morphs):\n self.morphs_ = []\n for m in morphs:\n try:\n self.morphs_.append(m.encode('utf-8'))\n except UnicodeDecodeError:\n self.morphs_.append(m)\n\n def Morphs(self):\n return self.morphs_\n\n def SetLangId(self, lang):\n self.langid_ = lang\n\n def LangId(self):\n return self.langid_\n\n\nclass TokenFreqStats:\n \"\"\"Holder for token frequency-statistics such as\n relative frequency-counts and variance.\n \"\"\"\n\n def __init__(self, tok):\n self.token_ = tok\n self.frequencies_ = []\n self.freqsum_ = 0\n self.freqsumsq_ = 0\n self.variance_ = 0\n\n def __repr__(self):\n return '#<%s %s %.6f %.6f %.6f>' % (self.token_, self.frequencies_,\n self.freqsum_, self.freqsumsq_, self.variance_)\n\n def Token(self):\n return self.token_\n\n def Frequencies(self):\n return self.frequencies_\n\n def AddFrequency(self, f):\n self.frequencies_.append(f)\n\n def SetFrequencies(self, freq):\n self.frequencies_ = []\n for f in freq:\n self.frequencies_.append(f)\n\n def NormFrequencies(self):\n self.frequencies_ = [float(f) for f in self.frequencies_]\n sumfreqs = float(sum(self.frequencies_))\n if sumfreqs != 0.0:\n self.frequencies_ = [(f / sumfreqs) for f in self.frequencies_]\n\n def CalcFreqStats(self):\n n = len(self.frequencies_)\n self.freqsum_ = float(sum(self.frequencies_))\n self.freqsumsq_ = SumProd(self.frequencies_, self.frequencies_)\n self.variance_ = self.freqsumsq_ / n - self.freqsum_ ** 2 / n ** 2\n\n def FreqSum(self):\n return self.freqsum_\n\n def FreqVariance(self):\n return self.variance_\n\n\nclass DocTokenStats:\n \"\"\"Holder for Doclist-specific token statistics, such as frequency\n counts. Also allows for calculation of pairwise comparison metrics\n such as Pearson's correlation.\n \"\"\"\n\n def __init__(self, doclist=None):\n if doclist is None:\n self.doclist_ = documents.Doclist()\n else:\n self.doclist_ = doclist\n self.n_ = len(self.doclist_.Docs())\n self.tokstats_ = {}\n\n def InitTokenStats(self, tok):\n tstats = TokenFreqStats(tok)\n tfreq = []\n for doc in self.doclist_.Docs():\n c = 0\n for lang in doc.Langs():\n if tok.LangId() != lang.Id():\n continue\n tmptok = lang.MatchToken(tok)\n if tmptok is not None:\n c += tmptok.Count()\n tfreq.append(c)\n tstats.SetFrequencies(tfreq)\n tstats.NormFrequencies()\n tstats.CalcFreqStats()\n self.tokstats_[tok.EncodeForHash()] = tstats\n return tstats\n\n def AddTokenStats(self, tstats):\n tokhash = tstats.Token().EncodeForHash()\n if tokhash not in self.tokstats_:\n self.tokstats_[tokhash] = tstats\n\n def GetTokenStats(self, tok):\n try:\n return self.tokstats_[tok.EncodeForHash()]\n except KeyError:\n return self.InitTokenStats(tok)\n\n def TokenStats(self):\n return self.tokstats_.values()\n\n def SetN(self, n):\n self.n_ = n\n\n def GetN(self):\n return self.n_\n\n def PearsonsCorrelation(self, token1, token2):\n stats1 = self.GetTokenStats(token1)\n stats2 = self.GetTokenStats(token2)\n freq1 = stats1.Frequencies()\n freq2 = stats2.Frequencies()\n sumxy = sum(map(lambda x, y: x * y, freq1, freq2))\n covxy = sumxy / float(self.n_) - stats1.FreqSum() * stats2.FreqSum(\n ) / float(self.n_ ** 2)\n try:\n rho = covxy / sqrt(stats1.FreqVariance() * stats2.FreqVariance())\n except ZeroDivisionError:\n rho = 0.0\n return rho\n\n\nclass Lang:\n \"\"\"Holder for tokens in a language.\n \"\"\"\n\n def __init__(self):\n self.id_ = ''\n self.tokens_ = []\n\n def XmlEncode(self):\n if len(self.tokens_) == 0:\n return ''\n xml_string_ = '<lang id=\"%s\">\\n%s\\n%s</lang>'\n xml_tokens = []\n for token_ in self.Tokens():\n xml_tokens.append(token_.XmlEncode())\n xml_result = xml_string_ % (self.id_, '\\n'.join(xml_tokens),\n LANG_INDENT_)\n return LANG_INDENT_ + xml_result\n\n def Id(self):\n return self.id_\n\n def SetId(self, id):\n self.id_ = id.encode('utf-8')\n\n def Tokens(self):\n return self.tokens_\n\n def SetTokens(self, tokens):\n self.tokens_ = []\n for t in tokens:\n self.AddToken(t)\n\n def AddToken(self, token, merge=False):\n \"\"\"If an identical token already exists in dictionary,\n will merge tokens and cumulate their counts. Checks to\n see that morphology and pronunciations are identical,\n otherwise the tokens will not be merged.\n \"\"\"\n token.SetLangId(self.id_)\n if not merge:\n self.tokens_.append(token)\n else:\n exists = self.MatchToken(token)\n if exists is None:\n self.tokens_.append(token)\n else:\n exists.IncrementCount(token.Count())\n\n def MatchToken(self, token):\n try:\n i = self.tokens_.index(token)\n return self.tokens_[i]\n except ValueError:\n return None\n\n def CompactTokens(self):\n \"\"\"Merge identical tokens and cumulate their counts. Checks to see\n that morphology and pronunciations are identical, otherwise the\n tokens will not be merged.\n \"\"\"\n map = {}\n for token_ in self.tokens_:\n hash_string = token_.EncodeForHash()\n try:\n map[hash_string].append(token_)\n except KeyError:\n map[hash_string] = [token_]\n ntokens = []\n keys = map.keys()\n keys.sort()\n for k in keys:\n token_ = map[k][0]\n for otoken in map[k][1:]:\n token_.IncrementCount(otoken.Count())\n ntokens.append(token_)\n self.tokens_ = ntokens\n",
"<docstring token>\n<assignment token>\n<import token>\n<assignment token>\n<function token>\n\n\nclass Token:\n <docstring token>\n\n def __init__(self, string):\n try:\n self.string_ = string.encode('utf-8')\n except UnicodeDecodeError:\n self.string_ = string\n self.count_ = 1\n self.morphs_ = []\n self.pronunciations_ = []\n self.frequencies_ = []\n self.langid_ = ''\n <function token>\n\n def __repr__(self):\n return '#<%s %d %s %s %s>' % (self.string_, self.count_, self.\n morphs_, self.pronunciations_, self.langid_)\n\n def XmlEncode(self):\n xml_string_ = '<token count=\"%d\" morphs=\"%s\" prons=\"%s\">%s</token>'\n morphs = ' '.join(self.morphs_)\n morphs = xml.sax.saxutils.escape(morphs)\n prons = ' ; '.join(self.pronunciations_)\n prons = xml.sax.saxutils.escape(prons)\n string_ = xml.sax.saxutils.escape(self.string_)\n xml_result = xml_string_ % (self.count_, morphs, prons, string_)\n return TOKEN_INDENT_ + xml_result\n\n def EncodeForHash(self):\n return '%s<%s><%s><%s>' % (self.String(), ' '.join(self.Morphs()),\n ' '.join(self.Pronunciations()), self.LangId())\n\n def String(self):\n return self.string_\n\n def SetCount(self, count):\n self.count_ = count\n\n def IncrementCount(self, increment=1):\n self.count_ += increment\n\n def Count(self):\n return self.count_\n\n def AddPronunciation(self, pron):\n if pron not in self.pronunciations_:\n try:\n self.pronunciations_.append(pron.encode('utf-8'))\n except UnicodeDecodeError:\n self.pronunciations_.append(pron)\n\n def Pronunciations(self):\n return self.pronunciations_\n\n def SetMorphs(self, morphs):\n self.morphs_ = []\n for m in morphs:\n try:\n self.morphs_.append(m.encode('utf-8'))\n except UnicodeDecodeError:\n self.morphs_.append(m)\n\n def Morphs(self):\n return self.morphs_\n\n def SetLangId(self, lang):\n self.langid_ = lang\n\n def LangId(self):\n return self.langid_\n\n\nclass TokenFreqStats:\n \"\"\"Holder for token frequency-statistics such as\n relative frequency-counts and variance.\n \"\"\"\n\n def __init__(self, tok):\n self.token_ = tok\n self.frequencies_ = []\n self.freqsum_ = 0\n self.freqsumsq_ = 0\n self.variance_ = 0\n\n def __repr__(self):\n return '#<%s %s %.6f %.6f %.6f>' % (self.token_, self.frequencies_,\n self.freqsum_, self.freqsumsq_, self.variance_)\n\n def Token(self):\n return self.token_\n\n def Frequencies(self):\n return self.frequencies_\n\n def AddFrequency(self, f):\n self.frequencies_.append(f)\n\n def SetFrequencies(self, freq):\n self.frequencies_ = []\n for f in freq:\n self.frequencies_.append(f)\n\n def NormFrequencies(self):\n self.frequencies_ = [float(f) for f in self.frequencies_]\n sumfreqs = float(sum(self.frequencies_))\n if sumfreqs != 0.0:\n self.frequencies_ = [(f / sumfreqs) for f in self.frequencies_]\n\n def CalcFreqStats(self):\n n = len(self.frequencies_)\n self.freqsum_ = float(sum(self.frequencies_))\n self.freqsumsq_ = SumProd(self.frequencies_, self.frequencies_)\n self.variance_ = self.freqsumsq_ / n - self.freqsum_ ** 2 / n ** 2\n\n def FreqSum(self):\n return self.freqsum_\n\n def FreqVariance(self):\n return self.variance_\n\n\nclass DocTokenStats:\n \"\"\"Holder for Doclist-specific token statistics, such as frequency\n counts. Also allows for calculation of pairwise comparison metrics\n such as Pearson's correlation.\n \"\"\"\n\n def __init__(self, doclist=None):\n if doclist is None:\n self.doclist_ = documents.Doclist()\n else:\n self.doclist_ = doclist\n self.n_ = len(self.doclist_.Docs())\n self.tokstats_ = {}\n\n def InitTokenStats(self, tok):\n tstats = TokenFreqStats(tok)\n tfreq = []\n for doc in self.doclist_.Docs():\n c = 0\n for lang in doc.Langs():\n if tok.LangId() != lang.Id():\n continue\n tmptok = lang.MatchToken(tok)\n if tmptok is not None:\n c += tmptok.Count()\n tfreq.append(c)\n tstats.SetFrequencies(tfreq)\n tstats.NormFrequencies()\n tstats.CalcFreqStats()\n self.tokstats_[tok.EncodeForHash()] = tstats\n return tstats\n\n def AddTokenStats(self, tstats):\n tokhash = tstats.Token().EncodeForHash()\n if tokhash not in self.tokstats_:\n self.tokstats_[tokhash] = tstats\n\n def GetTokenStats(self, tok):\n try:\n return self.tokstats_[tok.EncodeForHash()]\n except KeyError:\n return self.InitTokenStats(tok)\n\n def TokenStats(self):\n return self.tokstats_.values()\n\n def SetN(self, n):\n self.n_ = n\n\n def GetN(self):\n return self.n_\n\n def PearsonsCorrelation(self, token1, token2):\n stats1 = self.GetTokenStats(token1)\n stats2 = self.GetTokenStats(token2)\n freq1 = stats1.Frequencies()\n freq2 = stats2.Frequencies()\n sumxy = sum(map(lambda x, y: x * y, freq1, freq2))\n covxy = sumxy / float(self.n_) - stats1.FreqSum() * stats2.FreqSum(\n ) / float(self.n_ ** 2)\n try:\n rho = covxy / sqrt(stats1.FreqVariance() * stats2.FreqVariance())\n except ZeroDivisionError:\n rho = 0.0\n return rho\n\n\nclass Lang:\n \"\"\"Holder for tokens in a language.\n \"\"\"\n\n def __init__(self):\n self.id_ = ''\n self.tokens_ = []\n\n def XmlEncode(self):\n if len(self.tokens_) == 0:\n return ''\n xml_string_ = '<lang id=\"%s\">\\n%s\\n%s</lang>'\n xml_tokens = []\n for token_ in self.Tokens():\n xml_tokens.append(token_.XmlEncode())\n xml_result = xml_string_ % (self.id_, '\\n'.join(xml_tokens),\n LANG_INDENT_)\n return LANG_INDENT_ + xml_result\n\n def Id(self):\n return self.id_\n\n def SetId(self, id):\n self.id_ = id.encode('utf-8')\n\n def Tokens(self):\n return self.tokens_\n\n def SetTokens(self, tokens):\n self.tokens_ = []\n for t in tokens:\n self.AddToken(t)\n\n def AddToken(self, token, merge=False):\n \"\"\"If an identical token already exists in dictionary,\n will merge tokens and cumulate their counts. Checks to\n see that morphology and pronunciations are identical,\n otherwise the tokens will not be merged.\n \"\"\"\n token.SetLangId(self.id_)\n if not merge:\n self.tokens_.append(token)\n else:\n exists = self.MatchToken(token)\n if exists is None:\n self.tokens_.append(token)\n else:\n exists.IncrementCount(token.Count())\n\n def MatchToken(self, token):\n try:\n i = self.tokens_.index(token)\n return self.tokens_[i]\n except ValueError:\n return None\n\n def CompactTokens(self):\n \"\"\"Merge identical tokens and cumulate their counts. Checks to see\n that morphology and pronunciations are identical, otherwise the\n tokens will not be merged.\n \"\"\"\n map = {}\n for token_ in self.tokens_:\n hash_string = token_.EncodeForHash()\n try:\n map[hash_string].append(token_)\n except KeyError:\n map[hash_string] = [token_]\n ntokens = []\n keys = map.keys()\n keys.sort()\n for k in keys:\n token_ = map[k][0]\n for otoken in map[k][1:]:\n token_.IncrementCount(otoken.Count())\n ntokens.append(token_)\n self.tokens_ = ntokens\n",
"<docstring token>\n<assignment token>\n<import token>\n<assignment token>\n<function token>\n\n\nclass Token:\n <docstring token>\n\n def __init__(self, string):\n try:\n self.string_ = string.encode('utf-8')\n except UnicodeDecodeError:\n self.string_ = string\n self.count_ = 1\n self.morphs_ = []\n self.pronunciations_ = []\n self.frequencies_ = []\n self.langid_ = ''\n <function token>\n\n def __repr__(self):\n return '#<%s %d %s %s %s>' % (self.string_, self.count_, self.\n morphs_, self.pronunciations_, self.langid_)\n\n def XmlEncode(self):\n xml_string_ = '<token count=\"%d\" morphs=\"%s\" prons=\"%s\">%s</token>'\n morphs = ' '.join(self.morphs_)\n morphs = xml.sax.saxutils.escape(morphs)\n prons = ' ; '.join(self.pronunciations_)\n prons = xml.sax.saxutils.escape(prons)\n string_ = xml.sax.saxutils.escape(self.string_)\n xml_result = xml_string_ % (self.count_, morphs, prons, string_)\n return TOKEN_INDENT_ + xml_result\n\n def EncodeForHash(self):\n return '%s<%s><%s><%s>' % (self.String(), ' '.join(self.Morphs()),\n ' '.join(self.Pronunciations()), self.LangId())\n\n def String(self):\n return self.string_\n\n def SetCount(self, count):\n self.count_ = count\n <function token>\n\n def Count(self):\n return self.count_\n\n def AddPronunciation(self, pron):\n if pron not in self.pronunciations_:\n try:\n self.pronunciations_.append(pron.encode('utf-8'))\n except UnicodeDecodeError:\n self.pronunciations_.append(pron)\n\n def Pronunciations(self):\n return self.pronunciations_\n\n def SetMorphs(self, morphs):\n self.morphs_ = []\n for m in morphs:\n try:\n self.morphs_.append(m.encode('utf-8'))\n except UnicodeDecodeError:\n self.morphs_.append(m)\n\n def Morphs(self):\n return self.morphs_\n\n def SetLangId(self, lang):\n self.langid_ = lang\n\n def LangId(self):\n return self.langid_\n\n\nclass TokenFreqStats:\n \"\"\"Holder for token frequency-statistics such as\n relative frequency-counts and variance.\n \"\"\"\n\n def __init__(self, tok):\n self.token_ = tok\n self.frequencies_ = []\n self.freqsum_ = 0\n self.freqsumsq_ = 0\n self.variance_ = 0\n\n def __repr__(self):\n return '#<%s %s %.6f %.6f %.6f>' % (self.token_, self.frequencies_,\n self.freqsum_, self.freqsumsq_, self.variance_)\n\n def Token(self):\n return self.token_\n\n def Frequencies(self):\n return self.frequencies_\n\n def AddFrequency(self, f):\n self.frequencies_.append(f)\n\n def SetFrequencies(self, freq):\n self.frequencies_ = []\n for f in freq:\n self.frequencies_.append(f)\n\n def NormFrequencies(self):\n self.frequencies_ = [float(f) for f in self.frequencies_]\n sumfreqs = float(sum(self.frequencies_))\n if sumfreqs != 0.0:\n self.frequencies_ = [(f / sumfreqs) for f in self.frequencies_]\n\n def CalcFreqStats(self):\n n = len(self.frequencies_)\n self.freqsum_ = float(sum(self.frequencies_))\n self.freqsumsq_ = SumProd(self.frequencies_, self.frequencies_)\n self.variance_ = self.freqsumsq_ / n - self.freqsum_ ** 2 / n ** 2\n\n def FreqSum(self):\n return self.freqsum_\n\n def FreqVariance(self):\n return self.variance_\n\n\nclass DocTokenStats:\n \"\"\"Holder for Doclist-specific token statistics, such as frequency\n counts. Also allows for calculation of pairwise comparison metrics\n such as Pearson's correlation.\n \"\"\"\n\n def __init__(self, doclist=None):\n if doclist is None:\n self.doclist_ = documents.Doclist()\n else:\n self.doclist_ = doclist\n self.n_ = len(self.doclist_.Docs())\n self.tokstats_ = {}\n\n def InitTokenStats(self, tok):\n tstats = TokenFreqStats(tok)\n tfreq = []\n for doc in self.doclist_.Docs():\n c = 0\n for lang in doc.Langs():\n if tok.LangId() != lang.Id():\n continue\n tmptok = lang.MatchToken(tok)\n if tmptok is not None:\n c += tmptok.Count()\n tfreq.append(c)\n tstats.SetFrequencies(tfreq)\n tstats.NormFrequencies()\n tstats.CalcFreqStats()\n self.tokstats_[tok.EncodeForHash()] = tstats\n return tstats\n\n def AddTokenStats(self, tstats):\n tokhash = tstats.Token().EncodeForHash()\n if tokhash not in self.tokstats_:\n self.tokstats_[tokhash] = tstats\n\n def GetTokenStats(self, tok):\n try:\n return self.tokstats_[tok.EncodeForHash()]\n except KeyError:\n return self.InitTokenStats(tok)\n\n def TokenStats(self):\n return self.tokstats_.values()\n\n def SetN(self, n):\n self.n_ = n\n\n def GetN(self):\n return self.n_\n\n def PearsonsCorrelation(self, token1, token2):\n stats1 = self.GetTokenStats(token1)\n stats2 = self.GetTokenStats(token2)\n freq1 = stats1.Frequencies()\n freq2 = stats2.Frequencies()\n sumxy = sum(map(lambda x, y: x * y, freq1, freq2))\n covxy = sumxy / float(self.n_) - stats1.FreqSum() * stats2.FreqSum(\n ) / float(self.n_ ** 2)\n try:\n rho = covxy / sqrt(stats1.FreqVariance() * stats2.FreqVariance())\n except ZeroDivisionError:\n rho = 0.0\n return rho\n\n\nclass Lang:\n \"\"\"Holder for tokens in a language.\n \"\"\"\n\n def __init__(self):\n self.id_ = ''\n self.tokens_ = []\n\n def XmlEncode(self):\n if len(self.tokens_) == 0:\n return ''\n xml_string_ = '<lang id=\"%s\">\\n%s\\n%s</lang>'\n xml_tokens = []\n for token_ in self.Tokens():\n xml_tokens.append(token_.XmlEncode())\n xml_result = xml_string_ % (self.id_, '\\n'.join(xml_tokens),\n LANG_INDENT_)\n return LANG_INDENT_ + xml_result\n\n def Id(self):\n return self.id_\n\n def SetId(self, id):\n self.id_ = id.encode('utf-8')\n\n def Tokens(self):\n return self.tokens_\n\n def SetTokens(self, tokens):\n self.tokens_ = []\n for t in tokens:\n self.AddToken(t)\n\n def AddToken(self, token, merge=False):\n \"\"\"If an identical token already exists in dictionary,\n will merge tokens and cumulate their counts. Checks to\n see that morphology and pronunciations are identical,\n otherwise the tokens will not be merged.\n \"\"\"\n token.SetLangId(self.id_)\n if not merge:\n self.tokens_.append(token)\n else:\n exists = self.MatchToken(token)\n if exists is None:\n self.tokens_.append(token)\n else:\n exists.IncrementCount(token.Count())\n\n def MatchToken(self, token):\n try:\n i = self.tokens_.index(token)\n return self.tokens_[i]\n except ValueError:\n return None\n\n def CompactTokens(self):\n \"\"\"Merge identical tokens and cumulate their counts. Checks to see\n that morphology and pronunciations are identical, otherwise the\n tokens will not be merged.\n \"\"\"\n map = {}\n for token_ in self.tokens_:\n hash_string = token_.EncodeForHash()\n try:\n map[hash_string].append(token_)\n except KeyError:\n map[hash_string] = [token_]\n ntokens = []\n keys = map.keys()\n keys.sort()\n for k in keys:\n token_ = map[k][0]\n for otoken in map[k][1:]:\n token_.IncrementCount(otoken.Count())\n ntokens.append(token_)\n self.tokens_ = ntokens\n",
"<docstring token>\n<assignment token>\n<import token>\n<assignment token>\n<function token>\n\n\nclass Token:\n <docstring token>\n\n def __init__(self, string):\n try:\n self.string_ = string.encode('utf-8')\n except UnicodeDecodeError:\n self.string_ = string\n self.count_ = 1\n self.morphs_ = []\n self.pronunciations_ = []\n self.frequencies_ = []\n self.langid_ = ''\n <function token>\n\n def __repr__(self):\n return '#<%s %d %s %s %s>' % (self.string_, self.count_, self.\n morphs_, self.pronunciations_, self.langid_)\n\n def XmlEncode(self):\n xml_string_ = '<token count=\"%d\" morphs=\"%s\" prons=\"%s\">%s</token>'\n morphs = ' '.join(self.morphs_)\n morphs = xml.sax.saxutils.escape(morphs)\n prons = ' ; '.join(self.pronunciations_)\n prons = xml.sax.saxutils.escape(prons)\n string_ = xml.sax.saxutils.escape(self.string_)\n xml_result = xml_string_ % (self.count_, morphs, prons, string_)\n return TOKEN_INDENT_ + xml_result\n\n def EncodeForHash(self):\n return '%s<%s><%s><%s>' % (self.String(), ' '.join(self.Morphs()),\n ' '.join(self.Pronunciations()), self.LangId())\n\n def String(self):\n return self.string_\n\n def SetCount(self, count):\n self.count_ = count\n <function token>\n\n def Count(self):\n return self.count_\n <function token>\n\n def Pronunciations(self):\n return self.pronunciations_\n\n def SetMorphs(self, morphs):\n self.morphs_ = []\n for m in morphs:\n try:\n self.morphs_.append(m.encode('utf-8'))\n except UnicodeDecodeError:\n self.morphs_.append(m)\n\n def Morphs(self):\n return self.morphs_\n\n def SetLangId(self, lang):\n self.langid_ = lang\n\n def LangId(self):\n return self.langid_\n\n\nclass TokenFreqStats:\n \"\"\"Holder for token frequency-statistics such as\n relative frequency-counts and variance.\n \"\"\"\n\n def __init__(self, tok):\n self.token_ = tok\n self.frequencies_ = []\n self.freqsum_ = 0\n self.freqsumsq_ = 0\n self.variance_ = 0\n\n def __repr__(self):\n return '#<%s %s %.6f %.6f %.6f>' % (self.token_, self.frequencies_,\n self.freqsum_, self.freqsumsq_, self.variance_)\n\n def Token(self):\n return self.token_\n\n def Frequencies(self):\n return self.frequencies_\n\n def AddFrequency(self, f):\n self.frequencies_.append(f)\n\n def SetFrequencies(self, freq):\n self.frequencies_ = []\n for f in freq:\n self.frequencies_.append(f)\n\n def NormFrequencies(self):\n self.frequencies_ = [float(f) for f in self.frequencies_]\n sumfreqs = float(sum(self.frequencies_))\n if sumfreqs != 0.0:\n self.frequencies_ = [(f / sumfreqs) for f in self.frequencies_]\n\n def CalcFreqStats(self):\n n = len(self.frequencies_)\n self.freqsum_ = float(sum(self.frequencies_))\n self.freqsumsq_ = SumProd(self.frequencies_, self.frequencies_)\n self.variance_ = self.freqsumsq_ / n - self.freqsum_ ** 2 / n ** 2\n\n def FreqSum(self):\n return self.freqsum_\n\n def FreqVariance(self):\n return self.variance_\n\n\nclass DocTokenStats:\n \"\"\"Holder for Doclist-specific token statistics, such as frequency\n counts. Also allows for calculation of pairwise comparison metrics\n such as Pearson's correlation.\n \"\"\"\n\n def __init__(self, doclist=None):\n if doclist is None:\n self.doclist_ = documents.Doclist()\n else:\n self.doclist_ = doclist\n self.n_ = len(self.doclist_.Docs())\n self.tokstats_ = {}\n\n def InitTokenStats(self, tok):\n tstats = TokenFreqStats(tok)\n tfreq = []\n for doc in self.doclist_.Docs():\n c = 0\n for lang in doc.Langs():\n if tok.LangId() != lang.Id():\n continue\n tmptok = lang.MatchToken(tok)\n if tmptok is not None:\n c += tmptok.Count()\n tfreq.append(c)\n tstats.SetFrequencies(tfreq)\n tstats.NormFrequencies()\n tstats.CalcFreqStats()\n self.tokstats_[tok.EncodeForHash()] = tstats\n return tstats\n\n def AddTokenStats(self, tstats):\n tokhash = tstats.Token().EncodeForHash()\n if tokhash not in self.tokstats_:\n self.tokstats_[tokhash] = tstats\n\n def GetTokenStats(self, tok):\n try:\n return self.tokstats_[tok.EncodeForHash()]\n except KeyError:\n return self.InitTokenStats(tok)\n\n def TokenStats(self):\n return self.tokstats_.values()\n\n def SetN(self, n):\n self.n_ = n\n\n def GetN(self):\n return self.n_\n\n def PearsonsCorrelation(self, token1, token2):\n stats1 = self.GetTokenStats(token1)\n stats2 = self.GetTokenStats(token2)\n freq1 = stats1.Frequencies()\n freq2 = stats2.Frequencies()\n sumxy = sum(map(lambda x, y: x * y, freq1, freq2))\n covxy = sumxy / float(self.n_) - stats1.FreqSum() * stats2.FreqSum(\n ) / float(self.n_ ** 2)\n try:\n rho = covxy / sqrt(stats1.FreqVariance() * stats2.FreqVariance())\n except ZeroDivisionError:\n rho = 0.0\n return rho\n\n\nclass Lang:\n \"\"\"Holder for tokens in a language.\n \"\"\"\n\n def __init__(self):\n self.id_ = ''\n self.tokens_ = []\n\n def XmlEncode(self):\n if len(self.tokens_) == 0:\n return ''\n xml_string_ = '<lang id=\"%s\">\\n%s\\n%s</lang>'\n xml_tokens = []\n for token_ in self.Tokens():\n xml_tokens.append(token_.XmlEncode())\n xml_result = xml_string_ % (self.id_, '\\n'.join(xml_tokens),\n LANG_INDENT_)\n return LANG_INDENT_ + xml_result\n\n def Id(self):\n return self.id_\n\n def SetId(self, id):\n self.id_ = id.encode('utf-8')\n\n def Tokens(self):\n return self.tokens_\n\n def SetTokens(self, tokens):\n self.tokens_ = []\n for t in tokens:\n self.AddToken(t)\n\n def AddToken(self, token, merge=False):\n \"\"\"If an identical token already exists in dictionary,\n will merge tokens and cumulate their counts. Checks to\n see that morphology and pronunciations are identical,\n otherwise the tokens will not be merged.\n \"\"\"\n token.SetLangId(self.id_)\n if not merge:\n self.tokens_.append(token)\n else:\n exists = self.MatchToken(token)\n if exists is None:\n self.tokens_.append(token)\n else:\n exists.IncrementCount(token.Count())\n\n def MatchToken(self, token):\n try:\n i = self.tokens_.index(token)\n return self.tokens_[i]\n except ValueError:\n return None\n\n def CompactTokens(self):\n \"\"\"Merge identical tokens and cumulate their counts. Checks to see\n that morphology and pronunciations are identical, otherwise the\n tokens will not be merged.\n \"\"\"\n map = {}\n for token_ in self.tokens_:\n hash_string = token_.EncodeForHash()\n try:\n map[hash_string].append(token_)\n except KeyError:\n map[hash_string] = [token_]\n ntokens = []\n keys = map.keys()\n keys.sort()\n for k in keys:\n token_ = map[k][0]\n for otoken in map[k][1:]:\n token_.IncrementCount(otoken.Count())\n ntokens.append(token_)\n self.tokens_ = ntokens\n",
"<docstring token>\n<assignment token>\n<import token>\n<assignment token>\n<function token>\n\n\nclass Token:\n <docstring token>\n\n def __init__(self, string):\n try:\n self.string_ = string.encode('utf-8')\n except UnicodeDecodeError:\n self.string_ = string\n self.count_ = 1\n self.morphs_ = []\n self.pronunciations_ = []\n self.frequencies_ = []\n self.langid_ = ''\n <function token>\n <function token>\n\n def XmlEncode(self):\n xml_string_ = '<token count=\"%d\" morphs=\"%s\" prons=\"%s\">%s</token>'\n morphs = ' '.join(self.morphs_)\n morphs = xml.sax.saxutils.escape(morphs)\n prons = ' ; '.join(self.pronunciations_)\n prons = xml.sax.saxutils.escape(prons)\n string_ = xml.sax.saxutils.escape(self.string_)\n xml_result = xml_string_ % (self.count_, morphs, prons, string_)\n return TOKEN_INDENT_ + xml_result\n\n def EncodeForHash(self):\n return '%s<%s><%s><%s>' % (self.String(), ' '.join(self.Morphs()),\n ' '.join(self.Pronunciations()), self.LangId())\n\n def String(self):\n return self.string_\n\n def SetCount(self, count):\n self.count_ = count\n <function token>\n\n def Count(self):\n return self.count_\n <function token>\n\n def Pronunciations(self):\n return self.pronunciations_\n\n def SetMorphs(self, morphs):\n self.morphs_ = []\n for m in morphs:\n try:\n self.morphs_.append(m.encode('utf-8'))\n except UnicodeDecodeError:\n self.morphs_.append(m)\n\n def Morphs(self):\n return self.morphs_\n\n def SetLangId(self, lang):\n self.langid_ = lang\n\n def LangId(self):\n return self.langid_\n\n\nclass TokenFreqStats:\n \"\"\"Holder for token frequency-statistics such as\n relative frequency-counts and variance.\n \"\"\"\n\n def __init__(self, tok):\n self.token_ = tok\n self.frequencies_ = []\n self.freqsum_ = 0\n self.freqsumsq_ = 0\n self.variance_ = 0\n\n def __repr__(self):\n return '#<%s %s %.6f %.6f %.6f>' % (self.token_, self.frequencies_,\n self.freqsum_, self.freqsumsq_, self.variance_)\n\n def Token(self):\n return self.token_\n\n def Frequencies(self):\n return self.frequencies_\n\n def AddFrequency(self, f):\n self.frequencies_.append(f)\n\n def SetFrequencies(self, freq):\n self.frequencies_ = []\n for f in freq:\n self.frequencies_.append(f)\n\n def NormFrequencies(self):\n self.frequencies_ = [float(f) for f in self.frequencies_]\n sumfreqs = float(sum(self.frequencies_))\n if sumfreqs != 0.0:\n self.frequencies_ = [(f / sumfreqs) for f in self.frequencies_]\n\n def CalcFreqStats(self):\n n = len(self.frequencies_)\n self.freqsum_ = float(sum(self.frequencies_))\n self.freqsumsq_ = SumProd(self.frequencies_, self.frequencies_)\n self.variance_ = self.freqsumsq_ / n - self.freqsum_ ** 2 / n ** 2\n\n def FreqSum(self):\n return self.freqsum_\n\n def FreqVariance(self):\n return self.variance_\n\n\nclass DocTokenStats:\n \"\"\"Holder for Doclist-specific token statistics, such as frequency\n counts. Also allows for calculation of pairwise comparison metrics\n such as Pearson's correlation.\n \"\"\"\n\n def __init__(self, doclist=None):\n if doclist is None:\n self.doclist_ = documents.Doclist()\n else:\n self.doclist_ = doclist\n self.n_ = len(self.doclist_.Docs())\n self.tokstats_ = {}\n\n def InitTokenStats(self, tok):\n tstats = TokenFreqStats(tok)\n tfreq = []\n for doc in self.doclist_.Docs():\n c = 0\n for lang in doc.Langs():\n if tok.LangId() != lang.Id():\n continue\n tmptok = lang.MatchToken(tok)\n if tmptok is not None:\n c += tmptok.Count()\n tfreq.append(c)\n tstats.SetFrequencies(tfreq)\n tstats.NormFrequencies()\n tstats.CalcFreqStats()\n self.tokstats_[tok.EncodeForHash()] = tstats\n return tstats\n\n def AddTokenStats(self, tstats):\n tokhash = tstats.Token().EncodeForHash()\n if tokhash not in self.tokstats_:\n self.tokstats_[tokhash] = tstats\n\n def GetTokenStats(self, tok):\n try:\n return self.tokstats_[tok.EncodeForHash()]\n except KeyError:\n return self.InitTokenStats(tok)\n\n def TokenStats(self):\n return self.tokstats_.values()\n\n def SetN(self, n):\n self.n_ = n\n\n def GetN(self):\n return self.n_\n\n def PearsonsCorrelation(self, token1, token2):\n stats1 = self.GetTokenStats(token1)\n stats2 = self.GetTokenStats(token2)\n freq1 = stats1.Frequencies()\n freq2 = stats2.Frequencies()\n sumxy = sum(map(lambda x, y: x * y, freq1, freq2))\n covxy = sumxy / float(self.n_) - stats1.FreqSum() * stats2.FreqSum(\n ) / float(self.n_ ** 2)\n try:\n rho = covxy / sqrt(stats1.FreqVariance() * stats2.FreqVariance())\n except ZeroDivisionError:\n rho = 0.0\n return rho\n\n\nclass Lang:\n \"\"\"Holder for tokens in a language.\n \"\"\"\n\n def __init__(self):\n self.id_ = ''\n self.tokens_ = []\n\n def XmlEncode(self):\n if len(self.tokens_) == 0:\n return ''\n xml_string_ = '<lang id=\"%s\">\\n%s\\n%s</lang>'\n xml_tokens = []\n for token_ in self.Tokens():\n xml_tokens.append(token_.XmlEncode())\n xml_result = xml_string_ % (self.id_, '\\n'.join(xml_tokens),\n LANG_INDENT_)\n return LANG_INDENT_ + xml_result\n\n def Id(self):\n return self.id_\n\n def SetId(self, id):\n self.id_ = id.encode('utf-8')\n\n def Tokens(self):\n return self.tokens_\n\n def SetTokens(self, tokens):\n self.tokens_ = []\n for t in tokens:\n self.AddToken(t)\n\n def AddToken(self, token, merge=False):\n \"\"\"If an identical token already exists in dictionary,\n will merge tokens and cumulate their counts. Checks to\n see that morphology and pronunciations are identical,\n otherwise the tokens will not be merged.\n \"\"\"\n token.SetLangId(self.id_)\n if not merge:\n self.tokens_.append(token)\n else:\n exists = self.MatchToken(token)\n if exists is None:\n self.tokens_.append(token)\n else:\n exists.IncrementCount(token.Count())\n\n def MatchToken(self, token):\n try:\n i = self.tokens_.index(token)\n return self.tokens_[i]\n except ValueError:\n return None\n\n def CompactTokens(self):\n \"\"\"Merge identical tokens and cumulate their counts. Checks to see\n that morphology and pronunciations are identical, otherwise the\n tokens will not be merged.\n \"\"\"\n map = {}\n for token_ in self.tokens_:\n hash_string = token_.EncodeForHash()\n try:\n map[hash_string].append(token_)\n except KeyError:\n map[hash_string] = [token_]\n ntokens = []\n keys = map.keys()\n keys.sort()\n for k in keys:\n token_ = map[k][0]\n for otoken in map[k][1:]:\n token_.IncrementCount(otoken.Count())\n ntokens.append(token_)\n self.tokens_ = ntokens\n",
"<docstring token>\n<assignment token>\n<import token>\n<assignment token>\n<function token>\n\n\nclass Token:\n <docstring token>\n\n def __init__(self, string):\n try:\n self.string_ = string.encode('utf-8')\n except UnicodeDecodeError:\n self.string_ = string\n self.count_ = 1\n self.morphs_ = []\n self.pronunciations_ = []\n self.frequencies_ = []\n self.langid_ = ''\n <function token>\n <function token>\n\n def XmlEncode(self):\n xml_string_ = '<token count=\"%d\" morphs=\"%s\" prons=\"%s\">%s</token>'\n morphs = ' '.join(self.morphs_)\n morphs = xml.sax.saxutils.escape(morphs)\n prons = ' ; '.join(self.pronunciations_)\n prons = xml.sax.saxutils.escape(prons)\n string_ = xml.sax.saxutils.escape(self.string_)\n xml_result = xml_string_ % (self.count_, morphs, prons, string_)\n return TOKEN_INDENT_ + xml_result\n <function token>\n\n def String(self):\n return self.string_\n\n def SetCount(self, count):\n self.count_ = count\n <function token>\n\n def Count(self):\n return self.count_\n <function token>\n\n def Pronunciations(self):\n return self.pronunciations_\n\n def SetMorphs(self, morphs):\n self.morphs_ = []\n for m in morphs:\n try:\n self.morphs_.append(m.encode('utf-8'))\n except UnicodeDecodeError:\n self.morphs_.append(m)\n\n def Morphs(self):\n return self.morphs_\n\n def SetLangId(self, lang):\n self.langid_ = lang\n\n def LangId(self):\n return self.langid_\n\n\nclass TokenFreqStats:\n \"\"\"Holder for token frequency-statistics such as\n relative frequency-counts and variance.\n \"\"\"\n\n def __init__(self, tok):\n self.token_ = tok\n self.frequencies_ = []\n self.freqsum_ = 0\n self.freqsumsq_ = 0\n self.variance_ = 0\n\n def __repr__(self):\n return '#<%s %s %.6f %.6f %.6f>' % (self.token_, self.frequencies_,\n self.freqsum_, self.freqsumsq_, self.variance_)\n\n def Token(self):\n return self.token_\n\n def Frequencies(self):\n return self.frequencies_\n\n def AddFrequency(self, f):\n self.frequencies_.append(f)\n\n def SetFrequencies(self, freq):\n self.frequencies_ = []\n for f in freq:\n self.frequencies_.append(f)\n\n def NormFrequencies(self):\n self.frequencies_ = [float(f) for f in self.frequencies_]\n sumfreqs = float(sum(self.frequencies_))\n if sumfreqs != 0.0:\n self.frequencies_ = [(f / sumfreqs) for f in self.frequencies_]\n\n def CalcFreqStats(self):\n n = len(self.frequencies_)\n self.freqsum_ = float(sum(self.frequencies_))\n self.freqsumsq_ = SumProd(self.frequencies_, self.frequencies_)\n self.variance_ = self.freqsumsq_ / n - self.freqsum_ ** 2 / n ** 2\n\n def FreqSum(self):\n return self.freqsum_\n\n def FreqVariance(self):\n return self.variance_\n\n\nclass DocTokenStats:\n \"\"\"Holder for Doclist-specific token statistics, such as frequency\n counts. Also allows for calculation of pairwise comparison metrics\n such as Pearson's correlation.\n \"\"\"\n\n def __init__(self, doclist=None):\n if doclist is None:\n self.doclist_ = documents.Doclist()\n else:\n self.doclist_ = doclist\n self.n_ = len(self.doclist_.Docs())\n self.tokstats_ = {}\n\n def InitTokenStats(self, tok):\n tstats = TokenFreqStats(tok)\n tfreq = []\n for doc in self.doclist_.Docs():\n c = 0\n for lang in doc.Langs():\n if tok.LangId() != lang.Id():\n continue\n tmptok = lang.MatchToken(tok)\n if tmptok is not None:\n c += tmptok.Count()\n tfreq.append(c)\n tstats.SetFrequencies(tfreq)\n tstats.NormFrequencies()\n tstats.CalcFreqStats()\n self.tokstats_[tok.EncodeForHash()] = tstats\n return tstats\n\n def AddTokenStats(self, tstats):\n tokhash = tstats.Token().EncodeForHash()\n if tokhash not in self.tokstats_:\n self.tokstats_[tokhash] = tstats\n\n def GetTokenStats(self, tok):\n try:\n return self.tokstats_[tok.EncodeForHash()]\n except KeyError:\n return self.InitTokenStats(tok)\n\n def TokenStats(self):\n return self.tokstats_.values()\n\n def SetN(self, n):\n self.n_ = n\n\n def GetN(self):\n return self.n_\n\n def PearsonsCorrelation(self, token1, token2):\n stats1 = self.GetTokenStats(token1)\n stats2 = self.GetTokenStats(token2)\n freq1 = stats1.Frequencies()\n freq2 = stats2.Frequencies()\n sumxy = sum(map(lambda x, y: x * y, freq1, freq2))\n covxy = sumxy / float(self.n_) - stats1.FreqSum() * stats2.FreqSum(\n ) / float(self.n_ ** 2)\n try:\n rho = covxy / sqrt(stats1.FreqVariance() * stats2.FreqVariance())\n except ZeroDivisionError:\n rho = 0.0\n return rho\n\n\nclass Lang:\n \"\"\"Holder for tokens in a language.\n \"\"\"\n\n def __init__(self):\n self.id_ = ''\n self.tokens_ = []\n\n def XmlEncode(self):\n if len(self.tokens_) == 0:\n return ''\n xml_string_ = '<lang id=\"%s\">\\n%s\\n%s</lang>'\n xml_tokens = []\n for token_ in self.Tokens():\n xml_tokens.append(token_.XmlEncode())\n xml_result = xml_string_ % (self.id_, '\\n'.join(xml_tokens),\n LANG_INDENT_)\n return LANG_INDENT_ + xml_result\n\n def Id(self):\n return self.id_\n\n def SetId(self, id):\n self.id_ = id.encode('utf-8')\n\n def Tokens(self):\n return self.tokens_\n\n def SetTokens(self, tokens):\n self.tokens_ = []\n for t in tokens:\n self.AddToken(t)\n\n def AddToken(self, token, merge=False):\n \"\"\"If an identical token already exists in dictionary,\n will merge tokens and cumulate their counts. Checks to\n see that morphology and pronunciations are identical,\n otherwise the tokens will not be merged.\n \"\"\"\n token.SetLangId(self.id_)\n if not merge:\n self.tokens_.append(token)\n else:\n exists = self.MatchToken(token)\n if exists is None:\n self.tokens_.append(token)\n else:\n exists.IncrementCount(token.Count())\n\n def MatchToken(self, token):\n try:\n i = self.tokens_.index(token)\n return self.tokens_[i]\n except ValueError:\n return None\n\n def CompactTokens(self):\n \"\"\"Merge identical tokens and cumulate their counts. Checks to see\n that morphology and pronunciations are identical, otherwise the\n tokens will not be merged.\n \"\"\"\n map = {}\n for token_ in self.tokens_:\n hash_string = token_.EncodeForHash()\n try:\n map[hash_string].append(token_)\n except KeyError:\n map[hash_string] = [token_]\n ntokens = []\n keys = map.keys()\n keys.sort()\n for k in keys:\n token_ = map[k][0]\n for otoken in map[k][1:]:\n token_.IncrementCount(otoken.Count())\n ntokens.append(token_)\n self.tokens_ = ntokens\n",
"<docstring token>\n<assignment token>\n<import token>\n<assignment token>\n<function token>\n\n\nclass Token:\n <docstring token>\n\n def __init__(self, string):\n try:\n self.string_ = string.encode('utf-8')\n except UnicodeDecodeError:\n self.string_ = string\n self.count_ = 1\n self.morphs_ = []\n self.pronunciations_ = []\n self.frequencies_ = []\n self.langid_ = ''\n <function token>\n <function token>\n\n def XmlEncode(self):\n xml_string_ = '<token count=\"%d\" morphs=\"%s\" prons=\"%s\">%s</token>'\n morphs = ' '.join(self.morphs_)\n morphs = xml.sax.saxutils.escape(morphs)\n prons = ' ; '.join(self.pronunciations_)\n prons = xml.sax.saxutils.escape(prons)\n string_ = xml.sax.saxutils.escape(self.string_)\n xml_result = xml_string_ % (self.count_, morphs, prons, string_)\n return TOKEN_INDENT_ + xml_result\n <function token>\n\n def String(self):\n return self.string_\n\n def SetCount(self, count):\n self.count_ = count\n <function token>\n <function token>\n <function token>\n\n def Pronunciations(self):\n return self.pronunciations_\n\n def SetMorphs(self, morphs):\n self.morphs_ = []\n for m in morphs:\n try:\n self.morphs_.append(m.encode('utf-8'))\n except UnicodeDecodeError:\n self.morphs_.append(m)\n\n def Morphs(self):\n return self.morphs_\n\n def SetLangId(self, lang):\n self.langid_ = lang\n\n def LangId(self):\n return self.langid_\n\n\nclass TokenFreqStats:\n \"\"\"Holder for token frequency-statistics such as\n relative frequency-counts and variance.\n \"\"\"\n\n def __init__(self, tok):\n self.token_ = tok\n self.frequencies_ = []\n self.freqsum_ = 0\n self.freqsumsq_ = 0\n self.variance_ = 0\n\n def __repr__(self):\n return '#<%s %s %.6f %.6f %.6f>' % (self.token_, self.frequencies_,\n self.freqsum_, self.freqsumsq_, self.variance_)\n\n def Token(self):\n return self.token_\n\n def Frequencies(self):\n return self.frequencies_\n\n def AddFrequency(self, f):\n self.frequencies_.append(f)\n\n def SetFrequencies(self, freq):\n self.frequencies_ = []\n for f in freq:\n self.frequencies_.append(f)\n\n def NormFrequencies(self):\n self.frequencies_ = [float(f) for f in self.frequencies_]\n sumfreqs = float(sum(self.frequencies_))\n if sumfreqs != 0.0:\n self.frequencies_ = [(f / sumfreqs) for f in self.frequencies_]\n\n def CalcFreqStats(self):\n n = len(self.frequencies_)\n self.freqsum_ = float(sum(self.frequencies_))\n self.freqsumsq_ = SumProd(self.frequencies_, self.frequencies_)\n self.variance_ = self.freqsumsq_ / n - self.freqsum_ ** 2 / n ** 2\n\n def FreqSum(self):\n return self.freqsum_\n\n def FreqVariance(self):\n return self.variance_\n\n\nclass DocTokenStats:\n \"\"\"Holder for Doclist-specific token statistics, such as frequency\n counts. Also allows for calculation of pairwise comparison metrics\n such as Pearson's correlation.\n \"\"\"\n\n def __init__(self, doclist=None):\n if doclist is None:\n self.doclist_ = documents.Doclist()\n else:\n self.doclist_ = doclist\n self.n_ = len(self.doclist_.Docs())\n self.tokstats_ = {}\n\n def InitTokenStats(self, tok):\n tstats = TokenFreqStats(tok)\n tfreq = []\n for doc in self.doclist_.Docs():\n c = 0\n for lang in doc.Langs():\n if tok.LangId() != lang.Id():\n continue\n tmptok = lang.MatchToken(tok)\n if tmptok is not None:\n c += tmptok.Count()\n tfreq.append(c)\n tstats.SetFrequencies(tfreq)\n tstats.NormFrequencies()\n tstats.CalcFreqStats()\n self.tokstats_[tok.EncodeForHash()] = tstats\n return tstats\n\n def AddTokenStats(self, tstats):\n tokhash = tstats.Token().EncodeForHash()\n if tokhash not in self.tokstats_:\n self.tokstats_[tokhash] = tstats\n\n def GetTokenStats(self, tok):\n try:\n return self.tokstats_[tok.EncodeForHash()]\n except KeyError:\n return self.InitTokenStats(tok)\n\n def TokenStats(self):\n return self.tokstats_.values()\n\n def SetN(self, n):\n self.n_ = n\n\n def GetN(self):\n return self.n_\n\n def PearsonsCorrelation(self, token1, token2):\n stats1 = self.GetTokenStats(token1)\n stats2 = self.GetTokenStats(token2)\n freq1 = stats1.Frequencies()\n freq2 = stats2.Frequencies()\n sumxy = sum(map(lambda x, y: x * y, freq1, freq2))\n covxy = sumxy / float(self.n_) - stats1.FreqSum() * stats2.FreqSum(\n ) / float(self.n_ ** 2)\n try:\n rho = covxy / sqrt(stats1.FreqVariance() * stats2.FreqVariance())\n except ZeroDivisionError:\n rho = 0.0\n return rho\n\n\nclass Lang:\n \"\"\"Holder for tokens in a language.\n \"\"\"\n\n def __init__(self):\n self.id_ = ''\n self.tokens_ = []\n\n def XmlEncode(self):\n if len(self.tokens_) == 0:\n return ''\n xml_string_ = '<lang id=\"%s\">\\n%s\\n%s</lang>'\n xml_tokens = []\n for token_ in self.Tokens():\n xml_tokens.append(token_.XmlEncode())\n xml_result = xml_string_ % (self.id_, '\\n'.join(xml_tokens),\n LANG_INDENT_)\n return LANG_INDENT_ + xml_result\n\n def Id(self):\n return self.id_\n\n def SetId(self, id):\n self.id_ = id.encode('utf-8')\n\n def Tokens(self):\n return self.tokens_\n\n def SetTokens(self, tokens):\n self.tokens_ = []\n for t in tokens:\n self.AddToken(t)\n\n def AddToken(self, token, merge=False):\n \"\"\"If an identical token already exists in dictionary,\n will merge tokens and cumulate their counts. Checks to\n see that morphology and pronunciations are identical,\n otherwise the tokens will not be merged.\n \"\"\"\n token.SetLangId(self.id_)\n if not merge:\n self.tokens_.append(token)\n else:\n exists = self.MatchToken(token)\n if exists is None:\n self.tokens_.append(token)\n else:\n exists.IncrementCount(token.Count())\n\n def MatchToken(self, token):\n try:\n i = self.tokens_.index(token)\n return self.tokens_[i]\n except ValueError:\n return None\n\n def CompactTokens(self):\n \"\"\"Merge identical tokens and cumulate their counts. Checks to see\n that morphology and pronunciations are identical, otherwise the\n tokens will not be merged.\n \"\"\"\n map = {}\n for token_ in self.tokens_:\n hash_string = token_.EncodeForHash()\n try:\n map[hash_string].append(token_)\n except KeyError:\n map[hash_string] = [token_]\n ntokens = []\n keys = map.keys()\n keys.sort()\n for k in keys:\n token_ = map[k][0]\n for otoken in map[k][1:]:\n token_.IncrementCount(otoken.Count())\n ntokens.append(token_)\n self.tokens_ = ntokens\n",
"<docstring token>\n<assignment token>\n<import token>\n<assignment token>\n<function token>\n\n\nclass Token:\n <docstring token>\n\n def __init__(self, string):\n try:\n self.string_ = string.encode('utf-8')\n except UnicodeDecodeError:\n self.string_ = string\n self.count_ = 1\n self.morphs_ = []\n self.pronunciations_ = []\n self.frequencies_ = []\n self.langid_ = ''\n <function token>\n <function token>\n\n def XmlEncode(self):\n xml_string_ = '<token count=\"%d\" morphs=\"%s\" prons=\"%s\">%s</token>'\n morphs = ' '.join(self.morphs_)\n morphs = xml.sax.saxutils.escape(morphs)\n prons = ' ; '.join(self.pronunciations_)\n prons = xml.sax.saxutils.escape(prons)\n string_ = xml.sax.saxutils.escape(self.string_)\n xml_result = xml_string_ % (self.count_, morphs, prons, string_)\n return TOKEN_INDENT_ + xml_result\n <function token>\n\n def String(self):\n return self.string_\n <function token>\n <function token>\n <function token>\n <function token>\n\n def Pronunciations(self):\n return self.pronunciations_\n\n def SetMorphs(self, morphs):\n self.morphs_ = []\n for m in morphs:\n try:\n self.morphs_.append(m.encode('utf-8'))\n except UnicodeDecodeError:\n self.morphs_.append(m)\n\n def Morphs(self):\n return self.morphs_\n\n def SetLangId(self, lang):\n self.langid_ = lang\n\n def LangId(self):\n return self.langid_\n\n\nclass TokenFreqStats:\n \"\"\"Holder for token frequency-statistics such as\n relative frequency-counts and variance.\n \"\"\"\n\n def __init__(self, tok):\n self.token_ = tok\n self.frequencies_ = []\n self.freqsum_ = 0\n self.freqsumsq_ = 0\n self.variance_ = 0\n\n def __repr__(self):\n return '#<%s %s %.6f %.6f %.6f>' % (self.token_, self.frequencies_,\n self.freqsum_, self.freqsumsq_, self.variance_)\n\n def Token(self):\n return self.token_\n\n def Frequencies(self):\n return self.frequencies_\n\n def AddFrequency(self, f):\n self.frequencies_.append(f)\n\n def SetFrequencies(self, freq):\n self.frequencies_ = []\n for f in freq:\n self.frequencies_.append(f)\n\n def NormFrequencies(self):\n self.frequencies_ = [float(f) for f in self.frequencies_]\n sumfreqs = float(sum(self.frequencies_))\n if sumfreqs != 0.0:\n self.frequencies_ = [(f / sumfreqs) for f in self.frequencies_]\n\n def CalcFreqStats(self):\n n = len(self.frequencies_)\n self.freqsum_ = float(sum(self.frequencies_))\n self.freqsumsq_ = SumProd(self.frequencies_, self.frequencies_)\n self.variance_ = self.freqsumsq_ / n - self.freqsum_ ** 2 / n ** 2\n\n def FreqSum(self):\n return self.freqsum_\n\n def FreqVariance(self):\n return self.variance_\n\n\nclass DocTokenStats:\n \"\"\"Holder for Doclist-specific token statistics, such as frequency\n counts. Also allows for calculation of pairwise comparison metrics\n such as Pearson's correlation.\n \"\"\"\n\n def __init__(self, doclist=None):\n if doclist is None:\n self.doclist_ = documents.Doclist()\n else:\n self.doclist_ = doclist\n self.n_ = len(self.doclist_.Docs())\n self.tokstats_ = {}\n\n def InitTokenStats(self, tok):\n tstats = TokenFreqStats(tok)\n tfreq = []\n for doc in self.doclist_.Docs():\n c = 0\n for lang in doc.Langs():\n if tok.LangId() != lang.Id():\n continue\n tmptok = lang.MatchToken(tok)\n if tmptok is not None:\n c += tmptok.Count()\n tfreq.append(c)\n tstats.SetFrequencies(tfreq)\n tstats.NormFrequencies()\n tstats.CalcFreqStats()\n self.tokstats_[tok.EncodeForHash()] = tstats\n return tstats\n\n def AddTokenStats(self, tstats):\n tokhash = tstats.Token().EncodeForHash()\n if tokhash not in self.tokstats_:\n self.tokstats_[tokhash] = tstats\n\n def GetTokenStats(self, tok):\n try:\n return self.tokstats_[tok.EncodeForHash()]\n except KeyError:\n return self.InitTokenStats(tok)\n\n def TokenStats(self):\n return self.tokstats_.values()\n\n def SetN(self, n):\n self.n_ = n\n\n def GetN(self):\n return self.n_\n\n def PearsonsCorrelation(self, token1, token2):\n stats1 = self.GetTokenStats(token1)\n stats2 = self.GetTokenStats(token2)\n freq1 = stats1.Frequencies()\n freq2 = stats2.Frequencies()\n sumxy = sum(map(lambda x, y: x * y, freq1, freq2))\n covxy = sumxy / float(self.n_) - stats1.FreqSum() * stats2.FreqSum(\n ) / float(self.n_ ** 2)\n try:\n rho = covxy / sqrt(stats1.FreqVariance() * stats2.FreqVariance())\n except ZeroDivisionError:\n rho = 0.0\n return rho\n\n\nclass Lang:\n \"\"\"Holder for tokens in a language.\n \"\"\"\n\n def __init__(self):\n self.id_ = ''\n self.tokens_ = []\n\n def XmlEncode(self):\n if len(self.tokens_) == 0:\n return ''\n xml_string_ = '<lang id=\"%s\">\\n%s\\n%s</lang>'\n xml_tokens = []\n for token_ in self.Tokens():\n xml_tokens.append(token_.XmlEncode())\n xml_result = xml_string_ % (self.id_, '\\n'.join(xml_tokens),\n LANG_INDENT_)\n return LANG_INDENT_ + xml_result\n\n def Id(self):\n return self.id_\n\n def SetId(self, id):\n self.id_ = id.encode('utf-8')\n\n def Tokens(self):\n return self.tokens_\n\n def SetTokens(self, tokens):\n self.tokens_ = []\n for t in tokens:\n self.AddToken(t)\n\n def AddToken(self, token, merge=False):\n \"\"\"If an identical token already exists in dictionary,\n will merge tokens and cumulate their counts. Checks to\n see that morphology and pronunciations are identical,\n otherwise the tokens will not be merged.\n \"\"\"\n token.SetLangId(self.id_)\n if not merge:\n self.tokens_.append(token)\n else:\n exists = self.MatchToken(token)\n if exists is None:\n self.tokens_.append(token)\n else:\n exists.IncrementCount(token.Count())\n\n def MatchToken(self, token):\n try:\n i = self.tokens_.index(token)\n return self.tokens_[i]\n except ValueError:\n return None\n\n def CompactTokens(self):\n \"\"\"Merge identical tokens and cumulate their counts. Checks to see\n that morphology and pronunciations are identical, otherwise the\n tokens will not be merged.\n \"\"\"\n map = {}\n for token_ in self.tokens_:\n hash_string = token_.EncodeForHash()\n try:\n map[hash_string].append(token_)\n except KeyError:\n map[hash_string] = [token_]\n ntokens = []\n keys = map.keys()\n keys.sort()\n for k in keys:\n token_ = map[k][0]\n for otoken in map[k][1:]:\n token_.IncrementCount(otoken.Count())\n ntokens.append(token_)\n self.tokens_ = ntokens\n",
"<docstring token>\n<assignment token>\n<import token>\n<assignment token>\n<function token>\n\n\nclass Token:\n <docstring token>\n\n def __init__(self, string):\n try:\n self.string_ = string.encode('utf-8')\n except UnicodeDecodeError:\n self.string_ = string\n self.count_ = 1\n self.morphs_ = []\n self.pronunciations_ = []\n self.frequencies_ = []\n self.langid_ = ''\n <function token>\n <function token>\n\n def XmlEncode(self):\n xml_string_ = '<token count=\"%d\" morphs=\"%s\" prons=\"%s\">%s</token>'\n morphs = ' '.join(self.morphs_)\n morphs = xml.sax.saxutils.escape(morphs)\n prons = ' ; '.join(self.pronunciations_)\n prons = xml.sax.saxutils.escape(prons)\n string_ = xml.sax.saxutils.escape(self.string_)\n xml_result = xml_string_ % (self.count_, morphs, prons, string_)\n return TOKEN_INDENT_ + xml_result\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def Pronunciations(self):\n return self.pronunciations_\n\n def SetMorphs(self, morphs):\n self.morphs_ = []\n for m in morphs:\n try:\n self.morphs_.append(m.encode('utf-8'))\n except UnicodeDecodeError:\n self.morphs_.append(m)\n\n def Morphs(self):\n return self.morphs_\n\n def SetLangId(self, lang):\n self.langid_ = lang\n\n def LangId(self):\n return self.langid_\n\n\nclass TokenFreqStats:\n \"\"\"Holder for token frequency-statistics such as\n relative frequency-counts and variance.\n \"\"\"\n\n def __init__(self, tok):\n self.token_ = tok\n self.frequencies_ = []\n self.freqsum_ = 0\n self.freqsumsq_ = 0\n self.variance_ = 0\n\n def __repr__(self):\n return '#<%s %s %.6f %.6f %.6f>' % (self.token_, self.frequencies_,\n self.freqsum_, self.freqsumsq_, self.variance_)\n\n def Token(self):\n return self.token_\n\n def Frequencies(self):\n return self.frequencies_\n\n def AddFrequency(self, f):\n self.frequencies_.append(f)\n\n def SetFrequencies(self, freq):\n self.frequencies_ = []\n for f in freq:\n self.frequencies_.append(f)\n\n def NormFrequencies(self):\n self.frequencies_ = [float(f) for f in self.frequencies_]\n sumfreqs = float(sum(self.frequencies_))\n if sumfreqs != 0.0:\n self.frequencies_ = [(f / sumfreqs) for f in self.frequencies_]\n\n def CalcFreqStats(self):\n n = len(self.frequencies_)\n self.freqsum_ = float(sum(self.frequencies_))\n self.freqsumsq_ = SumProd(self.frequencies_, self.frequencies_)\n self.variance_ = self.freqsumsq_ / n - self.freqsum_ ** 2 / n ** 2\n\n def FreqSum(self):\n return self.freqsum_\n\n def FreqVariance(self):\n return self.variance_\n\n\nclass DocTokenStats:\n \"\"\"Holder for Doclist-specific token statistics, such as frequency\n counts. Also allows for calculation of pairwise comparison metrics\n such as Pearson's correlation.\n \"\"\"\n\n def __init__(self, doclist=None):\n if doclist is None:\n self.doclist_ = documents.Doclist()\n else:\n self.doclist_ = doclist\n self.n_ = len(self.doclist_.Docs())\n self.tokstats_ = {}\n\n def InitTokenStats(self, tok):\n tstats = TokenFreqStats(tok)\n tfreq = []\n for doc in self.doclist_.Docs():\n c = 0\n for lang in doc.Langs():\n if tok.LangId() != lang.Id():\n continue\n tmptok = lang.MatchToken(tok)\n if tmptok is not None:\n c += tmptok.Count()\n tfreq.append(c)\n tstats.SetFrequencies(tfreq)\n tstats.NormFrequencies()\n tstats.CalcFreqStats()\n self.tokstats_[tok.EncodeForHash()] = tstats\n return tstats\n\n def AddTokenStats(self, tstats):\n tokhash = tstats.Token().EncodeForHash()\n if tokhash not in self.tokstats_:\n self.tokstats_[tokhash] = tstats\n\n def GetTokenStats(self, tok):\n try:\n return self.tokstats_[tok.EncodeForHash()]\n except KeyError:\n return self.InitTokenStats(tok)\n\n def TokenStats(self):\n return self.tokstats_.values()\n\n def SetN(self, n):\n self.n_ = n\n\n def GetN(self):\n return self.n_\n\n def PearsonsCorrelation(self, token1, token2):\n stats1 = self.GetTokenStats(token1)\n stats2 = self.GetTokenStats(token2)\n freq1 = stats1.Frequencies()\n freq2 = stats2.Frequencies()\n sumxy = sum(map(lambda x, y: x * y, freq1, freq2))\n covxy = sumxy / float(self.n_) - stats1.FreqSum() * stats2.FreqSum(\n ) / float(self.n_ ** 2)\n try:\n rho = covxy / sqrt(stats1.FreqVariance() * stats2.FreqVariance())\n except ZeroDivisionError:\n rho = 0.0\n return rho\n\n\nclass Lang:\n \"\"\"Holder for tokens in a language.\n \"\"\"\n\n def __init__(self):\n self.id_ = ''\n self.tokens_ = []\n\n def XmlEncode(self):\n if len(self.tokens_) == 0:\n return ''\n xml_string_ = '<lang id=\"%s\">\\n%s\\n%s</lang>'\n xml_tokens = []\n for token_ in self.Tokens():\n xml_tokens.append(token_.XmlEncode())\n xml_result = xml_string_ % (self.id_, '\\n'.join(xml_tokens),\n LANG_INDENT_)\n return LANG_INDENT_ + xml_result\n\n def Id(self):\n return self.id_\n\n def SetId(self, id):\n self.id_ = id.encode('utf-8')\n\n def Tokens(self):\n return self.tokens_\n\n def SetTokens(self, tokens):\n self.tokens_ = []\n for t in tokens:\n self.AddToken(t)\n\n def AddToken(self, token, merge=False):\n \"\"\"If an identical token already exists in dictionary,\n will merge tokens and cumulate their counts. Checks to\n see that morphology and pronunciations are identical,\n otherwise the tokens will not be merged.\n \"\"\"\n token.SetLangId(self.id_)\n if not merge:\n self.tokens_.append(token)\n else:\n exists = self.MatchToken(token)\n if exists is None:\n self.tokens_.append(token)\n else:\n exists.IncrementCount(token.Count())\n\n def MatchToken(self, token):\n try:\n i = self.tokens_.index(token)\n return self.tokens_[i]\n except ValueError:\n return None\n\n def CompactTokens(self):\n \"\"\"Merge identical tokens and cumulate their counts. Checks to see\n that morphology and pronunciations are identical, otherwise the\n tokens will not be merged.\n \"\"\"\n map = {}\n for token_ in self.tokens_:\n hash_string = token_.EncodeForHash()\n try:\n map[hash_string].append(token_)\n except KeyError:\n map[hash_string] = [token_]\n ntokens = []\n keys = map.keys()\n keys.sort()\n for k in keys:\n token_ = map[k][0]\n for otoken in map[k][1:]:\n token_.IncrementCount(otoken.Count())\n ntokens.append(token_)\n self.tokens_ = ntokens\n",
"<docstring token>\n<assignment token>\n<import token>\n<assignment token>\n<function token>\n\n\nclass Token:\n <docstring token>\n <function token>\n <function token>\n <function token>\n\n def XmlEncode(self):\n xml_string_ = '<token count=\"%d\" morphs=\"%s\" prons=\"%s\">%s</token>'\n morphs = ' '.join(self.morphs_)\n morphs = xml.sax.saxutils.escape(morphs)\n prons = ' ; '.join(self.pronunciations_)\n prons = xml.sax.saxutils.escape(prons)\n string_ = xml.sax.saxutils.escape(self.string_)\n xml_result = xml_string_ % (self.count_, morphs, prons, string_)\n return TOKEN_INDENT_ + xml_result\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def Pronunciations(self):\n return self.pronunciations_\n\n def SetMorphs(self, morphs):\n self.morphs_ = []\n for m in morphs:\n try:\n self.morphs_.append(m.encode('utf-8'))\n except UnicodeDecodeError:\n self.morphs_.append(m)\n\n def Morphs(self):\n return self.morphs_\n\n def SetLangId(self, lang):\n self.langid_ = lang\n\n def LangId(self):\n return self.langid_\n\n\nclass TokenFreqStats:\n \"\"\"Holder for token frequency-statistics such as\n relative frequency-counts and variance.\n \"\"\"\n\n def __init__(self, tok):\n self.token_ = tok\n self.frequencies_ = []\n self.freqsum_ = 0\n self.freqsumsq_ = 0\n self.variance_ = 0\n\n def __repr__(self):\n return '#<%s %s %.6f %.6f %.6f>' % (self.token_, self.frequencies_,\n self.freqsum_, self.freqsumsq_, self.variance_)\n\n def Token(self):\n return self.token_\n\n def Frequencies(self):\n return self.frequencies_\n\n def AddFrequency(self, f):\n self.frequencies_.append(f)\n\n def SetFrequencies(self, freq):\n self.frequencies_ = []\n for f in freq:\n self.frequencies_.append(f)\n\n def NormFrequencies(self):\n self.frequencies_ = [float(f) for f in self.frequencies_]\n sumfreqs = float(sum(self.frequencies_))\n if sumfreqs != 0.0:\n self.frequencies_ = [(f / sumfreqs) for f in self.frequencies_]\n\n def CalcFreqStats(self):\n n = len(self.frequencies_)\n self.freqsum_ = float(sum(self.frequencies_))\n self.freqsumsq_ = SumProd(self.frequencies_, self.frequencies_)\n self.variance_ = self.freqsumsq_ / n - self.freqsum_ ** 2 / n ** 2\n\n def FreqSum(self):\n return self.freqsum_\n\n def FreqVariance(self):\n return self.variance_\n\n\nclass DocTokenStats:\n \"\"\"Holder for Doclist-specific token statistics, such as frequency\n counts. Also allows for calculation of pairwise comparison metrics\n such as Pearson's correlation.\n \"\"\"\n\n def __init__(self, doclist=None):\n if doclist is None:\n self.doclist_ = documents.Doclist()\n else:\n self.doclist_ = doclist\n self.n_ = len(self.doclist_.Docs())\n self.tokstats_ = {}\n\n def InitTokenStats(self, tok):\n tstats = TokenFreqStats(tok)\n tfreq = []\n for doc in self.doclist_.Docs():\n c = 0\n for lang in doc.Langs():\n if tok.LangId() != lang.Id():\n continue\n tmptok = lang.MatchToken(tok)\n if tmptok is not None:\n c += tmptok.Count()\n tfreq.append(c)\n tstats.SetFrequencies(tfreq)\n tstats.NormFrequencies()\n tstats.CalcFreqStats()\n self.tokstats_[tok.EncodeForHash()] = tstats\n return tstats\n\n def AddTokenStats(self, tstats):\n tokhash = tstats.Token().EncodeForHash()\n if tokhash not in self.tokstats_:\n self.tokstats_[tokhash] = tstats\n\n def GetTokenStats(self, tok):\n try:\n return self.tokstats_[tok.EncodeForHash()]\n except KeyError:\n return self.InitTokenStats(tok)\n\n def TokenStats(self):\n return self.tokstats_.values()\n\n def SetN(self, n):\n self.n_ = n\n\n def GetN(self):\n return self.n_\n\n def PearsonsCorrelation(self, token1, token2):\n stats1 = self.GetTokenStats(token1)\n stats2 = self.GetTokenStats(token2)\n freq1 = stats1.Frequencies()\n freq2 = stats2.Frequencies()\n sumxy = sum(map(lambda x, y: x * y, freq1, freq2))\n covxy = sumxy / float(self.n_) - stats1.FreqSum() * stats2.FreqSum(\n ) / float(self.n_ ** 2)\n try:\n rho = covxy / sqrt(stats1.FreqVariance() * stats2.FreqVariance())\n except ZeroDivisionError:\n rho = 0.0\n return rho\n\n\nclass Lang:\n \"\"\"Holder for tokens in a language.\n \"\"\"\n\n def __init__(self):\n self.id_ = ''\n self.tokens_ = []\n\n def XmlEncode(self):\n if len(self.tokens_) == 0:\n return ''\n xml_string_ = '<lang id=\"%s\">\\n%s\\n%s</lang>'\n xml_tokens = []\n for token_ in self.Tokens():\n xml_tokens.append(token_.XmlEncode())\n xml_result = xml_string_ % (self.id_, '\\n'.join(xml_tokens),\n LANG_INDENT_)\n return LANG_INDENT_ + xml_result\n\n def Id(self):\n return self.id_\n\n def SetId(self, id):\n self.id_ = id.encode('utf-8')\n\n def Tokens(self):\n return self.tokens_\n\n def SetTokens(self, tokens):\n self.tokens_ = []\n for t in tokens:\n self.AddToken(t)\n\n def AddToken(self, token, merge=False):\n \"\"\"If an identical token already exists in dictionary,\n will merge tokens and cumulate their counts. Checks to\n see that morphology and pronunciations are identical,\n otherwise the tokens will not be merged.\n \"\"\"\n token.SetLangId(self.id_)\n if not merge:\n self.tokens_.append(token)\n else:\n exists = self.MatchToken(token)\n if exists is None:\n self.tokens_.append(token)\n else:\n exists.IncrementCount(token.Count())\n\n def MatchToken(self, token):\n try:\n i = self.tokens_.index(token)\n return self.tokens_[i]\n except ValueError:\n return None\n\n def CompactTokens(self):\n \"\"\"Merge identical tokens and cumulate their counts. Checks to see\n that morphology and pronunciations are identical, otherwise the\n tokens will not be merged.\n \"\"\"\n map = {}\n for token_ in self.tokens_:\n hash_string = token_.EncodeForHash()\n try:\n map[hash_string].append(token_)\n except KeyError:\n map[hash_string] = [token_]\n ntokens = []\n keys = map.keys()\n keys.sort()\n for k in keys:\n token_ = map[k][0]\n for otoken in map[k][1:]:\n token_.IncrementCount(otoken.Count())\n ntokens.append(token_)\n self.tokens_ = ntokens\n",
"<docstring token>\n<assignment token>\n<import token>\n<assignment token>\n<function token>\n\n\nclass Token:\n <docstring token>\n <function token>\n <function token>\n <function token>\n\n def XmlEncode(self):\n xml_string_ = '<token count=\"%d\" morphs=\"%s\" prons=\"%s\">%s</token>'\n morphs = ' '.join(self.morphs_)\n morphs = xml.sax.saxutils.escape(morphs)\n prons = ' ; '.join(self.pronunciations_)\n prons = xml.sax.saxutils.escape(prons)\n string_ = xml.sax.saxutils.escape(self.string_)\n xml_result = xml_string_ % (self.count_, morphs, prons, string_)\n return TOKEN_INDENT_ + xml_result\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def Pronunciations(self):\n return self.pronunciations_\n\n def SetMorphs(self, morphs):\n self.morphs_ = []\n for m in morphs:\n try:\n self.morphs_.append(m.encode('utf-8'))\n except UnicodeDecodeError:\n self.morphs_.append(m)\n <function token>\n\n def SetLangId(self, lang):\n self.langid_ = lang\n\n def LangId(self):\n return self.langid_\n\n\nclass TokenFreqStats:\n \"\"\"Holder for token frequency-statistics such as\n relative frequency-counts and variance.\n \"\"\"\n\n def __init__(self, tok):\n self.token_ = tok\n self.frequencies_ = []\n self.freqsum_ = 0\n self.freqsumsq_ = 0\n self.variance_ = 0\n\n def __repr__(self):\n return '#<%s %s %.6f %.6f %.6f>' % (self.token_, self.frequencies_,\n self.freqsum_, self.freqsumsq_, self.variance_)\n\n def Token(self):\n return self.token_\n\n def Frequencies(self):\n return self.frequencies_\n\n def AddFrequency(self, f):\n self.frequencies_.append(f)\n\n def SetFrequencies(self, freq):\n self.frequencies_ = []\n for f in freq:\n self.frequencies_.append(f)\n\n def NormFrequencies(self):\n self.frequencies_ = [float(f) for f in self.frequencies_]\n sumfreqs = float(sum(self.frequencies_))\n if sumfreqs != 0.0:\n self.frequencies_ = [(f / sumfreqs) for f in self.frequencies_]\n\n def CalcFreqStats(self):\n n = len(self.frequencies_)\n self.freqsum_ = float(sum(self.frequencies_))\n self.freqsumsq_ = SumProd(self.frequencies_, self.frequencies_)\n self.variance_ = self.freqsumsq_ / n - self.freqsum_ ** 2 / n ** 2\n\n def FreqSum(self):\n return self.freqsum_\n\n def FreqVariance(self):\n return self.variance_\n\n\nclass DocTokenStats:\n \"\"\"Holder for Doclist-specific token statistics, such as frequency\n counts. Also allows for calculation of pairwise comparison metrics\n such as Pearson's correlation.\n \"\"\"\n\n def __init__(self, doclist=None):\n if doclist is None:\n self.doclist_ = documents.Doclist()\n else:\n self.doclist_ = doclist\n self.n_ = len(self.doclist_.Docs())\n self.tokstats_ = {}\n\n def InitTokenStats(self, tok):\n tstats = TokenFreqStats(tok)\n tfreq = []\n for doc in self.doclist_.Docs():\n c = 0\n for lang in doc.Langs():\n if tok.LangId() != lang.Id():\n continue\n tmptok = lang.MatchToken(tok)\n if tmptok is not None:\n c += tmptok.Count()\n tfreq.append(c)\n tstats.SetFrequencies(tfreq)\n tstats.NormFrequencies()\n tstats.CalcFreqStats()\n self.tokstats_[tok.EncodeForHash()] = tstats\n return tstats\n\n def AddTokenStats(self, tstats):\n tokhash = tstats.Token().EncodeForHash()\n if tokhash not in self.tokstats_:\n self.tokstats_[tokhash] = tstats\n\n def GetTokenStats(self, tok):\n try:\n return self.tokstats_[tok.EncodeForHash()]\n except KeyError:\n return self.InitTokenStats(tok)\n\n def TokenStats(self):\n return self.tokstats_.values()\n\n def SetN(self, n):\n self.n_ = n\n\n def GetN(self):\n return self.n_\n\n def PearsonsCorrelation(self, token1, token2):\n stats1 = self.GetTokenStats(token1)\n stats2 = self.GetTokenStats(token2)\n freq1 = stats1.Frequencies()\n freq2 = stats2.Frequencies()\n sumxy = sum(map(lambda x, y: x * y, freq1, freq2))\n covxy = sumxy / float(self.n_) - stats1.FreqSum() * stats2.FreqSum(\n ) / float(self.n_ ** 2)\n try:\n rho = covxy / sqrt(stats1.FreqVariance() * stats2.FreqVariance())\n except ZeroDivisionError:\n rho = 0.0\n return rho\n\n\nclass Lang:\n \"\"\"Holder for tokens in a language.\n \"\"\"\n\n def __init__(self):\n self.id_ = ''\n self.tokens_ = []\n\n def XmlEncode(self):\n if len(self.tokens_) == 0:\n return ''\n xml_string_ = '<lang id=\"%s\">\\n%s\\n%s</lang>'\n xml_tokens = []\n for token_ in self.Tokens():\n xml_tokens.append(token_.XmlEncode())\n xml_result = xml_string_ % (self.id_, '\\n'.join(xml_tokens),\n LANG_INDENT_)\n return LANG_INDENT_ + xml_result\n\n def Id(self):\n return self.id_\n\n def SetId(self, id):\n self.id_ = id.encode('utf-8')\n\n def Tokens(self):\n return self.tokens_\n\n def SetTokens(self, tokens):\n self.tokens_ = []\n for t in tokens:\n self.AddToken(t)\n\n def AddToken(self, token, merge=False):\n \"\"\"If an identical token already exists in dictionary,\n will merge tokens and cumulate their counts. Checks to\n see that morphology and pronunciations are identical,\n otherwise the tokens will not be merged.\n \"\"\"\n token.SetLangId(self.id_)\n if not merge:\n self.tokens_.append(token)\n else:\n exists = self.MatchToken(token)\n if exists is None:\n self.tokens_.append(token)\n else:\n exists.IncrementCount(token.Count())\n\n def MatchToken(self, token):\n try:\n i = self.tokens_.index(token)\n return self.tokens_[i]\n except ValueError:\n return None\n\n def CompactTokens(self):\n \"\"\"Merge identical tokens and cumulate their counts. Checks to see\n that morphology and pronunciations are identical, otherwise the\n tokens will not be merged.\n \"\"\"\n map = {}\n for token_ in self.tokens_:\n hash_string = token_.EncodeForHash()\n try:\n map[hash_string].append(token_)\n except KeyError:\n map[hash_string] = [token_]\n ntokens = []\n keys = map.keys()\n keys.sort()\n for k in keys:\n token_ = map[k][0]\n for otoken in map[k][1:]:\n token_.IncrementCount(otoken.Count())\n ntokens.append(token_)\n self.tokens_ = ntokens\n",
"<docstring token>\n<assignment token>\n<import token>\n<assignment token>\n<function token>\n\n\nclass Token:\n <docstring token>\n <function token>\n <function token>\n <function token>\n\n def XmlEncode(self):\n xml_string_ = '<token count=\"%d\" morphs=\"%s\" prons=\"%s\">%s</token>'\n morphs = ' '.join(self.morphs_)\n morphs = xml.sax.saxutils.escape(morphs)\n prons = ' ; '.join(self.pronunciations_)\n prons = xml.sax.saxutils.escape(prons)\n string_ = xml.sax.saxutils.escape(self.string_)\n xml_result = xml_string_ % (self.count_, morphs, prons, string_)\n return TOKEN_INDENT_ + xml_result\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def SetMorphs(self, morphs):\n self.morphs_ = []\n for m in morphs:\n try:\n self.morphs_.append(m.encode('utf-8'))\n except UnicodeDecodeError:\n self.morphs_.append(m)\n <function token>\n\n def SetLangId(self, lang):\n self.langid_ = lang\n\n def LangId(self):\n return self.langid_\n\n\nclass TokenFreqStats:\n \"\"\"Holder for token frequency-statistics such as\n relative frequency-counts and variance.\n \"\"\"\n\n def __init__(self, tok):\n self.token_ = tok\n self.frequencies_ = []\n self.freqsum_ = 0\n self.freqsumsq_ = 0\n self.variance_ = 0\n\n def __repr__(self):\n return '#<%s %s %.6f %.6f %.6f>' % (self.token_, self.frequencies_,\n self.freqsum_, self.freqsumsq_, self.variance_)\n\n def Token(self):\n return self.token_\n\n def Frequencies(self):\n return self.frequencies_\n\n def AddFrequency(self, f):\n self.frequencies_.append(f)\n\n def SetFrequencies(self, freq):\n self.frequencies_ = []\n for f in freq:\n self.frequencies_.append(f)\n\n def NormFrequencies(self):\n self.frequencies_ = [float(f) for f in self.frequencies_]\n sumfreqs = float(sum(self.frequencies_))\n if sumfreqs != 0.0:\n self.frequencies_ = [(f / sumfreqs) for f in self.frequencies_]\n\n def CalcFreqStats(self):\n n = len(self.frequencies_)\n self.freqsum_ = float(sum(self.frequencies_))\n self.freqsumsq_ = SumProd(self.frequencies_, self.frequencies_)\n self.variance_ = self.freqsumsq_ / n - self.freqsum_ ** 2 / n ** 2\n\n def FreqSum(self):\n return self.freqsum_\n\n def FreqVariance(self):\n return self.variance_\n\n\nclass DocTokenStats:\n \"\"\"Holder for Doclist-specific token statistics, such as frequency\n counts. Also allows for calculation of pairwise comparison metrics\n such as Pearson's correlation.\n \"\"\"\n\n def __init__(self, doclist=None):\n if doclist is None:\n self.doclist_ = documents.Doclist()\n else:\n self.doclist_ = doclist\n self.n_ = len(self.doclist_.Docs())\n self.tokstats_ = {}\n\n def InitTokenStats(self, tok):\n tstats = TokenFreqStats(tok)\n tfreq = []\n for doc in self.doclist_.Docs():\n c = 0\n for lang in doc.Langs():\n if tok.LangId() != lang.Id():\n continue\n tmptok = lang.MatchToken(tok)\n if tmptok is not None:\n c += tmptok.Count()\n tfreq.append(c)\n tstats.SetFrequencies(tfreq)\n tstats.NormFrequencies()\n tstats.CalcFreqStats()\n self.tokstats_[tok.EncodeForHash()] = tstats\n return tstats\n\n def AddTokenStats(self, tstats):\n tokhash = tstats.Token().EncodeForHash()\n if tokhash not in self.tokstats_:\n self.tokstats_[tokhash] = tstats\n\n def GetTokenStats(self, tok):\n try:\n return self.tokstats_[tok.EncodeForHash()]\n except KeyError:\n return self.InitTokenStats(tok)\n\n def TokenStats(self):\n return self.tokstats_.values()\n\n def SetN(self, n):\n self.n_ = n\n\n def GetN(self):\n return self.n_\n\n def PearsonsCorrelation(self, token1, token2):\n stats1 = self.GetTokenStats(token1)\n stats2 = self.GetTokenStats(token2)\n freq1 = stats1.Frequencies()\n freq2 = stats2.Frequencies()\n sumxy = sum(map(lambda x, y: x * y, freq1, freq2))\n covxy = sumxy / float(self.n_) - stats1.FreqSum() * stats2.FreqSum(\n ) / float(self.n_ ** 2)\n try:\n rho = covxy / sqrt(stats1.FreqVariance() * stats2.FreqVariance())\n except ZeroDivisionError:\n rho = 0.0\n return rho\n\n\nclass Lang:\n \"\"\"Holder for tokens in a language.\n \"\"\"\n\n def __init__(self):\n self.id_ = ''\n self.tokens_ = []\n\n def XmlEncode(self):\n if len(self.tokens_) == 0:\n return ''\n xml_string_ = '<lang id=\"%s\">\\n%s\\n%s</lang>'\n xml_tokens = []\n for token_ in self.Tokens():\n xml_tokens.append(token_.XmlEncode())\n xml_result = xml_string_ % (self.id_, '\\n'.join(xml_tokens),\n LANG_INDENT_)\n return LANG_INDENT_ + xml_result\n\n def Id(self):\n return self.id_\n\n def SetId(self, id):\n self.id_ = id.encode('utf-8')\n\n def Tokens(self):\n return self.tokens_\n\n def SetTokens(self, tokens):\n self.tokens_ = []\n for t in tokens:\n self.AddToken(t)\n\n def AddToken(self, token, merge=False):\n \"\"\"If an identical token already exists in dictionary,\n will merge tokens and cumulate their counts. Checks to\n see that morphology and pronunciations are identical,\n otherwise the tokens will not be merged.\n \"\"\"\n token.SetLangId(self.id_)\n if not merge:\n self.tokens_.append(token)\n else:\n exists = self.MatchToken(token)\n if exists is None:\n self.tokens_.append(token)\n else:\n exists.IncrementCount(token.Count())\n\n def MatchToken(self, token):\n try:\n i = self.tokens_.index(token)\n return self.tokens_[i]\n except ValueError:\n return None\n\n def CompactTokens(self):\n \"\"\"Merge identical tokens and cumulate their counts. Checks to see\n that morphology and pronunciations are identical, otherwise the\n tokens will not be merged.\n \"\"\"\n map = {}\n for token_ in self.tokens_:\n hash_string = token_.EncodeForHash()\n try:\n map[hash_string].append(token_)\n except KeyError:\n map[hash_string] = [token_]\n ntokens = []\n keys = map.keys()\n keys.sort()\n for k in keys:\n token_ = map[k][0]\n for otoken in map[k][1:]:\n token_.IncrementCount(otoken.Count())\n ntokens.append(token_)\n self.tokens_ = ntokens\n",
"<docstring token>\n<assignment token>\n<import token>\n<assignment token>\n<function token>\n\n\nclass Token:\n <docstring token>\n <function token>\n <function token>\n <function token>\n\n def XmlEncode(self):\n xml_string_ = '<token count=\"%d\" morphs=\"%s\" prons=\"%s\">%s</token>'\n morphs = ' '.join(self.morphs_)\n morphs = xml.sax.saxutils.escape(morphs)\n prons = ' ; '.join(self.pronunciations_)\n prons = xml.sax.saxutils.escape(prons)\n string_ = xml.sax.saxutils.escape(self.string_)\n xml_result = xml_string_ % (self.count_, morphs, prons, string_)\n return TOKEN_INDENT_ + xml_result\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def SetMorphs(self, morphs):\n self.morphs_ = []\n for m in morphs:\n try:\n self.morphs_.append(m.encode('utf-8'))\n except UnicodeDecodeError:\n self.morphs_.append(m)\n <function token>\n <function token>\n\n def LangId(self):\n return self.langid_\n\n\nclass TokenFreqStats:\n \"\"\"Holder for token frequency-statistics such as\n relative frequency-counts and variance.\n \"\"\"\n\n def __init__(self, tok):\n self.token_ = tok\n self.frequencies_ = []\n self.freqsum_ = 0\n self.freqsumsq_ = 0\n self.variance_ = 0\n\n def __repr__(self):\n return '#<%s %s %.6f %.6f %.6f>' % (self.token_, self.frequencies_,\n self.freqsum_, self.freqsumsq_, self.variance_)\n\n def Token(self):\n return self.token_\n\n def Frequencies(self):\n return self.frequencies_\n\n def AddFrequency(self, f):\n self.frequencies_.append(f)\n\n def SetFrequencies(self, freq):\n self.frequencies_ = []\n for f in freq:\n self.frequencies_.append(f)\n\n def NormFrequencies(self):\n self.frequencies_ = [float(f) for f in self.frequencies_]\n sumfreqs = float(sum(self.frequencies_))\n if sumfreqs != 0.0:\n self.frequencies_ = [(f / sumfreqs) for f in self.frequencies_]\n\n def CalcFreqStats(self):\n n = len(self.frequencies_)\n self.freqsum_ = float(sum(self.frequencies_))\n self.freqsumsq_ = SumProd(self.frequencies_, self.frequencies_)\n self.variance_ = self.freqsumsq_ / n - self.freqsum_ ** 2 / n ** 2\n\n def FreqSum(self):\n return self.freqsum_\n\n def FreqVariance(self):\n return self.variance_\n\n\nclass DocTokenStats:\n \"\"\"Holder for Doclist-specific token statistics, such as frequency\n counts. Also allows for calculation of pairwise comparison metrics\n such as Pearson's correlation.\n \"\"\"\n\n def __init__(self, doclist=None):\n if doclist is None:\n self.doclist_ = documents.Doclist()\n else:\n self.doclist_ = doclist\n self.n_ = len(self.doclist_.Docs())\n self.tokstats_ = {}\n\n def InitTokenStats(self, tok):\n tstats = TokenFreqStats(tok)\n tfreq = []\n for doc in self.doclist_.Docs():\n c = 0\n for lang in doc.Langs():\n if tok.LangId() != lang.Id():\n continue\n tmptok = lang.MatchToken(tok)\n if tmptok is not None:\n c += tmptok.Count()\n tfreq.append(c)\n tstats.SetFrequencies(tfreq)\n tstats.NormFrequencies()\n tstats.CalcFreqStats()\n self.tokstats_[tok.EncodeForHash()] = tstats\n return tstats\n\n def AddTokenStats(self, tstats):\n tokhash = tstats.Token().EncodeForHash()\n if tokhash not in self.tokstats_:\n self.tokstats_[tokhash] = tstats\n\n def GetTokenStats(self, tok):\n try:\n return self.tokstats_[tok.EncodeForHash()]\n except KeyError:\n return self.InitTokenStats(tok)\n\n def TokenStats(self):\n return self.tokstats_.values()\n\n def SetN(self, n):\n self.n_ = n\n\n def GetN(self):\n return self.n_\n\n def PearsonsCorrelation(self, token1, token2):\n stats1 = self.GetTokenStats(token1)\n stats2 = self.GetTokenStats(token2)\n freq1 = stats1.Frequencies()\n freq2 = stats2.Frequencies()\n sumxy = sum(map(lambda x, y: x * y, freq1, freq2))\n covxy = sumxy / float(self.n_) - stats1.FreqSum() * stats2.FreqSum(\n ) / float(self.n_ ** 2)\n try:\n rho = covxy / sqrt(stats1.FreqVariance() * stats2.FreqVariance())\n except ZeroDivisionError:\n rho = 0.0\n return rho\n\n\nclass Lang:\n \"\"\"Holder for tokens in a language.\n \"\"\"\n\n def __init__(self):\n self.id_ = ''\n self.tokens_ = []\n\n def XmlEncode(self):\n if len(self.tokens_) == 0:\n return ''\n xml_string_ = '<lang id=\"%s\">\\n%s\\n%s</lang>'\n xml_tokens = []\n for token_ in self.Tokens():\n xml_tokens.append(token_.XmlEncode())\n xml_result = xml_string_ % (self.id_, '\\n'.join(xml_tokens),\n LANG_INDENT_)\n return LANG_INDENT_ + xml_result\n\n def Id(self):\n return self.id_\n\n def SetId(self, id):\n self.id_ = id.encode('utf-8')\n\n def Tokens(self):\n return self.tokens_\n\n def SetTokens(self, tokens):\n self.tokens_ = []\n for t in tokens:\n self.AddToken(t)\n\n def AddToken(self, token, merge=False):\n \"\"\"If an identical token already exists in dictionary,\n will merge tokens and cumulate their counts. Checks to\n see that morphology and pronunciations are identical,\n otherwise the tokens will not be merged.\n \"\"\"\n token.SetLangId(self.id_)\n if not merge:\n self.tokens_.append(token)\n else:\n exists = self.MatchToken(token)\n if exists is None:\n self.tokens_.append(token)\n else:\n exists.IncrementCount(token.Count())\n\n def MatchToken(self, token):\n try:\n i = self.tokens_.index(token)\n return self.tokens_[i]\n except ValueError:\n return None\n\n def CompactTokens(self):\n \"\"\"Merge identical tokens and cumulate their counts. Checks to see\n that morphology and pronunciations are identical, otherwise the\n tokens will not be merged.\n \"\"\"\n map = {}\n for token_ in self.tokens_:\n hash_string = token_.EncodeForHash()\n try:\n map[hash_string].append(token_)\n except KeyError:\n map[hash_string] = [token_]\n ntokens = []\n keys = map.keys()\n keys.sort()\n for k in keys:\n token_ = map[k][0]\n for otoken in map[k][1:]:\n token_.IncrementCount(otoken.Count())\n ntokens.append(token_)\n self.tokens_ = ntokens\n",
"<docstring token>\n<assignment token>\n<import token>\n<assignment token>\n<function token>\n\n\nclass Token:\n <docstring token>\n <function token>\n <function token>\n <function token>\n\n def XmlEncode(self):\n xml_string_ = '<token count=\"%d\" morphs=\"%s\" prons=\"%s\">%s</token>'\n morphs = ' '.join(self.morphs_)\n morphs = xml.sax.saxutils.escape(morphs)\n prons = ' ; '.join(self.pronunciations_)\n prons = xml.sax.saxutils.escape(prons)\n string_ = xml.sax.saxutils.escape(self.string_)\n xml_result = xml_string_ % (self.count_, morphs, prons, string_)\n return TOKEN_INDENT_ + xml_result\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def LangId(self):\n return self.langid_\n\n\nclass TokenFreqStats:\n \"\"\"Holder for token frequency-statistics such as\n relative frequency-counts and variance.\n \"\"\"\n\n def __init__(self, tok):\n self.token_ = tok\n self.frequencies_ = []\n self.freqsum_ = 0\n self.freqsumsq_ = 0\n self.variance_ = 0\n\n def __repr__(self):\n return '#<%s %s %.6f %.6f %.6f>' % (self.token_, self.frequencies_,\n self.freqsum_, self.freqsumsq_, self.variance_)\n\n def Token(self):\n return self.token_\n\n def Frequencies(self):\n return self.frequencies_\n\n def AddFrequency(self, f):\n self.frequencies_.append(f)\n\n def SetFrequencies(self, freq):\n self.frequencies_ = []\n for f in freq:\n self.frequencies_.append(f)\n\n def NormFrequencies(self):\n self.frequencies_ = [float(f) for f in self.frequencies_]\n sumfreqs = float(sum(self.frequencies_))\n if sumfreqs != 0.0:\n self.frequencies_ = [(f / sumfreqs) for f in self.frequencies_]\n\n def CalcFreqStats(self):\n n = len(self.frequencies_)\n self.freqsum_ = float(sum(self.frequencies_))\n self.freqsumsq_ = SumProd(self.frequencies_, self.frequencies_)\n self.variance_ = self.freqsumsq_ / n - self.freqsum_ ** 2 / n ** 2\n\n def FreqSum(self):\n return self.freqsum_\n\n def FreqVariance(self):\n return self.variance_\n\n\nclass DocTokenStats:\n \"\"\"Holder for Doclist-specific token statistics, such as frequency\n counts. Also allows for calculation of pairwise comparison metrics\n such as Pearson's correlation.\n \"\"\"\n\n def __init__(self, doclist=None):\n if doclist is None:\n self.doclist_ = documents.Doclist()\n else:\n self.doclist_ = doclist\n self.n_ = len(self.doclist_.Docs())\n self.tokstats_ = {}\n\n def InitTokenStats(self, tok):\n tstats = TokenFreqStats(tok)\n tfreq = []\n for doc in self.doclist_.Docs():\n c = 0\n for lang in doc.Langs():\n if tok.LangId() != lang.Id():\n continue\n tmptok = lang.MatchToken(tok)\n if tmptok is not None:\n c += tmptok.Count()\n tfreq.append(c)\n tstats.SetFrequencies(tfreq)\n tstats.NormFrequencies()\n tstats.CalcFreqStats()\n self.tokstats_[tok.EncodeForHash()] = tstats\n return tstats\n\n def AddTokenStats(self, tstats):\n tokhash = tstats.Token().EncodeForHash()\n if tokhash not in self.tokstats_:\n self.tokstats_[tokhash] = tstats\n\n def GetTokenStats(self, tok):\n try:\n return self.tokstats_[tok.EncodeForHash()]\n except KeyError:\n return self.InitTokenStats(tok)\n\n def TokenStats(self):\n return self.tokstats_.values()\n\n def SetN(self, n):\n self.n_ = n\n\n def GetN(self):\n return self.n_\n\n def PearsonsCorrelation(self, token1, token2):\n stats1 = self.GetTokenStats(token1)\n stats2 = self.GetTokenStats(token2)\n freq1 = stats1.Frequencies()\n freq2 = stats2.Frequencies()\n sumxy = sum(map(lambda x, y: x * y, freq1, freq2))\n covxy = sumxy / float(self.n_) - stats1.FreqSum() * stats2.FreqSum(\n ) / float(self.n_ ** 2)\n try:\n rho = covxy / sqrt(stats1.FreqVariance() * stats2.FreqVariance())\n except ZeroDivisionError:\n rho = 0.0\n return rho\n\n\nclass Lang:\n \"\"\"Holder for tokens in a language.\n \"\"\"\n\n def __init__(self):\n self.id_ = ''\n self.tokens_ = []\n\n def XmlEncode(self):\n if len(self.tokens_) == 0:\n return ''\n xml_string_ = '<lang id=\"%s\">\\n%s\\n%s</lang>'\n xml_tokens = []\n for token_ in self.Tokens():\n xml_tokens.append(token_.XmlEncode())\n xml_result = xml_string_ % (self.id_, '\\n'.join(xml_tokens),\n LANG_INDENT_)\n return LANG_INDENT_ + xml_result\n\n def Id(self):\n return self.id_\n\n def SetId(self, id):\n self.id_ = id.encode('utf-8')\n\n def Tokens(self):\n return self.tokens_\n\n def SetTokens(self, tokens):\n self.tokens_ = []\n for t in tokens:\n self.AddToken(t)\n\n def AddToken(self, token, merge=False):\n \"\"\"If an identical token already exists in dictionary,\n will merge tokens and cumulate their counts. Checks to\n see that morphology and pronunciations are identical,\n otherwise the tokens will not be merged.\n \"\"\"\n token.SetLangId(self.id_)\n if not merge:\n self.tokens_.append(token)\n else:\n exists = self.MatchToken(token)\n if exists is None:\n self.tokens_.append(token)\n else:\n exists.IncrementCount(token.Count())\n\n def MatchToken(self, token):\n try:\n i = self.tokens_.index(token)\n return self.tokens_[i]\n except ValueError:\n return None\n\n def CompactTokens(self):\n \"\"\"Merge identical tokens and cumulate their counts. Checks to see\n that morphology and pronunciations are identical, otherwise the\n tokens will not be merged.\n \"\"\"\n map = {}\n for token_ in self.tokens_:\n hash_string = token_.EncodeForHash()\n try:\n map[hash_string].append(token_)\n except KeyError:\n map[hash_string] = [token_]\n ntokens = []\n keys = map.keys()\n keys.sort()\n for k in keys:\n token_ = map[k][0]\n for otoken in map[k][1:]:\n token_.IncrementCount(otoken.Count())\n ntokens.append(token_)\n self.tokens_ = ntokens\n",
"<docstring token>\n<assignment token>\n<import token>\n<assignment token>\n<function token>\n\n\nclass Token:\n <docstring token>\n <function token>\n <function token>\n <function token>\n\n def XmlEncode(self):\n xml_string_ = '<token count=\"%d\" morphs=\"%s\" prons=\"%s\">%s</token>'\n morphs = ' '.join(self.morphs_)\n morphs = xml.sax.saxutils.escape(morphs)\n prons = ' ; '.join(self.pronunciations_)\n prons = xml.sax.saxutils.escape(prons)\n string_ = xml.sax.saxutils.escape(self.string_)\n xml_result = xml_string_ % (self.count_, morphs, prons, string_)\n return TOKEN_INDENT_ + xml_result\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n\nclass TokenFreqStats:\n \"\"\"Holder for token frequency-statistics such as\n relative frequency-counts and variance.\n \"\"\"\n\n def __init__(self, tok):\n self.token_ = tok\n self.frequencies_ = []\n self.freqsum_ = 0\n self.freqsumsq_ = 0\n self.variance_ = 0\n\n def __repr__(self):\n return '#<%s %s %.6f %.6f %.6f>' % (self.token_, self.frequencies_,\n self.freqsum_, self.freqsumsq_, self.variance_)\n\n def Token(self):\n return self.token_\n\n def Frequencies(self):\n return self.frequencies_\n\n def AddFrequency(self, f):\n self.frequencies_.append(f)\n\n def SetFrequencies(self, freq):\n self.frequencies_ = []\n for f in freq:\n self.frequencies_.append(f)\n\n def NormFrequencies(self):\n self.frequencies_ = [float(f) for f in self.frequencies_]\n sumfreqs = float(sum(self.frequencies_))\n if sumfreqs != 0.0:\n self.frequencies_ = [(f / sumfreqs) for f in self.frequencies_]\n\n def CalcFreqStats(self):\n n = len(self.frequencies_)\n self.freqsum_ = float(sum(self.frequencies_))\n self.freqsumsq_ = SumProd(self.frequencies_, self.frequencies_)\n self.variance_ = self.freqsumsq_ / n - self.freqsum_ ** 2 / n ** 2\n\n def FreqSum(self):\n return self.freqsum_\n\n def FreqVariance(self):\n return self.variance_\n\n\nclass DocTokenStats:\n \"\"\"Holder for Doclist-specific token statistics, such as frequency\n counts. Also allows for calculation of pairwise comparison metrics\n such as Pearson's correlation.\n \"\"\"\n\n def __init__(self, doclist=None):\n if doclist is None:\n self.doclist_ = documents.Doclist()\n else:\n self.doclist_ = doclist\n self.n_ = len(self.doclist_.Docs())\n self.tokstats_ = {}\n\n def InitTokenStats(self, tok):\n tstats = TokenFreqStats(tok)\n tfreq = []\n for doc in self.doclist_.Docs():\n c = 0\n for lang in doc.Langs():\n if tok.LangId() != lang.Id():\n continue\n tmptok = lang.MatchToken(tok)\n if tmptok is not None:\n c += tmptok.Count()\n tfreq.append(c)\n tstats.SetFrequencies(tfreq)\n tstats.NormFrequencies()\n tstats.CalcFreqStats()\n self.tokstats_[tok.EncodeForHash()] = tstats\n return tstats\n\n def AddTokenStats(self, tstats):\n tokhash = tstats.Token().EncodeForHash()\n if tokhash not in self.tokstats_:\n self.tokstats_[tokhash] = tstats\n\n def GetTokenStats(self, tok):\n try:\n return self.tokstats_[tok.EncodeForHash()]\n except KeyError:\n return self.InitTokenStats(tok)\n\n def TokenStats(self):\n return self.tokstats_.values()\n\n def SetN(self, n):\n self.n_ = n\n\n def GetN(self):\n return self.n_\n\n def PearsonsCorrelation(self, token1, token2):\n stats1 = self.GetTokenStats(token1)\n stats2 = self.GetTokenStats(token2)\n freq1 = stats1.Frequencies()\n freq2 = stats2.Frequencies()\n sumxy = sum(map(lambda x, y: x * y, freq1, freq2))\n covxy = sumxy / float(self.n_) - stats1.FreqSum() * stats2.FreqSum(\n ) / float(self.n_ ** 2)\n try:\n rho = covxy / sqrt(stats1.FreqVariance() * stats2.FreqVariance())\n except ZeroDivisionError:\n rho = 0.0\n return rho\n\n\nclass Lang:\n \"\"\"Holder for tokens in a language.\n \"\"\"\n\n def __init__(self):\n self.id_ = ''\n self.tokens_ = []\n\n def XmlEncode(self):\n if len(self.tokens_) == 0:\n return ''\n xml_string_ = '<lang id=\"%s\">\\n%s\\n%s</lang>'\n xml_tokens = []\n for token_ in self.Tokens():\n xml_tokens.append(token_.XmlEncode())\n xml_result = xml_string_ % (self.id_, '\\n'.join(xml_tokens),\n LANG_INDENT_)\n return LANG_INDENT_ + xml_result\n\n def Id(self):\n return self.id_\n\n def SetId(self, id):\n self.id_ = id.encode('utf-8')\n\n def Tokens(self):\n return self.tokens_\n\n def SetTokens(self, tokens):\n self.tokens_ = []\n for t in tokens:\n self.AddToken(t)\n\n def AddToken(self, token, merge=False):\n \"\"\"If an identical token already exists in dictionary,\n will merge tokens and cumulate their counts. Checks to\n see that morphology and pronunciations are identical,\n otherwise the tokens will not be merged.\n \"\"\"\n token.SetLangId(self.id_)\n if not merge:\n self.tokens_.append(token)\n else:\n exists = self.MatchToken(token)\n if exists is None:\n self.tokens_.append(token)\n else:\n exists.IncrementCount(token.Count())\n\n def MatchToken(self, token):\n try:\n i = self.tokens_.index(token)\n return self.tokens_[i]\n except ValueError:\n return None\n\n def CompactTokens(self):\n \"\"\"Merge identical tokens and cumulate their counts. Checks to see\n that morphology and pronunciations are identical, otherwise the\n tokens will not be merged.\n \"\"\"\n map = {}\n for token_ in self.tokens_:\n hash_string = token_.EncodeForHash()\n try:\n map[hash_string].append(token_)\n except KeyError:\n map[hash_string] = [token_]\n ntokens = []\n keys = map.keys()\n keys.sort()\n for k in keys:\n token_ = map[k][0]\n for otoken in map[k][1:]:\n token_.IncrementCount(otoken.Count())\n ntokens.append(token_)\n self.tokens_ = ntokens\n",
"<docstring token>\n<assignment token>\n<import token>\n<assignment token>\n<function token>\n\n\nclass Token:\n <docstring token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n\nclass TokenFreqStats:\n \"\"\"Holder for token frequency-statistics such as\n relative frequency-counts and variance.\n \"\"\"\n\n def __init__(self, tok):\n self.token_ = tok\n self.frequencies_ = []\n self.freqsum_ = 0\n self.freqsumsq_ = 0\n self.variance_ = 0\n\n def __repr__(self):\n return '#<%s %s %.6f %.6f %.6f>' % (self.token_, self.frequencies_,\n self.freqsum_, self.freqsumsq_, self.variance_)\n\n def Token(self):\n return self.token_\n\n def Frequencies(self):\n return self.frequencies_\n\n def AddFrequency(self, f):\n self.frequencies_.append(f)\n\n def SetFrequencies(self, freq):\n self.frequencies_ = []\n for f in freq:\n self.frequencies_.append(f)\n\n def NormFrequencies(self):\n self.frequencies_ = [float(f) for f in self.frequencies_]\n sumfreqs = float(sum(self.frequencies_))\n if sumfreqs != 0.0:\n self.frequencies_ = [(f / sumfreqs) for f in self.frequencies_]\n\n def CalcFreqStats(self):\n n = len(self.frequencies_)\n self.freqsum_ = float(sum(self.frequencies_))\n self.freqsumsq_ = SumProd(self.frequencies_, self.frequencies_)\n self.variance_ = self.freqsumsq_ / n - self.freqsum_ ** 2 / n ** 2\n\n def FreqSum(self):\n return self.freqsum_\n\n def FreqVariance(self):\n return self.variance_\n\n\nclass DocTokenStats:\n \"\"\"Holder for Doclist-specific token statistics, such as frequency\n counts. Also allows for calculation of pairwise comparison metrics\n such as Pearson's correlation.\n \"\"\"\n\n def __init__(self, doclist=None):\n if doclist is None:\n self.doclist_ = documents.Doclist()\n else:\n self.doclist_ = doclist\n self.n_ = len(self.doclist_.Docs())\n self.tokstats_ = {}\n\n def InitTokenStats(self, tok):\n tstats = TokenFreqStats(tok)\n tfreq = []\n for doc in self.doclist_.Docs():\n c = 0\n for lang in doc.Langs():\n if tok.LangId() != lang.Id():\n continue\n tmptok = lang.MatchToken(tok)\n if tmptok is not None:\n c += tmptok.Count()\n tfreq.append(c)\n tstats.SetFrequencies(tfreq)\n tstats.NormFrequencies()\n tstats.CalcFreqStats()\n self.tokstats_[tok.EncodeForHash()] = tstats\n return tstats\n\n def AddTokenStats(self, tstats):\n tokhash = tstats.Token().EncodeForHash()\n if tokhash not in self.tokstats_:\n self.tokstats_[tokhash] = tstats\n\n def GetTokenStats(self, tok):\n try:\n return self.tokstats_[tok.EncodeForHash()]\n except KeyError:\n return self.InitTokenStats(tok)\n\n def TokenStats(self):\n return self.tokstats_.values()\n\n def SetN(self, n):\n self.n_ = n\n\n def GetN(self):\n return self.n_\n\n def PearsonsCorrelation(self, token1, token2):\n stats1 = self.GetTokenStats(token1)\n stats2 = self.GetTokenStats(token2)\n freq1 = stats1.Frequencies()\n freq2 = stats2.Frequencies()\n sumxy = sum(map(lambda x, y: x * y, freq1, freq2))\n covxy = sumxy / float(self.n_) - stats1.FreqSum() * stats2.FreqSum(\n ) / float(self.n_ ** 2)\n try:\n rho = covxy / sqrt(stats1.FreqVariance() * stats2.FreqVariance())\n except ZeroDivisionError:\n rho = 0.0\n return rho\n\n\nclass Lang:\n \"\"\"Holder for tokens in a language.\n \"\"\"\n\n def __init__(self):\n self.id_ = ''\n self.tokens_ = []\n\n def XmlEncode(self):\n if len(self.tokens_) == 0:\n return ''\n xml_string_ = '<lang id=\"%s\">\\n%s\\n%s</lang>'\n xml_tokens = []\n for token_ in self.Tokens():\n xml_tokens.append(token_.XmlEncode())\n xml_result = xml_string_ % (self.id_, '\\n'.join(xml_tokens),\n LANG_INDENT_)\n return LANG_INDENT_ + xml_result\n\n def Id(self):\n return self.id_\n\n def SetId(self, id):\n self.id_ = id.encode('utf-8')\n\n def Tokens(self):\n return self.tokens_\n\n def SetTokens(self, tokens):\n self.tokens_ = []\n for t in tokens:\n self.AddToken(t)\n\n def AddToken(self, token, merge=False):\n \"\"\"If an identical token already exists in dictionary,\n will merge tokens and cumulate their counts. Checks to\n see that morphology and pronunciations are identical,\n otherwise the tokens will not be merged.\n \"\"\"\n token.SetLangId(self.id_)\n if not merge:\n self.tokens_.append(token)\n else:\n exists = self.MatchToken(token)\n if exists is None:\n self.tokens_.append(token)\n else:\n exists.IncrementCount(token.Count())\n\n def MatchToken(self, token):\n try:\n i = self.tokens_.index(token)\n return self.tokens_[i]\n except ValueError:\n return None\n\n def CompactTokens(self):\n \"\"\"Merge identical tokens and cumulate their counts. Checks to see\n that morphology and pronunciations are identical, otherwise the\n tokens will not be merged.\n \"\"\"\n map = {}\n for token_ in self.tokens_:\n hash_string = token_.EncodeForHash()\n try:\n map[hash_string].append(token_)\n except KeyError:\n map[hash_string] = [token_]\n ntokens = []\n keys = map.keys()\n keys.sort()\n for k in keys:\n token_ = map[k][0]\n for otoken in map[k][1:]:\n token_.IncrementCount(otoken.Count())\n ntokens.append(token_)\n self.tokens_ = ntokens\n",
"<docstring token>\n<assignment token>\n<import token>\n<assignment token>\n<function token>\n<class token>\n\n\nclass TokenFreqStats:\n \"\"\"Holder for token frequency-statistics such as\n relative frequency-counts and variance.\n \"\"\"\n\n def __init__(self, tok):\n self.token_ = tok\n self.frequencies_ = []\n self.freqsum_ = 0\n self.freqsumsq_ = 0\n self.variance_ = 0\n\n def __repr__(self):\n return '#<%s %s %.6f %.6f %.6f>' % (self.token_, self.frequencies_,\n self.freqsum_, self.freqsumsq_, self.variance_)\n\n def Token(self):\n return self.token_\n\n def Frequencies(self):\n return self.frequencies_\n\n def AddFrequency(self, f):\n self.frequencies_.append(f)\n\n def SetFrequencies(self, freq):\n self.frequencies_ = []\n for f in freq:\n self.frequencies_.append(f)\n\n def NormFrequencies(self):\n self.frequencies_ = [float(f) for f in self.frequencies_]\n sumfreqs = float(sum(self.frequencies_))\n if sumfreqs != 0.0:\n self.frequencies_ = [(f / sumfreqs) for f in self.frequencies_]\n\n def CalcFreqStats(self):\n n = len(self.frequencies_)\n self.freqsum_ = float(sum(self.frequencies_))\n self.freqsumsq_ = SumProd(self.frequencies_, self.frequencies_)\n self.variance_ = self.freqsumsq_ / n - self.freqsum_ ** 2 / n ** 2\n\n def FreqSum(self):\n return self.freqsum_\n\n def FreqVariance(self):\n return self.variance_\n\n\nclass DocTokenStats:\n \"\"\"Holder for Doclist-specific token statistics, such as frequency\n counts. Also allows for calculation of pairwise comparison metrics\n such as Pearson's correlation.\n \"\"\"\n\n def __init__(self, doclist=None):\n if doclist is None:\n self.doclist_ = documents.Doclist()\n else:\n self.doclist_ = doclist\n self.n_ = len(self.doclist_.Docs())\n self.tokstats_ = {}\n\n def InitTokenStats(self, tok):\n tstats = TokenFreqStats(tok)\n tfreq = []\n for doc in self.doclist_.Docs():\n c = 0\n for lang in doc.Langs():\n if tok.LangId() != lang.Id():\n continue\n tmptok = lang.MatchToken(tok)\n if tmptok is not None:\n c += tmptok.Count()\n tfreq.append(c)\n tstats.SetFrequencies(tfreq)\n tstats.NormFrequencies()\n tstats.CalcFreqStats()\n self.tokstats_[tok.EncodeForHash()] = tstats\n return tstats\n\n def AddTokenStats(self, tstats):\n tokhash = tstats.Token().EncodeForHash()\n if tokhash not in self.tokstats_:\n self.tokstats_[tokhash] = tstats\n\n def GetTokenStats(self, tok):\n try:\n return self.tokstats_[tok.EncodeForHash()]\n except KeyError:\n return self.InitTokenStats(tok)\n\n def TokenStats(self):\n return self.tokstats_.values()\n\n def SetN(self, n):\n self.n_ = n\n\n def GetN(self):\n return self.n_\n\n def PearsonsCorrelation(self, token1, token2):\n stats1 = self.GetTokenStats(token1)\n stats2 = self.GetTokenStats(token2)\n freq1 = stats1.Frequencies()\n freq2 = stats2.Frequencies()\n sumxy = sum(map(lambda x, y: x * y, freq1, freq2))\n covxy = sumxy / float(self.n_) - stats1.FreqSum() * stats2.FreqSum(\n ) / float(self.n_ ** 2)\n try:\n rho = covxy / sqrt(stats1.FreqVariance() * stats2.FreqVariance())\n except ZeroDivisionError:\n rho = 0.0\n return rho\n\n\nclass Lang:\n \"\"\"Holder for tokens in a language.\n \"\"\"\n\n def __init__(self):\n self.id_ = ''\n self.tokens_ = []\n\n def XmlEncode(self):\n if len(self.tokens_) == 0:\n return ''\n xml_string_ = '<lang id=\"%s\">\\n%s\\n%s</lang>'\n xml_tokens = []\n for token_ in self.Tokens():\n xml_tokens.append(token_.XmlEncode())\n xml_result = xml_string_ % (self.id_, '\\n'.join(xml_tokens),\n LANG_INDENT_)\n return LANG_INDENT_ + xml_result\n\n def Id(self):\n return self.id_\n\n def SetId(self, id):\n self.id_ = id.encode('utf-8')\n\n def Tokens(self):\n return self.tokens_\n\n def SetTokens(self, tokens):\n self.tokens_ = []\n for t in tokens:\n self.AddToken(t)\n\n def AddToken(self, token, merge=False):\n \"\"\"If an identical token already exists in dictionary,\n will merge tokens and cumulate their counts. Checks to\n see that morphology and pronunciations are identical,\n otherwise the tokens will not be merged.\n \"\"\"\n token.SetLangId(self.id_)\n if not merge:\n self.tokens_.append(token)\n else:\n exists = self.MatchToken(token)\n if exists is None:\n self.tokens_.append(token)\n else:\n exists.IncrementCount(token.Count())\n\n def MatchToken(self, token):\n try:\n i = self.tokens_.index(token)\n return self.tokens_[i]\n except ValueError:\n return None\n\n def CompactTokens(self):\n \"\"\"Merge identical tokens and cumulate their counts. Checks to see\n that morphology and pronunciations are identical, otherwise the\n tokens will not be merged.\n \"\"\"\n map = {}\n for token_ in self.tokens_:\n hash_string = token_.EncodeForHash()\n try:\n map[hash_string].append(token_)\n except KeyError:\n map[hash_string] = [token_]\n ntokens = []\n keys = map.keys()\n keys.sort()\n for k in keys:\n token_ = map[k][0]\n for otoken in map[k][1:]:\n token_.IncrementCount(otoken.Count())\n ntokens.append(token_)\n self.tokens_ = ntokens\n",
"<docstring token>\n<assignment token>\n<import token>\n<assignment token>\n<function token>\n<class token>\n\n\nclass TokenFreqStats:\n <docstring token>\n\n def __init__(self, tok):\n self.token_ = tok\n self.frequencies_ = []\n self.freqsum_ = 0\n self.freqsumsq_ = 0\n self.variance_ = 0\n\n def __repr__(self):\n return '#<%s %s %.6f %.6f %.6f>' % (self.token_, self.frequencies_,\n self.freqsum_, self.freqsumsq_, self.variance_)\n\n def Token(self):\n return self.token_\n\n def Frequencies(self):\n return self.frequencies_\n\n def AddFrequency(self, f):\n self.frequencies_.append(f)\n\n def SetFrequencies(self, freq):\n self.frequencies_ = []\n for f in freq:\n self.frequencies_.append(f)\n\n def NormFrequencies(self):\n self.frequencies_ = [float(f) for f in self.frequencies_]\n sumfreqs = float(sum(self.frequencies_))\n if sumfreqs != 0.0:\n self.frequencies_ = [(f / sumfreqs) for f in self.frequencies_]\n\n def CalcFreqStats(self):\n n = len(self.frequencies_)\n self.freqsum_ = float(sum(self.frequencies_))\n self.freqsumsq_ = SumProd(self.frequencies_, self.frequencies_)\n self.variance_ = self.freqsumsq_ / n - self.freqsum_ ** 2 / n ** 2\n\n def FreqSum(self):\n return self.freqsum_\n\n def FreqVariance(self):\n return self.variance_\n\n\nclass DocTokenStats:\n \"\"\"Holder for Doclist-specific token statistics, such as frequency\n counts. Also allows for calculation of pairwise comparison metrics\n such as Pearson's correlation.\n \"\"\"\n\n def __init__(self, doclist=None):\n if doclist is None:\n self.doclist_ = documents.Doclist()\n else:\n self.doclist_ = doclist\n self.n_ = len(self.doclist_.Docs())\n self.tokstats_ = {}\n\n def InitTokenStats(self, tok):\n tstats = TokenFreqStats(tok)\n tfreq = []\n for doc in self.doclist_.Docs():\n c = 0\n for lang in doc.Langs():\n if tok.LangId() != lang.Id():\n continue\n tmptok = lang.MatchToken(tok)\n if tmptok is not None:\n c += tmptok.Count()\n tfreq.append(c)\n tstats.SetFrequencies(tfreq)\n tstats.NormFrequencies()\n tstats.CalcFreqStats()\n self.tokstats_[tok.EncodeForHash()] = tstats\n return tstats\n\n def AddTokenStats(self, tstats):\n tokhash = tstats.Token().EncodeForHash()\n if tokhash not in self.tokstats_:\n self.tokstats_[tokhash] = tstats\n\n def GetTokenStats(self, tok):\n try:\n return self.tokstats_[tok.EncodeForHash()]\n except KeyError:\n return self.InitTokenStats(tok)\n\n def TokenStats(self):\n return self.tokstats_.values()\n\n def SetN(self, n):\n self.n_ = n\n\n def GetN(self):\n return self.n_\n\n def PearsonsCorrelation(self, token1, token2):\n stats1 = self.GetTokenStats(token1)\n stats2 = self.GetTokenStats(token2)\n freq1 = stats1.Frequencies()\n freq2 = stats2.Frequencies()\n sumxy = sum(map(lambda x, y: x * y, freq1, freq2))\n covxy = sumxy / float(self.n_) - stats1.FreqSum() * stats2.FreqSum(\n ) / float(self.n_ ** 2)\n try:\n rho = covxy / sqrt(stats1.FreqVariance() * stats2.FreqVariance())\n except ZeroDivisionError:\n rho = 0.0\n return rho\n\n\nclass Lang:\n \"\"\"Holder for tokens in a language.\n \"\"\"\n\n def __init__(self):\n self.id_ = ''\n self.tokens_ = []\n\n def XmlEncode(self):\n if len(self.tokens_) == 0:\n return ''\n xml_string_ = '<lang id=\"%s\">\\n%s\\n%s</lang>'\n xml_tokens = []\n for token_ in self.Tokens():\n xml_tokens.append(token_.XmlEncode())\n xml_result = xml_string_ % (self.id_, '\\n'.join(xml_tokens),\n LANG_INDENT_)\n return LANG_INDENT_ + xml_result\n\n def Id(self):\n return self.id_\n\n def SetId(self, id):\n self.id_ = id.encode('utf-8')\n\n def Tokens(self):\n return self.tokens_\n\n def SetTokens(self, tokens):\n self.tokens_ = []\n for t in tokens:\n self.AddToken(t)\n\n def AddToken(self, token, merge=False):\n \"\"\"If an identical token already exists in dictionary,\n will merge tokens and cumulate their counts. Checks to\n see that morphology and pronunciations are identical,\n otherwise the tokens will not be merged.\n \"\"\"\n token.SetLangId(self.id_)\n if not merge:\n self.tokens_.append(token)\n else:\n exists = self.MatchToken(token)\n if exists is None:\n self.tokens_.append(token)\n else:\n exists.IncrementCount(token.Count())\n\n def MatchToken(self, token):\n try:\n i = self.tokens_.index(token)\n return self.tokens_[i]\n except ValueError:\n return None\n\n def CompactTokens(self):\n \"\"\"Merge identical tokens and cumulate their counts. Checks to see\n that morphology and pronunciations are identical, otherwise the\n tokens will not be merged.\n \"\"\"\n map = {}\n for token_ in self.tokens_:\n hash_string = token_.EncodeForHash()\n try:\n map[hash_string].append(token_)\n except KeyError:\n map[hash_string] = [token_]\n ntokens = []\n keys = map.keys()\n keys.sort()\n for k in keys:\n token_ = map[k][0]\n for otoken in map[k][1:]:\n token_.IncrementCount(otoken.Count())\n ntokens.append(token_)\n self.tokens_ = ntokens\n",
"<docstring token>\n<assignment token>\n<import token>\n<assignment token>\n<function token>\n<class token>\n\n\nclass TokenFreqStats:\n <docstring token>\n\n def __init__(self, tok):\n self.token_ = tok\n self.frequencies_ = []\n self.freqsum_ = 0\n self.freqsumsq_ = 0\n self.variance_ = 0\n\n def __repr__(self):\n return '#<%s %s %.6f %.6f %.6f>' % (self.token_, self.frequencies_,\n self.freqsum_, self.freqsumsq_, self.variance_)\n <function token>\n\n def Frequencies(self):\n return self.frequencies_\n\n def AddFrequency(self, f):\n self.frequencies_.append(f)\n\n def SetFrequencies(self, freq):\n self.frequencies_ = []\n for f in freq:\n self.frequencies_.append(f)\n\n def NormFrequencies(self):\n self.frequencies_ = [float(f) for f in self.frequencies_]\n sumfreqs = float(sum(self.frequencies_))\n if sumfreqs != 0.0:\n self.frequencies_ = [(f / sumfreqs) for f in self.frequencies_]\n\n def CalcFreqStats(self):\n n = len(self.frequencies_)\n self.freqsum_ = float(sum(self.frequencies_))\n self.freqsumsq_ = SumProd(self.frequencies_, self.frequencies_)\n self.variance_ = self.freqsumsq_ / n - self.freqsum_ ** 2 / n ** 2\n\n def FreqSum(self):\n return self.freqsum_\n\n def FreqVariance(self):\n return self.variance_\n\n\nclass DocTokenStats:\n \"\"\"Holder for Doclist-specific token statistics, such as frequency\n counts. Also allows for calculation of pairwise comparison metrics\n such as Pearson's correlation.\n \"\"\"\n\n def __init__(self, doclist=None):\n if doclist is None:\n self.doclist_ = documents.Doclist()\n else:\n self.doclist_ = doclist\n self.n_ = len(self.doclist_.Docs())\n self.tokstats_ = {}\n\n def InitTokenStats(self, tok):\n tstats = TokenFreqStats(tok)\n tfreq = []\n for doc in self.doclist_.Docs():\n c = 0\n for lang in doc.Langs():\n if tok.LangId() != lang.Id():\n continue\n tmptok = lang.MatchToken(tok)\n if tmptok is not None:\n c += tmptok.Count()\n tfreq.append(c)\n tstats.SetFrequencies(tfreq)\n tstats.NormFrequencies()\n tstats.CalcFreqStats()\n self.tokstats_[tok.EncodeForHash()] = tstats\n return tstats\n\n def AddTokenStats(self, tstats):\n tokhash = tstats.Token().EncodeForHash()\n if tokhash not in self.tokstats_:\n self.tokstats_[tokhash] = tstats\n\n def GetTokenStats(self, tok):\n try:\n return self.tokstats_[tok.EncodeForHash()]\n except KeyError:\n return self.InitTokenStats(tok)\n\n def TokenStats(self):\n return self.tokstats_.values()\n\n def SetN(self, n):\n self.n_ = n\n\n def GetN(self):\n return self.n_\n\n def PearsonsCorrelation(self, token1, token2):\n stats1 = self.GetTokenStats(token1)\n stats2 = self.GetTokenStats(token2)\n freq1 = stats1.Frequencies()\n freq2 = stats2.Frequencies()\n sumxy = sum(map(lambda x, y: x * y, freq1, freq2))\n covxy = sumxy / float(self.n_) - stats1.FreqSum() * stats2.FreqSum(\n ) / float(self.n_ ** 2)\n try:\n rho = covxy / sqrt(stats1.FreqVariance() * stats2.FreqVariance())\n except ZeroDivisionError:\n rho = 0.0\n return rho\n\n\nclass Lang:\n \"\"\"Holder for tokens in a language.\n \"\"\"\n\n def __init__(self):\n self.id_ = ''\n self.tokens_ = []\n\n def XmlEncode(self):\n if len(self.tokens_) == 0:\n return ''\n xml_string_ = '<lang id=\"%s\">\\n%s\\n%s</lang>'\n xml_tokens = []\n for token_ in self.Tokens():\n xml_tokens.append(token_.XmlEncode())\n xml_result = xml_string_ % (self.id_, '\\n'.join(xml_tokens),\n LANG_INDENT_)\n return LANG_INDENT_ + xml_result\n\n def Id(self):\n return self.id_\n\n def SetId(self, id):\n self.id_ = id.encode('utf-8')\n\n def Tokens(self):\n return self.tokens_\n\n def SetTokens(self, tokens):\n self.tokens_ = []\n for t in tokens:\n self.AddToken(t)\n\n def AddToken(self, token, merge=False):\n \"\"\"If an identical token already exists in dictionary,\n will merge tokens and cumulate their counts. Checks to\n see that morphology and pronunciations are identical,\n otherwise the tokens will not be merged.\n \"\"\"\n token.SetLangId(self.id_)\n if not merge:\n self.tokens_.append(token)\n else:\n exists = self.MatchToken(token)\n if exists is None:\n self.tokens_.append(token)\n else:\n exists.IncrementCount(token.Count())\n\n def MatchToken(self, token):\n try:\n i = self.tokens_.index(token)\n return self.tokens_[i]\n except ValueError:\n return None\n\n def CompactTokens(self):\n \"\"\"Merge identical tokens and cumulate their counts. Checks to see\n that morphology and pronunciations are identical, otherwise the\n tokens will not be merged.\n \"\"\"\n map = {}\n for token_ in self.tokens_:\n hash_string = token_.EncodeForHash()\n try:\n map[hash_string].append(token_)\n except KeyError:\n map[hash_string] = [token_]\n ntokens = []\n keys = map.keys()\n keys.sort()\n for k in keys:\n token_ = map[k][0]\n for otoken in map[k][1:]:\n token_.IncrementCount(otoken.Count())\n ntokens.append(token_)\n self.tokens_ = ntokens\n",
"<docstring token>\n<assignment token>\n<import token>\n<assignment token>\n<function token>\n<class token>\n\n\nclass TokenFreqStats:\n <docstring token>\n\n def __init__(self, tok):\n self.token_ = tok\n self.frequencies_ = []\n self.freqsum_ = 0\n self.freqsumsq_ = 0\n self.variance_ = 0\n\n def __repr__(self):\n return '#<%s %s %.6f %.6f %.6f>' % (self.token_, self.frequencies_,\n self.freqsum_, self.freqsumsq_, self.variance_)\n <function token>\n\n def Frequencies(self):\n return self.frequencies_\n\n def AddFrequency(self, f):\n self.frequencies_.append(f)\n\n def SetFrequencies(self, freq):\n self.frequencies_ = []\n for f in freq:\n self.frequencies_.append(f)\n\n def NormFrequencies(self):\n self.frequencies_ = [float(f) for f in self.frequencies_]\n sumfreqs = float(sum(self.frequencies_))\n if sumfreqs != 0.0:\n self.frequencies_ = [(f / sumfreqs) for f in self.frequencies_]\n\n def CalcFreqStats(self):\n n = len(self.frequencies_)\n self.freqsum_ = float(sum(self.frequencies_))\n self.freqsumsq_ = SumProd(self.frequencies_, self.frequencies_)\n self.variance_ = self.freqsumsq_ / n - self.freqsum_ ** 2 / n ** 2\n <function token>\n\n def FreqVariance(self):\n return self.variance_\n\n\nclass DocTokenStats:\n \"\"\"Holder for Doclist-specific token statistics, such as frequency\n counts. Also allows for calculation of pairwise comparison metrics\n such as Pearson's correlation.\n \"\"\"\n\n def __init__(self, doclist=None):\n if doclist is None:\n self.doclist_ = documents.Doclist()\n else:\n self.doclist_ = doclist\n self.n_ = len(self.doclist_.Docs())\n self.tokstats_ = {}\n\n def InitTokenStats(self, tok):\n tstats = TokenFreqStats(tok)\n tfreq = []\n for doc in self.doclist_.Docs():\n c = 0\n for lang in doc.Langs():\n if tok.LangId() != lang.Id():\n continue\n tmptok = lang.MatchToken(tok)\n if tmptok is not None:\n c += tmptok.Count()\n tfreq.append(c)\n tstats.SetFrequencies(tfreq)\n tstats.NormFrequencies()\n tstats.CalcFreqStats()\n self.tokstats_[tok.EncodeForHash()] = tstats\n return tstats\n\n def AddTokenStats(self, tstats):\n tokhash = tstats.Token().EncodeForHash()\n if tokhash not in self.tokstats_:\n self.tokstats_[tokhash] = tstats\n\n def GetTokenStats(self, tok):\n try:\n return self.tokstats_[tok.EncodeForHash()]\n except KeyError:\n return self.InitTokenStats(tok)\n\n def TokenStats(self):\n return self.tokstats_.values()\n\n def SetN(self, n):\n self.n_ = n\n\n def GetN(self):\n return self.n_\n\n def PearsonsCorrelation(self, token1, token2):\n stats1 = self.GetTokenStats(token1)\n stats2 = self.GetTokenStats(token2)\n freq1 = stats1.Frequencies()\n freq2 = stats2.Frequencies()\n sumxy = sum(map(lambda x, y: x * y, freq1, freq2))\n covxy = sumxy / float(self.n_) - stats1.FreqSum() * stats2.FreqSum(\n ) / float(self.n_ ** 2)\n try:\n rho = covxy / sqrt(stats1.FreqVariance() * stats2.FreqVariance())\n except ZeroDivisionError:\n rho = 0.0\n return rho\n\n\nclass Lang:\n \"\"\"Holder for tokens in a language.\n \"\"\"\n\n def __init__(self):\n self.id_ = ''\n self.tokens_ = []\n\n def XmlEncode(self):\n if len(self.tokens_) == 0:\n return ''\n xml_string_ = '<lang id=\"%s\">\\n%s\\n%s</lang>'\n xml_tokens = []\n for token_ in self.Tokens():\n xml_tokens.append(token_.XmlEncode())\n xml_result = xml_string_ % (self.id_, '\\n'.join(xml_tokens),\n LANG_INDENT_)\n return LANG_INDENT_ + xml_result\n\n def Id(self):\n return self.id_\n\n def SetId(self, id):\n self.id_ = id.encode('utf-8')\n\n def Tokens(self):\n return self.tokens_\n\n def SetTokens(self, tokens):\n self.tokens_ = []\n for t in tokens:\n self.AddToken(t)\n\n def AddToken(self, token, merge=False):\n \"\"\"If an identical token already exists in dictionary,\n will merge tokens and cumulate their counts. Checks to\n see that morphology and pronunciations are identical,\n otherwise the tokens will not be merged.\n \"\"\"\n token.SetLangId(self.id_)\n if not merge:\n self.tokens_.append(token)\n else:\n exists = self.MatchToken(token)\n if exists is None:\n self.tokens_.append(token)\n else:\n exists.IncrementCount(token.Count())\n\n def MatchToken(self, token):\n try:\n i = self.tokens_.index(token)\n return self.tokens_[i]\n except ValueError:\n return None\n\n def CompactTokens(self):\n \"\"\"Merge identical tokens and cumulate their counts. Checks to see\n that morphology and pronunciations are identical, otherwise the\n tokens will not be merged.\n \"\"\"\n map = {}\n for token_ in self.tokens_:\n hash_string = token_.EncodeForHash()\n try:\n map[hash_string].append(token_)\n except KeyError:\n map[hash_string] = [token_]\n ntokens = []\n keys = map.keys()\n keys.sort()\n for k in keys:\n token_ = map[k][0]\n for otoken in map[k][1:]:\n token_.IncrementCount(otoken.Count())\n ntokens.append(token_)\n self.tokens_ = ntokens\n",
"<docstring token>\n<assignment token>\n<import token>\n<assignment token>\n<function token>\n<class token>\n\n\nclass TokenFreqStats:\n <docstring token>\n\n def __init__(self, tok):\n self.token_ = tok\n self.frequencies_ = []\n self.freqsum_ = 0\n self.freqsumsq_ = 0\n self.variance_ = 0\n\n def __repr__(self):\n return '#<%s %s %.6f %.6f %.6f>' % (self.token_, self.frequencies_,\n self.freqsum_, self.freqsumsq_, self.variance_)\n <function token>\n <function token>\n\n def AddFrequency(self, f):\n self.frequencies_.append(f)\n\n def SetFrequencies(self, freq):\n self.frequencies_ = []\n for f in freq:\n self.frequencies_.append(f)\n\n def NormFrequencies(self):\n self.frequencies_ = [float(f) for f in self.frequencies_]\n sumfreqs = float(sum(self.frequencies_))\n if sumfreqs != 0.0:\n self.frequencies_ = [(f / sumfreqs) for f in self.frequencies_]\n\n def CalcFreqStats(self):\n n = len(self.frequencies_)\n self.freqsum_ = float(sum(self.frequencies_))\n self.freqsumsq_ = SumProd(self.frequencies_, self.frequencies_)\n self.variance_ = self.freqsumsq_ / n - self.freqsum_ ** 2 / n ** 2\n <function token>\n\n def FreqVariance(self):\n return self.variance_\n\n\nclass DocTokenStats:\n \"\"\"Holder for Doclist-specific token statistics, such as frequency\n counts. Also allows for calculation of pairwise comparison metrics\n such as Pearson's correlation.\n \"\"\"\n\n def __init__(self, doclist=None):\n if doclist is None:\n self.doclist_ = documents.Doclist()\n else:\n self.doclist_ = doclist\n self.n_ = len(self.doclist_.Docs())\n self.tokstats_ = {}\n\n def InitTokenStats(self, tok):\n tstats = TokenFreqStats(tok)\n tfreq = []\n for doc in self.doclist_.Docs():\n c = 0\n for lang in doc.Langs():\n if tok.LangId() != lang.Id():\n continue\n tmptok = lang.MatchToken(tok)\n if tmptok is not None:\n c += tmptok.Count()\n tfreq.append(c)\n tstats.SetFrequencies(tfreq)\n tstats.NormFrequencies()\n tstats.CalcFreqStats()\n self.tokstats_[tok.EncodeForHash()] = tstats\n return tstats\n\n def AddTokenStats(self, tstats):\n tokhash = tstats.Token().EncodeForHash()\n if tokhash not in self.tokstats_:\n self.tokstats_[tokhash] = tstats\n\n def GetTokenStats(self, tok):\n try:\n return self.tokstats_[tok.EncodeForHash()]\n except KeyError:\n return self.InitTokenStats(tok)\n\n def TokenStats(self):\n return self.tokstats_.values()\n\n def SetN(self, n):\n self.n_ = n\n\n def GetN(self):\n return self.n_\n\n def PearsonsCorrelation(self, token1, token2):\n stats1 = self.GetTokenStats(token1)\n stats2 = self.GetTokenStats(token2)\n freq1 = stats1.Frequencies()\n freq2 = stats2.Frequencies()\n sumxy = sum(map(lambda x, y: x * y, freq1, freq2))\n covxy = sumxy / float(self.n_) - stats1.FreqSum() * stats2.FreqSum(\n ) / float(self.n_ ** 2)\n try:\n rho = covxy / sqrt(stats1.FreqVariance() * stats2.FreqVariance())\n except ZeroDivisionError:\n rho = 0.0\n return rho\n\n\nclass Lang:\n \"\"\"Holder for tokens in a language.\n \"\"\"\n\n def __init__(self):\n self.id_ = ''\n self.tokens_ = []\n\n def XmlEncode(self):\n if len(self.tokens_) == 0:\n return ''\n xml_string_ = '<lang id=\"%s\">\\n%s\\n%s</lang>'\n xml_tokens = []\n for token_ in self.Tokens():\n xml_tokens.append(token_.XmlEncode())\n xml_result = xml_string_ % (self.id_, '\\n'.join(xml_tokens),\n LANG_INDENT_)\n return LANG_INDENT_ + xml_result\n\n def Id(self):\n return self.id_\n\n def SetId(self, id):\n self.id_ = id.encode('utf-8')\n\n def Tokens(self):\n return self.tokens_\n\n def SetTokens(self, tokens):\n self.tokens_ = []\n for t in tokens:\n self.AddToken(t)\n\n def AddToken(self, token, merge=False):\n \"\"\"If an identical token already exists in dictionary,\n will merge tokens and cumulate their counts. Checks to\n see that morphology and pronunciations are identical,\n otherwise the tokens will not be merged.\n \"\"\"\n token.SetLangId(self.id_)\n if not merge:\n self.tokens_.append(token)\n else:\n exists = self.MatchToken(token)\n if exists is None:\n self.tokens_.append(token)\n else:\n exists.IncrementCount(token.Count())\n\n def MatchToken(self, token):\n try:\n i = self.tokens_.index(token)\n return self.tokens_[i]\n except ValueError:\n return None\n\n def CompactTokens(self):\n \"\"\"Merge identical tokens and cumulate their counts. Checks to see\n that morphology and pronunciations are identical, otherwise the\n tokens will not be merged.\n \"\"\"\n map = {}\n for token_ in self.tokens_:\n hash_string = token_.EncodeForHash()\n try:\n map[hash_string].append(token_)\n except KeyError:\n map[hash_string] = [token_]\n ntokens = []\n keys = map.keys()\n keys.sort()\n for k in keys:\n token_ = map[k][0]\n for otoken in map[k][1:]:\n token_.IncrementCount(otoken.Count())\n ntokens.append(token_)\n self.tokens_ = ntokens\n",
"<docstring token>\n<assignment token>\n<import token>\n<assignment token>\n<function token>\n<class token>\n\n\nclass TokenFreqStats:\n <docstring token>\n\n def __init__(self, tok):\n self.token_ = tok\n self.frequencies_ = []\n self.freqsum_ = 0\n self.freqsumsq_ = 0\n self.variance_ = 0\n\n def __repr__(self):\n return '#<%s %s %.6f %.6f %.6f>' % (self.token_, self.frequencies_,\n self.freqsum_, self.freqsumsq_, self.variance_)\n <function token>\n <function token>\n\n def AddFrequency(self, f):\n self.frequencies_.append(f)\n\n def SetFrequencies(self, freq):\n self.frequencies_ = []\n for f in freq:\n self.frequencies_.append(f)\n\n def NormFrequencies(self):\n self.frequencies_ = [float(f) for f in self.frequencies_]\n sumfreqs = float(sum(self.frequencies_))\n if sumfreqs != 0.0:\n self.frequencies_ = [(f / sumfreqs) for f in self.frequencies_]\n <function token>\n <function token>\n\n def FreqVariance(self):\n return self.variance_\n\n\nclass DocTokenStats:\n \"\"\"Holder for Doclist-specific token statistics, such as frequency\n counts. Also allows for calculation of pairwise comparison metrics\n such as Pearson's correlation.\n \"\"\"\n\n def __init__(self, doclist=None):\n if doclist is None:\n self.doclist_ = documents.Doclist()\n else:\n self.doclist_ = doclist\n self.n_ = len(self.doclist_.Docs())\n self.tokstats_ = {}\n\n def InitTokenStats(self, tok):\n tstats = TokenFreqStats(tok)\n tfreq = []\n for doc in self.doclist_.Docs():\n c = 0\n for lang in doc.Langs():\n if tok.LangId() != lang.Id():\n continue\n tmptok = lang.MatchToken(tok)\n if tmptok is not None:\n c += tmptok.Count()\n tfreq.append(c)\n tstats.SetFrequencies(tfreq)\n tstats.NormFrequencies()\n tstats.CalcFreqStats()\n self.tokstats_[tok.EncodeForHash()] = tstats\n return tstats\n\n def AddTokenStats(self, tstats):\n tokhash = tstats.Token().EncodeForHash()\n if tokhash not in self.tokstats_:\n self.tokstats_[tokhash] = tstats\n\n def GetTokenStats(self, tok):\n try:\n return self.tokstats_[tok.EncodeForHash()]\n except KeyError:\n return self.InitTokenStats(tok)\n\n def TokenStats(self):\n return self.tokstats_.values()\n\n def SetN(self, n):\n self.n_ = n\n\n def GetN(self):\n return self.n_\n\n def PearsonsCorrelation(self, token1, token2):\n stats1 = self.GetTokenStats(token1)\n stats2 = self.GetTokenStats(token2)\n freq1 = stats1.Frequencies()\n freq2 = stats2.Frequencies()\n sumxy = sum(map(lambda x, y: x * y, freq1, freq2))\n covxy = sumxy / float(self.n_) - stats1.FreqSum() * stats2.FreqSum(\n ) / float(self.n_ ** 2)\n try:\n rho = covxy / sqrt(stats1.FreqVariance() * stats2.FreqVariance())\n except ZeroDivisionError:\n rho = 0.0\n return rho\n\n\nclass Lang:\n \"\"\"Holder for tokens in a language.\n \"\"\"\n\n def __init__(self):\n self.id_ = ''\n self.tokens_ = []\n\n def XmlEncode(self):\n if len(self.tokens_) == 0:\n return ''\n xml_string_ = '<lang id=\"%s\">\\n%s\\n%s</lang>'\n xml_tokens = []\n for token_ in self.Tokens():\n xml_tokens.append(token_.XmlEncode())\n xml_result = xml_string_ % (self.id_, '\\n'.join(xml_tokens),\n LANG_INDENT_)\n return LANG_INDENT_ + xml_result\n\n def Id(self):\n return self.id_\n\n def SetId(self, id):\n self.id_ = id.encode('utf-8')\n\n def Tokens(self):\n return self.tokens_\n\n def SetTokens(self, tokens):\n self.tokens_ = []\n for t in tokens:\n self.AddToken(t)\n\n def AddToken(self, token, merge=False):\n \"\"\"If an identical token already exists in dictionary,\n will merge tokens and cumulate their counts. Checks to\n see that morphology and pronunciations are identical,\n otherwise the tokens will not be merged.\n \"\"\"\n token.SetLangId(self.id_)\n if not merge:\n self.tokens_.append(token)\n else:\n exists = self.MatchToken(token)\n if exists is None:\n self.tokens_.append(token)\n else:\n exists.IncrementCount(token.Count())\n\n def MatchToken(self, token):\n try:\n i = self.tokens_.index(token)\n return self.tokens_[i]\n except ValueError:\n return None\n\n def CompactTokens(self):\n \"\"\"Merge identical tokens and cumulate their counts. Checks to see\n that morphology and pronunciations are identical, otherwise the\n tokens will not be merged.\n \"\"\"\n map = {}\n for token_ in self.tokens_:\n hash_string = token_.EncodeForHash()\n try:\n map[hash_string].append(token_)\n except KeyError:\n map[hash_string] = [token_]\n ntokens = []\n keys = map.keys()\n keys.sort()\n for k in keys:\n token_ = map[k][0]\n for otoken in map[k][1:]:\n token_.IncrementCount(otoken.Count())\n ntokens.append(token_)\n self.tokens_ = ntokens\n",
"<docstring token>\n<assignment token>\n<import token>\n<assignment token>\n<function token>\n<class token>\n\n\nclass TokenFreqStats:\n <docstring token>\n\n def __init__(self, tok):\n self.token_ = tok\n self.frequencies_ = []\n self.freqsum_ = 0\n self.freqsumsq_ = 0\n self.variance_ = 0\n\n def __repr__(self):\n return '#<%s %s %.6f %.6f %.6f>' % (self.token_, self.frequencies_,\n self.freqsum_, self.freqsumsq_, self.variance_)\n <function token>\n <function token>\n\n def AddFrequency(self, f):\n self.frequencies_.append(f)\n\n def SetFrequencies(self, freq):\n self.frequencies_ = []\n for f in freq:\n self.frequencies_.append(f)\n <function token>\n <function token>\n <function token>\n\n def FreqVariance(self):\n return self.variance_\n\n\nclass DocTokenStats:\n \"\"\"Holder for Doclist-specific token statistics, such as frequency\n counts. Also allows for calculation of pairwise comparison metrics\n such as Pearson's correlation.\n \"\"\"\n\n def __init__(self, doclist=None):\n if doclist is None:\n self.doclist_ = documents.Doclist()\n else:\n self.doclist_ = doclist\n self.n_ = len(self.doclist_.Docs())\n self.tokstats_ = {}\n\n def InitTokenStats(self, tok):\n tstats = TokenFreqStats(tok)\n tfreq = []\n for doc in self.doclist_.Docs():\n c = 0\n for lang in doc.Langs():\n if tok.LangId() != lang.Id():\n continue\n tmptok = lang.MatchToken(tok)\n if tmptok is not None:\n c += tmptok.Count()\n tfreq.append(c)\n tstats.SetFrequencies(tfreq)\n tstats.NormFrequencies()\n tstats.CalcFreqStats()\n self.tokstats_[tok.EncodeForHash()] = tstats\n return tstats\n\n def AddTokenStats(self, tstats):\n tokhash = tstats.Token().EncodeForHash()\n if tokhash not in self.tokstats_:\n self.tokstats_[tokhash] = tstats\n\n def GetTokenStats(self, tok):\n try:\n return self.tokstats_[tok.EncodeForHash()]\n except KeyError:\n return self.InitTokenStats(tok)\n\n def TokenStats(self):\n return self.tokstats_.values()\n\n def SetN(self, n):\n self.n_ = n\n\n def GetN(self):\n return self.n_\n\n def PearsonsCorrelation(self, token1, token2):\n stats1 = self.GetTokenStats(token1)\n stats2 = self.GetTokenStats(token2)\n freq1 = stats1.Frequencies()\n freq2 = stats2.Frequencies()\n sumxy = sum(map(lambda x, y: x * y, freq1, freq2))\n covxy = sumxy / float(self.n_) - stats1.FreqSum() * stats2.FreqSum(\n ) / float(self.n_ ** 2)\n try:\n rho = covxy / sqrt(stats1.FreqVariance() * stats2.FreqVariance())\n except ZeroDivisionError:\n rho = 0.0\n return rho\n\n\nclass Lang:\n \"\"\"Holder for tokens in a language.\n \"\"\"\n\n def __init__(self):\n self.id_ = ''\n self.tokens_ = []\n\n def XmlEncode(self):\n if len(self.tokens_) == 0:\n return ''\n xml_string_ = '<lang id=\"%s\">\\n%s\\n%s</lang>'\n xml_tokens = []\n for token_ in self.Tokens():\n xml_tokens.append(token_.XmlEncode())\n xml_result = xml_string_ % (self.id_, '\\n'.join(xml_tokens),\n LANG_INDENT_)\n return LANG_INDENT_ + xml_result\n\n def Id(self):\n return self.id_\n\n def SetId(self, id):\n self.id_ = id.encode('utf-8')\n\n def Tokens(self):\n return self.tokens_\n\n def SetTokens(self, tokens):\n self.tokens_ = []\n for t in tokens:\n self.AddToken(t)\n\n def AddToken(self, token, merge=False):\n \"\"\"If an identical token already exists in dictionary,\n will merge tokens and cumulate their counts. Checks to\n see that morphology and pronunciations are identical,\n otherwise the tokens will not be merged.\n \"\"\"\n token.SetLangId(self.id_)\n if not merge:\n self.tokens_.append(token)\n else:\n exists = self.MatchToken(token)\n if exists is None:\n self.tokens_.append(token)\n else:\n exists.IncrementCount(token.Count())\n\n def MatchToken(self, token):\n try:\n i = self.tokens_.index(token)\n return self.tokens_[i]\n except ValueError:\n return None\n\n def CompactTokens(self):\n \"\"\"Merge identical tokens and cumulate their counts. Checks to see\n that morphology and pronunciations are identical, otherwise the\n tokens will not be merged.\n \"\"\"\n map = {}\n for token_ in self.tokens_:\n hash_string = token_.EncodeForHash()\n try:\n map[hash_string].append(token_)\n except KeyError:\n map[hash_string] = [token_]\n ntokens = []\n keys = map.keys()\n keys.sort()\n for k in keys:\n token_ = map[k][0]\n for otoken in map[k][1:]:\n token_.IncrementCount(otoken.Count())\n ntokens.append(token_)\n self.tokens_ = ntokens\n",
"<docstring token>\n<assignment token>\n<import token>\n<assignment token>\n<function token>\n<class token>\n\n\nclass TokenFreqStats:\n <docstring token>\n\n def __init__(self, tok):\n self.token_ = tok\n self.frequencies_ = []\n self.freqsum_ = 0\n self.freqsumsq_ = 0\n self.variance_ = 0\n <function token>\n <function token>\n <function token>\n\n def AddFrequency(self, f):\n self.frequencies_.append(f)\n\n def SetFrequencies(self, freq):\n self.frequencies_ = []\n for f in freq:\n self.frequencies_.append(f)\n <function token>\n <function token>\n <function token>\n\n def FreqVariance(self):\n return self.variance_\n\n\nclass DocTokenStats:\n \"\"\"Holder for Doclist-specific token statistics, such as frequency\n counts. Also allows for calculation of pairwise comparison metrics\n such as Pearson's correlation.\n \"\"\"\n\n def __init__(self, doclist=None):\n if doclist is None:\n self.doclist_ = documents.Doclist()\n else:\n self.doclist_ = doclist\n self.n_ = len(self.doclist_.Docs())\n self.tokstats_ = {}\n\n def InitTokenStats(self, tok):\n tstats = TokenFreqStats(tok)\n tfreq = []\n for doc in self.doclist_.Docs():\n c = 0\n for lang in doc.Langs():\n if tok.LangId() != lang.Id():\n continue\n tmptok = lang.MatchToken(tok)\n if tmptok is not None:\n c += tmptok.Count()\n tfreq.append(c)\n tstats.SetFrequencies(tfreq)\n tstats.NormFrequencies()\n tstats.CalcFreqStats()\n self.tokstats_[tok.EncodeForHash()] = tstats\n return tstats\n\n def AddTokenStats(self, tstats):\n tokhash = tstats.Token().EncodeForHash()\n if tokhash not in self.tokstats_:\n self.tokstats_[tokhash] = tstats\n\n def GetTokenStats(self, tok):\n try:\n return self.tokstats_[tok.EncodeForHash()]\n except KeyError:\n return self.InitTokenStats(tok)\n\n def TokenStats(self):\n return self.tokstats_.values()\n\n def SetN(self, n):\n self.n_ = n\n\n def GetN(self):\n return self.n_\n\n def PearsonsCorrelation(self, token1, token2):\n stats1 = self.GetTokenStats(token1)\n stats2 = self.GetTokenStats(token2)\n freq1 = stats1.Frequencies()\n freq2 = stats2.Frequencies()\n sumxy = sum(map(lambda x, y: x * y, freq1, freq2))\n covxy = sumxy / float(self.n_) - stats1.FreqSum() * stats2.FreqSum(\n ) / float(self.n_ ** 2)\n try:\n rho = covxy / sqrt(stats1.FreqVariance() * stats2.FreqVariance())\n except ZeroDivisionError:\n rho = 0.0\n return rho\n\n\nclass Lang:\n \"\"\"Holder for tokens in a language.\n \"\"\"\n\n def __init__(self):\n self.id_ = ''\n self.tokens_ = []\n\n def XmlEncode(self):\n if len(self.tokens_) == 0:\n return ''\n xml_string_ = '<lang id=\"%s\">\\n%s\\n%s</lang>'\n xml_tokens = []\n for token_ in self.Tokens():\n xml_tokens.append(token_.XmlEncode())\n xml_result = xml_string_ % (self.id_, '\\n'.join(xml_tokens),\n LANG_INDENT_)\n return LANG_INDENT_ + xml_result\n\n def Id(self):\n return self.id_\n\n def SetId(self, id):\n self.id_ = id.encode('utf-8')\n\n def Tokens(self):\n return self.tokens_\n\n def SetTokens(self, tokens):\n self.tokens_ = []\n for t in tokens:\n self.AddToken(t)\n\n def AddToken(self, token, merge=False):\n \"\"\"If an identical token already exists in dictionary,\n will merge tokens and cumulate their counts. Checks to\n see that morphology and pronunciations are identical,\n otherwise the tokens will not be merged.\n \"\"\"\n token.SetLangId(self.id_)\n if not merge:\n self.tokens_.append(token)\n else:\n exists = self.MatchToken(token)\n if exists is None:\n self.tokens_.append(token)\n else:\n exists.IncrementCount(token.Count())\n\n def MatchToken(self, token):\n try:\n i = self.tokens_.index(token)\n return self.tokens_[i]\n except ValueError:\n return None\n\n def CompactTokens(self):\n \"\"\"Merge identical tokens and cumulate their counts. Checks to see\n that morphology and pronunciations are identical, otherwise the\n tokens will not be merged.\n \"\"\"\n map = {}\n for token_ in self.tokens_:\n hash_string = token_.EncodeForHash()\n try:\n map[hash_string].append(token_)\n except KeyError:\n map[hash_string] = [token_]\n ntokens = []\n keys = map.keys()\n keys.sort()\n for k in keys:\n token_ = map[k][0]\n for otoken in map[k][1:]:\n token_.IncrementCount(otoken.Count())\n ntokens.append(token_)\n self.tokens_ = ntokens\n",
"<docstring token>\n<assignment token>\n<import token>\n<assignment token>\n<function token>\n<class token>\n\n\nclass TokenFreqStats:\n <docstring token>\n\n def __init__(self, tok):\n self.token_ = tok\n self.frequencies_ = []\n self.freqsum_ = 0\n self.freqsumsq_ = 0\n self.variance_ = 0\n <function token>\n <function token>\n <function token>\n <function token>\n\n def SetFrequencies(self, freq):\n self.frequencies_ = []\n for f in freq:\n self.frequencies_.append(f)\n <function token>\n <function token>\n <function token>\n\n def FreqVariance(self):\n return self.variance_\n\n\nclass DocTokenStats:\n \"\"\"Holder for Doclist-specific token statistics, such as frequency\n counts. Also allows for calculation of pairwise comparison metrics\n such as Pearson's correlation.\n \"\"\"\n\n def __init__(self, doclist=None):\n if doclist is None:\n self.doclist_ = documents.Doclist()\n else:\n self.doclist_ = doclist\n self.n_ = len(self.doclist_.Docs())\n self.tokstats_ = {}\n\n def InitTokenStats(self, tok):\n tstats = TokenFreqStats(tok)\n tfreq = []\n for doc in self.doclist_.Docs():\n c = 0\n for lang in doc.Langs():\n if tok.LangId() != lang.Id():\n continue\n tmptok = lang.MatchToken(tok)\n if tmptok is not None:\n c += tmptok.Count()\n tfreq.append(c)\n tstats.SetFrequencies(tfreq)\n tstats.NormFrequencies()\n tstats.CalcFreqStats()\n self.tokstats_[tok.EncodeForHash()] = tstats\n return tstats\n\n def AddTokenStats(self, tstats):\n tokhash = tstats.Token().EncodeForHash()\n if tokhash not in self.tokstats_:\n self.tokstats_[tokhash] = tstats\n\n def GetTokenStats(self, tok):\n try:\n return self.tokstats_[tok.EncodeForHash()]\n except KeyError:\n return self.InitTokenStats(tok)\n\n def TokenStats(self):\n return self.tokstats_.values()\n\n def SetN(self, n):\n self.n_ = n\n\n def GetN(self):\n return self.n_\n\n def PearsonsCorrelation(self, token1, token2):\n stats1 = self.GetTokenStats(token1)\n stats2 = self.GetTokenStats(token2)\n freq1 = stats1.Frequencies()\n freq2 = stats2.Frequencies()\n sumxy = sum(map(lambda x, y: x * y, freq1, freq2))\n covxy = sumxy / float(self.n_) - stats1.FreqSum() * stats2.FreqSum(\n ) / float(self.n_ ** 2)\n try:\n rho = covxy / sqrt(stats1.FreqVariance() * stats2.FreqVariance())\n except ZeroDivisionError:\n rho = 0.0\n return rho\n\n\nclass Lang:\n \"\"\"Holder for tokens in a language.\n \"\"\"\n\n def __init__(self):\n self.id_ = ''\n self.tokens_ = []\n\n def XmlEncode(self):\n if len(self.tokens_) == 0:\n return ''\n xml_string_ = '<lang id=\"%s\">\\n%s\\n%s</lang>'\n xml_tokens = []\n for token_ in self.Tokens():\n xml_tokens.append(token_.XmlEncode())\n xml_result = xml_string_ % (self.id_, '\\n'.join(xml_tokens),\n LANG_INDENT_)\n return LANG_INDENT_ + xml_result\n\n def Id(self):\n return self.id_\n\n def SetId(self, id):\n self.id_ = id.encode('utf-8')\n\n def Tokens(self):\n return self.tokens_\n\n def SetTokens(self, tokens):\n self.tokens_ = []\n for t in tokens:\n self.AddToken(t)\n\n def AddToken(self, token, merge=False):\n \"\"\"If an identical token already exists in dictionary,\n will merge tokens and cumulate their counts. Checks to\n see that morphology and pronunciations are identical,\n otherwise the tokens will not be merged.\n \"\"\"\n token.SetLangId(self.id_)\n if not merge:\n self.tokens_.append(token)\n else:\n exists = self.MatchToken(token)\n if exists is None:\n self.tokens_.append(token)\n else:\n exists.IncrementCount(token.Count())\n\n def MatchToken(self, token):\n try:\n i = self.tokens_.index(token)\n return self.tokens_[i]\n except ValueError:\n return None\n\n def CompactTokens(self):\n \"\"\"Merge identical tokens and cumulate their counts. Checks to see\n that morphology and pronunciations are identical, otherwise the\n tokens will not be merged.\n \"\"\"\n map = {}\n for token_ in self.tokens_:\n hash_string = token_.EncodeForHash()\n try:\n map[hash_string].append(token_)\n except KeyError:\n map[hash_string] = [token_]\n ntokens = []\n keys = map.keys()\n keys.sort()\n for k in keys:\n token_ = map[k][0]\n for otoken in map[k][1:]:\n token_.IncrementCount(otoken.Count())\n ntokens.append(token_)\n self.tokens_ = ntokens\n",
"<docstring token>\n<assignment token>\n<import token>\n<assignment token>\n<function token>\n<class token>\n\n\nclass TokenFreqStats:\n <docstring token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def SetFrequencies(self, freq):\n self.frequencies_ = []\n for f in freq:\n self.frequencies_.append(f)\n <function token>\n <function token>\n <function token>\n\n def FreqVariance(self):\n return self.variance_\n\n\nclass DocTokenStats:\n \"\"\"Holder for Doclist-specific token statistics, such as frequency\n counts. Also allows for calculation of pairwise comparison metrics\n such as Pearson's correlation.\n \"\"\"\n\n def __init__(self, doclist=None):\n if doclist is None:\n self.doclist_ = documents.Doclist()\n else:\n self.doclist_ = doclist\n self.n_ = len(self.doclist_.Docs())\n self.tokstats_ = {}\n\n def InitTokenStats(self, tok):\n tstats = TokenFreqStats(tok)\n tfreq = []\n for doc in self.doclist_.Docs():\n c = 0\n for lang in doc.Langs():\n if tok.LangId() != lang.Id():\n continue\n tmptok = lang.MatchToken(tok)\n if tmptok is not None:\n c += tmptok.Count()\n tfreq.append(c)\n tstats.SetFrequencies(tfreq)\n tstats.NormFrequencies()\n tstats.CalcFreqStats()\n self.tokstats_[tok.EncodeForHash()] = tstats\n return tstats\n\n def AddTokenStats(self, tstats):\n tokhash = tstats.Token().EncodeForHash()\n if tokhash not in self.tokstats_:\n self.tokstats_[tokhash] = tstats\n\n def GetTokenStats(self, tok):\n try:\n return self.tokstats_[tok.EncodeForHash()]\n except KeyError:\n return self.InitTokenStats(tok)\n\n def TokenStats(self):\n return self.tokstats_.values()\n\n def SetN(self, n):\n self.n_ = n\n\n def GetN(self):\n return self.n_\n\n def PearsonsCorrelation(self, token1, token2):\n stats1 = self.GetTokenStats(token1)\n stats2 = self.GetTokenStats(token2)\n freq1 = stats1.Frequencies()\n freq2 = stats2.Frequencies()\n sumxy = sum(map(lambda x, y: x * y, freq1, freq2))\n covxy = sumxy / float(self.n_) - stats1.FreqSum() * stats2.FreqSum(\n ) / float(self.n_ ** 2)\n try:\n rho = covxy / sqrt(stats1.FreqVariance() * stats2.FreqVariance())\n except ZeroDivisionError:\n rho = 0.0\n return rho\n\n\nclass Lang:\n \"\"\"Holder for tokens in a language.\n \"\"\"\n\n def __init__(self):\n self.id_ = ''\n self.tokens_ = []\n\n def XmlEncode(self):\n if len(self.tokens_) == 0:\n return ''\n xml_string_ = '<lang id=\"%s\">\\n%s\\n%s</lang>'\n xml_tokens = []\n for token_ in self.Tokens():\n xml_tokens.append(token_.XmlEncode())\n xml_result = xml_string_ % (self.id_, '\\n'.join(xml_tokens),\n LANG_INDENT_)\n return LANG_INDENT_ + xml_result\n\n def Id(self):\n return self.id_\n\n def SetId(self, id):\n self.id_ = id.encode('utf-8')\n\n def Tokens(self):\n return self.tokens_\n\n def SetTokens(self, tokens):\n self.tokens_ = []\n for t in tokens:\n self.AddToken(t)\n\n def AddToken(self, token, merge=False):\n \"\"\"If an identical token already exists in dictionary,\n will merge tokens and cumulate their counts. Checks to\n see that morphology and pronunciations are identical,\n otherwise the tokens will not be merged.\n \"\"\"\n token.SetLangId(self.id_)\n if not merge:\n self.tokens_.append(token)\n else:\n exists = self.MatchToken(token)\n if exists is None:\n self.tokens_.append(token)\n else:\n exists.IncrementCount(token.Count())\n\n def MatchToken(self, token):\n try:\n i = self.tokens_.index(token)\n return self.tokens_[i]\n except ValueError:\n return None\n\n def CompactTokens(self):\n \"\"\"Merge identical tokens and cumulate their counts. Checks to see\n that morphology and pronunciations are identical, otherwise the\n tokens will not be merged.\n \"\"\"\n map = {}\n for token_ in self.tokens_:\n hash_string = token_.EncodeForHash()\n try:\n map[hash_string].append(token_)\n except KeyError:\n map[hash_string] = [token_]\n ntokens = []\n keys = map.keys()\n keys.sort()\n for k in keys:\n token_ = map[k][0]\n for otoken in map[k][1:]:\n token_.IncrementCount(otoken.Count())\n ntokens.append(token_)\n self.tokens_ = ntokens\n",
"<docstring token>\n<assignment token>\n<import token>\n<assignment token>\n<function token>\n<class token>\n\n\nclass TokenFreqStats:\n <docstring token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def FreqVariance(self):\n return self.variance_\n\n\nclass DocTokenStats:\n \"\"\"Holder for Doclist-specific token statistics, such as frequency\n counts. Also allows for calculation of pairwise comparison metrics\n such as Pearson's correlation.\n \"\"\"\n\n def __init__(self, doclist=None):\n if doclist is None:\n self.doclist_ = documents.Doclist()\n else:\n self.doclist_ = doclist\n self.n_ = len(self.doclist_.Docs())\n self.tokstats_ = {}\n\n def InitTokenStats(self, tok):\n tstats = TokenFreqStats(tok)\n tfreq = []\n for doc in self.doclist_.Docs():\n c = 0\n for lang in doc.Langs():\n if tok.LangId() != lang.Id():\n continue\n tmptok = lang.MatchToken(tok)\n if tmptok is not None:\n c += tmptok.Count()\n tfreq.append(c)\n tstats.SetFrequencies(tfreq)\n tstats.NormFrequencies()\n tstats.CalcFreqStats()\n self.tokstats_[tok.EncodeForHash()] = tstats\n return tstats\n\n def AddTokenStats(self, tstats):\n tokhash = tstats.Token().EncodeForHash()\n if tokhash not in self.tokstats_:\n self.tokstats_[tokhash] = tstats\n\n def GetTokenStats(self, tok):\n try:\n return self.tokstats_[tok.EncodeForHash()]\n except KeyError:\n return self.InitTokenStats(tok)\n\n def TokenStats(self):\n return self.tokstats_.values()\n\n def SetN(self, n):\n self.n_ = n\n\n def GetN(self):\n return self.n_\n\n def PearsonsCorrelation(self, token1, token2):\n stats1 = self.GetTokenStats(token1)\n stats2 = self.GetTokenStats(token2)\n freq1 = stats1.Frequencies()\n freq2 = stats2.Frequencies()\n sumxy = sum(map(lambda x, y: x * y, freq1, freq2))\n covxy = sumxy / float(self.n_) - stats1.FreqSum() * stats2.FreqSum(\n ) / float(self.n_ ** 2)\n try:\n rho = covxy / sqrt(stats1.FreqVariance() * stats2.FreqVariance())\n except ZeroDivisionError:\n rho = 0.0\n return rho\n\n\nclass Lang:\n \"\"\"Holder for tokens in a language.\n \"\"\"\n\n def __init__(self):\n self.id_ = ''\n self.tokens_ = []\n\n def XmlEncode(self):\n if len(self.tokens_) == 0:\n return ''\n xml_string_ = '<lang id=\"%s\">\\n%s\\n%s</lang>'\n xml_tokens = []\n for token_ in self.Tokens():\n xml_tokens.append(token_.XmlEncode())\n xml_result = xml_string_ % (self.id_, '\\n'.join(xml_tokens),\n LANG_INDENT_)\n return LANG_INDENT_ + xml_result\n\n def Id(self):\n return self.id_\n\n def SetId(self, id):\n self.id_ = id.encode('utf-8')\n\n def Tokens(self):\n return self.tokens_\n\n def SetTokens(self, tokens):\n self.tokens_ = []\n for t in tokens:\n self.AddToken(t)\n\n def AddToken(self, token, merge=False):\n \"\"\"If an identical token already exists in dictionary,\n will merge tokens and cumulate their counts. Checks to\n see that morphology and pronunciations are identical,\n otherwise the tokens will not be merged.\n \"\"\"\n token.SetLangId(self.id_)\n if not merge:\n self.tokens_.append(token)\n else:\n exists = self.MatchToken(token)\n if exists is None:\n self.tokens_.append(token)\n else:\n exists.IncrementCount(token.Count())\n\n def MatchToken(self, token):\n try:\n i = self.tokens_.index(token)\n return self.tokens_[i]\n except ValueError:\n return None\n\n def CompactTokens(self):\n \"\"\"Merge identical tokens and cumulate their counts. Checks to see\n that morphology and pronunciations are identical, otherwise the\n tokens will not be merged.\n \"\"\"\n map = {}\n for token_ in self.tokens_:\n hash_string = token_.EncodeForHash()\n try:\n map[hash_string].append(token_)\n except KeyError:\n map[hash_string] = [token_]\n ntokens = []\n keys = map.keys()\n keys.sort()\n for k in keys:\n token_ = map[k][0]\n for otoken in map[k][1:]:\n token_.IncrementCount(otoken.Count())\n ntokens.append(token_)\n self.tokens_ = ntokens\n",
"<docstring token>\n<assignment token>\n<import token>\n<assignment token>\n<function token>\n<class token>\n\n\nclass TokenFreqStats:\n <docstring token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n\nclass DocTokenStats:\n \"\"\"Holder for Doclist-specific token statistics, such as frequency\n counts. Also allows for calculation of pairwise comparison metrics\n such as Pearson's correlation.\n \"\"\"\n\n def __init__(self, doclist=None):\n if doclist is None:\n self.doclist_ = documents.Doclist()\n else:\n self.doclist_ = doclist\n self.n_ = len(self.doclist_.Docs())\n self.tokstats_ = {}\n\n def InitTokenStats(self, tok):\n tstats = TokenFreqStats(tok)\n tfreq = []\n for doc in self.doclist_.Docs():\n c = 0\n for lang in doc.Langs():\n if tok.LangId() != lang.Id():\n continue\n tmptok = lang.MatchToken(tok)\n if tmptok is not None:\n c += tmptok.Count()\n tfreq.append(c)\n tstats.SetFrequencies(tfreq)\n tstats.NormFrequencies()\n tstats.CalcFreqStats()\n self.tokstats_[tok.EncodeForHash()] = tstats\n return tstats\n\n def AddTokenStats(self, tstats):\n tokhash = tstats.Token().EncodeForHash()\n if tokhash not in self.tokstats_:\n self.tokstats_[tokhash] = tstats\n\n def GetTokenStats(self, tok):\n try:\n return self.tokstats_[tok.EncodeForHash()]\n except KeyError:\n return self.InitTokenStats(tok)\n\n def TokenStats(self):\n return self.tokstats_.values()\n\n def SetN(self, n):\n self.n_ = n\n\n def GetN(self):\n return self.n_\n\n def PearsonsCorrelation(self, token1, token2):\n stats1 = self.GetTokenStats(token1)\n stats2 = self.GetTokenStats(token2)\n freq1 = stats1.Frequencies()\n freq2 = stats2.Frequencies()\n sumxy = sum(map(lambda x, y: x * y, freq1, freq2))\n covxy = sumxy / float(self.n_) - stats1.FreqSum() * stats2.FreqSum(\n ) / float(self.n_ ** 2)\n try:\n rho = covxy / sqrt(stats1.FreqVariance() * stats2.FreqVariance())\n except ZeroDivisionError:\n rho = 0.0\n return rho\n\n\nclass Lang:\n \"\"\"Holder for tokens in a language.\n \"\"\"\n\n def __init__(self):\n self.id_ = ''\n self.tokens_ = []\n\n def XmlEncode(self):\n if len(self.tokens_) == 0:\n return ''\n xml_string_ = '<lang id=\"%s\">\\n%s\\n%s</lang>'\n xml_tokens = []\n for token_ in self.Tokens():\n xml_tokens.append(token_.XmlEncode())\n xml_result = xml_string_ % (self.id_, '\\n'.join(xml_tokens),\n LANG_INDENT_)\n return LANG_INDENT_ + xml_result\n\n def Id(self):\n return self.id_\n\n def SetId(self, id):\n self.id_ = id.encode('utf-8')\n\n def Tokens(self):\n return self.tokens_\n\n def SetTokens(self, tokens):\n self.tokens_ = []\n for t in tokens:\n self.AddToken(t)\n\n def AddToken(self, token, merge=False):\n \"\"\"If an identical token already exists in dictionary,\n will merge tokens and cumulate their counts. Checks to\n see that morphology and pronunciations are identical,\n otherwise the tokens will not be merged.\n \"\"\"\n token.SetLangId(self.id_)\n if not merge:\n self.tokens_.append(token)\n else:\n exists = self.MatchToken(token)\n if exists is None:\n self.tokens_.append(token)\n else:\n exists.IncrementCount(token.Count())\n\n def MatchToken(self, token):\n try:\n i = self.tokens_.index(token)\n return self.tokens_[i]\n except ValueError:\n return None\n\n def CompactTokens(self):\n \"\"\"Merge identical tokens and cumulate their counts. Checks to see\n that morphology and pronunciations are identical, otherwise the\n tokens will not be merged.\n \"\"\"\n map = {}\n for token_ in self.tokens_:\n hash_string = token_.EncodeForHash()\n try:\n map[hash_string].append(token_)\n except KeyError:\n map[hash_string] = [token_]\n ntokens = []\n keys = map.keys()\n keys.sort()\n for k in keys:\n token_ = map[k][0]\n for otoken in map[k][1:]:\n token_.IncrementCount(otoken.Count())\n ntokens.append(token_)\n self.tokens_ = ntokens\n",
"<docstring token>\n<assignment token>\n<import token>\n<assignment token>\n<function token>\n<class token>\n<class token>\n\n\nclass DocTokenStats:\n \"\"\"Holder for Doclist-specific token statistics, such as frequency\n counts. Also allows for calculation of pairwise comparison metrics\n such as Pearson's correlation.\n \"\"\"\n\n def __init__(self, doclist=None):\n if doclist is None:\n self.doclist_ = documents.Doclist()\n else:\n self.doclist_ = doclist\n self.n_ = len(self.doclist_.Docs())\n self.tokstats_ = {}\n\n def InitTokenStats(self, tok):\n tstats = TokenFreqStats(tok)\n tfreq = []\n for doc in self.doclist_.Docs():\n c = 0\n for lang in doc.Langs():\n if tok.LangId() != lang.Id():\n continue\n tmptok = lang.MatchToken(tok)\n if tmptok is not None:\n c += tmptok.Count()\n tfreq.append(c)\n tstats.SetFrequencies(tfreq)\n tstats.NormFrequencies()\n tstats.CalcFreqStats()\n self.tokstats_[tok.EncodeForHash()] = tstats\n return tstats\n\n def AddTokenStats(self, tstats):\n tokhash = tstats.Token().EncodeForHash()\n if tokhash not in self.tokstats_:\n self.tokstats_[tokhash] = tstats\n\n def GetTokenStats(self, tok):\n try:\n return self.tokstats_[tok.EncodeForHash()]\n except KeyError:\n return self.InitTokenStats(tok)\n\n def TokenStats(self):\n return self.tokstats_.values()\n\n def SetN(self, n):\n self.n_ = n\n\n def GetN(self):\n return self.n_\n\n def PearsonsCorrelation(self, token1, token2):\n stats1 = self.GetTokenStats(token1)\n stats2 = self.GetTokenStats(token2)\n freq1 = stats1.Frequencies()\n freq2 = stats2.Frequencies()\n sumxy = sum(map(lambda x, y: x * y, freq1, freq2))\n covxy = sumxy / float(self.n_) - stats1.FreqSum() * stats2.FreqSum(\n ) / float(self.n_ ** 2)\n try:\n rho = covxy / sqrt(stats1.FreqVariance() * stats2.FreqVariance())\n except ZeroDivisionError:\n rho = 0.0\n return rho\n\n\nclass Lang:\n \"\"\"Holder for tokens in a language.\n \"\"\"\n\n def __init__(self):\n self.id_ = ''\n self.tokens_ = []\n\n def XmlEncode(self):\n if len(self.tokens_) == 0:\n return ''\n xml_string_ = '<lang id=\"%s\">\\n%s\\n%s</lang>'\n xml_tokens = []\n for token_ in self.Tokens():\n xml_tokens.append(token_.XmlEncode())\n xml_result = xml_string_ % (self.id_, '\\n'.join(xml_tokens),\n LANG_INDENT_)\n return LANG_INDENT_ + xml_result\n\n def Id(self):\n return self.id_\n\n def SetId(self, id):\n self.id_ = id.encode('utf-8')\n\n def Tokens(self):\n return self.tokens_\n\n def SetTokens(self, tokens):\n self.tokens_ = []\n for t in tokens:\n self.AddToken(t)\n\n def AddToken(self, token, merge=False):\n \"\"\"If an identical token already exists in dictionary,\n will merge tokens and cumulate their counts. Checks to\n see that morphology and pronunciations are identical,\n otherwise the tokens will not be merged.\n \"\"\"\n token.SetLangId(self.id_)\n if not merge:\n self.tokens_.append(token)\n else:\n exists = self.MatchToken(token)\n if exists is None:\n self.tokens_.append(token)\n else:\n exists.IncrementCount(token.Count())\n\n def MatchToken(self, token):\n try:\n i = self.tokens_.index(token)\n return self.tokens_[i]\n except ValueError:\n return None\n\n def CompactTokens(self):\n \"\"\"Merge identical tokens and cumulate their counts. Checks to see\n that morphology and pronunciations are identical, otherwise the\n tokens will not be merged.\n \"\"\"\n map = {}\n for token_ in self.tokens_:\n hash_string = token_.EncodeForHash()\n try:\n map[hash_string].append(token_)\n except KeyError:\n map[hash_string] = [token_]\n ntokens = []\n keys = map.keys()\n keys.sort()\n for k in keys:\n token_ = map[k][0]\n for otoken in map[k][1:]:\n token_.IncrementCount(otoken.Count())\n ntokens.append(token_)\n self.tokens_ = ntokens\n",
"<docstring token>\n<assignment token>\n<import token>\n<assignment token>\n<function token>\n<class token>\n<class token>\n\n\nclass DocTokenStats:\n <docstring token>\n\n def __init__(self, doclist=None):\n if doclist is None:\n self.doclist_ = documents.Doclist()\n else:\n self.doclist_ = doclist\n self.n_ = len(self.doclist_.Docs())\n self.tokstats_ = {}\n\n def InitTokenStats(self, tok):\n tstats = TokenFreqStats(tok)\n tfreq = []\n for doc in self.doclist_.Docs():\n c = 0\n for lang in doc.Langs():\n if tok.LangId() != lang.Id():\n continue\n tmptok = lang.MatchToken(tok)\n if tmptok is not None:\n c += tmptok.Count()\n tfreq.append(c)\n tstats.SetFrequencies(tfreq)\n tstats.NormFrequencies()\n tstats.CalcFreqStats()\n self.tokstats_[tok.EncodeForHash()] = tstats\n return tstats\n\n def AddTokenStats(self, tstats):\n tokhash = tstats.Token().EncodeForHash()\n if tokhash not in self.tokstats_:\n self.tokstats_[tokhash] = tstats\n\n def GetTokenStats(self, tok):\n try:\n return self.tokstats_[tok.EncodeForHash()]\n except KeyError:\n return self.InitTokenStats(tok)\n\n def TokenStats(self):\n return self.tokstats_.values()\n\n def SetN(self, n):\n self.n_ = n\n\n def GetN(self):\n return self.n_\n\n def PearsonsCorrelation(self, token1, token2):\n stats1 = self.GetTokenStats(token1)\n stats2 = self.GetTokenStats(token2)\n freq1 = stats1.Frequencies()\n freq2 = stats2.Frequencies()\n sumxy = sum(map(lambda x, y: x * y, freq1, freq2))\n covxy = sumxy / float(self.n_) - stats1.FreqSum() * stats2.FreqSum(\n ) / float(self.n_ ** 2)\n try:\n rho = covxy / sqrt(stats1.FreqVariance() * stats2.FreqVariance())\n except ZeroDivisionError:\n rho = 0.0\n return rho\n\n\nclass Lang:\n \"\"\"Holder for tokens in a language.\n \"\"\"\n\n def __init__(self):\n self.id_ = ''\n self.tokens_ = []\n\n def XmlEncode(self):\n if len(self.tokens_) == 0:\n return ''\n xml_string_ = '<lang id=\"%s\">\\n%s\\n%s</lang>'\n xml_tokens = []\n for token_ in self.Tokens():\n xml_tokens.append(token_.XmlEncode())\n xml_result = xml_string_ % (self.id_, '\\n'.join(xml_tokens),\n LANG_INDENT_)\n return LANG_INDENT_ + xml_result\n\n def Id(self):\n return self.id_\n\n def SetId(self, id):\n self.id_ = id.encode('utf-8')\n\n def Tokens(self):\n return self.tokens_\n\n def SetTokens(self, tokens):\n self.tokens_ = []\n for t in tokens:\n self.AddToken(t)\n\n def AddToken(self, token, merge=False):\n \"\"\"If an identical token already exists in dictionary,\n will merge tokens and cumulate their counts. Checks to\n see that morphology and pronunciations are identical,\n otherwise the tokens will not be merged.\n \"\"\"\n token.SetLangId(self.id_)\n if not merge:\n self.tokens_.append(token)\n else:\n exists = self.MatchToken(token)\n if exists is None:\n self.tokens_.append(token)\n else:\n exists.IncrementCount(token.Count())\n\n def MatchToken(self, token):\n try:\n i = self.tokens_.index(token)\n return self.tokens_[i]\n except ValueError:\n return None\n\n def CompactTokens(self):\n \"\"\"Merge identical tokens and cumulate their counts. Checks to see\n that morphology and pronunciations are identical, otherwise the\n tokens will not be merged.\n \"\"\"\n map = {}\n for token_ in self.tokens_:\n hash_string = token_.EncodeForHash()\n try:\n map[hash_string].append(token_)\n except KeyError:\n map[hash_string] = [token_]\n ntokens = []\n keys = map.keys()\n keys.sort()\n for k in keys:\n token_ = map[k][0]\n for otoken in map[k][1:]:\n token_.IncrementCount(otoken.Count())\n ntokens.append(token_)\n self.tokens_ = ntokens\n",
"<docstring token>\n<assignment token>\n<import token>\n<assignment token>\n<function token>\n<class token>\n<class token>\n\n\nclass DocTokenStats:\n <docstring token>\n\n def __init__(self, doclist=None):\n if doclist is None:\n self.doclist_ = documents.Doclist()\n else:\n self.doclist_ = doclist\n self.n_ = len(self.doclist_.Docs())\n self.tokstats_ = {}\n\n def InitTokenStats(self, tok):\n tstats = TokenFreqStats(tok)\n tfreq = []\n for doc in self.doclist_.Docs():\n c = 0\n for lang in doc.Langs():\n if tok.LangId() != lang.Id():\n continue\n tmptok = lang.MatchToken(tok)\n if tmptok is not None:\n c += tmptok.Count()\n tfreq.append(c)\n tstats.SetFrequencies(tfreq)\n tstats.NormFrequencies()\n tstats.CalcFreqStats()\n self.tokstats_[tok.EncodeForHash()] = tstats\n return tstats\n\n def AddTokenStats(self, tstats):\n tokhash = tstats.Token().EncodeForHash()\n if tokhash not in self.tokstats_:\n self.tokstats_[tokhash] = tstats\n\n def GetTokenStats(self, tok):\n try:\n return self.tokstats_[tok.EncodeForHash()]\n except KeyError:\n return self.InitTokenStats(tok)\n\n def TokenStats(self):\n return self.tokstats_.values()\n <function token>\n\n def GetN(self):\n return self.n_\n\n def PearsonsCorrelation(self, token1, token2):\n stats1 = self.GetTokenStats(token1)\n stats2 = self.GetTokenStats(token2)\n freq1 = stats1.Frequencies()\n freq2 = stats2.Frequencies()\n sumxy = sum(map(lambda x, y: x * y, freq1, freq2))\n covxy = sumxy / float(self.n_) - stats1.FreqSum() * stats2.FreqSum(\n ) / float(self.n_ ** 2)\n try:\n rho = covxy / sqrt(stats1.FreqVariance() * stats2.FreqVariance())\n except ZeroDivisionError:\n rho = 0.0\n return rho\n\n\nclass Lang:\n \"\"\"Holder for tokens in a language.\n \"\"\"\n\n def __init__(self):\n self.id_ = ''\n self.tokens_ = []\n\n def XmlEncode(self):\n if len(self.tokens_) == 0:\n return ''\n xml_string_ = '<lang id=\"%s\">\\n%s\\n%s</lang>'\n xml_tokens = []\n for token_ in self.Tokens():\n xml_tokens.append(token_.XmlEncode())\n xml_result = xml_string_ % (self.id_, '\\n'.join(xml_tokens),\n LANG_INDENT_)\n return LANG_INDENT_ + xml_result\n\n def Id(self):\n return self.id_\n\n def SetId(self, id):\n self.id_ = id.encode('utf-8')\n\n def Tokens(self):\n return self.tokens_\n\n def SetTokens(self, tokens):\n self.tokens_ = []\n for t in tokens:\n self.AddToken(t)\n\n def AddToken(self, token, merge=False):\n \"\"\"If an identical token already exists in dictionary,\n will merge tokens and cumulate their counts. Checks to\n see that morphology and pronunciations are identical,\n otherwise the tokens will not be merged.\n \"\"\"\n token.SetLangId(self.id_)\n if not merge:\n self.tokens_.append(token)\n else:\n exists = self.MatchToken(token)\n if exists is None:\n self.tokens_.append(token)\n else:\n exists.IncrementCount(token.Count())\n\n def MatchToken(self, token):\n try:\n i = self.tokens_.index(token)\n return self.tokens_[i]\n except ValueError:\n return None\n\n def CompactTokens(self):\n \"\"\"Merge identical tokens and cumulate their counts. Checks to see\n that morphology and pronunciations are identical, otherwise the\n tokens will not be merged.\n \"\"\"\n map = {}\n for token_ in self.tokens_:\n hash_string = token_.EncodeForHash()\n try:\n map[hash_string].append(token_)\n except KeyError:\n map[hash_string] = [token_]\n ntokens = []\n keys = map.keys()\n keys.sort()\n for k in keys:\n token_ = map[k][0]\n for otoken in map[k][1:]:\n token_.IncrementCount(otoken.Count())\n ntokens.append(token_)\n self.tokens_ = ntokens\n",
"<docstring token>\n<assignment token>\n<import token>\n<assignment token>\n<function token>\n<class token>\n<class token>\n\n\nclass DocTokenStats:\n <docstring token>\n\n def __init__(self, doclist=None):\n if doclist is None:\n self.doclist_ = documents.Doclist()\n else:\n self.doclist_ = doclist\n self.n_ = len(self.doclist_.Docs())\n self.tokstats_ = {}\n <function token>\n\n def AddTokenStats(self, tstats):\n tokhash = tstats.Token().EncodeForHash()\n if tokhash not in self.tokstats_:\n self.tokstats_[tokhash] = tstats\n\n def GetTokenStats(self, tok):\n try:\n return self.tokstats_[tok.EncodeForHash()]\n except KeyError:\n return self.InitTokenStats(tok)\n\n def TokenStats(self):\n return self.tokstats_.values()\n <function token>\n\n def GetN(self):\n return self.n_\n\n def PearsonsCorrelation(self, token1, token2):\n stats1 = self.GetTokenStats(token1)\n stats2 = self.GetTokenStats(token2)\n freq1 = stats1.Frequencies()\n freq2 = stats2.Frequencies()\n sumxy = sum(map(lambda x, y: x * y, freq1, freq2))\n covxy = sumxy / float(self.n_) - stats1.FreqSum() * stats2.FreqSum(\n ) / float(self.n_ ** 2)\n try:\n rho = covxy / sqrt(stats1.FreqVariance() * stats2.FreqVariance())\n except ZeroDivisionError:\n rho = 0.0\n return rho\n\n\nclass Lang:\n \"\"\"Holder for tokens in a language.\n \"\"\"\n\n def __init__(self):\n self.id_ = ''\n self.tokens_ = []\n\n def XmlEncode(self):\n if len(self.tokens_) == 0:\n return ''\n xml_string_ = '<lang id=\"%s\">\\n%s\\n%s</lang>'\n xml_tokens = []\n for token_ in self.Tokens():\n xml_tokens.append(token_.XmlEncode())\n xml_result = xml_string_ % (self.id_, '\\n'.join(xml_tokens),\n LANG_INDENT_)\n return LANG_INDENT_ + xml_result\n\n def Id(self):\n return self.id_\n\n def SetId(self, id):\n self.id_ = id.encode('utf-8')\n\n def Tokens(self):\n return self.tokens_\n\n def SetTokens(self, tokens):\n self.tokens_ = []\n for t in tokens:\n self.AddToken(t)\n\n def AddToken(self, token, merge=False):\n \"\"\"If an identical token already exists in dictionary,\n will merge tokens and cumulate their counts. Checks to\n see that morphology and pronunciations are identical,\n otherwise the tokens will not be merged.\n \"\"\"\n token.SetLangId(self.id_)\n if not merge:\n self.tokens_.append(token)\n else:\n exists = self.MatchToken(token)\n if exists is None:\n self.tokens_.append(token)\n else:\n exists.IncrementCount(token.Count())\n\n def MatchToken(self, token):\n try:\n i = self.tokens_.index(token)\n return self.tokens_[i]\n except ValueError:\n return None\n\n def CompactTokens(self):\n \"\"\"Merge identical tokens and cumulate their counts. Checks to see\n that morphology and pronunciations are identical, otherwise the\n tokens will not be merged.\n \"\"\"\n map = {}\n for token_ in self.tokens_:\n hash_string = token_.EncodeForHash()\n try:\n map[hash_string].append(token_)\n except KeyError:\n map[hash_string] = [token_]\n ntokens = []\n keys = map.keys()\n keys.sort()\n for k in keys:\n token_ = map[k][0]\n for otoken in map[k][1:]:\n token_.IncrementCount(otoken.Count())\n ntokens.append(token_)\n self.tokens_ = ntokens\n",
"<docstring token>\n<assignment token>\n<import token>\n<assignment token>\n<function token>\n<class token>\n<class token>\n\n\nclass DocTokenStats:\n <docstring token>\n\n def __init__(self, doclist=None):\n if doclist is None:\n self.doclist_ = documents.Doclist()\n else:\n self.doclist_ = doclist\n self.n_ = len(self.doclist_.Docs())\n self.tokstats_ = {}\n <function token>\n\n def AddTokenStats(self, tstats):\n tokhash = tstats.Token().EncodeForHash()\n if tokhash not in self.tokstats_:\n self.tokstats_[tokhash] = tstats\n\n def GetTokenStats(self, tok):\n try:\n return self.tokstats_[tok.EncodeForHash()]\n except KeyError:\n return self.InitTokenStats(tok)\n\n def TokenStats(self):\n return self.tokstats_.values()\n <function token>\n\n def GetN(self):\n return self.n_\n <function token>\n\n\nclass Lang:\n \"\"\"Holder for tokens in a language.\n \"\"\"\n\n def __init__(self):\n self.id_ = ''\n self.tokens_ = []\n\n def XmlEncode(self):\n if len(self.tokens_) == 0:\n return ''\n xml_string_ = '<lang id=\"%s\">\\n%s\\n%s</lang>'\n xml_tokens = []\n for token_ in self.Tokens():\n xml_tokens.append(token_.XmlEncode())\n xml_result = xml_string_ % (self.id_, '\\n'.join(xml_tokens),\n LANG_INDENT_)\n return LANG_INDENT_ + xml_result\n\n def Id(self):\n return self.id_\n\n def SetId(self, id):\n self.id_ = id.encode('utf-8')\n\n def Tokens(self):\n return self.tokens_\n\n def SetTokens(self, tokens):\n self.tokens_ = []\n for t in tokens:\n self.AddToken(t)\n\n def AddToken(self, token, merge=False):\n \"\"\"If an identical token already exists in dictionary,\n will merge tokens and cumulate their counts. Checks to\n see that morphology and pronunciations are identical,\n otherwise the tokens will not be merged.\n \"\"\"\n token.SetLangId(self.id_)\n if not merge:\n self.tokens_.append(token)\n else:\n exists = self.MatchToken(token)\n if exists is None:\n self.tokens_.append(token)\n else:\n exists.IncrementCount(token.Count())\n\n def MatchToken(self, token):\n try:\n i = self.tokens_.index(token)\n return self.tokens_[i]\n except ValueError:\n return None\n\n def CompactTokens(self):\n \"\"\"Merge identical tokens and cumulate their counts. Checks to see\n that morphology and pronunciations are identical, otherwise the\n tokens will not be merged.\n \"\"\"\n map = {}\n for token_ in self.tokens_:\n hash_string = token_.EncodeForHash()\n try:\n map[hash_string].append(token_)\n except KeyError:\n map[hash_string] = [token_]\n ntokens = []\n keys = map.keys()\n keys.sort()\n for k in keys:\n token_ = map[k][0]\n for otoken in map[k][1:]:\n token_.IncrementCount(otoken.Count())\n ntokens.append(token_)\n self.tokens_ = ntokens\n",
"<docstring token>\n<assignment token>\n<import token>\n<assignment token>\n<function token>\n<class token>\n<class token>\n\n\nclass DocTokenStats:\n <docstring token>\n\n def __init__(self, doclist=None):\n if doclist is None:\n self.doclist_ = documents.Doclist()\n else:\n self.doclist_ = doclist\n self.n_ = len(self.doclist_.Docs())\n self.tokstats_ = {}\n <function token>\n\n def AddTokenStats(self, tstats):\n tokhash = tstats.Token().EncodeForHash()\n if tokhash not in self.tokstats_:\n self.tokstats_[tokhash] = tstats\n\n def GetTokenStats(self, tok):\n try:\n return self.tokstats_[tok.EncodeForHash()]\n except KeyError:\n return self.InitTokenStats(tok)\n\n def TokenStats(self):\n return self.tokstats_.values()\n <function token>\n <function token>\n <function token>\n\n\nclass Lang:\n \"\"\"Holder for tokens in a language.\n \"\"\"\n\n def __init__(self):\n self.id_ = ''\n self.tokens_ = []\n\n def XmlEncode(self):\n if len(self.tokens_) == 0:\n return ''\n xml_string_ = '<lang id=\"%s\">\\n%s\\n%s</lang>'\n xml_tokens = []\n for token_ in self.Tokens():\n xml_tokens.append(token_.XmlEncode())\n xml_result = xml_string_ % (self.id_, '\\n'.join(xml_tokens),\n LANG_INDENT_)\n return LANG_INDENT_ + xml_result\n\n def Id(self):\n return self.id_\n\n def SetId(self, id):\n self.id_ = id.encode('utf-8')\n\n def Tokens(self):\n return self.tokens_\n\n def SetTokens(self, tokens):\n self.tokens_ = []\n for t in tokens:\n self.AddToken(t)\n\n def AddToken(self, token, merge=False):\n \"\"\"If an identical token already exists in dictionary,\n will merge tokens and cumulate their counts. Checks to\n see that morphology and pronunciations are identical,\n otherwise the tokens will not be merged.\n \"\"\"\n token.SetLangId(self.id_)\n if not merge:\n self.tokens_.append(token)\n else:\n exists = self.MatchToken(token)\n if exists is None:\n self.tokens_.append(token)\n else:\n exists.IncrementCount(token.Count())\n\n def MatchToken(self, token):\n try:\n i = self.tokens_.index(token)\n return self.tokens_[i]\n except ValueError:\n return None\n\n def CompactTokens(self):\n \"\"\"Merge identical tokens and cumulate their counts. Checks to see\n that morphology and pronunciations are identical, otherwise the\n tokens will not be merged.\n \"\"\"\n map = {}\n for token_ in self.tokens_:\n hash_string = token_.EncodeForHash()\n try:\n map[hash_string].append(token_)\n except KeyError:\n map[hash_string] = [token_]\n ntokens = []\n keys = map.keys()\n keys.sort()\n for k in keys:\n token_ = map[k][0]\n for otoken in map[k][1:]:\n token_.IncrementCount(otoken.Count())\n ntokens.append(token_)\n self.tokens_ = ntokens\n",
"<docstring token>\n<assignment token>\n<import token>\n<assignment token>\n<function token>\n<class token>\n<class token>\n\n\nclass DocTokenStats:\n <docstring token>\n\n def __init__(self, doclist=None):\n if doclist is None:\n self.doclist_ = documents.Doclist()\n else:\n self.doclist_ = doclist\n self.n_ = len(self.doclist_.Docs())\n self.tokstats_ = {}\n <function token>\n\n def AddTokenStats(self, tstats):\n tokhash = tstats.Token().EncodeForHash()\n if tokhash not in self.tokstats_:\n self.tokstats_[tokhash] = tstats\n <function token>\n\n def TokenStats(self):\n return self.tokstats_.values()\n <function token>\n <function token>\n <function token>\n\n\nclass Lang:\n \"\"\"Holder for tokens in a language.\n \"\"\"\n\n def __init__(self):\n self.id_ = ''\n self.tokens_ = []\n\n def XmlEncode(self):\n if len(self.tokens_) == 0:\n return ''\n xml_string_ = '<lang id=\"%s\">\\n%s\\n%s</lang>'\n xml_tokens = []\n for token_ in self.Tokens():\n xml_tokens.append(token_.XmlEncode())\n xml_result = xml_string_ % (self.id_, '\\n'.join(xml_tokens),\n LANG_INDENT_)\n return LANG_INDENT_ + xml_result\n\n def Id(self):\n return self.id_\n\n def SetId(self, id):\n self.id_ = id.encode('utf-8')\n\n def Tokens(self):\n return self.tokens_\n\n def SetTokens(self, tokens):\n self.tokens_ = []\n for t in tokens:\n self.AddToken(t)\n\n def AddToken(self, token, merge=False):\n \"\"\"If an identical token already exists in dictionary,\n will merge tokens and cumulate their counts. Checks to\n see that morphology and pronunciations are identical,\n otherwise the tokens will not be merged.\n \"\"\"\n token.SetLangId(self.id_)\n if not merge:\n self.tokens_.append(token)\n else:\n exists = self.MatchToken(token)\n if exists is None:\n self.tokens_.append(token)\n else:\n exists.IncrementCount(token.Count())\n\n def MatchToken(self, token):\n try:\n i = self.tokens_.index(token)\n return self.tokens_[i]\n except ValueError:\n return None\n\n def CompactTokens(self):\n \"\"\"Merge identical tokens and cumulate their counts. Checks to see\n that morphology and pronunciations are identical, otherwise the\n tokens will not be merged.\n \"\"\"\n map = {}\n for token_ in self.tokens_:\n hash_string = token_.EncodeForHash()\n try:\n map[hash_string].append(token_)\n except KeyError:\n map[hash_string] = [token_]\n ntokens = []\n keys = map.keys()\n keys.sort()\n for k in keys:\n token_ = map[k][0]\n for otoken in map[k][1:]:\n token_.IncrementCount(otoken.Count())\n ntokens.append(token_)\n self.tokens_ = ntokens\n",
"<docstring token>\n<assignment token>\n<import token>\n<assignment token>\n<function token>\n<class token>\n<class token>\n\n\nclass DocTokenStats:\n <docstring token>\n\n def __init__(self, doclist=None):\n if doclist is None:\n self.doclist_ = documents.Doclist()\n else:\n self.doclist_ = doclist\n self.n_ = len(self.doclist_.Docs())\n self.tokstats_ = {}\n <function token>\n\n def AddTokenStats(self, tstats):\n tokhash = tstats.Token().EncodeForHash()\n if tokhash not in self.tokstats_:\n self.tokstats_[tokhash] = tstats\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n\nclass Lang:\n \"\"\"Holder for tokens in a language.\n \"\"\"\n\n def __init__(self):\n self.id_ = ''\n self.tokens_ = []\n\n def XmlEncode(self):\n if len(self.tokens_) == 0:\n return ''\n xml_string_ = '<lang id=\"%s\">\\n%s\\n%s</lang>'\n xml_tokens = []\n for token_ in self.Tokens():\n xml_tokens.append(token_.XmlEncode())\n xml_result = xml_string_ % (self.id_, '\\n'.join(xml_tokens),\n LANG_INDENT_)\n return LANG_INDENT_ + xml_result\n\n def Id(self):\n return self.id_\n\n def SetId(self, id):\n self.id_ = id.encode('utf-8')\n\n def Tokens(self):\n return self.tokens_\n\n def SetTokens(self, tokens):\n self.tokens_ = []\n for t in tokens:\n self.AddToken(t)\n\n def AddToken(self, token, merge=False):\n \"\"\"If an identical token already exists in dictionary,\n will merge tokens and cumulate their counts. Checks to\n see that morphology and pronunciations are identical,\n otherwise the tokens will not be merged.\n \"\"\"\n token.SetLangId(self.id_)\n if not merge:\n self.tokens_.append(token)\n else:\n exists = self.MatchToken(token)\n if exists is None:\n self.tokens_.append(token)\n else:\n exists.IncrementCount(token.Count())\n\n def MatchToken(self, token):\n try:\n i = self.tokens_.index(token)\n return self.tokens_[i]\n except ValueError:\n return None\n\n def CompactTokens(self):\n \"\"\"Merge identical tokens and cumulate their counts. Checks to see\n that morphology and pronunciations are identical, otherwise the\n tokens will not be merged.\n \"\"\"\n map = {}\n for token_ in self.tokens_:\n hash_string = token_.EncodeForHash()\n try:\n map[hash_string].append(token_)\n except KeyError:\n map[hash_string] = [token_]\n ntokens = []\n keys = map.keys()\n keys.sort()\n for k in keys:\n token_ = map[k][0]\n for otoken in map[k][1:]:\n token_.IncrementCount(otoken.Count())\n ntokens.append(token_)\n self.tokens_ = ntokens\n",
"<docstring token>\n<assignment token>\n<import token>\n<assignment token>\n<function token>\n<class token>\n<class token>\n\n\nclass DocTokenStats:\n <docstring token>\n <function token>\n <function token>\n\n def AddTokenStats(self, tstats):\n tokhash = tstats.Token().EncodeForHash()\n if tokhash not in self.tokstats_:\n self.tokstats_[tokhash] = tstats\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n\nclass Lang:\n \"\"\"Holder for tokens in a language.\n \"\"\"\n\n def __init__(self):\n self.id_ = ''\n self.tokens_ = []\n\n def XmlEncode(self):\n if len(self.tokens_) == 0:\n return ''\n xml_string_ = '<lang id=\"%s\">\\n%s\\n%s</lang>'\n xml_tokens = []\n for token_ in self.Tokens():\n xml_tokens.append(token_.XmlEncode())\n xml_result = xml_string_ % (self.id_, '\\n'.join(xml_tokens),\n LANG_INDENT_)\n return LANG_INDENT_ + xml_result\n\n def Id(self):\n return self.id_\n\n def SetId(self, id):\n self.id_ = id.encode('utf-8')\n\n def Tokens(self):\n return self.tokens_\n\n def SetTokens(self, tokens):\n self.tokens_ = []\n for t in tokens:\n self.AddToken(t)\n\n def AddToken(self, token, merge=False):\n \"\"\"If an identical token already exists in dictionary,\n will merge tokens and cumulate their counts. Checks to\n see that morphology and pronunciations are identical,\n otherwise the tokens will not be merged.\n \"\"\"\n token.SetLangId(self.id_)\n if not merge:\n self.tokens_.append(token)\n else:\n exists = self.MatchToken(token)\n if exists is None:\n self.tokens_.append(token)\n else:\n exists.IncrementCount(token.Count())\n\n def MatchToken(self, token):\n try:\n i = self.tokens_.index(token)\n return self.tokens_[i]\n except ValueError:\n return None\n\n def CompactTokens(self):\n \"\"\"Merge identical tokens and cumulate their counts. Checks to see\n that morphology and pronunciations are identical, otherwise the\n tokens will not be merged.\n \"\"\"\n map = {}\n for token_ in self.tokens_:\n hash_string = token_.EncodeForHash()\n try:\n map[hash_string].append(token_)\n except KeyError:\n map[hash_string] = [token_]\n ntokens = []\n keys = map.keys()\n keys.sort()\n for k in keys:\n token_ = map[k][0]\n for otoken in map[k][1:]:\n token_.IncrementCount(otoken.Count())\n ntokens.append(token_)\n self.tokens_ = ntokens\n",
"<docstring token>\n<assignment token>\n<import token>\n<assignment token>\n<function token>\n<class token>\n<class token>\n\n\nclass DocTokenStats:\n <docstring token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n\nclass Lang:\n \"\"\"Holder for tokens in a language.\n \"\"\"\n\n def __init__(self):\n self.id_ = ''\n self.tokens_ = []\n\n def XmlEncode(self):\n if len(self.tokens_) == 0:\n return ''\n xml_string_ = '<lang id=\"%s\">\\n%s\\n%s</lang>'\n xml_tokens = []\n for token_ in self.Tokens():\n xml_tokens.append(token_.XmlEncode())\n xml_result = xml_string_ % (self.id_, '\\n'.join(xml_tokens),\n LANG_INDENT_)\n return LANG_INDENT_ + xml_result\n\n def Id(self):\n return self.id_\n\n def SetId(self, id):\n self.id_ = id.encode('utf-8')\n\n def Tokens(self):\n return self.tokens_\n\n def SetTokens(self, tokens):\n self.tokens_ = []\n for t in tokens:\n self.AddToken(t)\n\n def AddToken(self, token, merge=False):\n \"\"\"If an identical token already exists in dictionary,\n will merge tokens and cumulate their counts. Checks to\n see that morphology and pronunciations are identical,\n otherwise the tokens will not be merged.\n \"\"\"\n token.SetLangId(self.id_)\n if not merge:\n self.tokens_.append(token)\n else:\n exists = self.MatchToken(token)\n if exists is None:\n self.tokens_.append(token)\n else:\n exists.IncrementCount(token.Count())\n\n def MatchToken(self, token):\n try:\n i = self.tokens_.index(token)\n return self.tokens_[i]\n except ValueError:\n return None\n\n def CompactTokens(self):\n \"\"\"Merge identical tokens and cumulate their counts. Checks to see\n that morphology and pronunciations are identical, otherwise the\n tokens will not be merged.\n \"\"\"\n map = {}\n for token_ in self.tokens_:\n hash_string = token_.EncodeForHash()\n try:\n map[hash_string].append(token_)\n except KeyError:\n map[hash_string] = [token_]\n ntokens = []\n keys = map.keys()\n keys.sort()\n for k in keys:\n token_ = map[k][0]\n for otoken in map[k][1:]:\n token_.IncrementCount(otoken.Count())\n ntokens.append(token_)\n self.tokens_ = ntokens\n",
"<docstring token>\n<assignment token>\n<import token>\n<assignment token>\n<function token>\n<class token>\n<class token>\n<class token>\n\n\nclass Lang:\n \"\"\"Holder for tokens in a language.\n \"\"\"\n\n def __init__(self):\n self.id_ = ''\n self.tokens_ = []\n\n def XmlEncode(self):\n if len(self.tokens_) == 0:\n return ''\n xml_string_ = '<lang id=\"%s\">\\n%s\\n%s</lang>'\n xml_tokens = []\n for token_ in self.Tokens():\n xml_tokens.append(token_.XmlEncode())\n xml_result = xml_string_ % (self.id_, '\\n'.join(xml_tokens),\n LANG_INDENT_)\n return LANG_INDENT_ + xml_result\n\n def Id(self):\n return self.id_\n\n def SetId(self, id):\n self.id_ = id.encode('utf-8')\n\n def Tokens(self):\n return self.tokens_\n\n def SetTokens(self, tokens):\n self.tokens_ = []\n for t in tokens:\n self.AddToken(t)\n\n def AddToken(self, token, merge=False):\n \"\"\"If an identical token already exists in dictionary,\n will merge tokens and cumulate their counts. Checks to\n see that morphology and pronunciations are identical,\n otherwise the tokens will not be merged.\n \"\"\"\n token.SetLangId(self.id_)\n if not merge:\n self.tokens_.append(token)\n else:\n exists = self.MatchToken(token)\n if exists is None:\n self.tokens_.append(token)\n else:\n exists.IncrementCount(token.Count())\n\n def MatchToken(self, token):\n try:\n i = self.tokens_.index(token)\n return self.tokens_[i]\n except ValueError:\n return None\n\n def CompactTokens(self):\n \"\"\"Merge identical tokens and cumulate their counts. Checks to see\n that morphology and pronunciations are identical, otherwise the\n tokens will not be merged.\n \"\"\"\n map = {}\n for token_ in self.tokens_:\n hash_string = token_.EncodeForHash()\n try:\n map[hash_string].append(token_)\n except KeyError:\n map[hash_string] = [token_]\n ntokens = []\n keys = map.keys()\n keys.sort()\n for k in keys:\n token_ = map[k][0]\n for otoken in map[k][1:]:\n token_.IncrementCount(otoken.Count())\n ntokens.append(token_)\n self.tokens_ = ntokens\n",
"<docstring token>\n<assignment token>\n<import token>\n<assignment token>\n<function token>\n<class token>\n<class token>\n<class token>\n\n\nclass Lang:\n <docstring token>\n\n def __init__(self):\n self.id_ = ''\n self.tokens_ = []\n\n def XmlEncode(self):\n if len(self.tokens_) == 0:\n return ''\n xml_string_ = '<lang id=\"%s\">\\n%s\\n%s</lang>'\n xml_tokens = []\n for token_ in self.Tokens():\n xml_tokens.append(token_.XmlEncode())\n xml_result = xml_string_ % (self.id_, '\\n'.join(xml_tokens),\n LANG_INDENT_)\n return LANG_INDENT_ + xml_result\n\n def Id(self):\n return self.id_\n\n def SetId(self, id):\n self.id_ = id.encode('utf-8')\n\n def Tokens(self):\n return self.tokens_\n\n def SetTokens(self, tokens):\n self.tokens_ = []\n for t in tokens:\n self.AddToken(t)\n\n def AddToken(self, token, merge=False):\n \"\"\"If an identical token already exists in dictionary,\n will merge tokens and cumulate their counts. Checks to\n see that morphology and pronunciations are identical,\n otherwise the tokens will not be merged.\n \"\"\"\n token.SetLangId(self.id_)\n if not merge:\n self.tokens_.append(token)\n else:\n exists = self.MatchToken(token)\n if exists is None:\n self.tokens_.append(token)\n else:\n exists.IncrementCount(token.Count())\n\n def MatchToken(self, token):\n try:\n i = self.tokens_.index(token)\n return self.tokens_[i]\n except ValueError:\n return None\n\n def CompactTokens(self):\n \"\"\"Merge identical tokens and cumulate their counts. Checks to see\n that morphology and pronunciations are identical, otherwise the\n tokens will not be merged.\n \"\"\"\n map = {}\n for token_ in self.tokens_:\n hash_string = token_.EncodeForHash()\n try:\n map[hash_string].append(token_)\n except KeyError:\n map[hash_string] = [token_]\n ntokens = []\n keys = map.keys()\n keys.sort()\n for k in keys:\n token_ = map[k][0]\n for otoken in map[k][1:]:\n token_.IncrementCount(otoken.Count())\n ntokens.append(token_)\n self.tokens_ = ntokens\n",
"<docstring token>\n<assignment token>\n<import token>\n<assignment token>\n<function token>\n<class token>\n<class token>\n<class token>\n\n\nclass Lang:\n <docstring token>\n\n def __init__(self):\n self.id_ = ''\n self.tokens_ = []\n\n def XmlEncode(self):\n if len(self.tokens_) == 0:\n return ''\n xml_string_ = '<lang id=\"%s\">\\n%s\\n%s</lang>'\n xml_tokens = []\n for token_ in self.Tokens():\n xml_tokens.append(token_.XmlEncode())\n xml_result = xml_string_ % (self.id_, '\\n'.join(xml_tokens),\n LANG_INDENT_)\n return LANG_INDENT_ + xml_result\n\n def Id(self):\n return self.id_\n\n def SetId(self, id):\n self.id_ = id.encode('utf-8')\n\n def Tokens(self):\n return self.tokens_\n\n def SetTokens(self, tokens):\n self.tokens_ = []\n for t in tokens:\n self.AddToken(t)\n\n def AddToken(self, token, merge=False):\n \"\"\"If an identical token already exists in dictionary,\n will merge tokens and cumulate their counts. Checks to\n see that morphology and pronunciations are identical,\n otherwise the tokens will not be merged.\n \"\"\"\n token.SetLangId(self.id_)\n if not merge:\n self.tokens_.append(token)\n else:\n exists = self.MatchToken(token)\n if exists is None:\n self.tokens_.append(token)\n else:\n exists.IncrementCount(token.Count())\n <function token>\n\n def CompactTokens(self):\n \"\"\"Merge identical tokens and cumulate their counts. Checks to see\n that morphology and pronunciations are identical, otherwise the\n tokens will not be merged.\n \"\"\"\n map = {}\n for token_ in self.tokens_:\n hash_string = token_.EncodeForHash()\n try:\n map[hash_string].append(token_)\n except KeyError:\n map[hash_string] = [token_]\n ntokens = []\n keys = map.keys()\n keys.sort()\n for k in keys:\n token_ = map[k][0]\n for otoken in map[k][1:]:\n token_.IncrementCount(otoken.Count())\n ntokens.append(token_)\n self.tokens_ = ntokens\n",
"<docstring token>\n<assignment token>\n<import token>\n<assignment token>\n<function token>\n<class token>\n<class token>\n<class token>\n\n\nclass Lang:\n <docstring token>\n\n def __init__(self):\n self.id_ = ''\n self.tokens_ = []\n\n def XmlEncode(self):\n if len(self.tokens_) == 0:\n return ''\n xml_string_ = '<lang id=\"%s\">\\n%s\\n%s</lang>'\n xml_tokens = []\n for token_ in self.Tokens():\n xml_tokens.append(token_.XmlEncode())\n xml_result = xml_string_ % (self.id_, '\\n'.join(xml_tokens),\n LANG_INDENT_)\n return LANG_INDENT_ + xml_result\n\n def Id(self):\n return self.id_\n\n def SetId(self, id):\n self.id_ = id.encode('utf-8')\n\n def Tokens(self):\n return self.tokens_\n\n def SetTokens(self, tokens):\n self.tokens_ = []\n for t in tokens:\n self.AddToken(t)\n\n def AddToken(self, token, merge=False):\n \"\"\"If an identical token already exists in dictionary,\n will merge tokens and cumulate their counts. Checks to\n see that morphology and pronunciations are identical,\n otherwise the tokens will not be merged.\n \"\"\"\n token.SetLangId(self.id_)\n if not merge:\n self.tokens_.append(token)\n else:\n exists = self.MatchToken(token)\n if exists is None:\n self.tokens_.append(token)\n else:\n exists.IncrementCount(token.Count())\n <function token>\n <function token>\n",
"<docstring token>\n<assignment token>\n<import token>\n<assignment token>\n<function token>\n<class token>\n<class token>\n<class token>\n\n\nclass Lang:\n <docstring token>\n\n def __init__(self):\n self.id_ = ''\n self.tokens_ = []\n <function token>\n\n def Id(self):\n return self.id_\n\n def SetId(self, id):\n self.id_ = id.encode('utf-8')\n\n def Tokens(self):\n return self.tokens_\n\n def SetTokens(self, tokens):\n self.tokens_ = []\n for t in tokens:\n self.AddToken(t)\n\n def AddToken(self, token, merge=False):\n \"\"\"If an identical token already exists in dictionary,\n will merge tokens and cumulate their counts. Checks to\n see that morphology and pronunciations are identical,\n otherwise the tokens will not be merged.\n \"\"\"\n token.SetLangId(self.id_)\n if not merge:\n self.tokens_.append(token)\n else:\n exists = self.MatchToken(token)\n if exists is None:\n self.tokens_.append(token)\n else:\n exists.IncrementCount(token.Count())\n <function token>\n <function token>\n",
"<docstring token>\n<assignment token>\n<import token>\n<assignment token>\n<function token>\n<class token>\n<class token>\n<class token>\n\n\nclass Lang:\n <docstring token>\n\n def __init__(self):\n self.id_ = ''\n self.tokens_ = []\n <function token>\n\n def Id(self):\n return self.id_\n\n def SetId(self, id):\n self.id_ = id.encode('utf-8')\n <function token>\n\n def SetTokens(self, tokens):\n self.tokens_ = []\n for t in tokens:\n self.AddToken(t)\n\n def AddToken(self, token, merge=False):\n \"\"\"If an identical token already exists in dictionary,\n will merge tokens and cumulate their counts. Checks to\n see that morphology and pronunciations are identical,\n otherwise the tokens will not be merged.\n \"\"\"\n token.SetLangId(self.id_)\n if not merge:\n self.tokens_.append(token)\n else:\n exists = self.MatchToken(token)\n if exists is None:\n self.tokens_.append(token)\n else:\n exists.IncrementCount(token.Count())\n <function token>\n <function token>\n",
"<docstring token>\n<assignment token>\n<import token>\n<assignment token>\n<function token>\n<class token>\n<class token>\n<class token>\n\n\nclass Lang:\n <docstring token>\n\n def __init__(self):\n self.id_ = ''\n self.tokens_ = []\n <function token>\n\n def Id(self):\n return self.id_\n\n def SetId(self, id):\n self.id_ = id.encode('utf-8')\n <function token>\n\n def SetTokens(self, tokens):\n self.tokens_ = []\n for t in tokens:\n self.AddToken(t)\n <function token>\n <function token>\n <function token>\n",
"<docstring token>\n<assignment token>\n<import token>\n<assignment token>\n<function token>\n<class token>\n<class token>\n<class token>\n\n\nclass Lang:\n <docstring token>\n\n def __init__(self):\n self.id_ = ''\n self.tokens_ = []\n <function token>\n <function token>\n\n def SetId(self, id):\n self.id_ = id.encode('utf-8')\n <function token>\n\n def SetTokens(self, tokens):\n self.tokens_ = []\n for t in tokens:\n self.AddToken(t)\n <function token>\n <function token>\n <function token>\n",
"<docstring token>\n<assignment token>\n<import token>\n<assignment token>\n<function token>\n<class token>\n<class token>\n<class token>\n\n\nclass Lang:\n <docstring token>\n\n def __init__(self):\n self.id_ = ''\n self.tokens_ = []\n <function token>\n <function token>\n\n def SetId(self, id):\n self.id_ = id.encode('utf-8')\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n",
"<docstring token>\n<assignment token>\n<import token>\n<assignment token>\n<function token>\n<class token>\n<class token>\n<class token>\n\n\nclass Lang:\n <docstring token>\n\n def __init__(self):\n self.id_ = ''\n self.tokens_ = []\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n",
"<docstring token>\n<assignment token>\n<import token>\n<assignment token>\n<function token>\n<class token>\n<class token>\n<class token>\n\n\nclass Lang:\n <docstring token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n",
"<docstring token>\n<assignment token>\n<import token>\n<assignment token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n"
] | false |
98,306 |
9a421c88dc25adb6ef52fb64f81321ec4f310b84
|
# -*- coding: utf-8 -*-
"""Interface for Customer Service Incident
"""
from immunarray.lims import messageFactory as _
from immunarray.lims.interfaces import BaseModel
from zope import schema
class ICustomerServiceCall(BaseModel):
"""Interface for Customer Service Call objects CSC
"""
csc_client = schema.Choice(
title=_(u"Client"),
description=_(u"Client"),
required=True,
values=[_(u'Quarantined'), _(u'Released')],
)
csc_instance = schema.Choice(
title=_(u"Instance Type"),
description=_(u"Instance Type"),
required=True,
values=[_(u'Quarantined'), _(u'Released')],
)
csc_datetime = schema.Datetime(
title=_(u"Date and Time of Instance"),
description=_(u"Date and Time of Instance"),
required=False,
)
csc_follow_up_needed = schema.Bool(
title=_(u"Is Follow Up Needed"),
description=_(u"Is Follow Up Needed"),
required=False,
)
csc_status = schema.Choice(
title=_(u"Status of CSI"),
description=_(u"Status of CSI"),
required=True,
values=[_(u'Open'), _(u'Closed'), _(u'Held')],
)
csc_details = schema.Text(
title=_(u"Details of CSI"),
description=_(u"Details of CSI"),
required=False,
)
|
[
"# -*- coding: utf-8 -*-\n\"\"\"Interface for Customer Service Incident\n\"\"\"\n\nfrom immunarray.lims import messageFactory as _\nfrom immunarray.lims.interfaces import BaseModel\nfrom zope import schema\n\n\nclass ICustomerServiceCall(BaseModel):\n \"\"\"Interface for Customer Service Call objects CSC\n \"\"\"\n csc_client = schema.Choice(\n title=_(u\"Client\"),\n description=_(u\"Client\"),\n required=True,\n values=[_(u'Quarantined'), _(u'Released')],\n )\n csc_instance = schema.Choice(\n title=_(u\"Instance Type\"),\n description=_(u\"Instance Type\"),\n required=True,\n values=[_(u'Quarantined'), _(u'Released')],\n )\n csc_datetime = schema.Datetime(\n title=_(u\"Date and Time of Instance\"),\n description=_(u\"Date and Time of Instance\"),\n required=False,\n )\n csc_follow_up_needed = schema.Bool(\n title=_(u\"Is Follow Up Needed\"),\n description=_(u\"Is Follow Up Needed\"),\n required=False,\n )\n csc_status = schema.Choice(\n title=_(u\"Status of CSI\"),\n description=_(u\"Status of CSI\"),\n required=True,\n values=[_(u'Open'), _(u'Closed'), _(u'Held')],\n )\n csc_details = schema.Text(\n title=_(u\"Details of CSI\"),\n description=_(u\"Details of CSI\"),\n required=False,\n )\n",
"<docstring token>\nfrom immunarray.lims import messageFactory as _\nfrom immunarray.lims.interfaces import BaseModel\nfrom zope import schema\n\n\nclass ICustomerServiceCall(BaseModel):\n \"\"\"Interface for Customer Service Call objects CSC\n \"\"\"\n csc_client = schema.Choice(title=_(u'Client'), description=_(u'Client'),\n required=True, values=[_(u'Quarantined'), _(u'Released')])\n csc_instance = schema.Choice(title=_(u'Instance Type'), description=_(\n u'Instance Type'), required=True, values=[_(u'Quarantined'), _(\n u'Released')])\n csc_datetime = schema.Datetime(title=_(u'Date and Time of Instance'),\n description=_(u'Date and Time of Instance'), required=False)\n csc_follow_up_needed = schema.Bool(title=_(u'Is Follow Up Needed'),\n description=_(u'Is Follow Up Needed'), required=False)\n csc_status = schema.Choice(title=_(u'Status of CSI'), description=_(\n u'Status of CSI'), required=True, values=[_(u'Open'), _(u'Closed'),\n _(u'Held')])\n csc_details = schema.Text(title=_(u'Details of CSI'), description=_(\n u'Details of CSI'), required=False)\n",
"<docstring token>\n<import token>\n\n\nclass ICustomerServiceCall(BaseModel):\n \"\"\"Interface for Customer Service Call objects CSC\n \"\"\"\n csc_client = schema.Choice(title=_(u'Client'), description=_(u'Client'),\n required=True, values=[_(u'Quarantined'), _(u'Released')])\n csc_instance = schema.Choice(title=_(u'Instance Type'), description=_(\n u'Instance Type'), required=True, values=[_(u'Quarantined'), _(\n u'Released')])\n csc_datetime = schema.Datetime(title=_(u'Date and Time of Instance'),\n description=_(u'Date and Time of Instance'), required=False)\n csc_follow_up_needed = schema.Bool(title=_(u'Is Follow Up Needed'),\n description=_(u'Is Follow Up Needed'), required=False)\n csc_status = schema.Choice(title=_(u'Status of CSI'), description=_(\n u'Status of CSI'), required=True, values=[_(u'Open'), _(u'Closed'),\n _(u'Held')])\n csc_details = schema.Text(title=_(u'Details of CSI'), description=_(\n u'Details of CSI'), required=False)\n",
"<docstring token>\n<import token>\n\n\nclass ICustomerServiceCall(BaseModel):\n <docstring token>\n csc_client = schema.Choice(title=_(u'Client'), description=_(u'Client'),\n required=True, values=[_(u'Quarantined'), _(u'Released')])\n csc_instance = schema.Choice(title=_(u'Instance Type'), description=_(\n u'Instance Type'), required=True, values=[_(u'Quarantined'), _(\n u'Released')])\n csc_datetime = schema.Datetime(title=_(u'Date and Time of Instance'),\n description=_(u'Date and Time of Instance'), required=False)\n csc_follow_up_needed = schema.Bool(title=_(u'Is Follow Up Needed'),\n description=_(u'Is Follow Up Needed'), required=False)\n csc_status = schema.Choice(title=_(u'Status of CSI'), description=_(\n u'Status of CSI'), required=True, values=[_(u'Open'), _(u'Closed'),\n _(u'Held')])\n csc_details = schema.Text(title=_(u'Details of CSI'), description=_(\n u'Details of CSI'), required=False)\n",
"<docstring token>\n<import token>\n\n\nclass ICustomerServiceCall(BaseModel):\n <docstring token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n",
"<docstring token>\n<import token>\n<class token>\n"
] | false |
98,307 |
92a8ea61d471afa8360a32870e7e69d3a3f1f042
|
# Ensure a class has only one instance and provide a global point of access to it.
# Use when there must by only one instance of a class.
class Person:
__instance = None
@staticmethod
def getInstance():
if Person.__instance == None:
Person()
return Person.__instance
def __init__(self):
if Person.__instance != None:
raise Exception("This class is a singleton!")
Person.__instance = self
def main():
p = Person()
print p
same_p = p.getInstance()
print same_p
# Standard boilerplate to call the main() function.
if __name__ == '__main__':
main()
|
[
"# Ensure a class has only one instance and provide a global point of access to it.\n\n# Use when there must by only one instance of a class.\n\nclass Person:\n __instance = None\n @staticmethod\n def getInstance():\n if Person.__instance == None:\n Person()\n return Person.__instance\n\n def __init__(self):\n if Person.__instance != None:\n raise Exception(\"This class is a singleton!\")\n \n Person.__instance = self\n\ndef main():\n p = Person()\n print p\n \n same_p = p.getInstance()\n print same_p\n\n# Standard boilerplate to call the main() function.\nif __name__ == '__main__':\n main()\n\n \n"
] | true |
98,308 |
122d7c9aa3a72b65bffc3a4905a14bf8ee68b896
|
#!/usr/bin/env python3
# -*- utf-8 -*-
import numpy as np
class TreeNode:
def __init__(self, name, num_occur, parent):
self.name = name
self.count = num_occur
self.node_link = None
self.parent = parent
self.children = {}
def inc(self, num_occur):
self.count += num_occur
def disp(self, ind=1):
print(' ' * ind, self.name, ' ', self.count)
for child in self.children.values():
child.disp(ind + 1)
def create_tree(data_set, min_sup=1):
header = {}
for trans in data_set:
for item in trans:
header[item] = header.get(item, 0) + data_set[trans]
header = {k: v for k, v in header.items() if v >= min_sup}
freq_item_set = set(header.keys())
if len(freq_item_set) == 0:
return None, None
for k in header:
header[k] = [header[k], None]
ret_tree = TreeNode('Null Set', 1, None)
for trans, count in data_set.items():
local_d = {}
for item in trans:
if item in freq_item_set:
local_d[item] = header[item][0]
if len(local_d) > 0:
ordered_items = [v[0] for v in sorted(local_d.items(), key=lambda p: p[1], reverse=True)]
update_tree(ordered_items, ret_tree, header, count)
print('222')
return ret_tree, header
def update_tree(items, in_tree, header, count):
if items[0] in in_tree.children:
in_tree.children[items[0]].inc(count)
else:
in_tree.children[items[0]] = TreeNode(items[0], count, in_tree)
if header[items[0]][1] is None:
header[items[0]][1] = in_tree.children[items[0]]
else:
update_header(header[items[0]][1], in_tree.children[items[0]])
if len(items) > 1:
update_tree(items[1::], in_tree.children[items[0]], header, count)
def update_header(node2test, target_node):
while node2test.node_link is not None:
node2test = node2test.node_link
node2test.node_link = target_node
def load_simple_data():
return [['r', 'z', 'h', 'j', 'p'],
['z', 'y', 'x', 'w', 'v', 'u', 't', 's'],
['z'],
['r', 'x', 'n', 'o', 's'],
['y', 'r', 'x', 'z', 'q', 't', 'p'],
['y', 'z', 'x', 'e', 'q', 's', 't', 'm']]
def create_init_set(data_set):
ret_dict = {}
for trans in data_set:
ret_dict[frozenset(trans)] = 1
return ret_dict
if __name__ == '__main__':
pass
|
[
"#!/usr/bin/env python3\n# -*- utf-8 -*-\n\nimport numpy as np\n\n\nclass TreeNode:\n def __init__(self, name, num_occur, parent):\n self.name = name\n self.count = num_occur\n self.node_link = None\n self.parent = parent\n self.children = {}\n\n def inc(self, num_occur):\n self.count += num_occur\n\n def disp(self, ind=1):\n print(' ' * ind, self.name, ' ', self.count)\n for child in self.children.values():\n child.disp(ind + 1)\n\n\ndef create_tree(data_set, min_sup=1):\n header = {}\n for trans in data_set:\n for item in trans:\n header[item] = header.get(item, 0) + data_set[trans]\n header = {k: v for k, v in header.items() if v >= min_sup}\n freq_item_set = set(header.keys())\n if len(freq_item_set) == 0:\n return None, None\n for k in header:\n header[k] = [header[k], None]\n ret_tree = TreeNode('Null Set', 1, None)\n for trans, count in data_set.items():\n local_d = {}\n for item in trans:\n if item in freq_item_set:\n local_d[item] = header[item][0]\n if len(local_d) > 0:\n ordered_items = [v[0] for v in sorted(local_d.items(), key=lambda p: p[1], reverse=True)]\n update_tree(ordered_items, ret_tree, header, count)\n print('222')\n return ret_tree, header\n\n\ndef update_tree(items, in_tree, header, count):\n if items[0] in in_tree.children:\n in_tree.children[items[0]].inc(count)\n else:\n in_tree.children[items[0]] = TreeNode(items[0], count, in_tree)\n if header[items[0]][1] is None:\n header[items[0]][1] = in_tree.children[items[0]]\n else:\n update_header(header[items[0]][1], in_tree.children[items[0]])\n if len(items) > 1:\n update_tree(items[1::], in_tree.children[items[0]], header, count)\n\n\ndef update_header(node2test, target_node):\n while node2test.node_link is not None:\n node2test = node2test.node_link\n node2test.node_link = target_node\n\n\ndef load_simple_data():\n return [['r', 'z', 'h', 'j', 'p'],\n ['z', 'y', 'x', 'w', 'v', 'u', 't', 's'],\n ['z'],\n ['r', 'x', 'n', 'o', 's'],\n ['y', 'r', 'x', 'z', 'q', 't', 'p'],\n ['y', 'z', 'x', 'e', 'q', 's', 't', 'm']]\n\n\ndef create_init_set(data_set):\n ret_dict = {}\n for trans in data_set:\n ret_dict[frozenset(trans)] = 1\n return ret_dict\n\n\nif __name__ == '__main__':\n pass\n",
"import numpy as np\n\n\nclass TreeNode:\n\n def __init__(self, name, num_occur, parent):\n self.name = name\n self.count = num_occur\n self.node_link = None\n self.parent = parent\n self.children = {}\n\n def inc(self, num_occur):\n self.count += num_occur\n\n def disp(self, ind=1):\n print(' ' * ind, self.name, ' ', self.count)\n for child in self.children.values():\n child.disp(ind + 1)\n\n\ndef create_tree(data_set, min_sup=1):\n header = {}\n for trans in data_set:\n for item in trans:\n header[item] = header.get(item, 0) + data_set[trans]\n header = {k: v for k, v in header.items() if v >= min_sup}\n freq_item_set = set(header.keys())\n if len(freq_item_set) == 0:\n return None, None\n for k in header:\n header[k] = [header[k], None]\n ret_tree = TreeNode('Null Set', 1, None)\n for trans, count in data_set.items():\n local_d = {}\n for item in trans:\n if item in freq_item_set:\n local_d[item] = header[item][0]\n if len(local_d) > 0:\n ordered_items = [v[0] for v in sorted(local_d.items(), key=lambda\n p: p[1], reverse=True)]\n update_tree(ordered_items, ret_tree, header, count)\n print('222')\n return ret_tree, header\n\n\ndef update_tree(items, in_tree, header, count):\n if items[0] in in_tree.children:\n in_tree.children[items[0]].inc(count)\n else:\n in_tree.children[items[0]] = TreeNode(items[0], count, in_tree)\n if header[items[0]][1] is None:\n header[items[0]][1] = in_tree.children[items[0]]\n else:\n update_header(header[items[0]][1], in_tree.children[items[0]])\n if len(items) > 1:\n update_tree(items[1:], in_tree.children[items[0]], header, count)\n\n\ndef update_header(node2test, target_node):\n while node2test.node_link is not None:\n node2test = node2test.node_link\n node2test.node_link = target_node\n\n\ndef load_simple_data():\n return [['r', 'z', 'h', 'j', 'p'], ['z', 'y', 'x', 'w', 'v', 'u', 't',\n 's'], ['z'], ['r', 'x', 'n', 'o', 's'], ['y', 'r', 'x', 'z', 'q',\n 't', 'p'], ['y', 'z', 'x', 'e', 'q', 's', 't', 'm']]\n\n\ndef create_init_set(data_set):\n ret_dict = {}\n for trans in data_set:\n ret_dict[frozenset(trans)] = 1\n return ret_dict\n\n\nif __name__ == '__main__':\n pass\n",
"<import token>\n\n\nclass TreeNode:\n\n def __init__(self, name, num_occur, parent):\n self.name = name\n self.count = num_occur\n self.node_link = None\n self.parent = parent\n self.children = {}\n\n def inc(self, num_occur):\n self.count += num_occur\n\n def disp(self, ind=1):\n print(' ' * ind, self.name, ' ', self.count)\n for child in self.children.values():\n child.disp(ind + 1)\n\n\ndef create_tree(data_set, min_sup=1):\n header = {}\n for trans in data_set:\n for item in trans:\n header[item] = header.get(item, 0) + data_set[trans]\n header = {k: v for k, v in header.items() if v >= min_sup}\n freq_item_set = set(header.keys())\n if len(freq_item_set) == 0:\n return None, None\n for k in header:\n header[k] = [header[k], None]\n ret_tree = TreeNode('Null Set', 1, None)\n for trans, count in data_set.items():\n local_d = {}\n for item in trans:\n if item in freq_item_set:\n local_d[item] = header[item][0]\n if len(local_d) > 0:\n ordered_items = [v[0] for v in sorted(local_d.items(), key=lambda\n p: p[1], reverse=True)]\n update_tree(ordered_items, ret_tree, header, count)\n print('222')\n return ret_tree, header\n\n\ndef update_tree(items, in_tree, header, count):\n if items[0] in in_tree.children:\n in_tree.children[items[0]].inc(count)\n else:\n in_tree.children[items[0]] = TreeNode(items[0], count, in_tree)\n if header[items[0]][1] is None:\n header[items[0]][1] = in_tree.children[items[0]]\n else:\n update_header(header[items[0]][1], in_tree.children[items[0]])\n if len(items) > 1:\n update_tree(items[1:], in_tree.children[items[0]], header, count)\n\n\ndef update_header(node2test, target_node):\n while node2test.node_link is not None:\n node2test = node2test.node_link\n node2test.node_link = target_node\n\n\ndef load_simple_data():\n return [['r', 'z', 'h', 'j', 'p'], ['z', 'y', 'x', 'w', 'v', 'u', 't',\n 's'], ['z'], ['r', 'x', 'n', 'o', 's'], ['y', 'r', 'x', 'z', 'q',\n 't', 'p'], ['y', 'z', 'x', 'e', 'q', 's', 't', 'm']]\n\n\ndef create_init_set(data_set):\n ret_dict = {}\n for trans in data_set:\n ret_dict[frozenset(trans)] = 1\n return ret_dict\n\n\nif __name__ == '__main__':\n pass\n",
"<import token>\n\n\nclass TreeNode:\n\n def __init__(self, name, num_occur, parent):\n self.name = name\n self.count = num_occur\n self.node_link = None\n self.parent = parent\n self.children = {}\n\n def inc(self, num_occur):\n self.count += num_occur\n\n def disp(self, ind=1):\n print(' ' * ind, self.name, ' ', self.count)\n for child in self.children.values():\n child.disp(ind + 1)\n\n\ndef create_tree(data_set, min_sup=1):\n header = {}\n for trans in data_set:\n for item in trans:\n header[item] = header.get(item, 0) + data_set[trans]\n header = {k: v for k, v in header.items() if v >= min_sup}\n freq_item_set = set(header.keys())\n if len(freq_item_set) == 0:\n return None, None\n for k in header:\n header[k] = [header[k], None]\n ret_tree = TreeNode('Null Set', 1, None)\n for trans, count in data_set.items():\n local_d = {}\n for item in trans:\n if item in freq_item_set:\n local_d[item] = header[item][0]\n if len(local_d) > 0:\n ordered_items = [v[0] for v in sorted(local_d.items(), key=lambda\n p: p[1], reverse=True)]\n update_tree(ordered_items, ret_tree, header, count)\n print('222')\n return ret_tree, header\n\n\ndef update_tree(items, in_tree, header, count):\n if items[0] in in_tree.children:\n in_tree.children[items[0]].inc(count)\n else:\n in_tree.children[items[0]] = TreeNode(items[0], count, in_tree)\n if header[items[0]][1] is None:\n header[items[0]][1] = in_tree.children[items[0]]\n else:\n update_header(header[items[0]][1], in_tree.children[items[0]])\n if len(items) > 1:\n update_tree(items[1:], in_tree.children[items[0]], header, count)\n\n\ndef update_header(node2test, target_node):\n while node2test.node_link is not None:\n node2test = node2test.node_link\n node2test.node_link = target_node\n\n\ndef load_simple_data():\n return [['r', 'z', 'h', 'j', 'p'], ['z', 'y', 'x', 'w', 'v', 'u', 't',\n 's'], ['z'], ['r', 'x', 'n', 'o', 's'], ['y', 'r', 'x', 'z', 'q',\n 't', 'p'], ['y', 'z', 'x', 'e', 'q', 's', 't', 'm']]\n\n\ndef create_init_set(data_set):\n ret_dict = {}\n for trans in data_set:\n ret_dict[frozenset(trans)] = 1\n return ret_dict\n\n\n<code token>\n",
"<import token>\n\n\nclass TreeNode:\n\n def __init__(self, name, num_occur, parent):\n self.name = name\n self.count = num_occur\n self.node_link = None\n self.parent = parent\n self.children = {}\n\n def inc(self, num_occur):\n self.count += num_occur\n\n def disp(self, ind=1):\n print(' ' * ind, self.name, ' ', self.count)\n for child in self.children.values():\n child.disp(ind + 1)\n\n\n<function token>\n\n\ndef update_tree(items, in_tree, header, count):\n if items[0] in in_tree.children:\n in_tree.children[items[0]].inc(count)\n else:\n in_tree.children[items[0]] = TreeNode(items[0], count, in_tree)\n if header[items[0]][1] is None:\n header[items[0]][1] = in_tree.children[items[0]]\n else:\n update_header(header[items[0]][1], in_tree.children[items[0]])\n if len(items) > 1:\n update_tree(items[1:], in_tree.children[items[0]], header, count)\n\n\ndef update_header(node2test, target_node):\n while node2test.node_link is not None:\n node2test = node2test.node_link\n node2test.node_link = target_node\n\n\ndef load_simple_data():\n return [['r', 'z', 'h', 'j', 'p'], ['z', 'y', 'x', 'w', 'v', 'u', 't',\n 's'], ['z'], ['r', 'x', 'n', 'o', 's'], ['y', 'r', 'x', 'z', 'q',\n 't', 'p'], ['y', 'z', 'x', 'e', 'q', 's', 't', 'm']]\n\n\ndef create_init_set(data_set):\n ret_dict = {}\n for trans in data_set:\n ret_dict[frozenset(trans)] = 1\n return ret_dict\n\n\n<code token>\n",
"<import token>\n\n\nclass TreeNode:\n\n def __init__(self, name, num_occur, parent):\n self.name = name\n self.count = num_occur\n self.node_link = None\n self.parent = parent\n self.children = {}\n\n def inc(self, num_occur):\n self.count += num_occur\n\n def disp(self, ind=1):\n print(' ' * ind, self.name, ' ', self.count)\n for child in self.children.values():\n child.disp(ind + 1)\n\n\n<function token>\n\n\ndef update_tree(items, in_tree, header, count):\n if items[0] in in_tree.children:\n in_tree.children[items[0]].inc(count)\n else:\n in_tree.children[items[0]] = TreeNode(items[0], count, in_tree)\n if header[items[0]][1] is None:\n header[items[0]][1] = in_tree.children[items[0]]\n else:\n update_header(header[items[0]][1], in_tree.children[items[0]])\n if len(items) > 1:\n update_tree(items[1:], in_tree.children[items[0]], header, count)\n\n\ndef update_header(node2test, target_node):\n while node2test.node_link is not None:\n node2test = node2test.node_link\n node2test.node_link = target_node\n\n\n<function token>\n\n\ndef create_init_set(data_set):\n ret_dict = {}\n for trans in data_set:\n ret_dict[frozenset(trans)] = 1\n return ret_dict\n\n\n<code token>\n",
"<import token>\n\n\nclass TreeNode:\n\n def __init__(self, name, num_occur, parent):\n self.name = name\n self.count = num_occur\n self.node_link = None\n self.parent = parent\n self.children = {}\n\n def inc(self, num_occur):\n self.count += num_occur\n\n def disp(self, ind=1):\n print(' ' * ind, self.name, ' ', self.count)\n for child in self.children.values():\n child.disp(ind + 1)\n\n\n<function token>\n\n\ndef update_tree(items, in_tree, header, count):\n if items[0] in in_tree.children:\n in_tree.children[items[0]].inc(count)\n else:\n in_tree.children[items[0]] = TreeNode(items[0], count, in_tree)\n if header[items[0]][1] is None:\n header[items[0]][1] = in_tree.children[items[0]]\n else:\n update_header(header[items[0]][1], in_tree.children[items[0]])\n if len(items) > 1:\n update_tree(items[1:], in_tree.children[items[0]], header, count)\n\n\n<function token>\n<function token>\n\n\ndef create_init_set(data_set):\n ret_dict = {}\n for trans in data_set:\n ret_dict[frozenset(trans)] = 1\n return ret_dict\n\n\n<code token>\n",
"<import token>\n\n\nclass TreeNode:\n\n def __init__(self, name, num_occur, parent):\n self.name = name\n self.count = num_occur\n self.node_link = None\n self.parent = parent\n self.children = {}\n\n def inc(self, num_occur):\n self.count += num_occur\n\n def disp(self, ind=1):\n print(' ' * ind, self.name, ' ', self.count)\n for child in self.children.values():\n child.disp(ind + 1)\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef create_init_set(data_set):\n ret_dict = {}\n for trans in data_set:\n ret_dict[frozenset(trans)] = 1\n return ret_dict\n\n\n<code token>\n",
"<import token>\n\n\nclass TreeNode:\n\n def __init__(self, name, num_occur, parent):\n self.name = name\n self.count = num_occur\n self.node_link = None\n self.parent = parent\n self.children = {}\n\n def inc(self, num_occur):\n self.count += num_occur\n\n def disp(self, ind=1):\n print(' ' * ind, self.name, ' ', self.count)\n for child in self.children.values():\n child.disp(ind + 1)\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<code token>\n",
"<import token>\n\n\nclass TreeNode:\n <function token>\n\n def inc(self, num_occur):\n self.count += num_occur\n\n def disp(self, ind=1):\n print(' ' * ind, self.name, ' ', self.count)\n for child in self.children.values():\n child.disp(ind + 1)\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<code token>\n",
"<import token>\n\n\nclass TreeNode:\n <function token>\n\n def inc(self, num_occur):\n self.count += num_occur\n <function token>\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<code token>\n",
"<import token>\n\n\nclass TreeNode:\n <function token>\n <function token>\n <function token>\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<code token>\n",
"<import token>\n<class token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<code token>\n"
] | false |
98,309 |
d96734dfc098851192365f2009c3a663d97356ee
|
import cv2
import glfw
import importlib
import numpy as np
import pkgutil
import tasks
import time
import torch
import igl
from evaluator import Evaluator
from glfw_controller import *
from image_view import *
from mesh_view import MultiMeshModel
from mesh_view import MultiMeshView
from multiprocessing import Process
from video_capture import VideoCapture
from torch.autograd import Variable
from tasks.camera_to_image import CfgLoader
import graphics_math as gm
def render_image(model, pose):
pose = torch.from_numpy(
np.reshape(poses[0], (1, 1, 1, 7)).astype(np.float32)
)
pose = Variable(pose).cpu()
img = model(pose)
return img
def checker_board():
return cv2.cvtColor(cv2.imread("checkerboard.jpg"), cv2.COLOR_BGR2RGB)
def to_img(x):
x = 0.5 * (x + 1)
x = x.clamp(0, 1)
x = x.view(x.size(0), 3, 128, 128)
return x
def to_numpy_img(x):
x = to_img(x)
x = x.detach().numpy().squeeze() if len(x.shape) == 4 else x
x = np.transpose(x, (1, 2, 0))
x *= 255.0
x = np.clip(x, 0.0, 255.0)
x = x.astype(np.uint8)
x = cv2.cvtColor(x, cv2.COLOR_BGR2RGB)
return x
def to_torch_pose(x):
x = torch.from_numpy(np.reshape(x, (1, 1, 1, 7)).astype(np.float32))
x = Variable(x).cpu()
return x
def read_poses(pose_file):
lines = open(pose_file).read().splitlines()
poses = [[float(z) for z in x.split()[1:]] for x in lines]
return poses
def axes():
return (
np.array([[0, 0, 0], [0, 1, 0], [1, 0, 0], [0, 0, 1]]),
np.array([0, 1, 0, 2, 0, 3]),
np.array(
[[0.0, 1.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0], [1.0, 0.0, 0.0]]
),
)
def update_axes(which, angle, orientation):
axis = orientation[:, which]
transformation = gm.rotate(angle, axis)
orientation = transformation.dot(orientation)
return orientation
load_configuration = True
load_model = True
load_weights = True
load_frames = True
load_poses = True
if load_configuration:
print(f"Loading configuration ...")
cfg = CfgLoader().get_cfg("cpu")
if load_model:
print("Loading model ...")
model = cfg["model"]
if load_weights:
print("Loading model weights ...")
weights_file = cfg["weights_file"]
model.load_state_dict(torch.load(f"./{weights_file}"))
poses_file = (
f'{cfg["target_dir"]}/poses.txt'
if load_configuration
else "targets/camera_to_image/poses.txt"
)
movie_file = (
f'{cfg["target_dir"]}/movie.mov'
if load_configuration
else "targets/camera_to_image/movie.mov"
)
if load_frames:
print(f"Loading frames: ./{movie_file} ...")
frames = VideoCapture(f"./{movie_file}")
if load_poses:
print(f"Loading poses: ./{poses_file} ...")
poses = read_poses(f"./{poses_file}")
poses = [
[float(x) for x in l.split()[1:]]
for l in open(poses_file).read().splitlines()
]
poses = np.array(poses)
print("Finding bounding sphere ...")
translations = np.zeros((poses.shape[0], 4))
translations[:, 0:3] = poses[:, 0:3]
translations[:, 3] = 1
quaternions = poses[:, 3:7]
# lights = poses[:, 7:]
vertices = translations[:, 0:3]
points = translations.T
num_points = translations.shape[0]
A = 2 * points
A[3, :] = 1
f = np.zeros((1, num_points))
for i in range(num_points):
f[0, i] = np.linalg.norm(points[0:3, i]) ** 2
C, res, rank, svals = np.linalg.lstsq(A.T, f.T, rcond=None)
radius = (np.linalg.norm(C[0:3]) ** 2 + C[3]) ** (1 / 2)
print(f"C = {C}, R = {((np.linalg.norm(C[0:3]) ** 2) + C[3]) ** (1/2)}")
print(f"C[0] = {C[0]}, C[1] = {C[1]}, C[2] = {C[2]}")
app = GlfwApp()
app.init()
multi_controller = GlfwMultiController()
width, height = 640, 480
xpos, ypos, title = 0, 0, "Camera"
point_fragment_shader = "point_fragment.glsl"
point_vertex_shader = "point_vertex.glsl"
vertices = np.concatenate((vertices, np.array([vertices[0, :]])))
big_point = np.array([[0, 0, 0]])
multi_mesh_model = MultiMeshModel(
[
{
"name": "bounding_sphere",
"type": "mesh",
"mesh": "sphere.obj",
"M": gm.uniform_scale(5.0).dot(gm.translate(0, -0.1, 0)),
"fragment": "wireframe_fragment.glsl",
"vertex": "wireframe_vertex.glsl",
"geometry": "wireframe_geometry.glsl",
"color": np.array([1.0, 1.0, 1.0]),
"opacity": 0.5,
},
{
"name": "points",
"type": "points",
"mesh": vertices,
"M": gm.uniform_scale(0.5 / radius).dot(
gm.translate(-C[0], -C[1], -C[2])
),
"fragment": "point_fragment.glsl",
"vertex": "point_vertex.glsl",
"geometry": None,
"color": np.array([1.0, 0.0, 0.0]),
},
{
"name": "axes",
"type": "lines",
"mesh": axes(),
"R": np.eye(4),
"T": gm.translate(0, 0, 0),
"scale": gm.uniform_scale(0.20),
"M": gm.uniform_scale(0.20),
"fragment": "line_fragment.glsl",
"vertex": "line_vertex.glsl",
"geometry": "line_geometry.glsl",
"color": np.array([0.0, 1.0, 0.0]),
},
]
)
class KeyCallbackHandler:
def __init__(self, data):
self.data = data
def update_orientation(self, name, which, angle):
obj = self.data.name_to_mesh_info[name]
R = obj["R"]
axis = np.expand_dims(R[:, which][0:3], 0).T
# import pdb
# pdb.set_trace()
M = gm.rotate(angle, axis)
obj["R"] = gm.rotate(angle, axis).dot(R)
def update_translation(self, name, tx, ty, tz):
obj = self.data.name_to_mesh_info[name]
obj["T"] = gm.translate(tx, ty, tz).dot(obj["T"])
def update_model_matrix(self, name):
obj = self.data.name_to_mesh_info[name]
obj["M"] = np.linalg.multi_dot([obj["T"], obj["R"], obj["scale"]])
def key_handler(self, key, scancode, action, mods):
if key == glfw.KEY_W and action == glfw.PRESS:
self.update_translation("axes", 0, 0, 0.025)
elif key == glfw.KEY_A and action == glfw.PRESS:
self.update_translation("axes", -0.025, 0, 0)
elif key == glfw.KEY_S and action == glfw.PRESS:
self.update_translation("axes", 0, 0, -0.025)
elif key == glfw.KEY_D and action == glfw.PRESS:
self.update_translation("axes", 0.025, 0, 0.0)
elif key == glfw.KEY_R and action == glfw.PRESS:
self.update_translation("axes", 0, 0.025, 0.0)
elif key == glfw.KEY_F and action == glfw.PRESS:
self.update_translation("axes", 0, -0.025, 0.0)
elif key == glfw.KEY_U and action == glfw.PRESS:
self.update_orientation("axes", 0, 0.25)
elif key == glfw.KEY_J and action == glfw.PRESS:
self.update_orientation("axes", 0, -0.25)
elif key == glfw.KEY_H and action == glfw.PRESS:
self.update_orientation("axes", 1, 0.25)
elif key == glfw.KEY_K and action == glfw.PRESS:
self.update_orientation("axes", 1, -0.25)
elif key == glfw.KEY_O and action == glfw.PRESS:
self.update_orientation("axes", 2, 0.25)
elif key == glfw.KEY_L and action == glfw.PRESS:
self.update_orientation("axes", 2, -0.25)
elif key == glfw.KEY_SPACE and action == glfw.PRESS:
print("Render!")
obj = self.data.name_to_mesh_info["axes"]
self.update_model_matrix("axes")
multi_mesh_view = MultiMeshView()
eye = [0.0, 0.0, 2.0, 1.0]
at = [0.0, 0.0, 0.0, 1.0]
up = [0.0, 1.0, 0.0, 1.0]
fov = 45.0
near = 0.0001
far = 100
light_position = [0.0, 0.0, 4.0]
multi_mesh_view.set_camera(eye, at, up, fov, near, far)
multi_mesh_view.set_light_position(light_position)
mesh_controller = GlfwController(
width, height, xpos, ypos, title, multi_mesh_view, multi_mesh_model
)
mesh_controller.register_user_key_callback(KeyCallbackHandler(multi_mesh_model))
multi_controller.add(mesh_controller)
image_fragment_shader = "image_fragment.glsl"
image_vertex_shader = "image_vertex.glsl"
output_image = (
to_numpy_img(model(to_torch_pose(poses[0])))
if load_model
else checker_board()
)
image_model = ImageModel(output_image)
image_view = ImageView(image_fragment_shader, image_vertex_shader)
image_controller = GlfwController(
400, 300, 500, 100, "Image View", image_view, image_model
)
multi_controller.add(image_controller)
multi_controller.run()
|
[
"import cv2\nimport glfw\nimport importlib\nimport numpy as np\nimport pkgutil\nimport tasks\nimport time\nimport torch\nimport igl\n\nfrom evaluator import Evaluator\nfrom glfw_controller import *\nfrom image_view import *\nfrom mesh_view import MultiMeshModel\nfrom mesh_view import MultiMeshView\nfrom multiprocessing import Process\nfrom video_capture import VideoCapture\nfrom torch.autograd import Variable\n\nfrom tasks.camera_to_image import CfgLoader\n\nimport graphics_math as gm\n\n\ndef render_image(model, pose):\n pose = torch.from_numpy(\n np.reshape(poses[0], (1, 1, 1, 7)).astype(np.float32)\n )\n pose = Variable(pose).cpu()\n img = model(pose)\n return img\n\n\ndef checker_board():\n return cv2.cvtColor(cv2.imread(\"checkerboard.jpg\"), cv2.COLOR_BGR2RGB)\n\n\ndef to_img(x):\n x = 0.5 * (x + 1)\n x = x.clamp(0, 1)\n x = x.view(x.size(0), 3, 128, 128)\n return x\n\n\ndef to_numpy_img(x):\n x = to_img(x)\n x = x.detach().numpy().squeeze() if len(x.shape) == 4 else x\n x = np.transpose(x, (1, 2, 0))\n x *= 255.0\n x = np.clip(x, 0.0, 255.0)\n x = x.astype(np.uint8)\n x = cv2.cvtColor(x, cv2.COLOR_BGR2RGB)\n return x\n\n\ndef to_torch_pose(x):\n x = torch.from_numpy(np.reshape(x, (1, 1, 1, 7)).astype(np.float32))\n x = Variable(x).cpu()\n return x\n\n\ndef read_poses(pose_file):\n lines = open(pose_file).read().splitlines()\n poses = [[float(z) for z in x.split()[1:]] for x in lines]\n return poses\n\n\ndef axes():\n return (\n np.array([[0, 0, 0], [0, 1, 0], [1, 0, 0], [0, 0, 1]]),\n np.array([0, 1, 0, 2, 0, 3]),\n np.array(\n [[0.0, 1.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0], [1.0, 0.0, 0.0]]\n ),\n )\n\n\ndef update_axes(which, angle, orientation):\n axis = orientation[:, which]\n transformation = gm.rotate(angle, axis)\n orientation = transformation.dot(orientation)\n return orientation\n\n\nload_configuration = True\nload_model = True \nload_weights = True \nload_frames = True \nload_poses = True \nif load_configuration:\n print(f\"Loading configuration ...\")\n cfg = CfgLoader().get_cfg(\"cpu\")\n if load_model:\n print(\"Loading model ...\")\n model = cfg[\"model\"]\n if load_weights:\n print(\"Loading model weights ...\")\n weights_file = cfg[\"weights_file\"]\n model.load_state_dict(torch.load(f\"./{weights_file}\"))\n\nposes_file = (\n f'{cfg[\"target_dir\"]}/poses.txt'\n if load_configuration\n else \"targets/camera_to_image/poses.txt\"\n)\nmovie_file = (\n f'{cfg[\"target_dir\"]}/movie.mov'\n if load_configuration\n else \"targets/camera_to_image/movie.mov\"\n)\n\nif load_frames:\n print(f\"Loading frames: ./{movie_file} ...\")\n frames = VideoCapture(f\"./{movie_file}\")\n\nif load_poses:\n print(f\"Loading poses: ./{poses_file} ...\")\n poses = read_poses(f\"./{poses_file}\")\n poses = [\n [float(x) for x in l.split()[1:]]\n for l in open(poses_file).read().splitlines()\n ]\n poses = np.array(poses)\n\nprint(\"Finding bounding sphere ...\")\ntranslations = np.zeros((poses.shape[0], 4))\ntranslations[:, 0:3] = poses[:, 0:3]\ntranslations[:, 3] = 1\nquaternions = poses[:, 3:7]\n# lights = poses[:, 7:]\nvertices = translations[:, 0:3]\n\npoints = translations.T\nnum_points = translations.shape[0]\nA = 2 * points\nA[3, :] = 1\nf = np.zeros((1, num_points))\n\nfor i in range(num_points):\n f[0, i] = np.linalg.norm(points[0:3, i]) ** 2\n\nC, res, rank, svals = np.linalg.lstsq(A.T, f.T, rcond=None)\nradius = (np.linalg.norm(C[0:3]) ** 2 + C[3]) ** (1 / 2)\nprint(f\"C = {C}, R = {((np.linalg.norm(C[0:3]) ** 2) + C[3]) ** (1/2)}\")\nprint(f\"C[0] = {C[0]}, C[1] = {C[1]}, C[2] = {C[2]}\")\n\napp = GlfwApp()\napp.init()\n\nmulti_controller = GlfwMultiController()\nwidth, height = 640, 480\nxpos, ypos, title = 0, 0, \"Camera\"\n\npoint_fragment_shader = \"point_fragment.glsl\"\npoint_vertex_shader = \"point_vertex.glsl\"\n\nvertices = np.concatenate((vertices, np.array([vertices[0, :]])))\nbig_point = np.array([[0, 0, 0]])\nmulti_mesh_model = MultiMeshModel(\n [\n {\n \"name\": \"bounding_sphere\",\n \"type\": \"mesh\",\n \"mesh\": \"sphere.obj\",\n \"M\": gm.uniform_scale(5.0).dot(gm.translate(0, -0.1, 0)),\n \"fragment\": \"wireframe_fragment.glsl\",\n \"vertex\": \"wireframe_vertex.glsl\",\n \"geometry\": \"wireframe_geometry.glsl\",\n \"color\": np.array([1.0, 1.0, 1.0]),\n \"opacity\": 0.5,\n },\n {\n \"name\": \"points\",\n \"type\": \"points\",\n \"mesh\": vertices,\n \"M\": gm.uniform_scale(0.5 / radius).dot(\n gm.translate(-C[0], -C[1], -C[2])\n ),\n \"fragment\": \"point_fragment.glsl\",\n \"vertex\": \"point_vertex.glsl\",\n \"geometry\": None,\n \"color\": np.array([1.0, 0.0, 0.0]),\n },\n {\n \"name\": \"axes\",\n \"type\": \"lines\",\n \"mesh\": axes(),\n \"R\": np.eye(4),\n \"T\": gm.translate(0, 0, 0),\n \"scale\": gm.uniform_scale(0.20),\n \"M\": gm.uniform_scale(0.20),\n \"fragment\": \"line_fragment.glsl\",\n \"vertex\": \"line_vertex.glsl\",\n \"geometry\": \"line_geometry.glsl\",\n \"color\": np.array([0.0, 1.0, 0.0]),\n },\n ]\n)\n\n\nclass KeyCallbackHandler:\n def __init__(self, data):\n self.data = data\n\n def update_orientation(self, name, which, angle):\n obj = self.data.name_to_mesh_info[name]\n R = obj[\"R\"]\n axis = np.expand_dims(R[:, which][0:3], 0).T\n # import pdb\n # pdb.set_trace()\n M = gm.rotate(angle, axis)\n obj[\"R\"] = gm.rotate(angle, axis).dot(R)\n\n def update_translation(self, name, tx, ty, tz):\n obj = self.data.name_to_mesh_info[name]\n obj[\"T\"] = gm.translate(tx, ty, tz).dot(obj[\"T\"])\n\n def update_model_matrix(self, name):\n obj = self.data.name_to_mesh_info[name]\n obj[\"M\"] = np.linalg.multi_dot([obj[\"T\"], obj[\"R\"], obj[\"scale\"]])\n\n def key_handler(self, key, scancode, action, mods):\n if key == glfw.KEY_W and action == glfw.PRESS:\n self.update_translation(\"axes\", 0, 0, 0.025)\n elif key == glfw.KEY_A and action == glfw.PRESS:\n self.update_translation(\"axes\", -0.025, 0, 0)\n elif key == glfw.KEY_S and action == glfw.PRESS:\n self.update_translation(\"axes\", 0, 0, -0.025)\n elif key == glfw.KEY_D and action == glfw.PRESS:\n self.update_translation(\"axes\", 0.025, 0, 0.0)\n elif key == glfw.KEY_R and action == glfw.PRESS:\n self.update_translation(\"axes\", 0, 0.025, 0.0)\n elif key == glfw.KEY_F and action == glfw.PRESS:\n self.update_translation(\"axes\", 0, -0.025, 0.0)\n elif key == glfw.KEY_U and action == glfw.PRESS:\n self.update_orientation(\"axes\", 0, 0.25)\n elif key == glfw.KEY_J and action == glfw.PRESS:\n self.update_orientation(\"axes\", 0, -0.25)\n elif key == glfw.KEY_H and action == glfw.PRESS:\n self.update_orientation(\"axes\", 1, 0.25)\n elif key == glfw.KEY_K and action == glfw.PRESS:\n self.update_orientation(\"axes\", 1, -0.25)\n elif key == glfw.KEY_O and action == glfw.PRESS:\n self.update_orientation(\"axes\", 2, 0.25)\n elif key == glfw.KEY_L and action == glfw.PRESS:\n self.update_orientation(\"axes\", 2, -0.25)\n elif key == glfw.KEY_SPACE and action == glfw.PRESS:\n print(\"Render!\")\n obj = self.data.name_to_mesh_info[\"axes\"]\n self.update_model_matrix(\"axes\")\n\n\nmulti_mesh_view = MultiMeshView()\neye = [0.0, 0.0, 2.0, 1.0]\nat = [0.0, 0.0, 0.0, 1.0]\nup = [0.0, 1.0, 0.0, 1.0]\nfov = 45.0\nnear = 0.0001\nfar = 100\nlight_position = [0.0, 0.0, 4.0]\nmulti_mesh_view.set_camera(eye, at, up, fov, near, far)\nmulti_mesh_view.set_light_position(light_position)\n\nmesh_controller = GlfwController(\n width, height, xpos, ypos, title, multi_mesh_view, multi_mesh_model\n)\nmesh_controller.register_user_key_callback(KeyCallbackHandler(multi_mesh_model))\nmulti_controller.add(mesh_controller)\n\nimage_fragment_shader = \"image_fragment.glsl\"\nimage_vertex_shader = \"image_vertex.glsl\"\n\noutput_image = (\n to_numpy_img(model(to_torch_pose(poses[0])))\n if load_model\n else checker_board()\n)\n\nimage_model = ImageModel(output_image)\nimage_view = ImageView(image_fragment_shader, image_vertex_shader)\n\nimage_controller = GlfwController(\n 400, 300, 500, 100, \"Image View\", image_view, image_model\n)\nmulti_controller.add(image_controller)\n\nmulti_controller.run()\n",
"import cv2\nimport glfw\nimport importlib\nimport numpy as np\nimport pkgutil\nimport tasks\nimport time\nimport torch\nimport igl\nfrom evaluator import Evaluator\nfrom glfw_controller import *\nfrom image_view import *\nfrom mesh_view import MultiMeshModel\nfrom mesh_view import MultiMeshView\nfrom multiprocessing import Process\nfrom video_capture import VideoCapture\nfrom torch.autograd import Variable\nfrom tasks.camera_to_image import CfgLoader\nimport graphics_math as gm\n\n\ndef render_image(model, pose):\n pose = torch.from_numpy(np.reshape(poses[0], (1, 1, 1, 7)).astype(np.\n float32))\n pose = Variable(pose).cpu()\n img = model(pose)\n return img\n\n\ndef checker_board():\n return cv2.cvtColor(cv2.imread('checkerboard.jpg'), cv2.COLOR_BGR2RGB)\n\n\ndef to_img(x):\n x = 0.5 * (x + 1)\n x = x.clamp(0, 1)\n x = x.view(x.size(0), 3, 128, 128)\n return x\n\n\ndef to_numpy_img(x):\n x = to_img(x)\n x = x.detach().numpy().squeeze() if len(x.shape) == 4 else x\n x = np.transpose(x, (1, 2, 0))\n x *= 255.0\n x = np.clip(x, 0.0, 255.0)\n x = x.astype(np.uint8)\n x = cv2.cvtColor(x, cv2.COLOR_BGR2RGB)\n return x\n\n\ndef to_torch_pose(x):\n x = torch.from_numpy(np.reshape(x, (1, 1, 1, 7)).astype(np.float32))\n x = Variable(x).cpu()\n return x\n\n\ndef read_poses(pose_file):\n lines = open(pose_file).read().splitlines()\n poses = [[float(z) for z in x.split()[1:]] for x in lines]\n return poses\n\n\ndef axes():\n return np.array([[0, 0, 0], [0, 1, 0], [1, 0, 0], [0, 0, 1]]), np.array([\n 0, 1, 0, 2, 0, 3]), np.array([[0.0, 1.0, 0.0], [0.0, 1.0, 0.0], [\n 0.0, 0.0, 1.0], [1.0, 0.0, 0.0]])\n\n\ndef update_axes(which, angle, orientation):\n axis = orientation[:, which]\n transformation = gm.rotate(angle, axis)\n orientation = transformation.dot(orientation)\n return orientation\n\n\nload_configuration = True\nload_model = True\nload_weights = True\nload_frames = True\nload_poses = True\nif load_configuration:\n print(f'Loading configuration ...')\n cfg = CfgLoader().get_cfg('cpu')\n if load_model:\n print('Loading model ...')\n model = cfg['model']\n if load_weights:\n print('Loading model weights ...')\n weights_file = cfg['weights_file']\n model.load_state_dict(torch.load(f'./{weights_file}'))\nposes_file = (f\"{cfg['target_dir']}/poses.txt\" if load_configuration else\n 'targets/camera_to_image/poses.txt')\nmovie_file = (f\"{cfg['target_dir']}/movie.mov\" if load_configuration else\n 'targets/camera_to_image/movie.mov')\nif load_frames:\n print(f'Loading frames: ./{movie_file} ...')\n frames = VideoCapture(f'./{movie_file}')\nif load_poses:\n print(f'Loading poses: ./{poses_file} ...')\n poses = read_poses(f'./{poses_file}')\n poses = [[float(x) for x in l.split()[1:]] for l in open(poses_file).\n read().splitlines()]\n poses = np.array(poses)\nprint('Finding bounding sphere ...')\ntranslations = np.zeros((poses.shape[0], 4))\ntranslations[:, 0:3] = poses[:, 0:3]\ntranslations[:, 3] = 1\nquaternions = poses[:, 3:7]\nvertices = translations[:, 0:3]\npoints = translations.T\nnum_points = translations.shape[0]\nA = 2 * points\nA[3, :] = 1\nf = np.zeros((1, num_points))\nfor i in range(num_points):\n f[0, i] = np.linalg.norm(points[0:3, i]) ** 2\nC, res, rank, svals = np.linalg.lstsq(A.T, f.T, rcond=None)\nradius = (np.linalg.norm(C[0:3]) ** 2 + C[3]) ** (1 / 2)\nprint(f'C = {C}, R = {(np.linalg.norm(C[0:3]) ** 2 + C[3]) ** (1 / 2)}')\nprint(f'C[0] = {C[0]}, C[1] = {C[1]}, C[2] = {C[2]}')\napp = GlfwApp()\napp.init()\nmulti_controller = GlfwMultiController()\nwidth, height = 640, 480\nxpos, ypos, title = 0, 0, 'Camera'\npoint_fragment_shader = 'point_fragment.glsl'\npoint_vertex_shader = 'point_vertex.glsl'\nvertices = np.concatenate((vertices, np.array([vertices[0, :]])))\nbig_point = np.array([[0, 0, 0]])\nmulti_mesh_model = MultiMeshModel([{'name': 'bounding_sphere', 'type':\n 'mesh', 'mesh': 'sphere.obj', 'M': gm.uniform_scale(5.0).dot(gm.\n translate(0, -0.1, 0)), 'fragment': 'wireframe_fragment.glsl', 'vertex':\n 'wireframe_vertex.glsl', 'geometry': 'wireframe_geometry.glsl', 'color':\n np.array([1.0, 1.0, 1.0]), 'opacity': 0.5}, {'name': 'points', 'type':\n 'points', 'mesh': vertices, 'M': gm.uniform_scale(0.5 / radius).dot(gm.\n translate(-C[0], -C[1], -C[2])), 'fragment': 'point_fragment.glsl',\n 'vertex': 'point_vertex.glsl', 'geometry': None, 'color': np.array([1.0,\n 0.0, 0.0])}, {'name': 'axes', 'type': 'lines', 'mesh': axes(), 'R': np.\n eye(4), 'T': gm.translate(0, 0, 0), 'scale': gm.uniform_scale(0.2), 'M':\n gm.uniform_scale(0.2), 'fragment': 'line_fragment.glsl', 'vertex':\n 'line_vertex.glsl', 'geometry': 'line_geometry.glsl', 'color': np.array\n ([0.0, 1.0, 0.0])}])\n\n\nclass KeyCallbackHandler:\n\n def __init__(self, data):\n self.data = data\n\n def update_orientation(self, name, which, angle):\n obj = self.data.name_to_mesh_info[name]\n R = obj['R']\n axis = np.expand_dims(R[:, which][0:3], 0).T\n M = gm.rotate(angle, axis)\n obj['R'] = gm.rotate(angle, axis).dot(R)\n\n def update_translation(self, name, tx, ty, tz):\n obj = self.data.name_to_mesh_info[name]\n obj['T'] = gm.translate(tx, ty, tz).dot(obj['T'])\n\n def update_model_matrix(self, name):\n obj = self.data.name_to_mesh_info[name]\n obj['M'] = np.linalg.multi_dot([obj['T'], obj['R'], obj['scale']])\n\n def key_handler(self, key, scancode, action, mods):\n if key == glfw.KEY_W and action == glfw.PRESS:\n self.update_translation('axes', 0, 0, 0.025)\n elif key == glfw.KEY_A and action == glfw.PRESS:\n self.update_translation('axes', -0.025, 0, 0)\n elif key == glfw.KEY_S and action == glfw.PRESS:\n self.update_translation('axes', 0, 0, -0.025)\n elif key == glfw.KEY_D and action == glfw.PRESS:\n self.update_translation('axes', 0.025, 0, 0.0)\n elif key == glfw.KEY_R and action == glfw.PRESS:\n self.update_translation('axes', 0, 0.025, 0.0)\n elif key == glfw.KEY_F and action == glfw.PRESS:\n self.update_translation('axes', 0, -0.025, 0.0)\n elif key == glfw.KEY_U and action == glfw.PRESS:\n self.update_orientation('axes', 0, 0.25)\n elif key == glfw.KEY_J and action == glfw.PRESS:\n self.update_orientation('axes', 0, -0.25)\n elif key == glfw.KEY_H and action == glfw.PRESS:\n self.update_orientation('axes', 1, 0.25)\n elif key == glfw.KEY_K and action == glfw.PRESS:\n self.update_orientation('axes', 1, -0.25)\n elif key == glfw.KEY_O and action == glfw.PRESS:\n self.update_orientation('axes', 2, 0.25)\n elif key == glfw.KEY_L and action == glfw.PRESS:\n self.update_orientation('axes', 2, -0.25)\n elif key == glfw.KEY_SPACE and action == glfw.PRESS:\n print('Render!')\n obj = self.data.name_to_mesh_info['axes']\n self.update_model_matrix('axes')\n\n\nmulti_mesh_view = MultiMeshView()\neye = [0.0, 0.0, 2.0, 1.0]\nat = [0.0, 0.0, 0.0, 1.0]\nup = [0.0, 1.0, 0.0, 1.0]\nfov = 45.0\nnear = 0.0001\nfar = 100\nlight_position = [0.0, 0.0, 4.0]\nmulti_mesh_view.set_camera(eye, at, up, fov, near, far)\nmulti_mesh_view.set_light_position(light_position)\nmesh_controller = GlfwController(width, height, xpos, ypos, title,\n multi_mesh_view, multi_mesh_model)\nmesh_controller.register_user_key_callback(KeyCallbackHandler(multi_mesh_model)\n )\nmulti_controller.add(mesh_controller)\nimage_fragment_shader = 'image_fragment.glsl'\nimage_vertex_shader = 'image_vertex.glsl'\noutput_image = to_numpy_img(model(to_torch_pose(poses[0]))\n ) if load_model else checker_board()\nimage_model = ImageModel(output_image)\nimage_view = ImageView(image_fragment_shader, image_vertex_shader)\nimage_controller = GlfwController(400, 300, 500, 100, 'Image View',\n image_view, image_model)\nmulti_controller.add(image_controller)\nmulti_controller.run()\n",
"<import token>\n\n\ndef render_image(model, pose):\n pose = torch.from_numpy(np.reshape(poses[0], (1, 1, 1, 7)).astype(np.\n float32))\n pose = Variable(pose).cpu()\n img = model(pose)\n return img\n\n\ndef checker_board():\n return cv2.cvtColor(cv2.imread('checkerboard.jpg'), cv2.COLOR_BGR2RGB)\n\n\ndef to_img(x):\n x = 0.5 * (x + 1)\n x = x.clamp(0, 1)\n x = x.view(x.size(0), 3, 128, 128)\n return x\n\n\ndef to_numpy_img(x):\n x = to_img(x)\n x = x.detach().numpy().squeeze() if len(x.shape) == 4 else x\n x = np.transpose(x, (1, 2, 0))\n x *= 255.0\n x = np.clip(x, 0.0, 255.0)\n x = x.astype(np.uint8)\n x = cv2.cvtColor(x, cv2.COLOR_BGR2RGB)\n return x\n\n\ndef to_torch_pose(x):\n x = torch.from_numpy(np.reshape(x, (1, 1, 1, 7)).astype(np.float32))\n x = Variable(x).cpu()\n return x\n\n\ndef read_poses(pose_file):\n lines = open(pose_file).read().splitlines()\n poses = [[float(z) for z in x.split()[1:]] for x in lines]\n return poses\n\n\ndef axes():\n return np.array([[0, 0, 0], [0, 1, 0], [1, 0, 0], [0, 0, 1]]), np.array([\n 0, 1, 0, 2, 0, 3]), np.array([[0.0, 1.0, 0.0], [0.0, 1.0, 0.0], [\n 0.0, 0.0, 1.0], [1.0, 0.0, 0.0]])\n\n\ndef update_axes(which, angle, orientation):\n axis = orientation[:, which]\n transformation = gm.rotate(angle, axis)\n orientation = transformation.dot(orientation)\n return orientation\n\n\nload_configuration = True\nload_model = True\nload_weights = True\nload_frames = True\nload_poses = True\nif load_configuration:\n print(f'Loading configuration ...')\n cfg = CfgLoader().get_cfg('cpu')\n if load_model:\n print('Loading model ...')\n model = cfg['model']\n if load_weights:\n print('Loading model weights ...')\n weights_file = cfg['weights_file']\n model.load_state_dict(torch.load(f'./{weights_file}'))\nposes_file = (f\"{cfg['target_dir']}/poses.txt\" if load_configuration else\n 'targets/camera_to_image/poses.txt')\nmovie_file = (f\"{cfg['target_dir']}/movie.mov\" if load_configuration else\n 'targets/camera_to_image/movie.mov')\nif load_frames:\n print(f'Loading frames: ./{movie_file} ...')\n frames = VideoCapture(f'./{movie_file}')\nif load_poses:\n print(f'Loading poses: ./{poses_file} ...')\n poses = read_poses(f'./{poses_file}')\n poses = [[float(x) for x in l.split()[1:]] for l in open(poses_file).\n read().splitlines()]\n poses = np.array(poses)\nprint('Finding bounding sphere ...')\ntranslations = np.zeros((poses.shape[0], 4))\ntranslations[:, 0:3] = poses[:, 0:3]\ntranslations[:, 3] = 1\nquaternions = poses[:, 3:7]\nvertices = translations[:, 0:3]\npoints = translations.T\nnum_points = translations.shape[0]\nA = 2 * points\nA[3, :] = 1\nf = np.zeros((1, num_points))\nfor i in range(num_points):\n f[0, i] = np.linalg.norm(points[0:3, i]) ** 2\nC, res, rank, svals = np.linalg.lstsq(A.T, f.T, rcond=None)\nradius = (np.linalg.norm(C[0:3]) ** 2 + C[3]) ** (1 / 2)\nprint(f'C = {C}, R = {(np.linalg.norm(C[0:3]) ** 2 + C[3]) ** (1 / 2)}')\nprint(f'C[0] = {C[0]}, C[1] = {C[1]}, C[2] = {C[2]}')\napp = GlfwApp()\napp.init()\nmulti_controller = GlfwMultiController()\nwidth, height = 640, 480\nxpos, ypos, title = 0, 0, 'Camera'\npoint_fragment_shader = 'point_fragment.glsl'\npoint_vertex_shader = 'point_vertex.glsl'\nvertices = np.concatenate((vertices, np.array([vertices[0, :]])))\nbig_point = np.array([[0, 0, 0]])\nmulti_mesh_model = MultiMeshModel([{'name': 'bounding_sphere', 'type':\n 'mesh', 'mesh': 'sphere.obj', 'M': gm.uniform_scale(5.0).dot(gm.\n translate(0, -0.1, 0)), 'fragment': 'wireframe_fragment.glsl', 'vertex':\n 'wireframe_vertex.glsl', 'geometry': 'wireframe_geometry.glsl', 'color':\n np.array([1.0, 1.0, 1.0]), 'opacity': 0.5}, {'name': 'points', 'type':\n 'points', 'mesh': vertices, 'M': gm.uniform_scale(0.5 / radius).dot(gm.\n translate(-C[0], -C[1], -C[2])), 'fragment': 'point_fragment.glsl',\n 'vertex': 'point_vertex.glsl', 'geometry': None, 'color': np.array([1.0,\n 0.0, 0.0])}, {'name': 'axes', 'type': 'lines', 'mesh': axes(), 'R': np.\n eye(4), 'T': gm.translate(0, 0, 0), 'scale': gm.uniform_scale(0.2), 'M':\n gm.uniform_scale(0.2), 'fragment': 'line_fragment.glsl', 'vertex':\n 'line_vertex.glsl', 'geometry': 'line_geometry.glsl', 'color': np.array\n ([0.0, 1.0, 0.0])}])\n\n\nclass KeyCallbackHandler:\n\n def __init__(self, data):\n self.data = data\n\n def update_orientation(self, name, which, angle):\n obj = self.data.name_to_mesh_info[name]\n R = obj['R']\n axis = np.expand_dims(R[:, which][0:3], 0).T\n M = gm.rotate(angle, axis)\n obj['R'] = gm.rotate(angle, axis).dot(R)\n\n def update_translation(self, name, tx, ty, tz):\n obj = self.data.name_to_mesh_info[name]\n obj['T'] = gm.translate(tx, ty, tz).dot(obj['T'])\n\n def update_model_matrix(self, name):\n obj = self.data.name_to_mesh_info[name]\n obj['M'] = np.linalg.multi_dot([obj['T'], obj['R'], obj['scale']])\n\n def key_handler(self, key, scancode, action, mods):\n if key == glfw.KEY_W and action == glfw.PRESS:\n self.update_translation('axes', 0, 0, 0.025)\n elif key == glfw.KEY_A and action == glfw.PRESS:\n self.update_translation('axes', -0.025, 0, 0)\n elif key == glfw.KEY_S and action == glfw.PRESS:\n self.update_translation('axes', 0, 0, -0.025)\n elif key == glfw.KEY_D and action == glfw.PRESS:\n self.update_translation('axes', 0.025, 0, 0.0)\n elif key == glfw.KEY_R and action == glfw.PRESS:\n self.update_translation('axes', 0, 0.025, 0.0)\n elif key == glfw.KEY_F and action == glfw.PRESS:\n self.update_translation('axes', 0, -0.025, 0.0)\n elif key == glfw.KEY_U and action == glfw.PRESS:\n self.update_orientation('axes', 0, 0.25)\n elif key == glfw.KEY_J and action == glfw.PRESS:\n self.update_orientation('axes', 0, -0.25)\n elif key == glfw.KEY_H and action == glfw.PRESS:\n self.update_orientation('axes', 1, 0.25)\n elif key == glfw.KEY_K and action == glfw.PRESS:\n self.update_orientation('axes', 1, -0.25)\n elif key == glfw.KEY_O and action == glfw.PRESS:\n self.update_orientation('axes', 2, 0.25)\n elif key == glfw.KEY_L and action == glfw.PRESS:\n self.update_orientation('axes', 2, -0.25)\n elif key == glfw.KEY_SPACE and action == glfw.PRESS:\n print('Render!')\n obj = self.data.name_to_mesh_info['axes']\n self.update_model_matrix('axes')\n\n\nmulti_mesh_view = MultiMeshView()\neye = [0.0, 0.0, 2.0, 1.0]\nat = [0.0, 0.0, 0.0, 1.0]\nup = [0.0, 1.0, 0.0, 1.0]\nfov = 45.0\nnear = 0.0001\nfar = 100\nlight_position = [0.0, 0.0, 4.0]\nmulti_mesh_view.set_camera(eye, at, up, fov, near, far)\nmulti_mesh_view.set_light_position(light_position)\nmesh_controller = GlfwController(width, height, xpos, ypos, title,\n multi_mesh_view, multi_mesh_model)\nmesh_controller.register_user_key_callback(KeyCallbackHandler(multi_mesh_model)\n )\nmulti_controller.add(mesh_controller)\nimage_fragment_shader = 'image_fragment.glsl'\nimage_vertex_shader = 'image_vertex.glsl'\noutput_image = to_numpy_img(model(to_torch_pose(poses[0]))\n ) if load_model else checker_board()\nimage_model = ImageModel(output_image)\nimage_view = ImageView(image_fragment_shader, image_vertex_shader)\nimage_controller = GlfwController(400, 300, 500, 100, 'Image View',\n image_view, image_model)\nmulti_controller.add(image_controller)\nmulti_controller.run()\n",
"<import token>\n\n\ndef render_image(model, pose):\n pose = torch.from_numpy(np.reshape(poses[0], (1, 1, 1, 7)).astype(np.\n float32))\n pose = Variable(pose).cpu()\n img = model(pose)\n return img\n\n\ndef checker_board():\n return cv2.cvtColor(cv2.imread('checkerboard.jpg'), cv2.COLOR_BGR2RGB)\n\n\ndef to_img(x):\n x = 0.5 * (x + 1)\n x = x.clamp(0, 1)\n x = x.view(x.size(0), 3, 128, 128)\n return x\n\n\ndef to_numpy_img(x):\n x = to_img(x)\n x = x.detach().numpy().squeeze() if len(x.shape) == 4 else x\n x = np.transpose(x, (1, 2, 0))\n x *= 255.0\n x = np.clip(x, 0.0, 255.0)\n x = x.astype(np.uint8)\n x = cv2.cvtColor(x, cv2.COLOR_BGR2RGB)\n return x\n\n\ndef to_torch_pose(x):\n x = torch.from_numpy(np.reshape(x, (1, 1, 1, 7)).astype(np.float32))\n x = Variable(x).cpu()\n return x\n\n\ndef read_poses(pose_file):\n lines = open(pose_file).read().splitlines()\n poses = [[float(z) for z in x.split()[1:]] for x in lines]\n return poses\n\n\ndef axes():\n return np.array([[0, 0, 0], [0, 1, 0], [1, 0, 0], [0, 0, 1]]), np.array([\n 0, 1, 0, 2, 0, 3]), np.array([[0.0, 1.0, 0.0], [0.0, 1.0, 0.0], [\n 0.0, 0.0, 1.0], [1.0, 0.0, 0.0]])\n\n\ndef update_axes(which, angle, orientation):\n axis = orientation[:, which]\n transformation = gm.rotate(angle, axis)\n orientation = transformation.dot(orientation)\n return orientation\n\n\n<assignment token>\nif load_configuration:\n print(f'Loading configuration ...')\n cfg = CfgLoader().get_cfg('cpu')\n if load_model:\n print('Loading model ...')\n model = cfg['model']\n if load_weights:\n print('Loading model weights ...')\n weights_file = cfg['weights_file']\n model.load_state_dict(torch.load(f'./{weights_file}'))\n<assignment token>\nif load_frames:\n print(f'Loading frames: ./{movie_file} ...')\n frames = VideoCapture(f'./{movie_file}')\nif load_poses:\n print(f'Loading poses: ./{poses_file} ...')\n poses = read_poses(f'./{poses_file}')\n poses = [[float(x) for x in l.split()[1:]] for l in open(poses_file).\n read().splitlines()]\n poses = np.array(poses)\nprint('Finding bounding sphere ...')\n<assignment token>\nfor i in range(num_points):\n f[0, i] = np.linalg.norm(points[0:3, i]) ** 2\n<assignment token>\nprint(f'C = {C}, R = {(np.linalg.norm(C[0:3]) ** 2 + C[3]) ** (1 / 2)}')\nprint(f'C[0] = {C[0]}, C[1] = {C[1]}, C[2] = {C[2]}')\n<assignment token>\napp.init()\n<assignment token>\n\n\nclass KeyCallbackHandler:\n\n def __init__(self, data):\n self.data = data\n\n def update_orientation(self, name, which, angle):\n obj = self.data.name_to_mesh_info[name]\n R = obj['R']\n axis = np.expand_dims(R[:, which][0:3], 0).T\n M = gm.rotate(angle, axis)\n obj['R'] = gm.rotate(angle, axis).dot(R)\n\n def update_translation(self, name, tx, ty, tz):\n obj = self.data.name_to_mesh_info[name]\n obj['T'] = gm.translate(tx, ty, tz).dot(obj['T'])\n\n def update_model_matrix(self, name):\n obj = self.data.name_to_mesh_info[name]\n obj['M'] = np.linalg.multi_dot([obj['T'], obj['R'], obj['scale']])\n\n def key_handler(self, key, scancode, action, mods):\n if key == glfw.KEY_W and action == glfw.PRESS:\n self.update_translation('axes', 0, 0, 0.025)\n elif key == glfw.KEY_A and action == glfw.PRESS:\n self.update_translation('axes', -0.025, 0, 0)\n elif key == glfw.KEY_S and action == glfw.PRESS:\n self.update_translation('axes', 0, 0, -0.025)\n elif key == glfw.KEY_D and action == glfw.PRESS:\n self.update_translation('axes', 0.025, 0, 0.0)\n elif key == glfw.KEY_R and action == glfw.PRESS:\n self.update_translation('axes', 0, 0.025, 0.0)\n elif key == glfw.KEY_F and action == glfw.PRESS:\n self.update_translation('axes', 0, -0.025, 0.0)\n elif key == glfw.KEY_U and action == glfw.PRESS:\n self.update_orientation('axes', 0, 0.25)\n elif key == glfw.KEY_J and action == glfw.PRESS:\n self.update_orientation('axes', 0, -0.25)\n elif key == glfw.KEY_H and action == glfw.PRESS:\n self.update_orientation('axes', 1, 0.25)\n elif key == glfw.KEY_K and action == glfw.PRESS:\n self.update_orientation('axes', 1, -0.25)\n elif key == glfw.KEY_O and action == glfw.PRESS:\n self.update_orientation('axes', 2, 0.25)\n elif key == glfw.KEY_L and action == glfw.PRESS:\n self.update_orientation('axes', 2, -0.25)\n elif key == glfw.KEY_SPACE and action == glfw.PRESS:\n print('Render!')\n obj = self.data.name_to_mesh_info['axes']\n self.update_model_matrix('axes')\n\n\n<assignment token>\nmulti_mesh_view.set_camera(eye, at, up, fov, near, far)\nmulti_mesh_view.set_light_position(light_position)\n<assignment token>\nmesh_controller.register_user_key_callback(KeyCallbackHandler(multi_mesh_model)\n )\nmulti_controller.add(mesh_controller)\n<assignment token>\nmulti_controller.add(image_controller)\nmulti_controller.run()\n",
"<import token>\n\n\ndef render_image(model, pose):\n pose = torch.from_numpy(np.reshape(poses[0], (1, 1, 1, 7)).astype(np.\n float32))\n pose = Variable(pose).cpu()\n img = model(pose)\n return img\n\n\ndef checker_board():\n return cv2.cvtColor(cv2.imread('checkerboard.jpg'), cv2.COLOR_BGR2RGB)\n\n\ndef to_img(x):\n x = 0.5 * (x + 1)\n x = x.clamp(0, 1)\n x = x.view(x.size(0), 3, 128, 128)\n return x\n\n\ndef to_numpy_img(x):\n x = to_img(x)\n x = x.detach().numpy().squeeze() if len(x.shape) == 4 else x\n x = np.transpose(x, (1, 2, 0))\n x *= 255.0\n x = np.clip(x, 0.0, 255.0)\n x = x.astype(np.uint8)\n x = cv2.cvtColor(x, cv2.COLOR_BGR2RGB)\n return x\n\n\ndef to_torch_pose(x):\n x = torch.from_numpy(np.reshape(x, (1, 1, 1, 7)).astype(np.float32))\n x = Variable(x).cpu()\n return x\n\n\ndef read_poses(pose_file):\n lines = open(pose_file).read().splitlines()\n poses = [[float(z) for z in x.split()[1:]] for x in lines]\n return poses\n\n\ndef axes():\n return np.array([[0, 0, 0], [0, 1, 0], [1, 0, 0], [0, 0, 1]]), np.array([\n 0, 1, 0, 2, 0, 3]), np.array([[0.0, 1.0, 0.0], [0.0, 1.0, 0.0], [\n 0.0, 0.0, 1.0], [1.0, 0.0, 0.0]])\n\n\ndef update_axes(which, angle, orientation):\n axis = orientation[:, which]\n transformation = gm.rotate(angle, axis)\n orientation = transformation.dot(orientation)\n return orientation\n\n\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n\n\nclass KeyCallbackHandler:\n\n def __init__(self, data):\n self.data = data\n\n def update_orientation(self, name, which, angle):\n obj = self.data.name_to_mesh_info[name]\n R = obj['R']\n axis = np.expand_dims(R[:, which][0:3], 0).T\n M = gm.rotate(angle, axis)\n obj['R'] = gm.rotate(angle, axis).dot(R)\n\n def update_translation(self, name, tx, ty, tz):\n obj = self.data.name_to_mesh_info[name]\n obj['T'] = gm.translate(tx, ty, tz).dot(obj['T'])\n\n def update_model_matrix(self, name):\n obj = self.data.name_to_mesh_info[name]\n obj['M'] = np.linalg.multi_dot([obj['T'], obj['R'], obj['scale']])\n\n def key_handler(self, key, scancode, action, mods):\n if key == glfw.KEY_W and action == glfw.PRESS:\n self.update_translation('axes', 0, 0, 0.025)\n elif key == glfw.KEY_A and action == glfw.PRESS:\n self.update_translation('axes', -0.025, 0, 0)\n elif key == glfw.KEY_S and action == glfw.PRESS:\n self.update_translation('axes', 0, 0, -0.025)\n elif key == glfw.KEY_D and action == glfw.PRESS:\n self.update_translation('axes', 0.025, 0, 0.0)\n elif key == glfw.KEY_R and action == glfw.PRESS:\n self.update_translation('axes', 0, 0.025, 0.0)\n elif key == glfw.KEY_F and action == glfw.PRESS:\n self.update_translation('axes', 0, -0.025, 0.0)\n elif key == glfw.KEY_U and action == glfw.PRESS:\n self.update_orientation('axes', 0, 0.25)\n elif key == glfw.KEY_J and action == glfw.PRESS:\n self.update_orientation('axes', 0, -0.25)\n elif key == glfw.KEY_H and action == glfw.PRESS:\n self.update_orientation('axes', 1, 0.25)\n elif key == glfw.KEY_K and action == glfw.PRESS:\n self.update_orientation('axes', 1, -0.25)\n elif key == glfw.KEY_O and action == glfw.PRESS:\n self.update_orientation('axes', 2, 0.25)\n elif key == glfw.KEY_L and action == glfw.PRESS:\n self.update_orientation('axes', 2, -0.25)\n elif key == glfw.KEY_SPACE and action == glfw.PRESS:\n print('Render!')\n obj = self.data.name_to_mesh_info['axes']\n self.update_model_matrix('axes')\n\n\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n",
"<import token>\n\n\ndef render_image(model, pose):\n pose = torch.from_numpy(np.reshape(poses[0], (1, 1, 1, 7)).astype(np.\n float32))\n pose = Variable(pose).cpu()\n img = model(pose)\n return img\n\n\ndef checker_board():\n return cv2.cvtColor(cv2.imread('checkerboard.jpg'), cv2.COLOR_BGR2RGB)\n\n\n<function token>\n\n\ndef to_numpy_img(x):\n x = to_img(x)\n x = x.detach().numpy().squeeze() if len(x.shape) == 4 else x\n x = np.transpose(x, (1, 2, 0))\n x *= 255.0\n x = np.clip(x, 0.0, 255.0)\n x = x.astype(np.uint8)\n x = cv2.cvtColor(x, cv2.COLOR_BGR2RGB)\n return x\n\n\ndef to_torch_pose(x):\n x = torch.from_numpy(np.reshape(x, (1, 1, 1, 7)).astype(np.float32))\n x = Variable(x).cpu()\n return x\n\n\ndef read_poses(pose_file):\n lines = open(pose_file).read().splitlines()\n poses = [[float(z) for z in x.split()[1:]] for x in lines]\n return poses\n\n\ndef axes():\n return np.array([[0, 0, 0], [0, 1, 0], [1, 0, 0], [0, 0, 1]]), np.array([\n 0, 1, 0, 2, 0, 3]), np.array([[0.0, 1.0, 0.0], [0.0, 1.0, 0.0], [\n 0.0, 0.0, 1.0], [1.0, 0.0, 0.0]])\n\n\ndef update_axes(which, angle, orientation):\n axis = orientation[:, which]\n transformation = gm.rotate(angle, axis)\n orientation = transformation.dot(orientation)\n return orientation\n\n\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n\n\nclass KeyCallbackHandler:\n\n def __init__(self, data):\n self.data = data\n\n def update_orientation(self, name, which, angle):\n obj = self.data.name_to_mesh_info[name]\n R = obj['R']\n axis = np.expand_dims(R[:, which][0:3], 0).T\n M = gm.rotate(angle, axis)\n obj['R'] = gm.rotate(angle, axis).dot(R)\n\n def update_translation(self, name, tx, ty, tz):\n obj = self.data.name_to_mesh_info[name]\n obj['T'] = gm.translate(tx, ty, tz).dot(obj['T'])\n\n def update_model_matrix(self, name):\n obj = self.data.name_to_mesh_info[name]\n obj['M'] = np.linalg.multi_dot([obj['T'], obj['R'], obj['scale']])\n\n def key_handler(self, key, scancode, action, mods):\n if key == glfw.KEY_W and action == glfw.PRESS:\n self.update_translation('axes', 0, 0, 0.025)\n elif key == glfw.KEY_A and action == glfw.PRESS:\n self.update_translation('axes', -0.025, 0, 0)\n elif key == glfw.KEY_S and action == glfw.PRESS:\n self.update_translation('axes', 0, 0, -0.025)\n elif key == glfw.KEY_D and action == glfw.PRESS:\n self.update_translation('axes', 0.025, 0, 0.0)\n elif key == glfw.KEY_R and action == glfw.PRESS:\n self.update_translation('axes', 0, 0.025, 0.0)\n elif key == glfw.KEY_F and action == glfw.PRESS:\n self.update_translation('axes', 0, -0.025, 0.0)\n elif key == glfw.KEY_U and action == glfw.PRESS:\n self.update_orientation('axes', 0, 0.25)\n elif key == glfw.KEY_J and action == glfw.PRESS:\n self.update_orientation('axes', 0, -0.25)\n elif key == glfw.KEY_H and action == glfw.PRESS:\n self.update_orientation('axes', 1, 0.25)\n elif key == glfw.KEY_K and action == glfw.PRESS:\n self.update_orientation('axes', 1, -0.25)\n elif key == glfw.KEY_O and action == glfw.PRESS:\n self.update_orientation('axes', 2, 0.25)\n elif key == glfw.KEY_L and action == glfw.PRESS:\n self.update_orientation('axes', 2, -0.25)\n elif key == glfw.KEY_SPACE and action == glfw.PRESS:\n print('Render!')\n obj = self.data.name_to_mesh_info['axes']\n self.update_model_matrix('axes')\n\n\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n",
"<import token>\n\n\ndef render_image(model, pose):\n pose = torch.from_numpy(np.reshape(poses[0], (1, 1, 1, 7)).astype(np.\n float32))\n pose = Variable(pose).cpu()\n img = model(pose)\n return img\n\n\ndef checker_board():\n return cv2.cvtColor(cv2.imread('checkerboard.jpg'), cv2.COLOR_BGR2RGB)\n\n\n<function token>\n\n\ndef to_numpy_img(x):\n x = to_img(x)\n x = x.detach().numpy().squeeze() if len(x.shape) == 4 else x\n x = np.transpose(x, (1, 2, 0))\n x *= 255.0\n x = np.clip(x, 0.0, 255.0)\n x = x.astype(np.uint8)\n x = cv2.cvtColor(x, cv2.COLOR_BGR2RGB)\n return x\n\n\n<function token>\n\n\ndef read_poses(pose_file):\n lines = open(pose_file).read().splitlines()\n poses = [[float(z) for z in x.split()[1:]] for x in lines]\n return poses\n\n\ndef axes():\n return np.array([[0, 0, 0], [0, 1, 0], [1, 0, 0], [0, 0, 1]]), np.array([\n 0, 1, 0, 2, 0, 3]), np.array([[0.0, 1.0, 0.0], [0.0, 1.0, 0.0], [\n 0.0, 0.0, 1.0], [1.0, 0.0, 0.0]])\n\n\ndef update_axes(which, angle, orientation):\n axis = orientation[:, which]\n transformation = gm.rotate(angle, axis)\n orientation = transformation.dot(orientation)\n return orientation\n\n\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n\n\nclass KeyCallbackHandler:\n\n def __init__(self, data):\n self.data = data\n\n def update_orientation(self, name, which, angle):\n obj = self.data.name_to_mesh_info[name]\n R = obj['R']\n axis = np.expand_dims(R[:, which][0:3], 0).T\n M = gm.rotate(angle, axis)\n obj['R'] = gm.rotate(angle, axis).dot(R)\n\n def update_translation(self, name, tx, ty, tz):\n obj = self.data.name_to_mesh_info[name]\n obj['T'] = gm.translate(tx, ty, tz).dot(obj['T'])\n\n def update_model_matrix(self, name):\n obj = self.data.name_to_mesh_info[name]\n obj['M'] = np.linalg.multi_dot([obj['T'], obj['R'], obj['scale']])\n\n def key_handler(self, key, scancode, action, mods):\n if key == glfw.KEY_W and action == glfw.PRESS:\n self.update_translation('axes', 0, 0, 0.025)\n elif key == glfw.KEY_A and action == glfw.PRESS:\n self.update_translation('axes', -0.025, 0, 0)\n elif key == glfw.KEY_S and action == glfw.PRESS:\n self.update_translation('axes', 0, 0, -0.025)\n elif key == glfw.KEY_D and action == glfw.PRESS:\n self.update_translation('axes', 0.025, 0, 0.0)\n elif key == glfw.KEY_R and action == glfw.PRESS:\n self.update_translation('axes', 0, 0.025, 0.0)\n elif key == glfw.KEY_F and action == glfw.PRESS:\n self.update_translation('axes', 0, -0.025, 0.0)\n elif key == glfw.KEY_U and action == glfw.PRESS:\n self.update_orientation('axes', 0, 0.25)\n elif key == glfw.KEY_J and action == glfw.PRESS:\n self.update_orientation('axes', 0, -0.25)\n elif key == glfw.KEY_H and action == glfw.PRESS:\n self.update_orientation('axes', 1, 0.25)\n elif key == glfw.KEY_K and action == glfw.PRESS:\n self.update_orientation('axes', 1, -0.25)\n elif key == glfw.KEY_O and action == glfw.PRESS:\n self.update_orientation('axes', 2, 0.25)\n elif key == glfw.KEY_L and action == glfw.PRESS:\n self.update_orientation('axes', 2, -0.25)\n elif key == glfw.KEY_SPACE and action == glfw.PRESS:\n print('Render!')\n obj = self.data.name_to_mesh_info['axes']\n self.update_model_matrix('axes')\n\n\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n",
"<import token>\n\n\ndef render_image(model, pose):\n pose = torch.from_numpy(np.reshape(poses[0], (1, 1, 1, 7)).astype(np.\n float32))\n pose = Variable(pose).cpu()\n img = model(pose)\n return img\n\n\ndef checker_board():\n return cv2.cvtColor(cv2.imread('checkerboard.jpg'), cv2.COLOR_BGR2RGB)\n\n\n<function token>\n\n\ndef to_numpy_img(x):\n x = to_img(x)\n x = x.detach().numpy().squeeze() if len(x.shape) == 4 else x\n x = np.transpose(x, (1, 2, 0))\n x *= 255.0\n x = np.clip(x, 0.0, 255.0)\n x = x.astype(np.uint8)\n x = cv2.cvtColor(x, cv2.COLOR_BGR2RGB)\n return x\n\n\n<function token>\n<function token>\n\n\ndef axes():\n return np.array([[0, 0, 0], [0, 1, 0], [1, 0, 0], [0, 0, 1]]), np.array([\n 0, 1, 0, 2, 0, 3]), np.array([[0.0, 1.0, 0.0], [0.0, 1.0, 0.0], [\n 0.0, 0.0, 1.0], [1.0, 0.0, 0.0]])\n\n\ndef update_axes(which, angle, orientation):\n axis = orientation[:, which]\n transformation = gm.rotate(angle, axis)\n orientation = transformation.dot(orientation)\n return orientation\n\n\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n\n\nclass KeyCallbackHandler:\n\n def __init__(self, data):\n self.data = data\n\n def update_orientation(self, name, which, angle):\n obj = self.data.name_to_mesh_info[name]\n R = obj['R']\n axis = np.expand_dims(R[:, which][0:3], 0).T\n M = gm.rotate(angle, axis)\n obj['R'] = gm.rotate(angle, axis).dot(R)\n\n def update_translation(self, name, tx, ty, tz):\n obj = self.data.name_to_mesh_info[name]\n obj['T'] = gm.translate(tx, ty, tz).dot(obj['T'])\n\n def update_model_matrix(self, name):\n obj = self.data.name_to_mesh_info[name]\n obj['M'] = np.linalg.multi_dot([obj['T'], obj['R'], obj['scale']])\n\n def key_handler(self, key, scancode, action, mods):\n if key == glfw.KEY_W and action == glfw.PRESS:\n self.update_translation('axes', 0, 0, 0.025)\n elif key == glfw.KEY_A and action == glfw.PRESS:\n self.update_translation('axes', -0.025, 0, 0)\n elif key == glfw.KEY_S and action == glfw.PRESS:\n self.update_translation('axes', 0, 0, -0.025)\n elif key == glfw.KEY_D and action == glfw.PRESS:\n self.update_translation('axes', 0.025, 0, 0.0)\n elif key == glfw.KEY_R and action == glfw.PRESS:\n self.update_translation('axes', 0, 0.025, 0.0)\n elif key == glfw.KEY_F and action == glfw.PRESS:\n self.update_translation('axes', 0, -0.025, 0.0)\n elif key == glfw.KEY_U and action == glfw.PRESS:\n self.update_orientation('axes', 0, 0.25)\n elif key == glfw.KEY_J and action == glfw.PRESS:\n self.update_orientation('axes', 0, -0.25)\n elif key == glfw.KEY_H and action == glfw.PRESS:\n self.update_orientation('axes', 1, 0.25)\n elif key == glfw.KEY_K and action == glfw.PRESS:\n self.update_orientation('axes', 1, -0.25)\n elif key == glfw.KEY_O and action == glfw.PRESS:\n self.update_orientation('axes', 2, 0.25)\n elif key == glfw.KEY_L and action == glfw.PRESS:\n self.update_orientation('axes', 2, -0.25)\n elif key == glfw.KEY_SPACE and action == glfw.PRESS:\n print('Render!')\n obj = self.data.name_to_mesh_info['axes']\n self.update_model_matrix('axes')\n\n\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n",
"<import token>\n<function token>\n\n\ndef checker_board():\n return cv2.cvtColor(cv2.imread('checkerboard.jpg'), cv2.COLOR_BGR2RGB)\n\n\n<function token>\n\n\ndef to_numpy_img(x):\n x = to_img(x)\n x = x.detach().numpy().squeeze() if len(x.shape) == 4 else x\n x = np.transpose(x, (1, 2, 0))\n x *= 255.0\n x = np.clip(x, 0.0, 255.0)\n x = x.astype(np.uint8)\n x = cv2.cvtColor(x, cv2.COLOR_BGR2RGB)\n return x\n\n\n<function token>\n<function token>\n\n\ndef axes():\n return np.array([[0, 0, 0], [0, 1, 0], [1, 0, 0], [0, 0, 1]]), np.array([\n 0, 1, 0, 2, 0, 3]), np.array([[0.0, 1.0, 0.0], [0.0, 1.0, 0.0], [\n 0.0, 0.0, 1.0], [1.0, 0.0, 0.0]])\n\n\ndef update_axes(which, angle, orientation):\n axis = orientation[:, which]\n transformation = gm.rotate(angle, axis)\n orientation = transformation.dot(orientation)\n return orientation\n\n\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n\n\nclass KeyCallbackHandler:\n\n def __init__(self, data):\n self.data = data\n\n def update_orientation(self, name, which, angle):\n obj = self.data.name_to_mesh_info[name]\n R = obj['R']\n axis = np.expand_dims(R[:, which][0:3], 0).T\n M = gm.rotate(angle, axis)\n obj['R'] = gm.rotate(angle, axis).dot(R)\n\n def update_translation(self, name, tx, ty, tz):\n obj = self.data.name_to_mesh_info[name]\n obj['T'] = gm.translate(tx, ty, tz).dot(obj['T'])\n\n def update_model_matrix(self, name):\n obj = self.data.name_to_mesh_info[name]\n obj['M'] = np.linalg.multi_dot([obj['T'], obj['R'], obj['scale']])\n\n def key_handler(self, key, scancode, action, mods):\n if key == glfw.KEY_W and action == glfw.PRESS:\n self.update_translation('axes', 0, 0, 0.025)\n elif key == glfw.KEY_A and action == glfw.PRESS:\n self.update_translation('axes', -0.025, 0, 0)\n elif key == glfw.KEY_S and action == glfw.PRESS:\n self.update_translation('axes', 0, 0, -0.025)\n elif key == glfw.KEY_D and action == glfw.PRESS:\n self.update_translation('axes', 0.025, 0, 0.0)\n elif key == glfw.KEY_R and action == glfw.PRESS:\n self.update_translation('axes', 0, 0.025, 0.0)\n elif key == glfw.KEY_F and action == glfw.PRESS:\n self.update_translation('axes', 0, -0.025, 0.0)\n elif key == glfw.KEY_U and action == glfw.PRESS:\n self.update_orientation('axes', 0, 0.25)\n elif key == glfw.KEY_J and action == glfw.PRESS:\n self.update_orientation('axes', 0, -0.25)\n elif key == glfw.KEY_H and action == glfw.PRESS:\n self.update_orientation('axes', 1, 0.25)\n elif key == glfw.KEY_K and action == glfw.PRESS:\n self.update_orientation('axes', 1, -0.25)\n elif key == glfw.KEY_O and action == glfw.PRESS:\n self.update_orientation('axes', 2, 0.25)\n elif key == glfw.KEY_L and action == glfw.PRESS:\n self.update_orientation('axes', 2, -0.25)\n elif key == glfw.KEY_SPACE and action == glfw.PRESS:\n print('Render!')\n obj = self.data.name_to_mesh_info['axes']\n self.update_model_matrix('axes')\n\n\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n",
"<import token>\n<function token>\n\n\ndef checker_board():\n return cv2.cvtColor(cv2.imread('checkerboard.jpg'), cv2.COLOR_BGR2RGB)\n\n\n<function token>\n\n\ndef to_numpy_img(x):\n x = to_img(x)\n x = x.detach().numpy().squeeze() if len(x.shape) == 4 else x\n x = np.transpose(x, (1, 2, 0))\n x *= 255.0\n x = np.clip(x, 0.0, 255.0)\n x = x.astype(np.uint8)\n x = cv2.cvtColor(x, cv2.COLOR_BGR2RGB)\n return x\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef update_axes(which, angle, orientation):\n axis = orientation[:, which]\n transformation = gm.rotate(angle, axis)\n orientation = transformation.dot(orientation)\n return orientation\n\n\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n\n\nclass KeyCallbackHandler:\n\n def __init__(self, data):\n self.data = data\n\n def update_orientation(self, name, which, angle):\n obj = self.data.name_to_mesh_info[name]\n R = obj['R']\n axis = np.expand_dims(R[:, which][0:3], 0).T\n M = gm.rotate(angle, axis)\n obj['R'] = gm.rotate(angle, axis).dot(R)\n\n def update_translation(self, name, tx, ty, tz):\n obj = self.data.name_to_mesh_info[name]\n obj['T'] = gm.translate(tx, ty, tz).dot(obj['T'])\n\n def update_model_matrix(self, name):\n obj = self.data.name_to_mesh_info[name]\n obj['M'] = np.linalg.multi_dot([obj['T'], obj['R'], obj['scale']])\n\n def key_handler(self, key, scancode, action, mods):\n if key == glfw.KEY_W and action == glfw.PRESS:\n self.update_translation('axes', 0, 0, 0.025)\n elif key == glfw.KEY_A and action == glfw.PRESS:\n self.update_translation('axes', -0.025, 0, 0)\n elif key == glfw.KEY_S and action == glfw.PRESS:\n self.update_translation('axes', 0, 0, -0.025)\n elif key == glfw.KEY_D and action == glfw.PRESS:\n self.update_translation('axes', 0.025, 0, 0.0)\n elif key == glfw.KEY_R and action == glfw.PRESS:\n self.update_translation('axes', 0, 0.025, 0.0)\n elif key == glfw.KEY_F and action == glfw.PRESS:\n self.update_translation('axes', 0, -0.025, 0.0)\n elif key == glfw.KEY_U and action == glfw.PRESS:\n self.update_orientation('axes', 0, 0.25)\n elif key == glfw.KEY_J and action == glfw.PRESS:\n self.update_orientation('axes', 0, -0.25)\n elif key == glfw.KEY_H and action == glfw.PRESS:\n self.update_orientation('axes', 1, 0.25)\n elif key == glfw.KEY_K and action == glfw.PRESS:\n self.update_orientation('axes', 1, -0.25)\n elif key == glfw.KEY_O and action == glfw.PRESS:\n self.update_orientation('axes', 2, 0.25)\n elif key == glfw.KEY_L and action == glfw.PRESS:\n self.update_orientation('axes', 2, -0.25)\n elif key == glfw.KEY_SPACE and action == glfw.PRESS:\n print('Render!')\n obj = self.data.name_to_mesh_info['axes']\n self.update_model_matrix('axes')\n\n\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n",
"<import token>\n<function token>\n<function token>\n<function token>\n\n\ndef to_numpy_img(x):\n x = to_img(x)\n x = x.detach().numpy().squeeze() if len(x.shape) == 4 else x\n x = np.transpose(x, (1, 2, 0))\n x *= 255.0\n x = np.clip(x, 0.0, 255.0)\n x = x.astype(np.uint8)\n x = cv2.cvtColor(x, cv2.COLOR_BGR2RGB)\n return x\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef update_axes(which, angle, orientation):\n axis = orientation[:, which]\n transformation = gm.rotate(angle, axis)\n orientation = transformation.dot(orientation)\n return orientation\n\n\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n\n\nclass KeyCallbackHandler:\n\n def __init__(self, data):\n self.data = data\n\n def update_orientation(self, name, which, angle):\n obj = self.data.name_to_mesh_info[name]\n R = obj['R']\n axis = np.expand_dims(R[:, which][0:3], 0).T\n M = gm.rotate(angle, axis)\n obj['R'] = gm.rotate(angle, axis).dot(R)\n\n def update_translation(self, name, tx, ty, tz):\n obj = self.data.name_to_mesh_info[name]\n obj['T'] = gm.translate(tx, ty, tz).dot(obj['T'])\n\n def update_model_matrix(self, name):\n obj = self.data.name_to_mesh_info[name]\n obj['M'] = np.linalg.multi_dot([obj['T'], obj['R'], obj['scale']])\n\n def key_handler(self, key, scancode, action, mods):\n if key == glfw.KEY_W and action == glfw.PRESS:\n self.update_translation('axes', 0, 0, 0.025)\n elif key == glfw.KEY_A and action == glfw.PRESS:\n self.update_translation('axes', -0.025, 0, 0)\n elif key == glfw.KEY_S and action == glfw.PRESS:\n self.update_translation('axes', 0, 0, -0.025)\n elif key == glfw.KEY_D and action == glfw.PRESS:\n self.update_translation('axes', 0.025, 0, 0.0)\n elif key == glfw.KEY_R and action == glfw.PRESS:\n self.update_translation('axes', 0, 0.025, 0.0)\n elif key == glfw.KEY_F and action == glfw.PRESS:\n self.update_translation('axes', 0, -0.025, 0.0)\n elif key == glfw.KEY_U and action == glfw.PRESS:\n self.update_orientation('axes', 0, 0.25)\n elif key == glfw.KEY_J and action == glfw.PRESS:\n self.update_orientation('axes', 0, -0.25)\n elif key == glfw.KEY_H and action == glfw.PRESS:\n self.update_orientation('axes', 1, 0.25)\n elif key == glfw.KEY_K and action == glfw.PRESS:\n self.update_orientation('axes', 1, -0.25)\n elif key == glfw.KEY_O and action == glfw.PRESS:\n self.update_orientation('axes', 2, 0.25)\n elif key == glfw.KEY_L and action == glfw.PRESS:\n self.update_orientation('axes', 2, -0.25)\n elif key == glfw.KEY_SPACE and action == glfw.PRESS:\n print('Render!')\n obj = self.data.name_to_mesh_info['axes']\n self.update_model_matrix('axes')\n\n\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n",
"<import token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef update_axes(which, angle, orientation):\n axis = orientation[:, which]\n transformation = gm.rotate(angle, axis)\n orientation = transformation.dot(orientation)\n return orientation\n\n\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n\n\nclass KeyCallbackHandler:\n\n def __init__(self, data):\n self.data = data\n\n def update_orientation(self, name, which, angle):\n obj = self.data.name_to_mesh_info[name]\n R = obj['R']\n axis = np.expand_dims(R[:, which][0:3], 0).T\n M = gm.rotate(angle, axis)\n obj['R'] = gm.rotate(angle, axis).dot(R)\n\n def update_translation(self, name, tx, ty, tz):\n obj = self.data.name_to_mesh_info[name]\n obj['T'] = gm.translate(tx, ty, tz).dot(obj['T'])\n\n def update_model_matrix(self, name):\n obj = self.data.name_to_mesh_info[name]\n obj['M'] = np.linalg.multi_dot([obj['T'], obj['R'], obj['scale']])\n\n def key_handler(self, key, scancode, action, mods):\n if key == glfw.KEY_W and action == glfw.PRESS:\n self.update_translation('axes', 0, 0, 0.025)\n elif key == glfw.KEY_A and action == glfw.PRESS:\n self.update_translation('axes', -0.025, 0, 0)\n elif key == glfw.KEY_S and action == glfw.PRESS:\n self.update_translation('axes', 0, 0, -0.025)\n elif key == glfw.KEY_D and action == glfw.PRESS:\n self.update_translation('axes', 0.025, 0, 0.0)\n elif key == glfw.KEY_R and action == glfw.PRESS:\n self.update_translation('axes', 0, 0.025, 0.0)\n elif key == glfw.KEY_F and action == glfw.PRESS:\n self.update_translation('axes', 0, -0.025, 0.0)\n elif key == glfw.KEY_U and action == glfw.PRESS:\n self.update_orientation('axes', 0, 0.25)\n elif key == glfw.KEY_J and action == glfw.PRESS:\n self.update_orientation('axes', 0, -0.25)\n elif key == glfw.KEY_H and action == glfw.PRESS:\n self.update_orientation('axes', 1, 0.25)\n elif key == glfw.KEY_K and action == glfw.PRESS:\n self.update_orientation('axes', 1, -0.25)\n elif key == glfw.KEY_O and action == glfw.PRESS:\n self.update_orientation('axes', 2, 0.25)\n elif key == glfw.KEY_L and action == glfw.PRESS:\n self.update_orientation('axes', 2, -0.25)\n elif key == glfw.KEY_SPACE and action == glfw.PRESS:\n print('Render!')\n obj = self.data.name_to_mesh_info['axes']\n self.update_model_matrix('axes')\n\n\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n",
"<import token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n\n\nclass KeyCallbackHandler:\n\n def __init__(self, data):\n self.data = data\n\n def update_orientation(self, name, which, angle):\n obj = self.data.name_to_mesh_info[name]\n R = obj['R']\n axis = np.expand_dims(R[:, which][0:3], 0).T\n M = gm.rotate(angle, axis)\n obj['R'] = gm.rotate(angle, axis).dot(R)\n\n def update_translation(self, name, tx, ty, tz):\n obj = self.data.name_to_mesh_info[name]\n obj['T'] = gm.translate(tx, ty, tz).dot(obj['T'])\n\n def update_model_matrix(self, name):\n obj = self.data.name_to_mesh_info[name]\n obj['M'] = np.linalg.multi_dot([obj['T'], obj['R'], obj['scale']])\n\n def key_handler(self, key, scancode, action, mods):\n if key == glfw.KEY_W and action == glfw.PRESS:\n self.update_translation('axes', 0, 0, 0.025)\n elif key == glfw.KEY_A and action == glfw.PRESS:\n self.update_translation('axes', -0.025, 0, 0)\n elif key == glfw.KEY_S and action == glfw.PRESS:\n self.update_translation('axes', 0, 0, -0.025)\n elif key == glfw.KEY_D and action == glfw.PRESS:\n self.update_translation('axes', 0.025, 0, 0.0)\n elif key == glfw.KEY_R and action == glfw.PRESS:\n self.update_translation('axes', 0, 0.025, 0.0)\n elif key == glfw.KEY_F and action == glfw.PRESS:\n self.update_translation('axes', 0, -0.025, 0.0)\n elif key == glfw.KEY_U and action == glfw.PRESS:\n self.update_orientation('axes', 0, 0.25)\n elif key == glfw.KEY_J and action == glfw.PRESS:\n self.update_orientation('axes', 0, -0.25)\n elif key == glfw.KEY_H and action == glfw.PRESS:\n self.update_orientation('axes', 1, 0.25)\n elif key == glfw.KEY_K and action == glfw.PRESS:\n self.update_orientation('axes', 1, -0.25)\n elif key == glfw.KEY_O and action == glfw.PRESS:\n self.update_orientation('axes', 2, 0.25)\n elif key == glfw.KEY_L and action == glfw.PRESS:\n self.update_orientation('axes', 2, -0.25)\n elif key == glfw.KEY_SPACE and action == glfw.PRESS:\n print('Render!')\n obj = self.data.name_to_mesh_info['axes']\n self.update_model_matrix('axes')\n\n\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n",
"<import token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n\n\nclass KeyCallbackHandler:\n\n def __init__(self, data):\n self.data = data\n\n def update_orientation(self, name, which, angle):\n obj = self.data.name_to_mesh_info[name]\n R = obj['R']\n axis = np.expand_dims(R[:, which][0:3], 0).T\n M = gm.rotate(angle, axis)\n obj['R'] = gm.rotate(angle, axis).dot(R)\n <function token>\n\n def update_model_matrix(self, name):\n obj = self.data.name_to_mesh_info[name]\n obj['M'] = np.linalg.multi_dot([obj['T'], obj['R'], obj['scale']])\n\n def key_handler(self, key, scancode, action, mods):\n if key == glfw.KEY_W and action == glfw.PRESS:\n self.update_translation('axes', 0, 0, 0.025)\n elif key == glfw.KEY_A and action == glfw.PRESS:\n self.update_translation('axes', -0.025, 0, 0)\n elif key == glfw.KEY_S and action == glfw.PRESS:\n self.update_translation('axes', 0, 0, -0.025)\n elif key == glfw.KEY_D and action == glfw.PRESS:\n self.update_translation('axes', 0.025, 0, 0.0)\n elif key == glfw.KEY_R and action == glfw.PRESS:\n self.update_translation('axes', 0, 0.025, 0.0)\n elif key == glfw.KEY_F and action == glfw.PRESS:\n self.update_translation('axes', 0, -0.025, 0.0)\n elif key == glfw.KEY_U and action == glfw.PRESS:\n self.update_orientation('axes', 0, 0.25)\n elif key == glfw.KEY_J and action == glfw.PRESS:\n self.update_orientation('axes', 0, -0.25)\n elif key == glfw.KEY_H and action == glfw.PRESS:\n self.update_orientation('axes', 1, 0.25)\n elif key == glfw.KEY_K and action == glfw.PRESS:\n self.update_orientation('axes', 1, -0.25)\n elif key == glfw.KEY_O and action == glfw.PRESS:\n self.update_orientation('axes', 2, 0.25)\n elif key == glfw.KEY_L and action == glfw.PRESS:\n self.update_orientation('axes', 2, -0.25)\n elif key == glfw.KEY_SPACE and action == glfw.PRESS:\n print('Render!')\n obj = self.data.name_to_mesh_info['axes']\n self.update_model_matrix('axes')\n\n\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n",
"<import token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n\n\nclass KeyCallbackHandler:\n\n def __init__(self, data):\n self.data = data\n\n def update_orientation(self, name, which, angle):\n obj = self.data.name_to_mesh_info[name]\n R = obj['R']\n axis = np.expand_dims(R[:, which][0:3], 0).T\n M = gm.rotate(angle, axis)\n obj['R'] = gm.rotate(angle, axis).dot(R)\n <function token>\n\n def update_model_matrix(self, name):\n obj = self.data.name_to_mesh_info[name]\n obj['M'] = np.linalg.multi_dot([obj['T'], obj['R'], obj['scale']])\n <function token>\n\n\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n",
"<import token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n\n\nclass KeyCallbackHandler:\n\n def __init__(self, data):\n self.data = data\n <function token>\n <function token>\n\n def update_model_matrix(self, name):\n obj = self.data.name_to_mesh_info[name]\n obj['M'] = np.linalg.multi_dot([obj['T'], obj['R'], obj['scale']])\n <function token>\n\n\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n",
"<import token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n\n\nclass KeyCallbackHandler:\n\n def __init__(self, data):\n self.data = data\n <function token>\n <function token>\n <function token>\n <function token>\n\n\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n",
"<import token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n\n\nclass KeyCallbackHandler:\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n",
"<import token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<class token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n"
] | false |
98,310 |
3bf0e6291f64e29d2d44a7534437e37f2cf2a0d8
|
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 22 10:49:28 2019
@author: ma56473
"""
from keras import backend as K
from keras.layers import Layer
# Custom loss function that takes multi-tensor input
# Uses the function-in-function trick to bypass Keras restrictions
def commitment_crossentropy(r1, r2, lambda_0, lambda_1, lambda_2):
# Core function
def loss(y_true, y_pred):
return lambda_0 * K.binary_crossentropy(y_true, y_pred) + lambda_1 * r1 + lambda_2 * r2
# Return function
return loss
# Restricted commitment loss (using only R1)
def r1_crossentropy(r1, lambda_1):
# Core functioon
def loss(y_true, y_pred):
return K.binary_crossentropy(y_true, y_pred) + lambda_1 * r1
# Return function
return loss
# Trainable prototype layer with cosine-distance embedding
class CosineEmbedding(Layer):
def __init__(self, num_vectors, latent_dim, **kwargs):
self.num_vectors = num_vectors
self.latent_dim = latent_dim
super(CosineEmbedding, self).__init__(**kwargs)
def build(self, input_shape):
# Trainable p vectors
self.trainable_p = self.add_weight(name='trainable_p',
shape=(self.num_vectors, self.latent_dim),
initializer='glorot_uniform',
trainable=True)
super(CosineEmbedding, self).build(input_shape)
# Main functionality goes here
def call(self, x):
# Cosine similarity via normalized inner products
# Normalize batch
norm_x = K.l2_normalize(x, axis=-1)
# Normalize p vectors
norm_trainable_p = K.l2_normalize(self.trainable_p, axis=-1)
# Compute similarities
trainable_dist = K.dot(norm_x, K.transpose(norm_trainable_p))
# Concatenated output
distances = trainable_dist
# If similarity, output negative max instead
# R1 cost function (min over batch, sum over p)
r1_cost = -K.mean(K.max(distances, axis=0), axis=-1)
# R2 cost function (min over p, sum over batch)
r2_cost = -K.mean(K.max(distances, axis=-1), axis=-1)
# Return triplet
return [distances, r1_cost, r2_cost]
def compute_output_shape(self, input_shape):
# Always returns scalars for the two extra terms
return [(input_shape[0], self.num_vectors), (1,), (1,)]
# Trainable prototype layer with Euclidean distance embedding
class EuclideanEmbedding(Layer):
def __init__(self, num_vectors, latent_dim, **kwargs):
self.num_vectors = num_vectors
self.latent_dim = latent_dim
super(EuclideanEmbedding, self).__init__(**kwargs)
def build(self, input_shape):
# Trainable p vectors
self.trainable_p = self.add_weight(name='trainable_p',
shape=(self.num_vectors, self.latent_dim),
initializer='glorot_uniform',
trainable=True)
super(EuclideanEmbedding, self).build(input_shape)
# Main functionality goes here
def call(self, x):
# Use axis expansion on x for fast computation
x_dim = K.expand_dims(x, axis=1)
# Distance to trainable p vectors
trainable_dist = K.sqrt(K.sum(K.square(x_dim - self.trainable_p), axis=-1))
# Concatenated output
distances = trainable_dist
# R1 cost function (min over batch, sum over p)
r1_cost = K.mean(K.min(distances, axis=0), axis=-1)
# R2 cost function (min over p, sum over batch)
r2_cost = K.mean(K.min(distances, axis=-1), axis=-1)
# Return triplet
return [distances, r1_cost, r2_cost]
def compute_output_shape(self, input_shape):
# Always returns scalars for the two extra terms
return [(input_shape[0], self.num_vectors), (1,), (1,)]
# Trainable prototype layer with Euclidean distance embedding
class L1Embedding(Layer):
def __init__(self, num_vectors, latent_dim, **kwargs):
self.num_vectors = num_vectors
self.latent_dim = latent_dim
super(L1Embedding, self).__init__(**kwargs)
def build(self, input_shape):
# Trainable p vectors
self.trainable_p = self.add_weight(name='trainable_p',
shape=(self.num_vectors, self.latent_dim),
initializer='glorot_uniform',
trainable=True)
super(L1Embedding, self).build(input_shape)
# Main functionality goes here
def call(self, x):
# Use axis expansion on x for fast computation
x_dim = K.expand_dims(x, axis=1)
# Distance to trainable p vectors
trainable_dist = K.sum(K.abs(x_dim - self.trainable_p), axis=-1)
# Concatenated output
distances = trainable_dist
# R1 cost function (min over batch, sum over p)
r1_cost = K.mean(K.min(distances, axis=0), axis=-1)
# R2 cost function (min over p, sum over batch)
r2_cost = K.mean(K.min(distances, axis=-1), axis=-1)
# Return triplet
return [distances, r1_cost, r2_cost]
def compute_output_shape(self, input_shape):
# Always returns scalars for the two extra terms
return [(input_shape[0], self.num_vectors), (1,), (1,)]
|
[
"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Apr 22 10:49:28 2019\r\n\r\n@author: ma56473\r\n\"\"\"\r\n\r\nfrom keras import backend as K\r\nfrom keras.layers import Layer\r\n\r\n# Custom loss function that takes multi-tensor input\r\n# Uses the function-in-function trick to bypass Keras restrictions\r\ndef commitment_crossentropy(r1, r2, lambda_0, lambda_1, lambda_2):\r\n # Core function\r\n def loss(y_true, y_pred):\r\n return lambda_0 * K.binary_crossentropy(y_true, y_pred) + lambda_1 * r1 + lambda_2 * r2\r\n \r\n # Return function\r\n return loss\r\n\r\n# Restricted commitment loss (using only R1)\r\ndef r1_crossentropy(r1, lambda_1):\r\n # Core functioon\r\n def loss(y_true, y_pred):\r\n return K.binary_crossentropy(y_true, y_pred) + lambda_1 * r1\r\n \r\n # Return function\r\n return loss\r\n \r\n# Trainable prototype layer with cosine-distance embedding\r\nclass CosineEmbedding(Layer):\r\n def __init__(self, num_vectors, latent_dim, **kwargs):\r\n self.num_vectors = num_vectors\r\n self.latent_dim = latent_dim\r\n \r\n super(CosineEmbedding, self).__init__(**kwargs)\r\n\r\n def build(self, input_shape):\r\n # Trainable p vectors\r\n self.trainable_p = self.add_weight(name='trainable_p',\r\n shape=(self.num_vectors, self.latent_dim),\r\n initializer='glorot_uniform',\r\n trainable=True)\r\n\r\n super(CosineEmbedding, self).build(input_shape)\r\n \r\n # Main functionality goes here\r\n def call(self, x): \r\n # Cosine similarity via normalized inner products\r\n # Normalize batch\r\n norm_x = K.l2_normalize(x, axis=-1)\r\n # Normalize p vectors\r\n norm_trainable_p = K.l2_normalize(self.trainable_p, axis=-1)\r\n # Compute similarities\r\n trainable_dist = K.dot(norm_x, K.transpose(norm_trainable_p))\r\n\r\n # Concatenated output\r\n distances = trainable_dist\r\n \r\n # If similarity, output negative max instead\r\n # R1 cost function (min over batch, sum over p)\r\n r1_cost = -K.mean(K.max(distances, axis=0), axis=-1)\r\n \r\n # R2 cost function (min over p, sum over batch)\r\n r2_cost = -K.mean(K.max(distances, axis=-1), axis=-1)\r\n \r\n # Return triplet\r\n return [distances, r1_cost, r2_cost]\r\n \r\n def compute_output_shape(self, input_shape):\r\n # Always returns scalars for the two extra terms\r\n return [(input_shape[0], self.num_vectors), (1,), (1,)]\r\n \r\n# Trainable prototype layer with Euclidean distance embedding\r\nclass EuclideanEmbedding(Layer):\r\n def __init__(self, num_vectors, latent_dim, **kwargs):\r\n self.num_vectors = num_vectors\r\n self.latent_dim = latent_dim\r\n \r\n super(EuclideanEmbedding, self).__init__(**kwargs)\r\n\r\n def build(self, input_shape):\r\n # Trainable p vectors\r\n self.trainable_p = self.add_weight(name='trainable_p',\r\n shape=(self.num_vectors, self.latent_dim),\r\n initializer='glorot_uniform',\r\n trainable=True)\r\n\r\n super(EuclideanEmbedding, self).build(input_shape)\r\n \r\n # Main functionality goes here\r\n def call(self, x): \r\n # Use axis expansion on x for fast computation\r\n x_dim = K.expand_dims(x, axis=1)\r\n # Distance to trainable p vectors\r\n trainable_dist = K.sqrt(K.sum(K.square(x_dim - self.trainable_p), axis=-1))\r\n\r\n # Concatenated output\r\n distances = trainable_dist\r\n \r\n # R1 cost function (min over batch, sum over p)\r\n r1_cost = K.mean(K.min(distances, axis=0), axis=-1)\r\n \r\n # R2 cost function (min over p, sum over batch)\r\n r2_cost = K.mean(K.min(distances, axis=-1), axis=-1)\r\n \r\n # Return triplet\r\n return [distances, r1_cost, r2_cost]\r\n \r\n def compute_output_shape(self, input_shape):\r\n # Always returns scalars for the two extra terms\r\n return [(input_shape[0], self.num_vectors), (1,), (1,)]\r\n \r\n# Trainable prototype layer with Euclidean distance embedding\r\nclass L1Embedding(Layer):\r\n def __init__(self, num_vectors, latent_dim, **kwargs):\r\n self.num_vectors = num_vectors\r\n self.latent_dim = latent_dim\r\n \r\n super(L1Embedding, self).__init__(**kwargs)\r\n\r\n def build(self, input_shape):\r\n # Trainable p vectors\r\n self.trainable_p = self.add_weight(name='trainable_p',\r\n shape=(self.num_vectors, self.latent_dim),\r\n initializer='glorot_uniform',\r\n trainable=True)\r\n\r\n super(L1Embedding, self).build(input_shape)\r\n \r\n # Main functionality goes here\r\n def call(self, x): \r\n # Use axis expansion on x for fast computation\r\n x_dim = K.expand_dims(x, axis=1)\r\n # Distance to trainable p vectors\r\n trainable_dist = K.sum(K.abs(x_dim - self.trainable_p), axis=-1)\r\n\r\n # Concatenated output\r\n distances = trainable_dist\r\n \r\n # R1 cost function (min over batch, sum over p)\r\n r1_cost = K.mean(K.min(distances, axis=0), axis=-1)\r\n \r\n # R2 cost function (min over p, sum over batch)\r\n r2_cost = K.mean(K.min(distances, axis=-1), axis=-1)\r\n \r\n # Return triplet\r\n return [distances, r1_cost, r2_cost]\r\n \r\n def compute_output_shape(self, input_shape):\r\n # Always returns scalars for the two extra terms\r\n return [(input_shape[0], self.num_vectors), (1,), (1,)]",
"<docstring token>\nfrom keras import backend as K\nfrom keras.layers import Layer\n\n\ndef commitment_crossentropy(r1, r2, lambda_0, lambda_1, lambda_2):\n\n def loss(y_true, y_pred):\n return lambda_0 * K.binary_crossentropy(y_true, y_pred\n ) + lambda_1 * r1 + lambda_2 * r2\n return loss\n\n\ndef r1_crossentropy(r1, lambda_1):\n\n def loss(y_true, y_pred):\n return K.binary_crossentropy(y_true, y_pred) + lambda_1 * r1\n return loss\n\n\nclass CosineEmbedding(Layer):\n\n def __init__(self, num_vectors, latent_dim, **kwargs):\n self.num_vectors = num_vectors\n self.latent_dim = latent_dim\n super(CosineEmbedding, self).__init__(**kwargs)\n\n def build(self, input_shape):\n self.trainable_p = self.add_weight(name='trainable_p', shape=(self.\n num_vectors, self.latent_dim), initializer='glorot_uniform',\n trainable=True)\n super(CosineEmbedding, self).build(input_shape)\n\n def call(self, x):\n norm_x = K.l2_normalize(x, axis=-1)\n norm_trainable_p = K.l2_normalize(self.trainable_p, axis=-1)\n trainable_dist = K.dot(norm_x, K.transpose(norm_trainable_p))\n distances = trainable_dist\n r1_cost = -K.mean(K.max(distances, axis=0), axis=-1)\n r2_cost = -K.mean(K.max(distances, axis=-1), axis=-1)\n return [distances, r1_cost, r2_cost]\n\n def compute_output_shape(self, input_shape):\n return [(input_shape[0], self.num_vectors), (1,), (1,)]\n\n\nclass EuclideanEmbedding(Layer):\n\n def __init__(self, num_vectors, latent_dim, **kwargs):\n self.num_vectors = num_vectors\n self.latent_dim = latent_dim\n super(EuclideanEmbedding, self).__init__(**kwargs)\n\n def build(self, input_shape):\n self.trainable_p = self.add_weight(name='trainable_p', shape=(self.\n num_vectors, self.latent_dim), initializer='glorot_uniform',\n trainable=True)\n super(EuclideanEmbedding, self).build(input_shape)\n\n def call(self, x):\n x_dim = K.expand_dims(x, axis=1)\n trainable_dist = K.sqrt(K.sum(K.square(x_dim - self.trainable_p),\n axis=-1))\n distances = trainable_dist\n r1_cost = K.mean(K.min(distances, axis=0), axis=-1)\n r2_cost = K.mean(K.min(distances, axis=-1), axis=-1)\n return [distances, r1_cost, r2_cost]\n\n def compute_output_shape(self, input_shape):\n return [(input_shape[0], self.num_vectors), (1,), (1,)]\n\n\nclass L1Embedding(Layer):\n\n def __init__(self, num_vectors, latent_dim, **kwargs):\n self.num_vectors = num_vectors\n self.latent_dim = latent_dim\n super(L1Embedding, self).__init__(**kwargs)\n\n def build(self, input_shape):\n self.trainable_p = self.add_weight(name='trainable_p', shape=(self.\n num_vectors, self.latent_dim), initializer='glorot_uniform',\n trainable=True)\n super(L1Embedding, self).build(input_shape)\n\n def call(self, x):\n x_dim = K.expand_dims(x, axis=1)\n trainable_dist = K.sum(K.abs(x_dim - self.trainable_p), axis=-1)\n distances = trainable_dist\n r1_cost = K.mean(K.min(distances, axis=0), axis=-1)\n r2_cost = K.mean(K.min(distances, axis=-1), axis=-1)\n return [distances, r1_cost, r2_cost]\n\n def compute_output_shape(self, input_shape):\n return [(input_shape[0], self.num_vectors), (1,), (1,)]\n",
"<docstring token>\n<import token>\n\n\ndef commitment_crossentropy(r1, r2, lambda_0, lambda_1, lambda_2):\n\n def loss(y_true, y_pred):\n return lambda_0 * K.binary_crossentropy(y_true, y_pred\n ) + lambda_1 * r1 + lambda_2 * r2\n return loss\n\n\ndef r1_crossentropy(r1, lambda_1):\n\n def loss(y_true, y_pred):\n return K.binary_crossentropy(y_true, y_pred) + lambda_1 * r1\n return loss\n\n\nclass CosineEmbedding(Layer):\n\n def __init__(self, num_vectors, latent_dim, **kwargs):\n self.num_vectors = num_vectors\n self.latent_dim = latent_dim\n super(CosineEmbedding, self).__init__(**kwargs)\n\n def build(self, input_shape):\n self.trainable_p = self.add_weight(name='trainable_p', shape=(self.\n num_vectors, self.latent_dim), initializer='glorot_uniform',\n trainable=True)\n super(CosineEmbedding, self).build(input_shape)\n\n def call(self, x):\n norm_x = K.l2_normalize(x, axis=-1)\n norm_trainable_p = K.l2_normalize(self.trainable_p, axis=-1)\n trainable_dist = K.dot(norm_x, K.transpose(norm_trainable_p))\n distances = trainable_dist\n r1_cost = -K.mean(K.max(distances, axis=0), axis=-1)\n r2_cost = -K.mean(K.max(distances, axis=-1), axis=-1)\n return [distances, r1_cost, r2_cost]\n\n def compute_output_shape(self, input_shape):\n return [(input_shape[0], self.num_vectors), (1,), (1,)]\n\n\nclass EuclideanEmbedding(Layer):\n\n def __init__(self, num_vectors, latent_dim, **kwargs):\n self.num_vectors = num_vectors\n self.latent_dim = latent_dim\n super(EuclideanEmbedding, self).__init__(**kwargs)\n\n def build(self, input_shape):\n self.trainable_p = self.add_weight(name='trainable_p', shape=(self.\n num_vectors, self.latent_dim), initializer='glorot_uniform',\n trainable=True)\n super(EuclideanEmbedding, self).build(input_shape)\n\n def call(self, x):\n x_dim = K.expand_dims(x, axis=1)\n trainable_dist = K.sqrt(K.sum(K.square(x_dim - self.trainable_p),\n axis=-1))\n distances = trainable_dist\n r1_cost = K.mean(K.min(distances, axis=0), axis=-1)\n r2_cost = K.mean(K.min(distances, axis=-1), axis=-1)\n return [distances, r1_cost, r2_cost]\n\n def compute_output_shape(self, input_shape):\n return [(input_shape[0], self.num_vectors), (1,), (1,)]\n\n\nclass L1Embedding(Layer):\n\n def __init__(self, num_vectors, latent_dim, **kwargs):\n self.num_vectors = num_vectors\n self.latent_dim = latent_dim\n super(L1Embedding, self).__init__(**kwargs)\n\n def build(self, input_shape):\n self.trainable_p = self.add_weight(name='trainable_p', shape=(self.\n num_vectors, self.latent_dim), initializer='glorot_uniform',\n trainable=True)\n super(L1Embedding, self).build(input_shape)\n\n def call(self, x):\n x_dim = K.expand_dims(x, axis=1)\n trainable_dist = K.sum(K.abs(x_dim - self.trainable_p), axis=-1)\n distances = trainable_dist\n r1_cost = K.mean(K.min(distances, axis=0), axis=-1)\n r2_cost = K.mean(K.min(distances, axis=-1), axis=-1)\n return [distances, r1_cost, r2_cost]\n\n def compute_output_shape(self, input_shape):\n return [(input_shape[0], self.num_vectors), (1,), (1,)]\n",
"<docstring token>\n<import token>\n\n\ndef commitment_crossentropy(r1, r2, lambda_0, lambda_1, lambda_2):\n\n def loss(y_true, y_pred):\n return lambda_0 * K.binary_crossentropy(y_true, y_pred\n ) + lambda_1 * r1 + lambda_2 * r2\n return loss\n\n\n<function token>\n\n\nclass CosineEmbedding(Layer):\n\n def __init__(self, num_vectors, latent_dim, **kwargs):\n self.num_vectors = num_vectors\n self.latent_dim = latent_dim\n super(CosineEmbedding, self).__init__(**kwargs)\n\n def build(self, input_shape):\n self.trainable_p = self.add_weight(name='trainable_p', shape=(self.\n num_vectors, self.latent_dim), initializer='glorot_uniform',\n trainable=True)\n super(CosineEmbedding, self).build(input_shape)\n\n def call(self, x):\n norm_x = K.l2_normalize(x, axis=-1)\n norm_trainable_p = K.l2_normalize(self.trainable_p, axis=-1)\n trainable_dist = K.dot(norm_x, K.transpose(norm_trainable_p))\n distances = trainable_dist\n r1_cost = -K.mean(K.max(distances, axis=0), axis=-1)\n r2_cost = -K.mean(K.max(distances, axis=-1), axis=-1)\n return [distances, r1_cost, r2_cost]\n\n def compute_output_shape(self, input_shape):\n return [(input_shape[0], self.num_vectors), (1,), (1,)]\n\n\nclass EuclideanEmbedding(Layer):\n\n def __init__(self, num_vectors, latent_dim, **kwargs):\n self.num_vectors = num_vectors\n self.latent_dim = latent_dim\n super(EuclideanEmbedding, self).__init__(**kwargs)\n\n def build(self, input_shape):\n self.trainable_p = self.add_weight(name='trainable_p', shape=(self.\n num_vectors, self.latent_dim), initializer='glorot_uniform',\n trainable=True)\n super(EuclideanEmbedding, self).build(input_shape)\n\n def call(self, x):\n x_dim = K.expand_dims(x, axis=1)\n trainable_dist = K.sqrt(K.sum(K.square(x_dim - self.trainable_p),\n axis=-1))\n distances = trainable_dist\n r1_cost = K.mean(K.min(distances, axis=0), axis=-1)\n r2_cost = K.mean(K.min(distances, axis=-1), axis=-1)\n return [distances, r1_cost, r2_cost]\n\n def compute_output_shape(self, input_shape):\n return [(input_shape[0], self.num_vectors), (1,), (1,)]\n\n\nclass L1Embedding(Layer):\n\n def __init__(self, num_vectors, latent_dim, **kwargs):\n self.num_vectors = num_vectors\n self.latent_dim = latent_dim\n super(L1Embedding, self).__init__(**kwargs)\n\n def build(self, input_shape):\n self.trainable_p = self.add_weight(name='trainable_p', shape=(self.\n num_vectors, self.latent_dim), initializer='glorot_uniform',\n trainable=True)\n super(L1Embedding, self).build(input_shape)\n\n def call(self, x):\n x_dim = K.expand_dims(x, axis=1)\n trainable_dist = K.sum(K.abs(x_dim - self.trainable_p), axis=-1)\n distances = trainable_dist\n r1_cost = K.mean(K.min(distances, axis=0), axis=-1)\n r2_cost = K.mean(K.min(distances, axis=-1), axis=-1)\n return [distances, r1_cost, r2_cost]\n\n def compute_output_shape(self, input_shape):\n return [(input_shape[0], self.num_vectors), (1,), (1,)]\n",
"<docstring token>\n<import token>\n<function token>\n<function token>\n\n\nclass CosineEmbedding(Layer):\n\n def __init__(self, num_vectors, latent_dim, **kwargs):\n self.num_vectors = num_vectors\n self.latent_dim = latent_dim\n super(CosineEmbedding, self).__init__(**kwargs)\n\n def build(self, input_shape):\n self.trainable_p = self.add_weight(name='trainable_p', shape=(self.\n num_vectors, self.latent_dim), initializer='glorot_uniform',\n trainable=True)\n super(CosineEmbedding, self).build(input_shape)\n\n def call(self, x):\n norm_x = K.l2_normalize(x, axis=-1)\n norm_trainable_p = K.l2_normalize(self.trainable_p, axis=-1)\n trainable_dist = K.dot(norm_x, K.transpose(norm_trainable_p))\n distances = trainable_dist\n r1_cost = -K.mean(K.max(distances, axis=0), axis=-1)\n r2_cost = -K.mean(K.max(distances, axis=-1), axis=-1)\n return [distances, r1_cost, r2_cost]\n\n def compute_output_shape(self, input_shape):\n return [(input_shape[0], self.num_vectors), (1,), (1,)]\n\n\nclass EuclideanEmbedding(Layer):\n\n def __init__(self, num_vectors, latent_dim, **kwargs):\n self.num_vectors = num_vectors\n self.latent_dim = latent_dim\n super(EuclideanEmbedding, self).__init__(**kwargs)\n\n def build(self, input_shape):\n self.trainable_p = self.add_weight(name='trainable_p', shape=(self.\n num_vectors, self.latent_dim), initializer='glorot_uniform',\n trainable=True)\n super(EuclideanEmbedding, self).build(input_shape)\n\n def call(self, x):\n x_dim = K.expand_dims(x, axis=1)\n trainable_dist = K.sqrt(K.sum(K.square(x_dim - self.trainable_p),\n axis=-1))\n distances = trainable_dist\n r1_cost = K.mean(K.min(distances, axis=0), axis=-1)\n r2_cost = K.mean(K.min(distances, axis=-1), axis=-1)\n return [distances, r1_cost, r2_cost]\n\n def compute_output_shape(self, input_shape):\n return [(input_shape[0], self.num_vectors), (1,), (1,)]\n\n\nclass L1Embedding(Layer):\n\n def __init__(self, num_vectors, latent_dim, **kwargs):\n self.num_vectors = num_vectors\n self.latent_dim = latent_dim\n super(L1Embedding, self).__init__(**kwargs)\n\n def build(self, input_shape):\n self.trainable_p = self.add_weight(name='trainable_p', shape=(self.\n num_vectors, self.latent_dim), initializer='glorot_uniform',\n trainable=True)\n super(L1Embedding, self).build(input_shape)\n\n def call(self, x):\n x_dim = K.expand_dims(x, axis=1)\n trainable_dist = K.sum(K.abs(x_dim - self.trainable_p), axis=-1)\n distances = trainable_dist\n r1_cost = K.mean(K.min(distances, axis=0), axis=-1)\n r2_cost = K.mean(K.min(distances, axis=-1), axis=-1)\n return [distances, r1_cost, r2_cost]\n\n def compute_output_shape(self, input_shape):\n return [(input_shape[0], self.num_vectors), (1,), (1,)]\n",
"<docstring token>\n<import token>\n<function token>\n<function token>\n\n\nclass CosineEmbedding(Layer):\n\n def __init__(self, num_vectors, latent_dim, **kwargs):\n self.num_vectors = num_vectors\n self.latent_dim = latent_dim\n super(CosineEmbedding, self).__init__(**kwargs)\n\n def build(self, input_shape):\n self.trainable_p = self.add_weight(name='trainable_p', shape=(self.\n num_vectors, self.latent_dim), initializer='glorot_uniform',\n trainable=True)\n super(CosineEmbedding, self).build(input_shape)\n <function token>\n\n def compute_output_shape(self, input_shape):\n return [(input_shape[0], self.num_vectors), (1,), (1,)]\n\n\nclass EuclideanEmbedding(Layer):\n\n def __init__(self, num_vectors, latent_dim, **kwargs):\n self.num_vectors = num_vectors\n self.latent_dim = latent_dim\n super(EuclideanEmbedding, self).__init__(**kwargs)\n\n def build(self, input_shape):\n self.trainable_p = self.add_weight(name='trainable_p', shape=(self.\n num_vectors, self.latent_dim), initializer='glorot_uniform',\n trainable=True)\n super(EuclideanEmbedding, self).build(input_shape)\n\n def call(self, x):\n x_dim = K.expand_dims(x, axis=1)\n trainable_dist = K.sqrt(K.sum(K.square(x_dim - self.trainable_p),\n axis=-1))\n distances = trainable_dist\n r1_cost = K.mean(K.min(distances, axis=0), axis=-1)\n r2_cost = K.mean(K.min(distances, axis=-1), axis=-1)\n return [distances, r1_cost, r2_cost]\n\n def compute_output_shape(self, input_shape):\n return [(input_shape[0], self.num_vectors), (1,), (1,)]\n\n\nclass L1Embedding(Layer):\n\n def __init__(self, num_vectors, latent_dim, **kwargs):\n self.num_vectors = num_vectors\n self.latent_dim = latent_dim\n super(L1Embedding, self).__init__(**kwargs)\n\n def build(self, input_shape):\n self.trainable_p = self.add_weight(name='trainable_p', shape=(self.\n num_vectors, self.latent_dim), initializer='glorot_uniform',\n trainable=True)\n super(L1Embedding, self).build(input_shape)\n\n def call(self, x):\n x_dim = K.expand_dims(x, axis=1)\n trainable_dist = K.sum(K.abs(x_dim - self.trainable_p), axis=-1)\n distances = trainable_dist\n r1_cost = K.mean(K.min(distances, axis=0), axis=-1)\n r2_cost = K.mean(K.min(distances, axis=-1), axis=-1)\n return [distances, r1_cost, r2_cost]\n\n def compute_output_shape(self, input_shape):\n return [(input_shape[0], self.num_vectors), (1,), (1,)]\n",
"<docstring token>\n<import token>\n<function token>\n<function token>\n\n\nclass CosineEmbedding(Layer):\n <function token>\n\n def build(self, input_shape):\n self.trainable_p = self.add_weight(name='trainable_p', shape=(self.\n num_vectors, self.latent_dim), initializer='glorot_uniform',\n trainable=True)\n super(CosineEmbedding, self).build(input_shape)\n <function token>\n\n def compute_output_shape(self, input_shape):\n return [(input_shape[0], self.num_vectors), (1,), (1,)]\n\n\nclass EuclideanEmbedding(Layer):\n\n def __init__(self, num_vectors, latent_dim, **kwargs):\n self.num_vectors = num_vectors\n self.latent_dim = latent_dim\n super(EuclideanEmbedding, self).__init__(**kwargs)\n\n def build(self, input_shape):\n self.trainable_p = self.add_weight(name='trainable_p', shape=(self.\n num_vectors, self.latent_dim), initializer='glorot_uniform',\n trainable=True)\n super(EuclideanEmbedding, self).build(input_shape)\n\n def call(self, x):\n x_dim = K.expand_dims(x, axis=1)\n trainable_dist = K.sqrt(K.sum(K.square(x_dim - self.trainable_p),\n axis=-1))\n distances = trainable_dist\n r1_cost = K.mean(K.min(distances, axis=0), axis=-1)\n r2_cost = K.mean(K.min(distances, axis=-1), axis=-1)\n return [distances, r1_cost, r2_cost]\n\n def compute_output_shape(self, input_shape):\n return [(input_shape[0], self.num_vectors), (1,), (1,)]\n\n\nclass L1Embedding(Layer):\n\n def __init__(self, num_vectors, latent_dim, **kwargs):\n self.num_vectors = num_vectors\n self.latent_dim = latent_dim\n super(L1Embedding, self).__init__(**kwargs)\n\n def build(self, input_shape):\n self.trainable_p = self.add_weight(name='trainable_p', shape=(self.\n num_vectors, self.latent_dim), initializer='glorot_uniform',\n trainable=True)\n super(L1Embedding, self).build(input_shape)\n\n def call(self, x):\n x_dim = K.expand_dims(x, axis=1)\n trainable_dist = K.sum(K.abs(x_dim - self.trainable_p), axis=-1)\n distances = trainable_dist\n r1_cost = K.mean(K.min(distances, axis=0), axis=-1)\n r2_cost = K.mean(K.min(distances, axis=-1), axis=-1)\n return [distances, r1_cost, r2_cost]\n\n def compute_output_shape(self, input_shape):\n return [(input_shape[0], self.num_vectors), (1,), (1,)]\n",
"<docstring token>\n<import token>\n<function token>\n<function token>\n\n\nclass CosineEmbedding(Layer):\n <function token>\n\n def build(self, input_shape):\n self.trainable_p = self.add_weight(name='trainable_p', shape=(self.\n num_vectors, self.latent_dim), initializer='glorot_uniform',\n trainable=True)\n super(CosineEmbedding, self).build(input_shape)\n <function token>\n <function token>\n\n\nclass EuclideanEmbedding(Layer):\n\n def __init__(self, num_vectors, latent_dim, **kwargs):\n self.num_vectors = num_vectors\n self.latent_dim = latent_dim\n super(EuclideanEmbedding, self).__init__(**kwargs)\n\n def build(self, input_shape):\n self.trainable_p = self.add_weight(name='trainable_p', shape=(self.\n num_vectors, self.latent_dim), initializer='glorot_uniform',\n trainable=True)\n super(EuclideanEmbedding, self).build(input_shape)\n\n def call(self, x):\n x_dim = K.expand_dims(x, axis=1)\n trainable_dist = K.sqrt(K.sum(K.square(x_dim - self.trainable_p),\n axis=-1))\n distances = trainable_dist\n r1_cost = K.mean(K.min(distances, axis=0), axis=-1)\n r2_cost = K.mean(K.min(distances, axis=-1), axis=-1)\n return [distances, r1_cost, r2_cost]\n\n def compute_output_shape(self, input_shape):\n return [(input_shape[0], self.num_vectors), (1,), (1,)]\n\n\nclass L1Embedding(Layer):\n\n def __init__(self, num_vectors, latent_dim, **kwargs):\n self.num_vectors = num_vectors\n self.latent_dim = latent_dim\n super(L1Embedding, self).__init__(**kwargs)\n\n def build(self, input_shape):\n self.trainable_p = self.add_weight(name='trainable_p', shape=(self.\n num_vectors, self.latent_dim), initializer='glorot_uniform',\n trainable=True)\n super(L1Embedding, self).build(input_shape)\n\n def call(self, x):\n x_dim = K.expand_dims(x, axis=1)\n trainable_dist = K.sum(K.abs(x_dim - self.trainable_p), axis=-1)\n distances = trainable_dist\n r1_cost = K.mean(K.min(distances, axis=0), axis=-1)\n r2_cost = K.mean(K.min(distances, axis=-1), axis=-1)\n return [distances, r1_cost, r2_cost]\n\n def compute_output_shape(self, input_shape):\n return [(input_shape[0], self.num_vectors), (1,), (1,)]\n",
"<docstring token>\n<import token>\n<function token>\n<function token>\n\n\nclass CosineEmbedding(Layer):\n <function token>\n <function token>\n <function token>\n <function token>\n\n\nclass EuclideanEmbedding(Layer):\n\n def __init__(self, num_vectors, latent_dim, **kwargs):\n self.num_vectors = num_vectors\n self.latent_dim = latent_dim\n super(EuclideanEmbedding, self).__init__(**kwargs)\n\n def build(self, input_shape):\n self.trainable_p = self.add_weight(name='trainable_p', shape=(self.\n num_vectors, self.latent_dim), initializer='glorot_uniform',\n trainable=True)\n super(EuclideanEmbedding, self).build(input_shape)\n\n def call(self, x):\n x_dim = K.expand_dims(x, axis=1)\n trainable_dist = K.sqrt(K.sum(K.square(x_dim - self.trainable_p),\n axis=-1))\n distances = trainable_dist\n r1_cost = K.mean(K.min(distances, axis=0), axis=-1)\n r2_cost = K.mean(K.min(distances, axis=-1), axis=-1)\n return [distances, r1_cost, r2_cost]\n\n def compute_output_shape(self, input_shape):\n return [(input_shape[0], self.num_vectors), (1,), (1,)]\n\n\nclass L1Embedding(Layer):\n\n def __init__(self, num_vectors, latent_dim, **kwargs):\n self.num_vectors = num_vectors\n self.latent_dim = latent_dim\n super(L1Embedding, self).__init__(**kwargs)\n\n def build(self, input_shape):\n self.trainable_p = self.add_weight(name='trainable_p', shape=(self.\n num_vectors, self.latent_dim), initializer='glorot_uniform',\n trainable=True)\n super(L1Embedding, self).build(input_shape)\n\n def call(self, x):\n x_dim = K.expand_dims(x, axis=1)\n trainable_dist = K.sum(K.abs(x_dim - self.trainable_p), axis=-1)\n distances = trainable_dist\n r1_cost = K.mean(K.min(distances, axis=0), axis=-1)\n r2_cost = K.mean(K.min(distances, axis=-1), axis=-1)\n return [distances, r1_cost, r2_cost]\n\n def compute_output_shape(self, input_shape):\n return [(input_shape[0], self.num_vectors), (1,), (1,)]\n",
"<docstring token>\n<import token>\n<function token>\n<function token>\n<class token>\n\n\nclass EuclideanEmbedding(Layer):\n\n def __init__(self, num_vectors, latent_dim, **kwargs):\n self.num_vectors = num_vectors\n self.latent_dim = latent_dim\n super(EuclideanEmbedding, self).__init__(**kwargs)\n\n def build(self, input_shape):\n self.trainable_p = self.add_weight(name='trainable_p', shape=(self.\n num_vectors, self.latent_dim), initializer='glorot_uniform',\n trainable=True)\n super(EuclideanEmbedding, self).build(input_shape)\n\n def call(self, x):\n x_dim = K.expand_dims(x, axis=1)\n trainable_dist = K.sqrt(K.sum(K.square(x_dim - self.trainable_p),\n axis=-1))\n distances = trainable_dist\n r1_cost = K.mean(K.min(distances, axis=0), axis=-1)\n r2_cost = K.mean(K.min(distances, axis=-1), axis=-1)\n return [distances, r1_cost, r2_cost]\n\n def compute_output_shape(self, input_shape):\n return [(input_shape[0], self.num_vectors), (1,), (1,)]\n\n\nclass L1Embedding(Layer):\n\n def __init__(self, num_vectors, latent_dim, **kwargs):\n self.num_vectors = num_vectors\n self.latent_dim = latent_dim\n super(L1Embedding, self).__init__(**kwargs)\n\n def build(self, input_shape):\n self.trainable_p = self.add_weight(name='trainable_p', shape=(self.\n num_vectors, self.latent_dim), initializer='glorot_uniform',\n trainable=True)\n super(L1Embedding, self).build(input_shape)\n\n def call(self, x):\n x_dim = K.expand_dims(x, axis=1)\n trainable_dist = K.sum(K.abs(x_dim - self.trainable_p), axis=-1)\n distances = trainable_dist\n r1_cost = K.mean(K.min(distances, axis=0), axis=-1)\n r2_cost = K.mean(K.min(distances, axis=-1), axis=-1)\n return [distances, r1_cost, r2_cost]\n\n def compute_output_shape(self, input_shape):\n return [(input_shape[0], self.num_vectors), (1,), (1,)]\n",
"<docstring token>\n<import token>\n<function token>\n<function token>\n<class token>\n\n\nclass EuclideanEmbedding(Layer):\n\n def __init__(self, num_vectors, latent_dim, **kwargs):\n self.num_vectors = num_vectors\n self.latent_dim = latent_dim\n super(EuclideanEmbedding, self).__init__(**kwargs)\n\n def build(self, input_shape):\n self.trainable_p = self.add_weight(name='trainable_p', shape=(self.\n num_vectors, self.latent_dim), initializer='glorot_uniform',\n trainable=True)\n super(EuclideanEmbedding, self).build(input_shape)\n\n def call(self, x):\n x_dim = K.expand_dims(x, axis=1)\n trainable_dist = K.sqrt(K.sum(K.square(x_dim - self.trainable_p),\n axis=-1))\n distances = trainable_dist\n r1_cost = K.mean(K.min(distances, axis=0), axis=-1)\n r2_cost = K.mean(K.min(distances, axis=-1), axis=-1)\n return [distances, r1_cost, r2_cost]\n <function token>\n\n\nclass L1Embedding(Layer):\n\n def __init__(self, num_vectors, latent_dim, **kwargs):\n self.num_vectors = num_vectors\n self.latent_dim = latent_dim\n super(L1Embedding, self).__init__(**kwargs)\n\n def build(self, input_shape):\n self.trainable_p = self.add_weight(name='trainable_p', shape=(self.\n num_vectors, self.latent_dim), initializer='glorot_uniform',\n trainable=True)\n super(L1Embedding, self).build(input_shape)\n\n def call(self, x):\n x_dim = K.expand_dims(x, axis=1)\n trainable_dist = K.sum(K.abs(x_dim - self.trainable_p), axis=-1)\n distances = trainable_dist\n r1_cost = K.mean(K.min(distances, axis=0), axis=-1)\n r2_cost = K.mean(K.min(distances, axis=-1), axis=-1)\n return [distances, r1_cost, r2_cost]\n\n def compute_output_shape(self, input_shape):\n return [(input_shape[0], self.num_vectors), (1,), (1,)]\n",
"<docstring token>\n<import token>\n<function token>\n<function token>\n<class token>\n\n\nclass EuclideanEmbedding(Layer):\n\n def __init__(self, num_vectors, latent_dim, **kwargs):\n self.num_vectors = num_vectors\n self.latent_dim = latent_dim\n super(EuclideanEmbedding, self).__init__(**kwargs)\n\n def build(self, input_shape):\n self.trainable_p = self.add_weight(name='trainable_p', shape=(self.\n num_vectors, self.latent_dim), initializer='glorot_uniform',\n trainable=True)\n super(EuclideanEmbedding, self).build(input_shape)\n <function token>\n <function token>\n\n\nclass L1Embedding(Layer):\n\n def __init__(self, num_vectors, latent_dim, **kwargs):\n self.num_vectors = num_vectors\n self.latent_dim = latent_dim\n super(L1Embedding, self).__init__(**kwargs)\n\n def build(self, input_shape):\n self.trainable_p = self.add_weight(name='trainable_p', shape=(self.\n num_vectors, self.latent_dim), initializer='glorot_uniform',\n trainable=True)\n super(L1Embedding, self).build(input_shape)\n\n def call(self, x):\n x_dim = K.expand_dims(x, axis=1)\n trainable_dist = K.sum(K.abs(x_dim - self.trainable_p), axis=-1)\n distances = trainable_dist\n r1_cost = K.mean(K.min(distances, axis=0), axis=-1)\n r2_cost = K.mean(K.min(distances, axis=-1), axis=-1)\n return [distances, r1_cost, r2_cost]\n\n def compute_output_shape(self, input_shape):\n return [(input_shape[0], self.num_vectors), (1,), (1,)]\n",
"<docstring token>\n<import token>\n<function token>\n<function token>\n<class token>\n\n\nclass EuclideanEmbedding(Layer):\n <function token>\n\n def build(self, input_shape):\n self.trainable_p = self.add_weight(name='trainable_p', shape=(self.\n num_vectors, self.latent_dim), initializer='glorot_uniform',\n trainable=True)\n super(EuclideanEmbedding, self).build(input_shape)\n <function token>\n <function token>\n\n\nclass L1Embedding(Layer):\n\n def __init__(self, num_vectors, latent_dim, **kwargs):\n self.num_vectors = num_vectors\n self.latent_dim = latent_dim\n super(L1Embedding, self).__init__(**kwargs)\n\n def build(self, input_shape):\n self.trainable_p = self.add_weight(name='trainable_p', shape=(self.\n num_vectors, self.latent_dim), initializer='glorot_uniform',\n trainable=True)\n super(L1Embedding, self).build(input_shape)\n\n def call(self, x):\n x_dim = K.expand_dims(x, axis=1)\n trainable_dist = K.sum(K.abs(x_dim - self.trainable_p), axis=-1)\n distances = trainable_dist\n r1_cost = K.mean(K.min(distances, axis=0), axis=-1)\n r2_cost = K.mean(K.min(distances, axis=-1), axis=-1)\n return [distances, r1_cost, r2_cost]\n\n def compute_output_shape(self, input_shape):\n return [(input_shape[0], self.num_vectors), (1,), (1,)]\n",
"<docstring token>\n<import token>\n<function token>\n<function token>\n<class token>\n\n\nclass EuclideanEmbedding(Layer):\n <function token>\n <function token>\n <function token>\n <function token>\n\n\nclass L1Embedding(Layer):\n\n def __init__(self, num_vectors, latent_dim, **kwargs):\n self.num_vectors = num_vectors\n self.latent_dim = latent_dim\n super(L1Embedding, self).__init__(**kwargs)\n\n def build(self, input_shape):\n self.trainable_p = self.add_weight(name='trainable_p', shape=(self.\n num_vectors, self.latent_dim), initializer='glorot_uniform',\n trainable=True)\n super(L1Embedding, self).build(input_shape)\n\n def call(self, x):\n x_dim = K.expand_dims(x, axis=1)\n trainable_dist = K.sum(K.abs(x_dim - self.trainable_p), axis=-1)\n distances = trainable_dist\n r1_cost = K.mean(K.min(distances, axis=0), axis=-1)\n r2_cost = K.mean(K.min(distances, axis=-1), axis=-1)\n return [distances, r1_cost, r2_cost]\n\n def compute_output_shape(self, input_shape):\n return [(input_shape[0], self.num_vectors), (1,), (1,)]\n",
"<docstring token>\n<import token>\n<function token>\n<function token>\n<class token>\n<class token>\n\n\nclass L1Embedding(Layer):\n\n def __init__(self, num_vectors, latent_dim, **kwargs):\n self.num_vectors = num_vectors\n self.latent_dim = latent_dim\n super(L1Embedding, self).__init__(**kwargs)\n\n def build(self, input_shape):\n self.trainable_p = self.add_weight(name='trainable_p', shape=(self.\n num_vectors, self.latent_dim), initializer='glorot_uniform',\n trainable=True)\n super(L1Embedding, self).build(input_shape)\n\n def call(self, x):\n x_dim = K.expand_dims(x, axis=1)\n trainable_dist = K.sum(K.abs(x_dim - self.trainable_p), axis=-1)\n distances = trainable_dist\n r1_cost = K.mean(K.min(distances, axis=0), axis=-1)\n r2_cost = K.mean(K.min(distances, axis=-1), axis=-1)\n return [distances, r1_cost, r2_cost]\n\n def compute_output_shape(self, input_shape):\n return [(input_shape[0], self.num_vectors), (1,), (1,)]\n",
"<docstring token>\n<import token>\n<function token>\n<function token>\n<class token>\n<class token>\n\n\nclass L1Embedding(Layer):\n\n def __init__(self, num_vectors, latent_dim, **kwargs):\n self.num_vectors = num_vectors\n self.latent_dim = latent_dim\n super(L1Embedding, self).__init__(**kwargs)\n\n def build(self, input_shape):\n self.trainable_p = self.add_weight(name='trainable_p', shape=(self.\n num_vectors, self.latent_dim), initializer='glorot_uniform',\n trainable=True)\n super(L1Embedding, self).build(input_shape)\n\n def call(self, x):\n x_dim = K.expand_dims(x, axis=1)\n trainable_dist = K.sum(K.abs(x_dim - self.trainable_p), axis=-1)\n distances = trainable_dist\n r1_cost = K.mean(K.min(distances, axis=0), axis=-1)\n r2_cost = K.mean(K.min(distances, axis=-1), axis=-1)\n return [distances, r1_cost, r2_cost]\n <function token>\n",
"<docstring token>\n<import token>\n<function token>\n<function token>\n<class token>\n<class token>\n\n\nclass L1Embedding(Layer):\n\n def __init__(self, num_vectors, latent_dim, **kwargs):\n self.num_vectors = num_vectors\n self.latent_dim = latent_dim\n super(L1Embedding, self).__init__(**kwargs)\n\n def build(self, input_shape):\n self.trainable_p = self.add_weight(name='trainable_p', shape=(self.\n num_vectors, self.latent_dim), initializer='glorot_uniform',\n trainable=True)\n super(L1Embedding, self).build(input_shape)\n <function token>\n <function token>\n",
"<docstring token>\n<import token>\n<function token>\n<function token>\n<class token>\n<class token>\n\n\nclass L1Embedding(Layer):\n <function token>\n\n def build(self, input_shape):\n self.trainable_p = self.add_weight(name='trainable_p', shape=(self.\n num_vectors, self.latent_dim), initializer='glorot_uniform',\n trainable=True)\n super(L1Embedding, self).build(input_shape)\n <function token>\n <function token>\n",
"<docstring token>\n<import token>\n<function token>\n<function token>\n<class token>\n<class token>\n\n\nclass L1Embedding(Layer):\n <function token>\n <function token>\n <function token>\n <function token>\n",
"<docstring token>\n<import token>\n<function token>\n<function token>\n<class token>\n<class token>\n<class token>\n"
] | false |
98,311 |
753994fe65b94828ba03b9289e3a452ba1913563
|
from elasticsearch import Elasticsearch
from elasticsearch_dsl import Search, Q
def esearch(firstname="", gender=""):
# creating a connection to the Elasticsearch server and assign it to the client variable
client = Elasticsearch()
q = Q("bool", should=[Q("match", firstname=firstname),
Q("match", gender=gender)], minimum_should_match=1)
s = Search(using=client, index="bank").query(q)[0:20]
response = s.execute()
print('Total hits found.', response.hits.total)
search = get_results(response)
return search
def getArticals():
client = Elasticsearch()
res = Search(using=client, index="blog").query()[0:50]
response = res.execute()
print('Total hits found.', response.hits.total)
results = []
i=0
for hit in response:
i=+1
print('hit', i, hit)
result_tuple = ( hit.title, hit.body, hit.tags)
results.append(result_tuple)
print(results)
return results
def getSearchData(title="", body=""):
client = Elasticsearch()
q = Q("bool", should=[Q("match", title=title),
Q("match", body=body)], minimum_should_match=1)
s = Search(using=client, index="blog").query(q)[0:20]
response = s.execute()
print("*****************", q, s)
print('Total hits found.', response.hits.total, response)
results = []
for hit in response:
result_tuple = ( hit.title, hit.body, hit.tags)
print(result_tuple)
results.append(result_tuple)
return results
def get_results(response):
results = []
for hit in response:
result_tuple = (hit.firstname + ' ' + hit.lastname,
hit.email, hit.gender, hit.address)
results.append(result_tuple)
return results
if __name__ == '__main__':
print("Opal guy details:\n", esearch(firstname = "opal"))
print("the first 20 f gender details:\n", esearch(gender = "f"))
|
[
"from elasticsearch import Elasticsearch \nfrom elasticsearch_dsl import Search, Q \n\ndef esearch(firstname=\"\", gender=\"\"): \n\n # creating a connection to the Elasticsearch server and assign it to the client variable \n client = Elasticsearch() \n q = Q(\"bool\", should=[Q(\"match\", firstname=firstname), \n Q(\"match\", gender=gender)], minimum_should_match=1) \n s = Search(using=client, index=\"bank\").query(q)[0:20] \n response = s.execute()\n print('Total hits found.', response.hits.total) \n search = get_results(response) \n return search \n\ndef getArticals():\n client = Elasticsearch() \n \n res = Search(using=client, index=\"blog\").query()[0:50]\n response = res.execute()\n print('Total hits found.', response.hits.total) \n results = [] \n i=0\n for hit in response: \n i=+1\n print('hit', i, hit)\n result_tuple = ( hit.title, hit.body, hit.tags) \n results.append(result_tuple) \n\n print(results)\n return results \n\ndef getSearchData(title=\"\", body=\"\"):\n client = Elasticsearch() \n q = Q(\"bool\", should=[Q(\"match\", title=title), \n Q(\"match\", body=body)], minimum_should_match=1) \n s = Search(using=client, index=\"blog\").query(q)[0:20] \n \n response = s.execute()\n print(\"*****************\", q, s)\n print('Total hits found.', response.hits.total, response) \n results = [] \n \n for hit in response: \n \n result_tuple = ( hit.title, hit.body, hit.tags) \n print(result_tuple) \n results.append(result_tuple) \n return results \n\ndef get_results(response): \n\n results = [] \n for hit in response: \n result_tuple = (hit.firstname + ' ' + hit.lastname,\n hit.email, hit.gender, hit.address) \n results.append(result_tuple) \n return results\n\nif __name__ == '__main__': \n print(\"Opal guy details:\\n\", esearch(firstname = \"opal\"))\n print(\"the first 20 f gender details:\\n\", esearch(gender = \"f\"))",
"from elasticsearch import Elasticsearch\nfrom elasticsearch_dsl import Search, Q\n\n\ndef esearch(firstname='', gender=''):\n client = Elasticsearch()\n q = Q('bool', should=[Q('match', firstname=firstname), Q('match',\n gender=gender)], minimum_should_match=1)\n s = Search(using=client, index='bank').query(q)[0:20]\n response = s.execute()\n print('Total hits found.', response.hits.total)\n search = get_results(response)\n return search\n\n\ndef getArticals():\n client = Elasticsearch()\n res = Search(using=client, index='blog').query()[0:50]\n response = res.execute()\n print('Total hits found.', response.hits.total)\n results = []\n i = 0\n for hit in response:\n i = +1\n print('hit', i, hit)\n result_tuple = hit.title, hit.body, hit.tags\n results.append(result_tuple)\n print(results)\n return results\n\n\ndef getSearchData(title='', body=''):\n client = Elasticsearch()\n q = Q('bool', should=[Q('match', title=title), Q('match', body=body)],\n minimum_should_match=1)\n s = Search(using=client, index='blog').query(q)[0:20]\n response = s.execute()\n print('*****************', q, s)\n print('Total hits found.', response.hits.total, response)\n results = []\n for hit in response:\n result_tuple = hit.title, hit.body, hit.tags\n print(result_tuple)\n results.append(result_tuple)\n return results\n\n\ndef get_results(response):\n results = []\n for hit in response:\n result_tuple = (hit.firstname + ' ' + hit.lastname, hit.email, hit.\n gender, hit.address)\n results.append(result_tuple)\n return results\n\n\nif __name__ == '__main__':\n print('Opal guy details:\\n', esearch(firstname='opal'))\n print('the first 20 f gender details:\\n', esearch(gender='f'))\n",
"<import token>\n\n\ndef esearch(firstname='', gender=''):\n client = Elasticsearch()\n q = Q('bool', should=[Q('match', firstname=firstname), Q('match',\n gender=gender)], minimum_should_match=1)\n s = Search(using=client, index='bank').query(q)[0:20]\n response = s.execute()\n print('Total hits found.', response.hits.total)\n search = get_results(response)\n return search\n\n\ndef getArticals():\n client = Elasticsearch()\n res = Search(using=client, index='blog').query()[0:50]\n response = res.execute()\n print('Total hits found.', response.hits.total)\n results = []\n i = 0\n for hit in response:\n i = +1\n print('hit', i, hit)\n result_tuple = hit.title, hit.body, hit.tags\n results.append(result_tuple)\n print(results)\n return results\n\n\ndef getSearchData(title='', body=''):\n client = Elasticsearch()\n q = Q('bool', should=[Q('match', title=title), Q('match', body=body)],\n minimum_should_match=1)\n s = Search(using=client, index='blog').query(q)[0:20]\n response = s.execute()\n print('*****************', q, s)\n print('Total hits found.', response.hits.total, response)\n results = []\n for hit in response:\n result_tuple = hit.title, hit.body, hit.tags\n print(result_tuple)\n results.append(result_tuple)\n return results\n\n\ndef get_results(response):\n results = []\n for hit in response:\n result_tuple = (hit.firstname + ' ' + hit.lastname, hit.email, hit.\n gender, hit.address)\n results.append(result_tuple)\n return results\n\n\nif __name__ == '__main__':\n print('Opal guy details:\\n', esearch(firstname='opal'))\n print('the first 20 f gender details:\\n', esearch(gender='f'))\n",
"<import token>\n\n\ndef esearch(firstname='', gender=''):\n client = Elasticsearch()\n q = Q('bool', should=[Q('match', firstname=firstname), Q('match',\n gender=gender)], minimum_should_match=1)\n s = Search(using=client, index='bank').query(q)[0:20]\n response = s.execute()\n print('Total hits found.', response.hits.total)\n search = get_results(response)\n return search\n\n\ndef getArticals():\n client = Elasticsearch()\n res = Search(using=client, index='blog').query()[0:50]\n response = res.execute()\n print('Total hits found.', response.hits.total)\n results = []\n i = 0\n for hit in response:\n i = +1\n print('hit', i, hit)\n result_tuple = hit.title, hit.body, hit.tags\n results.append(result_tuple)\n print(results)\n return results\n\n\ndef getSearchData(title='', body=''):\n client = Elasticsearch()\n q = Q('bool', should=[Q('match', title=title), Q('match', body=body)],\n minimum_should_match=1)\n s = Search(using=client, index='blog').query(q)[0:20]\n response = s.execute()\n print('*****************', q, s)\n print('Total hits found.', response.hits.total, response)\n results = []\n for hit in response:\n result_tuple = hit.title, hit.body, hit.tags\n print(result_tuple)\n results.append(result_tuple)\n return results\n\n\ndef get_results(response):\n results = []\n for hit in response:\n result_tuple = (hit.firstname + ' ' + hit.lastname, hit.email, hit.\n gender, hit.address)\n results.append(result_tuple)\n return results\n\n\n<code token>\n",
"<import token>\n\n\ndef esearch(firstname='', gender=''):\n client = Elasticsearch()\n q = Q('bool', should=[Q('match', firstname=firstname), Q('match',\n gender=gender)], minimum_should_match=1)\n s = Search(using=client, index='bank').query(q)[0:20]\n response = s.execute()\n print('Total hits found.', response.hits.total)\n search = get_results(response)\n return search\n\n\ndef getArticals():\n client = Elasticsearch()\n res = Search(using=client, index='blog').query()[0:50]\n response = res.execute()\n print('Total hits found.', response.hits.total)\n results = []\n i = 0\n for hit in response:\n i = +1\n print('hit', i, hit)\n result_tuple = hit.title, hit.body, hit.tags\n results.append(result_tuple)\n print(results)\n return results\n\n\ndef getSearchData(title='', body=''):\n client = Elasticsearch()\n q = Q('bool', should=[Q('match', title=title), Q('match', body=body)],\n minimum_should_match=1)\n s = Search(using=client, index='blog').query(q)[0:20]\n response = s.execute()\n print('*****************', q, s)\n print('Total hits found.', response.hits.total, response)\n results = []\n for hit in response:\n result_tuple = hit.title, hit.body, hit.tags\n print(result_tuple)\n results.append(result_tuple)\n return results\n\n\n<function token>\n<code token>\n",
"<import token>\n\n\ndef esearch(firstname='', gender=''):\n client = Elasticsearch()\n q = Q('bool', should=[Q('match', firstname=firstname), Q('match',\n gender=gender)], minimum_should_match=1)\n s = Search(using=client, index='bank').query(q)[0:20]\n response = s.execute()\n print('Total hits found.', response.hits.total)\n search = get_results(response)\n return search\n\n\n<function token>\n\n\ndef getSearchData(title='', body=''):\n client = Elasticsearch()\n q = Q('bool', should=[Q('match', title=title), Q('match', body=body)],\n minimum_should_match=1)\n s = Search(using=client, index='blog').query(q)[0:20]\n response = s.execute()\n print('*****************', q, s)\n print('Total hits found.', response.hits.total, response)\n results = []\n for hit in response:\n result_tuple = hit.title, hit.body, hit.tags\n print(result_tuple)\n results.append(result_tuple)\n return results\n\n\n<function token>\n<code token>\n",
"<import token>\n<function token>\n<function token>\n\n\ndef getSearchData(title='', body=''):\n client = Elasticsearch()\n q = Q('bool', should=[Q('match', title=title), Q('match', body=body)],\n minimum_should_match=1)\n s = Search(using=client, index='blog').query(q)[0:20]\n response = s.execute()\n print('*****************', q, s)\n print('Total hits found.', response.hits.total, response)\n results = []\n for hit in response:\n result_tuple = hit.title, hit.body, hit.tags\n print(result_tuple)\n results.append(result_tuple)\n return results\n\n\n<function token>\n<code token>\n",
"<import token>\n<function token>\n<function token>\n<function token>\n<function token>\n<code token>\n"
] | false |
98,312 |
d1c596a2d364254347f4ef50df1216455cd906da
|
a=1
b=2
c=a+b
print (a+b);print (c)
print(c)
|
[
"\na=1\nb=2\nc=a+b\nprint (a+b);print (c)\nprint(c)",
"a = 1\nb = 2\nc = a + b\nprint(a + b)\nprint(c)\nprint(c)\n",
"<assignment token>\nprint(a + b)\nprint(c)\nprint(c)\n",
"<assignment token>\n<code token>\n"
] | false |
98,313 |
df233edfee820247a6899e707c3be83eb1d96254
|
import requests
import numpy as np
from .mini_dsfdr import dsfdr
from .utils import debug, get_dbbact_server_address
from collections import defaultdict
def calour_enrichment(seqs1, seqs2, term_type="term"):
'''
Do dbbact term and annotation enrichment analysis for 2 lists of sequences (comparing first to second list of sequences)
Parameters
----------
seqs1:list of str
first set of sequences (ACGT)
seqs1:list of str
second set of sequences (ACGT)
term_type : str (optional)
type of the term to analyze for enrichment. can be:
"term" : analyze the terms per annotation (not including parent terms)
"annotation" : analyze the annotations associated with each sequence
Returns
-------
err : str
empty if ok, otherwise the error encountered
term_list : list of str
the terms which are enriched
pvals : list of float
the p-value for each term
odif : list of float
the effect size for each term
'''
import calour as ca
db = ca.database._get_database_class('dbbact')
# set the same seed (since we use a random permutation test)
np.random.seed(2018)
all_seqs = set(seqs1).union(set(seqs2))
seqs2 = list(all_seqs - set(seqs1))
if len(seqs2) == 0:
return 'No sequences remaining in background fasta after removing the sequences of interest', None, None, None
all_seqs = list(all_seqs)
# get the annotations for the sequences
info = {}
info['sequence_terms'], info['sequence_annotations'], info['annotations'] = get_seq_annotations_fast(all_seqs)
terms_df, resmat, features_df = db.db.term_enrichment(seqs1, seqs2, info['annotations'], info['sequence_annotations'], term_type=term_type)
print(terms_df)
return '', terms_df['feature'].values, terms_df['qval'], terms_df['odif']
def getannotationstrings2(cann):
"""
get a nice string summary of a curation
input:
cann : dict from /sequences/get_annotations (one from the list)
output:
cdesc : str
a short summary of each annotation
"""
cdesc = ''
if cann['description']:
cdesc += cann['description'] + ' ('
if cann['annotationtype'] == 'diffexp':
chigh = []
clow = []
call = []
for cdet in cann['details']:
if cdet[0] == 'all':
call.append(cdet[1])
continue
if cdet[0] == 'low':
clow.append(cdet[1])
continue
if cdet[0] == 'high':
chigh.append(cdet[1])
continue
cdesc += ' high in '
for cval in chigh:
cdesc += cval + ' '
cdesc += ' compared to '
for cval in clow:
cdesc += cval + ' '
cdesc += ' in '
for cval in call:
cdesc += cval + ' '
elif cann['annotationtype'] == 'isa':
cdesc += ' is a '
for cdet in cann['details']:
cdesc += 'cdet,'
elif cann['annotationtype'] == 'contamination':
cdesc += 'contamination'
else:
cdesc += cann['annotationtype'] + ' '
for cdet in cann['details']:
cdesc = cdesc + ' ' + cdet[1] + ','
if len(cdesc) >= 1 and cdesc[-1] == ',':
cdesc = cdesc[:-1]
return cdesc
def get_seq_annotations_fast(sequences):
debug(2, 'get_seq_annotations_fast for %d sequences' % len(sequences))
rdata = {}
rdata['sequences'] = sequences
res = requests.get(get_dbbact_server_address() + '/sequences/get_fast_annotations', json=rdata)
if res.status_code != 200:
debug(5, 'error getting fast annotations for sequence list')
return None, None, None
res = res.json()
debug(2, 'got %d total annotations' % len(res['annotations']))
sequence_terms = {}
sequence_annotations = {}
for cseq in sequences:
sequence_terms[cseq] = []
sequence_annotations[cseq] = []
for cseqannotation in res['seqannotations']:
cpos = cseqannotation[0]
# need str since json dict is always string
cseq = sequences[cpos]
sequence_annotations[cseq].extend(cseqannotation[1])
for cannotation in cseqannotation[1]:
for k, v in res['annotations'][str(cannotation)]['parents'].items():
if k == 'high' or k == 'all':
for cterm in v:
sequence_terms[cseq].append(cterm)
elif k == 'low':
for cterm in v:
sequence_terms[cseq].append('-' + cterm)
annotations = res['annotations']
# replace the string in the key with an int (since in json key is always str)
keys = list(annotations.keys())
for cid in keys:
annotations[int(cid)] = annotations.pop(cid)
# count total associations
total_annotations = 0
for cseq_annotations in sequence_annotations.values():
total_annotations += len(cseq_annotations)
debug(2, 'Got %d associations' % total_annotations)
return sequence_terms, sequence_annotations, annotations
def _get_term_features(features, feature_terms):
'''Get numpy array of score of each term for each feature
Parameters
----------
features : list of str
A list of DNA sequences
feature_terms : dict of {feature: list of tuples of (term, amount)}
The terms associated with each feature in exp
feature (key) : str the feature (out of exp) to which the terms relate
feature_terms (value) : list of tuples of (str or int the terms associated with this feature, count)
Returns
-------
numpy array of T (terms) * F (features)
total counts of each term (row) in each feature (column)
list of str
list of the terms corresponding to the numpy array rows
'''
# get all terms
terms = {}
cpos = 0
for cfeature, ctermlist in feature_terms.items():
for cterm, ccount in ctermlist:
if cterm not in terms:
terms[cterm] = cpos
cpos += 1
tot_features_inflated = 0
feature_pos = {}
for cfeature in features:
ctermlist = feature_terms[cfeature]
feature_pos[cfeature] = tot_features_inflated
tot_features_inflated += len(ctermlist)
# populate the matrix
res = np.zeros([len(terms), len(features)])
for idx, cfeature in enumerate(features):
for cterm, ctermcount in feature_terms[cfeature]:
res[terms[cterm], idx] += ctermcount
term_list = sorted(terms, key=terms.get)
debug(2, 'created terms X features matrix with %d terms (rows), %d features (columns)' % (res.shape[0], res.shape[1]))
return res, term_list
def _get_term_features_inflated(features, feature_terms):
'''Get numpy array of score of each term for each feature. This is the inflated version (used for card mean) to overcome the different number of annotations per feature. But slower and not memory efficient
Parameters
----------
features : list of str
A list of DNA sequences
feature_terms : dict of {feature: list of tuples of (term, amount)}
The terms associated with each feature in exp
feature (key) : str the feature (out of exp) to which the terms relate
feature_terms (value) : list of tuples of (str or int the terms associated with this feature, count)
Returns
-------
numpy array of T (terms) * F (inflated features)
total counts of each term (row) in each feature (column)
list of str
list of the terms corresponding to the numpy array rows
'''
# get all terms
terms = {}
cpos = 0
for cfeature, ctermlist in feature_terms.items():
for cterm, ccount in ctermlist:
if cterm not in terms:
terms[cterm] = cpos
cpos += 1
tot_features_inflated = 0
feature_pos = {}
for cfeature in features:
ctermlist = feature_terms[cfeature]
feature_pos[cfeature] = tot_features_inflated
tot_features_inflated += len(ctermlist)
res = np.zeros([len(terms), tot_features_inflated])
for cfeature in features:
for cterm, ctermcount in feature_terms[cfeature]:
res[terms[cterm], feature_pos[cfeature]] += ctermcount
term_list = sorted(terms, key=terms.get)
debug(2, 'created terms X features matrix with %d terms (rows), %d features (columns)' % (res.shape[0], res.shape[1]))
return res, term_list
def _get_all_annotation_string_counts(features, sequence_annotations, annotations):
feature_annotations = {}
for cseq, annotations_list in sequence_annotations.items():
if cseq not in features:
continue
newdesc = []
for cannotation in annotations_list:
cdesc = getannotationstrings2(annotations[cannotation])
newdesc.append((cdesc, 1))
feature_annotations[cseq] = newdesc
return feature_annotations
def _get_all_term_counts(features, feature_annotations, annotations):
'''Get counts of all terms associated with each feature
Parameters
----------
features: list of str
the sequences to get the terms for
feature_annotations: dict of {feature (str): annotationIDs (list of int))
the list of annotations each feature appears in
annotations: dict of {annotationsid (int): annotation details (dict)}
all the annotations in the experiment
Returns
-------
dict of {feature (str): annotation counts (list of (term(str), count(int)))}
'''
feature_terms = {}
for cfeature in features:
annotation_list = [annotations[x] for x in feature_annotations[cfeature]]
feature_terms[cfeature] = get_annotation_term_counts(annotation_list)
return feature_terms
def get_annotation_term_counts(annotations):
'''Get the annotation type corrected count for all terms in annotations
Parameters
----------
annotations : list of dict
list of annotations
Returns
-------
list of tuples (term, count)
'''
term_count = defaultdict(int)
for cannotation in annotations:
if cannotation['annotationtype'] == 'common':
for cdesc in cannotation['details']:
term_count[cdesc[1]] += 1
continue
if cannotation['annotationtype'] == 'dominant':
for cdesc in cannotation['details']:
term_count[cdesc[1]] += 2
continue
if cannotation['annotationtype'] == 'other':
for cdesc in cannotation['details']:
term_count[cdesc[1]] += 0.5
continue
if cannotation['annotationtype'] == 'contamination':
term_count['contamination'] += 1
continue
if cannotation['annotationtype'] in ['diffexp', 'positive correlation', 'negative correlation']:
for cdesc in cannotation['details']:
if cdesc[0] == 'all':
term_count[cdesc[1]] += 1
continue
if cdesc[0] == 'high':
term_count[cdesc[1]] += 2
continue
if cdesc[0] == 'low':
term_count[cdesc[1]] -= 2
continue
debug(4, 'unknown detail type %s encountered' % cdesc[0])
continue
if cannotation['annotationtype'] == 'other':
continue
debug(4, 'unknown annotation type %s encountered' % cannotation['annotationtype'])
res = []
for k, v in term_count.items():
# flip and add '-' to term if negative
if v < 0:
k = '-' + k
v = -v
res.append((k, v))
return res
def enrichment(seqs1, seqs2, term_type="term"):
'''
Do dbbact term and annotation enrichment analysis for 2 lists of sequences (comparing first to second list of sequences)
Parameters
----------
seqs1:list of str
first set of sequences (ACGT)
seqs1:list of str
second set of sequences (ACGT)
term_type : str (optional)
type of the term to analyze for enrichment. can be:
"term" : analyze the terms per annotation (not including parent terms)
"annotation" : analyze the annotations associated with each sequence
Returns
-------
err : str
empty if ok, otherwise the error encountered
term_list : list of str
the terms which are enriched
pvals : list of float
the p-value for each term
odif : list of float
the effect size for each term
'''
# set the same seed (since we use a random permutation test)
np.random.seed(2018)
all_seqs = set(seqs1).union(set(seqs2))
seqs2 = list(all_seqs - set(seqs1))
if len(seqs2) == 0:
return 'No sequences remaining in background fasta after removing the sequences of interest', None, None, None
all_seqs = list(all_seqs)
# get the annotations for the sequences
info = {}
info['sequence_terms'], info['sequence_annotations'], info['annotations'] = get_seq_annotations_fast(all_seqs)
if term_type == 'term':
debug(2, 'getting all_term counts')
feature_terms = _get_all_term_counts(all_seqs, info['sequence_annotations'], info['annotations'])
elif term_type == 'annotation':
debug(2, 'getting all_annotation string counts')
feature_terms = _get_all_annotation_string_counts(all_seqs, info['sequence_annotations'], info['annotations'])
else:
debug(8, 'strange term_type encountered: %s' % term_type)
# count the total number of terms
all_terms_set = set()
for cterms in feature_terms.values():
for (cterm, ccount) in cterms:
all_terms_set.add(cterm)
debug(2, 'found %d terms associated with all sequences (%d)' % (len(all_terms_set), len(all_seqs)))
debug(2, 'getting seqs1 feature array')
feature_array, term_list = _get_term_features(seqs1, feature_terms)
debug(2, 'getting seqs2 feature array')
bg_array, term_list = _get_term_features(seqs2, feature_terms)
debug(2, 'bgarray: %s, feature_array: %s' % (bg_array.shape, feature_array.shape))
all_feature_array = np.hstack([feature_array, bg_array])
labels = np.zeros(all_feature_array.shape[1])
labels[:feature_array.shape[1]] = 1
debug(2, 'starting dsfdr for enrichment')
keep, odif, pvals = dsfdr(all_feature_array, labels, method='meandiff', transform_type=None, alpha=0.1, numperm=1000, fdr_method='dsfdr')
keep = np.where(keep)[0]
if len(keep) == 0:
debug(2, 'no enriched terms found')
term_list = np.array(term_list)[keep]
odif = odif[keep]
pvals = pvals[keep]
si = np.argsort(odif)
odif = odif[si]
pvals = pvals[si]
term_list = term_list[si]
return '', term_list, pvals, odif
|
[
"import requests\n\nimport numpy as np\nfrom .mini_dsfdr import dsfdr\nfrom .utils import debug, get_dbbact_server_address\nfrom collections import defaultdict\n\n\ndef calour_enrichment(seqs1, seqs2, term_type=\"term\"):\n '''\n Do dbbact term and annotation enrichment analysis for 2 lists of sequences (comparing first to second list of sequences)\n\n Parameters\n ----------\n seqs1:list of str\n first set of sequences (ACGT)\n seqs1:list of str\n second set of sequences (ACGT)\n term_type : str (optional)\n type of the term to analyze for enrichment. can be:\n \"term\" : analyze the terms per annotation (not including parent terms)\n \"annotation\" : analyze the annotations associated with each sequence\n\n Returns\n -------\n err : str\n empty if ok, otherwise the error encountered\n term_list : list of str\n the terms which are enriched\n pvals : list of float\n the p-value for each term\n odif : list of float\n the effect size for each term\n '''\n import calour as ca\n\n db = ca.database._get_database_class('dbbact')\n\n # set the same seed (since we use a random permutation test)\n np.random.seed(2018)\n\n all_seqs = set(seqs1).union(set(seqs2))\n seqs2 = list(all_seqs - set(seqs1))\n if len(seqs2) == 0:\n return 'No sequences remaining in background fasta after removing the sequences of interest', None, None, None\n all_seqs = list(all_seqs)\n\n # get the annotations for the sequences\n info = {}\n info['sequence_terms'], info['sequence_annotations'], info['annotations'] = get_seq_annotations_fast(all_seqs)\n\n terms_df, resmat, features_df = db.db.term_enrichment(seqs1, seqs2, info['annotations'], info['sequence_annotations'], term_type=term_type)\n print(terms_df)\n return '', terms_df['feature'].values, terms_df['qval'], terms_df['odif']\n\n\ndef getannotationstrings2(cann):\n \"\"\"\n get a nice string summary of a curation\n\n input:\n cann : dict from /sequences/get_annotations (one from the list)\n output:\n cdesc : str\n a short summary of each annotation\n \"\"\"\n cdesc = ''\n if cann['description']:\n cdesc += cann['description'] + ' ('\n if cann['annotationtype'] == 'diffexp':\n chigh = []\n clow = []\n call = []\n for cdet in cann['details']:\n if cdet[0] == 'all':\n call.append(cdet[1])\n continue\n if cdet[0] == 'low':\n clow.append(cdet[1])\n continue\n if cdet[0] == 'high':\n chigh.append(cdet[1])\n continue\n cdesc += ' high in '\n for cval in chigh:\n cdesc += cval + ' '\n cdesc += ' compared to '\n for cval in clow:\n cdesc += cval + ' '\n cdesc += ' in '\n for cval in call:\n cdesc += cval + ' '\n elif cann['annotationtype'] == 'isa':\n cdesc += ' is a '\n for cdet in cann['details']:\n cdesc += 'cdet,'\n elif cann['annotationtype'] == 'contamination':\n cdesc += 'contamination'\n else:\n cdesc += cann['annotationtype'] + ' '\n for cdet in cann['details']:\n cdesc = cdesc + ' ' + cdet[1] + ','\n\n if len(cdesc) >= 1 and cdesc[-1] == ',':\n cdesc = cdesc[:-1]\n return cdesc\n\n\ndef get_seq_annotations_fast(sequences):\n debug(2, 'get_seq_annotations_fast for %d sequences' % len(sequences))\n rdata = {}\n rdata['sequences'] = sequences\n res = requests.get(get_dbbact_server_address() + '/sequences/get_fast_annotations', json=rdata)\n if res.status_code != 200:\n debug(5, 'error getting fast annotations for sequence list')\n return None, None, None\n res = res.json()\n debug(2, 'got %d total annotations' % len(res['annotations']))\n\n sequence_terms = {}\n sequence_annotations = {}\n for cseq in sequences:\n sequence_terms[cseq] = []\n sequence_annotations[cseq] = []\n for cseqannotation in res['seqannotations']:\n cpos = cseqannotation[0]\n # need str since json dict is always string\n cseq = sequences[cpos]\n sequence_annotations[cseq].extend(cseqannotation[1])\n for cannotation in cseqannotation[1]:\n for k, v in res['annotations'][str(cannotation)]['parents'].items():\n if k == 'high' or k == 'all':\n for cterm in v:\n sequence_terms[cseq].append(cterm)\n elif k == 'low':\n for cterm in v:\n sequence_terms[cseq].append('-' + cterm)\n\n annotations = res['annotations']\n\n # replace the string in the key with an int (since in json key is always str)\n keys = list(annotations.keys())\n for cid in keys:\n annotations[int(cid)] = annotations.pop(cid)\n\n # count total associations\n total_annotations = 0\n for cseq_annotations in sequence_annotations.values():\n total_annotations += len(cseq_annotations)\n debug(2, 'Got %d associations' % total_annotations)\n\n return sequence_terms, sequence_annotations, annotations\n\n\ndef _get_term_features(features, feature_terms):\n '''Get numpy array of score of each term for each feature\n\n Parameters\n ----------\n features : list of str\n A list of DNA sequences\n feature_terms : dict of {feature: list of tuples of (term, amount)}\n The terms associated with each feature in exp\n feature (key) : str the feature (out of exp) to which the terms relate\n feature_terms (value) : list of tuples of (str or int the terms associated with this feature, count)\n\n Returns\n -------\n numpy array of T (terms) * F (features)\n total counts of each term (row) in each feature (column)\n list of str\n list of the terms corresponding to the numpy array rows\n '''\n # get all terms\n terms = {}\n cpos = 0\n for cfeature, ctermlist in feature_terms.items():\n for cterm, ccount in ctermlist:\n if cterm not in terms:\n terms[cterm] = cpos\n cpos += 1\n\n tot_features_inflated = 0\n feature_pos = {}\n for cfeature in features:\n ctermlist = feature_terms[cfeature]\n feature_pos[cfeature] = tot_features_inflated\n tot_features_inflated += len(ctermlist)\n\n # populate the matrix\n res = np.zeros([len(terms), len(features)])\n for idx, cfeature in enumerate(features):\n for cterm, ctermcount in feature_terms[cfeature]:\n res[terms[cterm], idx] += ctermcount\n\n term_list = sorted(terms, key=terms.get)\n debug(2, 'created terms X features matrix with %d terms (rows), %d features (columns)' % (res.shape[0], res.shape[1]))\n return res, term_list\n\n\ndef _get_term_features_inflated(features, feature_terms):\n '''Get numpy array of score of each term for each feature. This is the inflated version (used for card mean) to overcome the different number of annotations per feature. But slower and not memory efficient\n\n Parameters\n ----------\n features : list of str\n A list of DNA sequences\n feature_terms : dict of {feature: list of tuples of (term, amount)}\n The terms associated with each feature in exp\n feature (key) : str the feature (out of exp) to which the terms relate\n feature_terms (value) : list of tuples of (str or int the terms associated with this feature, count)\n\n Returns\n -------\n numpy array of T (terms) * F (inflated features)\n total counts of each term (row) in each feature (column)\n list of str\n list of the terms corresponding to the numpy array rows\n '''\n # get all terms\n terms = {}\n cpos = 0\n for cfeature, ctermlist in feature_terms.items():\n for cterm, ccount in ctermlist:\n if cterm not in terms:\n terms[cterm] = cpos\n cpos += 1\n\n tot_features_inflated = 0\n feature_pos = {}\n for cfeature in features:\n ctermlist = feature_terms[cfeature]\n feature_pos[cfeature] = tot_features_inflated\n tot_features_inflated += len(ctermlist)\n\n res = np.zeros([len(terms), tot_features_inflated])\n\n for cfeature in features:\n for cterm, ctermcount in feature_terms[cfeature]:\n res[terms[cterm], feature_pos[cfeature]] += ctermcount\n term_list = sorted(terms, key=terms.get)\n debug(2, 'created terms X features matrix with %d terms (rows), %d features (columns)' % (res.shape[0], res.shape[1]))\n return res, term_list\n\n\ndef _get_all_annotation_string_counts(features, sequence_annotations, annotations):\n feature_annotations = {}\n for cseq, annotations_list in sequence_annotations.items():\n if cseq not in features:\n continue\n newdesc = []\n for cannotation in annotations_list:\n cdesc = getannotationstrings2(annotations[cannotation])\n newdesc.append((cdesc, 1))\n feature_annotations[cseq] = newdesc\n return feature_annotations\n\n\ndef _get_all_term_counts(features, feature_annotations, annotations):\n '''Get counts of all terms associated with each feature\n\n Parameters\n ----------\n features: list of str\n the sequences to get the terms for\n feature_annotations: dict of {feature (str): annotationIDs (list of int))\n the list of annotations each feature appears in\n annotations: dict of {annotationsid (int): annotation details (dict)}\n all the annotations in the experiment\n\n Returns\n -------\n dict of {feature (str): annotation counts (list of (term(str), count(int)))}\n '''\n feature_terms = {}\n for cfeature in features:\n annotation_list = [annotations[x] for x in feature_annotations[cfeature]]\n feature_terms[cfeature] = get_annotation_term_counts(annotation_list)\n return feature_terms\n\n\ndef get_annotation_term_counts(annotations):\n '''Get the annotation type corrected count for all terms in annotations\n\n Parameters\n ----------\n annotations : list of dict\n list of annotations\n\n Returns\n -------\n list of tuples (term, count)\n '''\n term_count = defaultdict(int)\n for cannotation in annotations:\n if cannotation['annotationtype'] == 'common':\n for cdesc in cannotation['details']:\n term_count[cdesc[1]] += 1\n continue\n if cannotation['annotationtype'] == 'dominant':\n for cdesc in cannotation['details']:\n term_count[cdesc[1]] += 2\n continue\n if cannotation['annotationtype'] == 'other':\n for cdesc in cannotation['details']:\n term_count[cdesc[1]] += 0.5\n continue\n if cannotation['annotationtype'] == 'contamination':\n term_count['contamination'] += 1\n continue\n if cannotation['annotationtype'] in ['diffexp', 'positive correlation', 'negative correlation']:\n for cdesc in cannotation['details']:\n if cdesc[0] == 'all':\n term_count[cdesc[1]] += 1\n continue\n if cdesc[0] == 'high':\n term_count[cdesc[1]] += 2\n continue\n if cdesc[0] == 'low':\n term_count[cdesc[1]] -= 2\n continue\n debug(4, 'unknown detail type %s encountered' % cdesc[0])\n continue\n if cannotation['annotationtype'] == 'other':\n continue\n debug(4, 'unknown annotation type %s encountered' % cannotation['annotationtype'])\n res = []\n for k, v in term_count.items():\n # flip and add '-' to term if negative\n if v < 0:\n k = '-' + k\n v = -v\n res.append((k, v))\n return res\n\n\ndef enrichment(seqs1, seqs2, term_type=\"term\"):\n '''\n Do dbbact term and annotation enrichment analysis for 2 lists of sequences (comparing first to second list of sequences)\n\n Parameters\n ----------\n seqs1:list of str\n first set of sequences (ACGT)\n seqs1:list of str\n second set of sequences (ACGT)\n term_type : str (optional)\n type of the term to analyze for enrichment. can be:\n \"term\" : analyze the terms per annotation (not including parent terms)\n \"annotation\" : analyze the annotations associated with each sequence\n\n\n Returns\n -------\n err : str\n empty if ok, otherwise the error encountered\n term_list : list of str\n the terms which are enriched\n pvals : list of float\n the p-value for each term\n odif : list of float\n the effect size for each term\n '''\n # set the same seed (since we use a random permutation test)\n np.random.seed(2018)\n\n all_seqs = set(seqs1).union(set(seqs2))\n seqs2 = list(all_seqs - set(seqs1))\n if len(seqs2) == 0:\n return 'No sequences remaining in background fasta after removing the sequences of interest', None, None, None\n all_seqs = list(all_seqs)\n\n # get the annotations for the sequences\n info = {}\n info['sequence_terms'], info['sequence_annotations'], info['annotations'] = get_seq_annotations_fast(all_seqs)\n\n if term_type == 'term':\n debug(2, 'getting all_term counts')\n feature_terms = _get_all_term_counts(all_seqs, info['sequence_annotations'], info['annotations'])\n elif term_type == 'annotation':\n debug(2, 'getting all_annotation string counts')\n feature_terms = _get_all_annotation_string_counts(all_seqs, info['sequence_annotations'], info['annotations'])\n else:\n debug(8, 'strange term_type encountered: %s' % term_type)\n\n # count the total number of terms\n all_terms_set = set()\n for cterms in feature_terms.values():\n for (cterm, ccount) in cterms:\n all_terms_set.add(cterm)\n debug(2, 'found %d terms associated with all sequences (%d)' % (len(all_terms_set), len(all_seqs)))\n\n debug(2, 'getting seqs1 feature array')\n feature_array, term_list = _get_term_features(seqs1, feature_terms)\n debug(2, 'getting seqs2 feature array')\n bg_array, term_list = _get_term_features(seqs2, feature_terms)\n\n debug(2, 'bgarray: %s, feature_array: %s' % (bg_array.shape, feature_array.shape))\n all_feature_array = np.hstack([feature_array, bg_array])\n\n labels = np.zeros(all_feature_array.shape[1])\n labels[:feature_array.shape[1]] = 1\n\n debug(2, 'starting dsfdr for enrichment')\n keep, odif, pvals = dsfdr(all_feature_array, labels, method='meandiff', transform_type=None, alpha=0.1, numperm=1000, fdr_method='dsfdr')\n keep = np.where(keep)[0]\n if len(keep) == 0:\n debug(2, 'no enriched terms found')\n term_list = np.array(term_list)[keep]\n odif = odif[keep]\n pvals = pvals[keep]\n si = np.argsort(odif)\n odif = odif[si]\n pvals = pvals[si]\n term_list = term_list[si]\n return '', term_list, pvals, odif\n",
"import requests\nimport numpy as np\nfrom .mini_dsfdr import dsfdr\nfrom .utils import debug, get_dbbact_server_address\nfrom collections import defaultdict\n\n\ndef calour_enrichment(seqs1, seqs2, term_type='term'):\n \"\"\"\n Do dbbact term and annotation enrichment analysis for 2 lists of sequences (comparing first to second list of sequences)\n\n Parameters\n ----------\n seqs1:list of str\n first set of sequences (ACGT)\n seqs1:list of str\n second set of sequences (ACGT)\n term_type : str (optional)\n type of the term to analyze for enrichment. can be:\n \"term\" : analyze the terms per annotation (not including parent terms)\n \"annotation\" : analyze the annotations associated with each sequence\n\n Returns\n -------\n err : str\n empty if ok, otherwise the error encountered\n term_list : list of str\n the terms which are enriched\n pvals : list of float\n the p-value for each term\n odif : list of float\n the effect size for each term\n \"\"\"\n import calour as ca\n db = ca.database._get_database_class('dbbact')\n np.random.seed(2018)\n all_seqs = set(seqs1).union(set(seqs2))\n seqs2 = list(all_seqs - set(seqs1))\n if len(seqs2) == 0:\n return (\n 'No sequences remaining in background fasta after removing the sequences of interest'\n , None, None, None)\n all_seqs = list(all_seqs)\n info = {}\n info['sequence_terms'], info['sequence_annotations'], info['annotations'\n ] = get_seq_annotations_fast(all_seqs)\n terms_df, resmat, features_df = db.db.term_enrichment(seqs1, seqs2,\n info['annotations'], info['sequence_annotations'], term_type=term_type)\n print(terms_df)\n return '', terms_df['feature'].values, terms_df['qval'], terms_df['odif']\n\n\ndef getannotationstrings2(cann):\n \"\"\"\n get a nice string summary of a curation\n\n input:\n cann : dict from /sequences/get_annotations (one from the list)\n output:\n cdesc : str\n a short summary of each annotation\n \"\"\"\n cdesc = ''\n if cann['description']:\n cdesc += cann['description'] + ' ('\n if cann['annotationtype'] == 'diffexp':\n chigh = []\n clow = []\n call = []\n for cdet in cann['details']:\n if cdet[0] == 'all':\n call.append(cdet[1])\n continue\n if cdet[0] == 'low':\n clow.append(cdet[1])\n continue\n if cdet[0] == 'high':\n chigh.append(cdet[1])\n continue\n cdesc += ' high in '\n for cval in chigh:\n cdesc += cval + ' '\n cdesc += ' compared to '\n for cval in clow:\n cdesc += cval + ' '\n cdesc += ' in '\n for cval in call:\n cdesc += cval + ' '\n elif cann['annotationtype'] == 'isa':\n cdesc += ' is a '\n for cdet in cann['details']:\n cdesc += 'cdet,'\n elif cann['annotationtype'] == 'contamination':\n cdesc += 'contamination'\n else:\n cdesc += cann['annotationtype'] + ' '\n for cdet in cann['details']:\n cdesc = cdesc + ' ' + cdet[1] + ','\n if len(cdesc) >= 1 and cdesc[-1] == ',':\n cdesc = cdesc[:-1]\n return cdesc\n\n\ndef get_seq_annotations_fast(sequences):\n debug(2, 'get_seq_annotations_fast for %d sequences' % len(sequences))\n rdata = {}\n rdata['sequences'] = sequences\n res = requests.get(get_dbbact_server_address() +\n '/sequences/get_fast_annotations', json=rdata)\n if res.status_code != 200:\n debug(5, 'error getting fast annotations for sequence list')\n return None, None, None\n res = res.json()\n debug(2, 'got %d total annotations' % len(res['annotations']))\n sequence_terms = {}\n sequence_annotations = {}\n for cseq in sequences:\n sequence_terms[cseq] = []\n sequence_annotations[cseq] = []\n for cseqannotation in res['seqannotations']:\n cpos = cseqannotation[0]\n cseq = sequences[cpos]\n sequence_annotations[cseq].extend(cseqannotation[1])\n for cannotation in cseqannotation[1]:\n for k, v in res['annotations'][str(cannotation)]['parents'].items(\n ):\n if k == 'high' or k == 'all':\n for cterm in v:\n sequence_terms[cseq].append(cterm)\n elif k == 'low':\n for cterm in v:\n sequence_terms[cseq].append('-' + cterm)\n annotations = res['annotations']\n keys = list(annotations.keys())\n for cid in keys:\n annotations[int(cid)] = annotations.pop(cid)\n total_annotations = 0\n for cseq_annotations in sequence_annotations.values():\n total_annotations += len(cseq_annotations)\n debug(2, 'Got %d associations' % total_annotations)\n return sequence_terms, sequence_annotations, annotations\n\n\ndef _get_term_features(features, feature_terms):\n \"\"\"Get numpy array of score of each term for each feature\n\n Parameters\n ----------\n features : list of str\n A list of DNA sequences\n feature_terms : dict of {feature: list of tuples of (term, amount)}\n The terms associated with each feature in exp\n feature (key) : str the feature (out of exp) to which the terms relate\n feature_terms (value) : list of tuples of (str or int the terms associated with this feature, count)\n\n Returns\n -------\n numpy array of T (terms) * F (features)\n total counts of each term (row) in each feature (column)\n list of str\n list of the terms corresponding to the numpy array rows\n \"\"\"\n terms = {}\n cpos = 0\n for cfeature, ctermlist in feature_terms.items():\n for cterm, ccount in ctermlist:\n if cterm not in terms:\n terms[cterm] = cpos\n cpos += 1\n tot_features_inflated = 0\n feature_pos = {}\n for cfeature in features:\n ctermlist = feature_terms[cfeature]\n feature_pos[cfeature] = tot_features_inflated\n tot_features_inflated += len(ctermlist)\n res = np.zeros([len(terms), len(features)])\n for idx, cfeature in enumerate(features):\n for cterm, ctermcount in feature_terms[cfeature]:\n res[terms[cterm], idx] += ctermcount\n term_list = sorted(terms, key=terms.get)\n debug(2, \n 'created terms X features matrix with %d terms (rows), %d features (columns)'\n % (res.shape[0], res.shape[1]))\n return res, term_list\n\n\ndef _get_term_features_inflated(features, feature_terms):\n \"\"\"Get numpy array of score of each term for each feature. This is the inflated version (used for card mean) to overcome the different number of annotations per feature. But slower and not memory efficient\n\n Parameters\n ----------\n features : list of str\n A list of DNA sequences\n feature_terms : dict of {feature: list of tuples of (term, amount)}\n The terms associated with each feature in exp\n feature (key) : str the feature (out of exp) to which the terms relate\n feature_terms (value) : list of tuples of (str or int the terms associated with this feature, count)\n\n Returns\n -------\n numpy array of T (terms) * F (inflated features)\n total counts of each term (row) in each feature (column)\n list of str\n list of the terms corresponding to the numpy array rows\n \"\"\"\n terms = {}\n cpos = 0\n for cfeature, ctermlist in feature_terms.items():\n for cterm, ccount in ctermlist:\n if cterm not in terms:\n terms[cterm] = cpos\n cpos += 1\n tot_features_inflated = 0\n feature_pos = {}\n for cfeature in features:\n ctermlist = feature_terms[cfeature]\n feature_pos[cfeature] = tot_features_inflated\n tot_features_inflated += len(ctermlist)\n res = np.zeros([len(terms), tot_features_inflated])\n for cfeature in features:\n for cterm, ctermcount in feature_terms[cfeature]:\n res[terms[cterm], feature_pos[cfeature]] += ctermcount\n term_list = sorted(terms, key=terms.get)\n debug(2, \n 'created terms X features matrix with %d terms (rows), %d features (columns)'\n % (res.shape[0], res.shape[1]))\n return res, term_list\n\n\ndef _get_all_annotation_string_counts(features, sequence_annotations,\n annotations):\n feature_annotations = {}\n for cseq, annotations_list in sequence_annotations.items():\n if cseq not in features:\n continue\n newdesc = []\n for cannotation in annotations_list:\n cdesc = getannotationstrings2(annotations[cannotation])\n newdesc.append((cdesc, 1))\n feature_annotations[cseq] = newdesc\n return feature_annotations\n\n\ndef _get_all_term_counts(features, feature_annotations, annotations):\n \"\"\"Get counts of all terms associated with each feature\n\n Parameters\n ----------\n features: list of str\n the sequences to get the terms for\n feature_annotations: dict of {feature (str): annotationIDs (list of int))\n the list of annotations each feature appears in\n annotations: dict of {annotationsid (int): annotation details (dict)}\n all the annotations in the experiment\n\n Returns\n -------\n dict of {feature (str): annotation counts (list of (term(str), count(int)))}\n \"\"\"\n feature_terms = {}\n for cfeature in features:\n annotation_list = [annotations[x] for x in feature_annotations[\n cfeature]]\n feature_terms[cfeature] = get_annotation_term_counts(annotation_list)\n return feature_terms\n\n\ndef get_annotation_term_counts(annotations):\n \"\"\"Get the annotation type corrected count for all terms in annotations\n\n Parameters\n ----------\n annotations : list of dict\n list of annotations\n\n Returns\n -------\n list of tuples (term, count)\n \"\"\"\n term_count = defaultdict(int)\n for cannotation in annotations:\n if cannotation['annotationtype'] == 'common':\n for cdesc in cannotation['details']:\n term_count[cdesc[1]] += 1\n continue\n if cannotation['annotationtype'] == 'dominant':\n for cdesc in cannotation['details']:\n term_count[cdesc[1]] += 2\n continue\n if cannotation['annotationtype'] == 'other':\n for cdesc in cannotation['details']:\n term_count[cdesc[1]] += 0.5\n continue\n if cannotation['annotationtype'] == 'contamination':\n term_count['contamination'] += 1\n continue\n if cannotation['annotationtype'] in ['diffexp',\n 'positive correlation', 'negative correlation']:\n for cdesc in cannotation['details']:\n if cdesc[0] == 'all':\n term_count[cdesc[1]] += 1\n continue\n if cdesc[0] == 'high':\n term_count[cdesc[1]] += 2\n continue\n if cdesc[0] == 'low':\n term_count[cdesc[1]] -= 2\n continue\n debug(4, 'unknown detail type %s encountered' % cdesc[0])\n continue\n if cannotation['annotationtype'] == 'other':\n continue\n debug(4, 'unknown annotation type %s encountered' % cannotation[\n 'annotationtype'])\n res = []\n for k, v in term_count.items():\n if v < 0:\n k = '-' + k\n v = -v\n res.append((k, v))\n return res\n\n\ndef enrichment(seqs1, seqs2, term_type='term'):\n \"\"\"\n Do dbbact term and annotation enrichment analysis for 2 lists of sequences (comparing first to second list of sequences)\n\n Parameters\n ----------\n seqs1:list of str\n first set of sequences (ACGT)\n seqs1:list of str\n second set of sequences (ACGT)\n term_type : str (optional)\n type of the term to analyze for enrichment. can be:\n \"term\" : analyze the terms per annotation (not including parent terms)\n \"annotation\" : analyze the annotations associated with each sequence\n\n\n Returns\n -------\n err : str\n empty if ok, otherwise the error encountered\n term_list : list of str\n the terms which are enriched\n pvals : list of float\n the p-value for each term\n odif : list of float\n the effect size for each term\n \"\"\"\n np.random.seed(2018)\n all_seqs = set(seqs1).union(set(seqs2))\n seqs2 = list(all_seqs - set(seqs1))\n if len(seqs2) == 0:\n return (\n 'No sequences remaining in background fasta after removing the sequences of interest'\n , None, None, None)\n all_seqs = list(all_seqs)\n info = {}\n info['sequence_terms'], info['sequence_annotations'], info['annotations'\n ] = get_seq_annotations_fast(all_seqs)\n if term_type == 'term':\n debug(2, 'getting all_term counts')\n feature_terms = _get_all_term_counts(all_seqs, info[\n 'sequence_annotations'], info['annotations'])\n elif term_type == 'annotation':\n debug(2, 'getting all_annotation string counts')\n feature_terms = _get_all_annotation_string_counts(all_seqs, info[\n 'sequence_annotations'], info['annotations'])\n else:\n debug(8, 'strange term_type encountered: %s' % term_type)\n all_terms_set = set()\n for cterms in feature_terms.values():\n for cterm, ccount in cterms:\n all_terms_set.add(cterm)\n debug(2, 'found %d terms associated with all sequences (%d)' % (len(\n all_terms_set), len(all_seqs)))\n debug(2, 'getting seqs1 feature array')\n feature_array, term_list = _get_term_features(seqs1, feature_terms)\n debug(2, 'getting seqs2 feature array')\n bg_array, term_list = _get_term_features(seqs2, feature_terms)\n debug(2, 'bgarray: %s, feature_array: %s' % (bg_array.shape,\n feature_array.shape))\n all_feature_array = np.hstack([feature_array, bg_array])\n labels = np.zeros(all_feature_array.shape[1])\n labels[:feature_array.shape[1]] = 1\n debug(2, 'starting dsfdr for enrichment')\n keep, odif, pvals = dsfdr(all_feature_array, labels, method='meandiff',\n transform_type=None, alpha=0.1, numperm=1000, fdr_method='dsfdr')\n keep = np.where(keep)[0]\n if len(keep) == 0:\n debug(2, 'no enriched terms found')\n term_list = np.array(term_list)[keep]\n odif = odif[keep]\n pvals = pvals[keep]\n si = np.argsort(odif)\n odif = odif[si]\n pvals = pvals[si]\n term_list = term_list[si]\n return '', term_list, pvals, odif\n",
"<import token>\n\n\ndef calour_enrichment(seqs1, seqs2, term_type='term'):\n \"\"\"\n Do dbbact term and annotation enrichment analysis for 2 lists of sequences (comparing first to second list of sequences)\n\n Parameters\n ----------\n seqs1:list of str\n first set of sequences (ACGT)\n seqs1:list of str\n second set of sequences (ACGT)\n term_type : str (optional)\n type of the term to analyze for enrichment. can be:\n \"term\" : analyze the terms per annotation (not including parent terms)\n \"annotation\" : analyze the annotations associated with each sequence\n\n Returns\n -------\n err : str\n empty if ok, otherwise the error encountered\n term_list : list of str\n the terms which are enriched\n pvals : list of float\n the p-value for each term\n odif : list of float\n the effect size for each term\n \"\"\"\n import calour as ca\n db = ca.database._get_database_class('dbbact')\n np.random.seed(2018)\n all_seqs = set(seqs1).union(set(seqs2))\n seqs2 = list(all_seqs - set(seqs1))\n if len(seqs2) == 0:\n return (\n 'No sequences remaining in background fasta after removing the sequences of interest'\n , None, None, None)\n all_seqs = list(all_seqs)\n info = {}\n info['sequence_terms'], info['sequence_annotations'], info['annotations'\n ] = get_seq_annotations_fast(all_seqs)\n terms_df, resmat, features_df = db.db.term_enrichment(seqs1, seqs2,\n info['annotations'], info['sequence_annotations'], term_type=term_type)\n print(terms_df)\n return '', terms_df['feature'].values, terms_df['qval'], terms_df['odif']\n\n\ndef getannotationstrings2(cann):\n \"\"\"\n get a nice string summary of a curation\n\n input:\n cann : dict from /sequences/get_annotations (one from the list)\n output:\n cdesc : str\n a short summary of each annotation\n \"\"\"\n cdesc = ''\n if cann['description']:\n cdesc += cann['description'] + ' ('\n if cann['annotationtype'] == 'diffexp':\n chigh = []\n clow = []\n call = []\n for cdet in cann['details']:\n if cdet[0] == 'all':\n call.append(cdet[1])\n continue\n if cdet[0] == 'low':\n clow.append(cdet[1])\n continue\n if cdet[0] == 'high':\n chigh.append(cdet[1])\n continue\n cdesc += ' high in '\n for cval in chigh:\n cdesc += cval + ' '\n cdesc += ' compared to '\n for cval in clow:\n cdesc += cval + ' '\n cdesc += ' in '\n for cval in call:\n cdesc += cval + ' '\n elif cann['annotationtype'] == 'isa':\n cdesc += ' is a '\n for cdet in cann['details']:\n cdesc += 'cdet,'\n elif cann['annotationtype'] == 'contamination':\n cdesc += 'contamination'\n else:\n cdesc += cann['annotationtype'] + ' '\n for cdet in cann['details']:\n cdesc = cdesc + ' ' + cdet[1] + ','\n if len(cdesc) >= 1 and cdesc[-1] == ',':\n cdesc = cdesc[:-1]\n return cdesc\n\n\ndef get_seq_annotations_fast(sequences):\n debug(2, 'get_seq_annotations_fast for %d sequences' % len(sequences))\n rdata = {}\n rdata['sequences'] = sequences\n res = requests.get(get_dbbact_server_address() +\n '/sequences/get_fast_annotations', json=rdata)\n if res.status_code != 200:\n debug(5, 'error getting fast annotations for sequence list')\n return None, None, None\n res = res.json()\n debug(2, 'got %d total annotations' % len(res['annotations']))\n sequence_terms = {}\n sequence_annotations = {}\n for cseq in sequences:\n sequence_terms[cseq] = []\n sequence_annotations[cseq] = []\n for cseqannotation in res['seqannotations']:\n cpos = cseqannotation[0]\n cseq = sequences[cpos]\n sequence_annotations[cseq].extend(cseqannotation[1])\n for cannotation in cseqannotation[1]:\n for k, v in res['annotations'][str(cannotation)]['parents'].items(\n ):\n if k == 'high' or k == 'all':\n for cterm in v:\n sequence_terms[cseq].append(cterm)\n elif k == 'low':\n for cterm in v:\n sequence_terms[cseq].append('-' + cterm)\n annotations = res['annotations']\n keys = list(annotations.keys())\n for cid in keys:\n annotations[int(cid)] = annotations.pop(cid)\n total_annotations = 0\n for cseq_annotations in sequence_annotations.values():\n total_annotations += len(cseq_annotations)\n debug(2, 'Got %d associations' % total_annotations)\n return sequence_terms, sequence_annotations, annotations\n\n\ndef _get_term_features(features, feature_terms):\n \"\"\"Get numpy array of score of each term for each feature\n\n Parameters\n ----------\n features : list of str\n A list of DNA sequences\n feature_terms : dict of {feature: list of tuples of (term, amount)}\n The terms associated with each feature in exp\n feature (key) : str the feature (out of exp) to which the terms relate\n feature_terms (value) : list of tuples of (str or int the terms associated with this feature, count)\n\n Returns\n -------\n numpy array of T (terms) * F (features)\n total counts of each term (row) in each feature (column)\n list of str\n list of the terms corresponding to the numpy array rows\n \"\"\"\n terms = {}\n cpos = 0\n for cfeature, ctermlist in feature_terms.items():\n for cterm, ccount in ctermlist:\n if cterm not in terms:\n terms[cterm] = cpos\n cpos += 1\n tot_features_inflated = 0\n feature_pos = {}\n for cfeature in features:\n ctermlist = feature_terms[cfeature]\n feature_pos[cfeature] = tot_features_inflated\n tot_features_inflated += len(ctermlist)\n res = np.zeros([len(terms), len(features)])\n for idx, cfeature in enumerate(features):\n for cterm, ctermcount in feature_terms[cfeature]:\n res[terms[cterm], idx] += ctermcount\n term_list = sorted(terms, key=terms.get)\n debug(2, \n 'created terms X features matrix with %d terms (rows), %d features (columns)'\n % (res.shape[0], res.shape[1]))\n return res, term_list\n\n\ndef _get_term_features_inflated(features, feature_terms):\n \"\"\"Get numpy array of score of each term for each feature. This is the inflated version (used for card mean) to overcome the different number of annotations per feature. But slower and not memory efficient\n\n Parameters\n ----------\n features : list of str\n A list of DNA sequences\n feature_terms : dict of {feature: list of tuples of (term, amount)}\n The terms associated with each feature in exp\n feature (key) : str the feature (out of exp) to which the terms relate\n feature_terms (value) : list of tuples of (str or int the terms associated with this feature, count)\n\n Returns\n -------\n numpy array of T (terms) * F (inflated features)\n total counts of each term (row) in each feature (column)\n list of str\n list of the terms corresponding to the numpy array rows\n \"\"\"\n terms = {}\n cpos = 0\n for cfeature, ctermlist in feature_terms.items():\n for cterm, ccount in ctermlist:\n if cterm not in terms:\n terms[cterm] = cpos\n cpos += 1\n tot_features_inflated = 0\n feature_pos = {}\n for cfeature in features:\n ctermlist = feature_terms[cfeature]\n feature_pos[cfeature] = tot_features_inflated\n tot_features_inflated += len(ctermlist)\n res = np.zeros([len(terms), tot_features_inflated])\n for cfeature in features:\n for cterm, ctermcount in feature_terms[cfeature]:\n res[terms[cterm], feature_pos[cfeature]] += ctermcount\n term_list = sorted(terms, key=terms.get)\n debug(2, \n 'created terms X features matrix with %d terms (rows), %d features (columns)'\n % (res.shape[0], res.shape[1]))\n return res, term_list\n\n\ndef _get_all_annotation_string_counts(features, sequence_annotations,\n annotations):\n feature_annotations = {}\n for cseq, annotations_list in sequence_annotations.items():\n if cseq not in features:\n continue\n newdesc = []\n for cannotation in annotations_list:\n cdesc = getannotationstrings2(annotations[cannotation])\n newdesc.append((cdesc, 1))\n feature_annotations[cseq] = newdesc\n return feature_annotations\n\n\ndef _get_all_term_counts(features, feature_annotations, annotations):\n \"\"\"Get counts of all terms associated with each feature\n\n Parameters\n ----------\n features: list of str\n the sequences to get the terms for\n feature_annotations: dict of {feature (str): annotationIDs (list of int))\n the list of annotations each feature appears in\n annotations: dict of {annotationsid (int): annotation details (dict)}\n all the annotations in the experiment\n\n Returns\n -------\n dict of {feature (str): annotation counts (list of (term(str), count(int)))}\n \"\"\"\n feature_terms = {}\n for cfeature in features:\n annotation_list = [annotations[x] for x in feature_annotations[\n cfeature]]\n feature_terms[cfeature] = get_annotation_term_counts(annotation_list)\n return feature_terms\n\n\ndef get_annotation_term_counts(annotations):\n \"\"\"Get the annotation type corrected count for all terms in annotations\n\n Parameters\n ----------\n annotations : list of dict\n list of annotations\n\n Returns\n -------\n list of tuples (term, count)\n \"\"\"\n term_count = defaultdict(int)\n for cannotation in annotations:\n if cannotation['annotationtype'] == 'common':\n for cdesc in cannotation['details']:\n term_count[cdesc[1]] += 1\n continue\n if cannotation['annotationtype'] == 'dominant':\n for cdesc in cannotation['details']:\n term_count[cdesc[1]] += 2\n continue\n if cannotation['annotationtype'] == 'other':\n for cdesc in cannotation['details']:\n term_count[cdesc[1]] += 0.5\n continue\n if cannotation['annotationtype'] == 'contamination':\n term_count['contamination'] += 1\n continue\n if cannotation['annotationtype'] in ['diffexp',\n 'positive correlation', 'negative correlation']:\n for cdesc in cannotation['details']:\n if cdesc[0] == 'all':\n term_count[cdesc[1]] += 1\n continue\n if cdesc[0] == 'high':\n term_count[cdesc[1]] += 2\n continue\n if cdesc[0] == 'low':\n term_count[cdesc[1]] -= 2\n continue\n debug(4, 'unknown detail type %s encountered' % cdesc[0])\n continue\n if cannotation['annotationtype'] == 'other':\n continue\n debug(4, 'unknown annotation type %s encountered' % cannotation[\n 'annotationtype'])\n res = []\n for k, v in term_count.items():\n if v < 0:\n k = '-' + k\n v = -v\n res.append((k, v))\n return res\n\n\ndef enrichment(seqs1, seqs2, term_type='term'):\n \"\"\"\n Do dbbact term and annotation enrichment analysis for 2 lists of sequences (comparing first to second list of sequences)\n\n Parameters\n ----------\n seqs1:list of str\n first set of sequences (ACGT)\n seqs1:list of str\n second set of sequences (ACGT)\n term_type : str (optional)\n type of the term to analyze for enrichment. can be:\n \"term\" : analyze the terms per annotation (not including parent terms)\n \"annotation\" : analyze the annotations associated with each sequence\n\n\n Returns\n -------\n err : str\n empty if ok, otherwise the error encountered\n term_list : list of str\n the terms which are enriched\n pvals : list of float\n the p-value for each term\n odif : list of float\n the effect size for each term\n \"\"\"\n np.random.seed(2018)\n all_seqs = set(seqs1).union(set(seqs2))\n seqs2 = list(all_seqs - set(seqs1))\n if len(seqs2) == 0:\n return (\n 'No sequences remaining in background fasta after removing the sequences of interest'\n , None, None, None)\n all_seqs = list(all_seqs)\n info = {}\n info['sequence_terms'], info['sequence_annotations'], info['annotations'\n ] = get_seq_annotations_fast(all_seqs)\n if term_type == 'term':\n debug(2, 'getting all_term counts')\n feature_terms = _get_all_term_counts(all_seqs, info[\n 'sequence_annotations'], info['annotations'])\n elif term_type == 'annotation':\n debug(2, 'getting all_annotation string counts')\n feature_terms = _get_all_annotation_string_counts(all_seqs, info[\n 'sequence_annotations'], info['annotations'])\n else:\n debug(8, 'strange term_type encountered: %s' % term_type)\n all_terms_set = set()\n for cterms in feature_terms.values():\n for cterm, ccount in cterms:\n all_terms_set.add(cterm)\n debug(2, 'found %d terms associated with all sequences (%d)' % (len(\n all_terms_set), len(all_seqs)))\n debug(2, 'getting seqs1 feature array')\n feature_array, term_list = _get_term_features(seqs1, feature_terms)\n debug(2, 'getting seqs2 feature array')\n bg_array, term_list = _get_term_features(seqs2, feature_terms)\n debug(2, 'bgarray: %s, feature_array: %s' % (bg_array.shape,\n feature_array.shape))\n all_feature_array = np.hstack([feature_array, bg_array])\n labels = np.zeros(all_feature_array.shape[1])\n labels[:feature_array.shape[1]] = 1\n debug(2, 'starting dsfdr for enrichment')\n keep, odif, pvals = dsfdr(all_feature_array, labels, method='meandiff',\n transform_type=None, alpha=0.1, numperm=1000, fdr_method='dsfdr')\n keep = np.where(keep)[0]\n if len(keep) == 0:\n debug(2, 'no enriched terms found')\n term_list = np.array(term_list)[keep]\n odif = odif[keep]\n pvals = pvals[keep]\n si = np.argsort(odif)\n odif = odif[si]\n pvals = pvals[si]\n term_list = term_list[si]\n return '', term_list, pvals, odif\n",
"<import token>\n\n\ndef calour_enrichment(seqs1, seqs2, term_type='term'):\n \"\"\"\n Do dbbact term and annotation enrichment analysis for 2 lists of sequences (comparing first to second list of sequences)\n\n Parameters\n ----------\n seqs1:list of str\n first set of sequences (ACGT)\n seqs1:list of str\n second set of sequences (ACGT)\n term_type : str (optional)\n type of the term to analyze for enrichment. can be:\n \"term\" : analyze the terms per annotation (not including parent terms)\n \"annotation\" : analyze the annotations associated with each sequence\n\n Returns\n -------\n err : str\n empty if ok, otherwise the error encountered\n term_list : list of str\n the terms which are enriched\n pvals : list of float\n the p-value for each term\n odif : list of float\n the effect size for each term\n \"\"\"\n import calour as ca\n db = ca.database._get_database_class('dbbact')\n np.random.seed(2018)\n all_seqs = set(seqs1).union(set(seqs2))\n seqs2 = list(all_seqs - set(seqs1))\n if len(seqs2) == 0:\n return (\n 'No sequences remaining in background fasta after removing the sequences of interest'\n , None, None, None)\n all_seqs = list(all_seqs)\n info = {}\n info['sequence_terms'], info['sequence_annotations'], info['annotations'\n ] = get_seq_annotations_fast(all_seqs)\n terms_df, resmat, features_df = db.db.term_enrichment(seqs1, seqs2,\n info['annotations'], info['sequence_annotations'], term_type=term_type)\n print(terms_df)\n return '', terms_df['feature'].values, terms_df['qval'], terms_df['odif']\n\n\ndef getannotationstrings2(cann):\n \"\"\"\n get a nice string summary of a curation\n\n input:\n cann : dict from /sequences/get_annotations (one from the list)\n output:\n cdesc : str\n a short summary of each annotation\n \"\"\"\n cdesc = ''\n if cann['description']:\n cdesc += cann['description'] + ' ('\n if cann['annotationtype'] == 'diffexp':\n chigh = []\n clow = []\n call = []\n for cdet in cann['details']:\n if cdet[0] == 'all':\n call.append(cdet[1])\n continue\n if cdet[0] == 'low':\n clow.append(cdet[1])\n continue\n if cdet[0] == 'high':\n chigh.append(cdet[1])\n continue\n cdesc += ' high in '\n for cval in chigh:\n cdesc += cval + ' '\n cdesc += ' compared to '\n for cval in clow:\n cdesc += cval + ' '\n cdesc += ' in '\n for cval in call:\n cdesc += cval + ' '\n elif cann['annotationtype'] == 'isa':\n cdesc += ' is a '\n for cdet in cann['details']:\n cdesc += 'cdet,'\n elif cann['annotationtype'] == 'contamination':\n cdesc += 'contamination'\n else:\n cdesc += cann['annotationtype'] + ' '\n for cdet in cann['details']:\n cdesc = cdesc + ' ' + cdet[1] + ','\n if len(cdesc) >= 1 and cdesc[-1] == ',':\n cdesc = cdesc[:-1]\n return cdesc\n\n\ndef get_seq_annotations_fast(sequences):\n debug(2, 'get_seq_annotations_fast for %d sequences' % len(sequences))\n rdata = {}\n rdata['sequences'] = sequences\n res = requests.get(get_dbbact_server_address() +\n '/sequences/get_fast_annotations', json=rdata)\n if res.status_code != 200:\n debug(5, 'error getting fast annotations for sequence list')\n return None, None, None\n res = res.json()\n debug(2, 'got %d total annotations' % len(res['annotations']))\n sequence_terms = {}\n sequence_annotations = {}\n for cseq in sequences:\n sequence_terms[cseq] = []\n sequence_annotations[cseq] = []\n for cseqannotation in res['seqannotations']:\n cpos = cseqannotation[0]\n cseq = sequences[cpos]\n sequence_annotations[cseq].extend(cseqannotation[1])\n for cannotation in cseqannotation[1]:\n for k, v in res['annotations'][str(cannotation)]['parents'].items(\n ):\n if k == 'high' or k == 'all':\n for cterm in v:\n sequence_terms[cseq].append(cterm)\n elif k == 'low':\n for cterm in v:\n sequence_terms[cseq].append('-' + cterm)\n annotations = res['annotations']\n keys = list(annotations.keys())\n for cid in keys:\n annotations[int(cid)] = annotations.pop(cid)\n total_annotations = 0\n for cseq_annotations in sequence_annotations.values():\n total_annotations += len(cseq_annotations)\n debug(2, 'Got %d associations' % total_annotations)\n return sequence_terms, sequence_annotations, annotations\n\n\ndef _get_term_features(features, feature_terms):\n \"\"\"Get numpy array of score of each term for each feature\n\n Parameters\n ----------\n features : list of str\n A list of DNA sequences\n feature_terms : dict of {feature: list of tuples of (term, amount)}\n The terms associated with each feature in exp\n feature (key) : str the feature (out of exp) to which the terms relate\n feature_terms (value) : list of tuples of (str or int the terms associated with this feature, count)\n\n Returns\n -------\n numpy array of T (terms) * F (features)\n total counts of each term (row) in each feature (column)\n list of str\n list of the terms corresponding to the numpy array rows\n \"\"\"\n terms = {}\n cpos = 0\n for cfeature, ctermlist in feature_terms.items():\n for cterm, ccount in ctermlist:\n if cterm not in terms:\n terms[cterm] = cpos\n cpos += 1\n tot_features_inflated = 0\n feature_pos = {}\n for cfeature in features:\n ctermlist = feature_terms[cfeature]\n feature_pos[cfeature] = tot_features_inflated\n tot_features_inflated += len(ctermlist)\n res = np.zeros([len(terms), len(features)])\n for idx, cfeature in enumerate(features):\n for cterm, ctermcount in feature_terms[cfeature]:\n res[terms[cterm], idx] += ctermcount\n term_list = sorted(terms, key=terms.get)\n debug(2, \n 'created terms X features matrix with %d terms (rows), %d features (columns)'\n % (res.shape[0], res.shape[1]))\n return res, term_list\n\n\n<function token>\n\n\ndef _get_all_annotation_string_counts(features, sequence_annotations,\n annotations):\n feature_annotations = {}\n for cseq, annotations_list in sequence_annotations.items():\n if cseq not in features:\n continue\n newdesc = []\n for cannotation in annotations_list:\n cdesc = getannotationstrings2(annotations[cannotation])\n newdesc.append((cdesc, 1))\n feature_annotations[cseq] = newdesc\n return feature_annotations\n\n\ndef _get_all_term_counts(features, feature_annotations, annotations):\n \"\"\"Get counts of all terms associated with each feature\n\n Parameters\n ----------\n features: list of str\n the sequences to get the terms for\n feature_annotations: dict of {feature (str): annotationIDs (list of int))\n the list of annotations each feature appears in\n annotations: dict of {annotationsid (int): annotation details (dict)}\n all the annotations in the experiment\n\n Returns\n -------\n dict of {feature (str): annotation counts (list of (term(str), count(int)))}\n \"\"\"\n feature_terms = {}\n for cfeature in features:\n annotation_list = [annotations[x] for x in feature_annotations[\n cfeature]]\n feature_terms[cfeature] = get_annotation_term_counts(annotation_list)\n return feature_terms\n\n\ndef get_annotation_term_counts(annotations):\n \"\"\"Get the annotation type corrected count for all terms in annotations\n\n Parameters\n ----------\n annotations : list of dict\n list of annotations\n\n Returns\n -------\n list of tuples (term, count)\n \"\"\"\n term_count = defaultdict(int)\n for cannotation in annotations:\n if cannotation['annotationtype'] == 'common':\n for cdesc in cannotation['details']:\n term_count[cdesc[1]] += 1\n continue\n if cannotation['annotationtype'] == 'dominant':\n for cdesc in cannotation['details']:\n term_count[cdesc[1]] += 2\n continue\n if cannotation['annotationtype'] == 'other':\n for cdesc in cannotation['details']:\n term_count[cdesc[1]] += 0.5\n continue\n if cannotation['annotationtype'] == 'contamination':\n term_count['contamination'] += 1\n continue\n if cannotation['annotationtype'] in ['diffexp',\n 'positive correlation', 'negative correlation']:\n for cdesc in cannotation['details']:\n if cdesc[0] == 'all':\n term_count[cdesc[1]] += 1\n continue\n if cdesc[0] == 'high':\n term_count[cdesc[1]] += 2\n continue\n if cdesc[0] == 'low':\n term_count[cdesc[1]] -= 2\n continue\n debug(4, 'unknown detail type %s encountered' % cdesc[0])\n continue\n if cannotation['annotationtype'] == 'other':\n continue\n debug(4, 'unknown annotation type %s encountered' % cannotation[\n 'annotationtype'])\n res = []\n for k, v in term_count.items():\n if v < 0:\n k = '-' + k\n v = -v\n res.append((k, v))\n return res\n\n\ndef enrichment(seqs1, seqs2, term_type='term'):\n \"\"\"\n Do dbbact term and annotation enrichment analysis for 2 lists of sequences (comparing first to second list of sequences)\n\n Parameters\n ----------\n seqs1:list of str\n first set of sequences (ACGT)\n seqs1:list of str\n second set of sequences (ACGT)\n term_type : str (optional)\n type of the term to analyze for enrichment. can be:\n \"term\" : analyze the terms per annotation (not including parent terms)\n \"annotation\" : analyze the annotations associated with each sequence\n\n\n Returns\n -------\n err : str\n empty if ok, otherwise the error encountered\n term_list : list of str\n the terms which are enriched\n pvals : list of float\n the p-value for each term\n odif : list of float\n the effect size for each term\n \"\"\"\n np.random.seed(2018)\n all_seqs = set(seqs1).union(set(seqs2))\n seqs2 = list(all_seqs - set(seqs1))\n if len(seqs2) == 0:\n return (\n 'No sequences remaining in background fasta after removing the sequences of interest'\n , None, None, None)\n all_seqs = list(all_seqs)\n info = {}\n info['sequence_terms'], info['sequence_annotations'], info['annotations'\n ] = get_seq_annotations_fast(all_seqs)\n if term_type == 'term':\n debug(2, 'getting all_term counts')\n feature_terms = _get_all_term_counts(all_seqs, info[\n 'sequence_annotations'], info['annotations'])\n elif term_type == 'annotation':\n debug(2, 'getting all_annotation string counts')\n feature_terms = _get_all_annotation_string_counts(all_seqs, info[\n 'sequence_annotations'], info['annotations'])\n else:\n debug(8, 'strange term_type encountered: %s' % term_type)\n all_terms_set = set()\n for cterms in feature_terms.values():\n for cterm, ccount in cterms:\n all_terms_set.add(cterm)\n debug(2, 'found %d terms associated with all sequences (%d)' % (len(\n all_terms_set), len(all_seqs)))\n debug(2, 'getting seqs1 feature array')\n feature_array, term_list = _get_term_features(seqs1, feature_terms)\n debug(2, 'getting seqs2 feature array')\n bg_array, term_list = _get_term_features(seqs2, feature_terms)\n debug(2, 'bgarray: %s, feature_array: %s' % (bg_array.shape,\n feature_array.shape))\n all_feature_array = np.hstack([feature_array, bg_array])\n labels = np.zeros(all_feature_array.shape[1])\n labels[:feature_array.shape[1]] = 1\n debug(2, 'starting dsfdr for enrichment')\n keep, odif, pvals = dsfdr(all_feature_array, labels, method='meandiff',\n transform_type=None, alpha=0.1, numperm=1000, fdr_method='dsfdr')\n keep = np.where(keep)[0]\n if len(keep) == 0:\n debug(2, 'no enriched terms found')\n term_list = np.array(term_list)[keep]\n odif = odif[keep]\n pvals = pvals[keep]\n si = np.argsort(odif)\n odif = odif[si]\n pvals = pvals[si]\n term_list = term_list[si]\n return '', term_list, pvals, odif\n",
"<import token>\n\n\ndef calour_enrichment(seqs1, seqs2, term_type='term'):\n \"\"\"\n Do dbbact term and annotation enrichment analysis for 2 lists of sequences (comparing first to second list of sequences)\n\n Parameters\n ----------\n seqs1:list of str\n first set of sequences (ACGT)\n seqs1:list of str\n second set of sequences (ACGT)\n term_type : str (optional)\n type of the term to analyze for enrichment. can be:\n \"term\" : analyze the terms per annotation (not including parent terms)\n \"annotation\" : analyze the annotations associated with each sequence\n\n Returns\n -------\n err : str\n empty if ok, otherwise the error encountered\n term_list : list of str\n the terms which are enriched\n pvals : list of float\n the p-value for each term\n odif : list of float\n the effect size for each term\n \"\"\"\n import calour as ca\n db = ca.database._get_database_class('dbbact')\n np.random.seed(2018)\n all_seqs = set(seqs1).union(set(seqs2))\n seqs2 = list(all_seqs - set(seqs1))\n if len(seqs2) == 0:\n return (\n 'No sequences remaining in background fasta after removing the sequences of interest'\n , None, None, None)\n all_seqs = list(all_seqs)\n info = {}\n info['sequence_terms'], info['sequence_annotations'], info['annotations'\n ] = get_seq_annotations_fast(all_seqs)\n terms_df, resmat, features_df = db.db.term_enrichment(seqs1, seqs2,\n info['annotations'], info['sequence_annotations'], term_type=term_type)\n print(terms_df)\n return '', terms_df['feature'].values, terms_df['qval'], terms_df['odif']\n\n\ndef getannotationstrings2(cann):\n \"\"\"\n get a nice string summary of a curation\n\n input:\n cann : dict from /sequences/get_annotations (one from the list)\n output:\n cdesc : str\n a short summary of each annotation\n \"\"\"\n cdesc = ''\n if cann['description']:\n cdesc += cann['description'] + ' ('\n if cann['annotationtype'] == 'diffexp':\n chigh = []\n clow = []\n call = []\n for cdet in cann['details']:\n if cdet[0] == 'all':\n call.append(cdet[1])\n continue\n if cdet[0] == 'low':\n clow.append(cdet[1])\n continue\n if cdet[0] == 'high':\n chigh.append(cdet[1])\n continue\n cdesc += ' high in '\n for cval in chigh:\n cdesc += cval + ' '\n cdesc += ' compared to '\n for cval in clow:\n cdesc += cval + ' '\n cdesc += ' in '\n for cval in call:\n cdesc += cval + ' '\n elif cann['annotationtype'] == 'isa':\n cdesc += ' is a '\n for cdet in cann['details']:\n cdesc += 'cdet,'\n elif cann['annotationtype'] == 'contamination':\n cdesc += 'contamination'\n else:\n cdesc += cann['annotationtype'] + ' '\n for cdet in cann['details']:\n cdesc = cdesc + ' ' + cdet[1] + ','\n if len(cdesc) >= 1 and cdesc[-1] == ',':\n cdesc = cdesc[:-1]\n return cdesc\n\n\ndef get_seq_annotations_fast(sequences):\n debug(2, 'get_seq_annotations_fast for %d sequences' % len(sequences))\n rdata = {}\n rdata['sequences'] = sequences\n res = requests.get(get_dbbact_server_address() +\n '/sequences/get_fast_annotations', json=rdata)\n if res.status_code != 200:\n debug(5, 'error getting fast annotations for sequence list')\n return None, None, None\n res = res.json()\n debug(2, 'got %d total annotations' % len(res['annotations']))\n sequence_terms = {}\n sequence_annotations = {}\n for cseq in sequences:\n sequence_terms[cseq] = []\n sequence_annotations[cseq] = []\n for cseqannotation in res['seqannotations']:\n cpos = cseqannotation[0]\n cseq = sequences[cpos]\n sequence_annotations[cseq].extend(cseqannotation[1])\n for cannotation in cseqannotation[1]:\n for k, v in res['annotations'][str(cannotation)]['parents'].items(\n ):\n if k == 'high' or k == 'all':\n for cterm in v:\n sequence_terms[cseq].append(cterm)\n elif k == 'low':\n for cterm in v:\n sequence_terms[cseq].append('-' + cterm)\n annotations = res['annotations']\n keys = list(annotations.keys())\n for cid in keys:\n annotations[int(cid)] = annotations.pop(cid)\n total_annotations = 0\n for cseq_annotations in sequence_annotations.values():\n total_annotations += len(cseq_annotations)\n debug(2, 'Got %d associations' % total_annotations)\n return sequence_terms, sequence_annotations, annotations\n\n\ndef _get_term_features(features, feature_terms):\n \"\"\"Get numpy array of score of each term for each feature\n\n Parameters\n ----------\n features : list of str\n A list of DNA sequences\n feature_terms : dict of {feature: list of tuples of (term, amount)}\n The terms associated with each feature in exp\n feature (key) : str the feature (out of exp) to which the terms relate\n feature_terms (value) : list of tuples of (str or int the terms associated with this feature, count)\n\n Returns\n -------\n numpy array of T (terms) * F (features)\n total counts of each term (row) in each feature (column)\n list of str\n list of the terms corresponding to the numpy array rows\n \"\"\"\n terms = {}\n cpos = 0\n for cfeature, ctermlist in feature_terms.items():\n for cterm, ccount in ctermlist:\n if cterm not in terms:\n terms[cterm] = cpos\n cpos += 1\n tot_features_inflated = 0\n feature_pos = {}\n for cfeature in features:\n ctermlist = feature_terms[cfeature]\n feature_pos[cfeature] = tot_features_inflated\n tot_features_inflated += len(ctermlist)\n res = np.zeros([len(terms), len(features)])\n for idx, cfeature in enumerate(features):\n for cterm, ctermcount in feature_terms[cfeature]:\n res[terms[cterm], idx] += ctermcount\n term_list = sorted(terms, key=terms.get)\n debug(2, \n 'created terms X features matrix with %d terms (rows), %d features (columns)'\n % (res.shape[0], res.shape[1]))\n return res, term_list\n\n\n<function token>\n\n\ndef _get_all_annotation_string_counts(features, sequence_annotations,\n annotations):\n feature_annotations = {}\n for cseq, annotations_list in sequence_annotations.items():\n if cseq not in features:\n continue\n newdesc = []\n for cannotation in annotations_list:\n cdesc = getannotationstrings2(annotations[cannotation])\n newdesc.append((cdesc, 1))\n feature_annotations[cseq] = newdesc\n return feature_annotations\n\n\ndef _get_all_term_counts(features, feature_annotations, annotations):\n \"\"\"Get counts of all terms associated with each feature\n\n Parameters\n ----------\n features: list of str\n the sequences to get the terms for\n feature_annotations: dict of {feature (str): annotationIDs (list of int))\n the list of annotations each feature appears in\n annotations: dict of {annotationsid (int): annotation details (dict)}\n all the annotations in the experiment\n\n Returns\n -------\n dict of {feature (str): annotation counts (list of (term(str), count(int)))}\n \"\"\"\n feature_terms = {}\n for cfeature in features:\n annotation_list = [annotations[x] for x in feature_annotations[\n cfeature]]\n feature_terms[cfeature] = get_annotation_term_counts(annotation_list)\n return feature_terms\n\n\n<function token>\n\n\ndef enrichment(seqs1, seqs2, term_type='term'):\n \"\"\"\n Do dbbact term and annotation enrichment analysis for 2 lists of sequences (comparing first to second list of sequences)\n\n Parameters\n ----------\n seqs1:list of str\n first set of sequences (ACGT)\n seqs1:list of str\n second set of sequences (ACGT)\n term_type : str (optional)\n type of the term to analyze for enrichment. can be:\n \"term\" : analyze the terms per annotation (not including parent terms)\n \"annotation\" : analyze the annotations associated with each sequence\n\n\n Returns\n -------\n err : str\n empty if ok, otherwise the error encountered\n term_list : list of str\n the terms which are enriched\n pvals : list of float\n the p-value for each term\n odif : list of float\n the effect size for each term\n \"\"\"\n np.random.seed(2018)\n all_seqs = set(seqs1).union(set(seqs2))\n seqs2 = list(all_seqs - set(seqs1))\n if len(seqs2) == 0:\n return (\n 'No sequences remaining in background fasta after removing the sequences of interest'\n , None, None, None)\n all_seqs = list(all_seqs)\n info = {}\n info['sequence_terms'], info['sequence_annotations'], info['annotations'\n ] = get_seq_annotations_fast(all_seqs)\n if term_type == 'term':\n debug(2, 'getting all_term counts')\n feature_terms = _get_all_term_counts(all_seqs, info[\n 'sequence_annotations'], info['annotations'])\n elif term_type == 'annotation':\n debug(2, 'getting all_annotation string counts')\n feature_terms = _get_all_annotation_string_counts(all_seqs, info[\n 'sequence_annotations'], info['annotations'])\n else:\n debug(8, 'strange term_type encountered: %s' % term_type)\n all_terms_set = set()\n for cterms in feature_terms.values():\n for cterm, ccount in cterms:\n all_terms_set.add(cterm)\n debug(2, 'found %d terms associated with all sequences (%d)' % (len(\n all_terms_set), len(all_seqs)))\n debug(2, 'getting seqs1 feature array')\n feature_array, term_list = _get_term_features(seqs1, feature_terms)\n debug(2, 'getting seqs2 feature array')\n bg_array, term_list = _get_term_features(seqs2, feature_terms)\n debug(2, 'bgarray: %s, feature_array: %s' % (bg_array.shape,\n feature_array.shape))\n all_feature_array = np.hstack([feature_array, bg_array])\n labels = np.zeros(all_feature_array.shape[1])\n labels[:feature_array.shape[1]] = 1\n debug(2, 'starting dsfdr for enrichment')\n keep, odif, pvals = dsfdr(all_feature_array, labels, method='meandiff',\n transform_type=None, alpha=0.1, numperm=1000, fdr_method='dsfdr')\n keep = np.where(keep)[0]\n if len(keep) == 0:\n debug(2, 'no enriched terms found')\n term_list = np.array(term_list)[keep]\n odif = odif[keep]\n pvals = pvals[keep]\n si = np.argsort(odif)\n odif = odif[si]\n pvals = pvals[si]\n term_list = term_list[si]\n return '', term_list, pvals, odif\n",
"<import token>\n\n\ndef calour_enrichment(seqs1, seqs2, term_type='term'):\n \"\"\"\n Do dbbact term and annotation enrichment analysis for 2 lists of sequences (comparing first to second list of sequences)\n\n Parameters\n ----------\n seqs1:list of str\n first set of sequences (ACGT)\n seqs1:list of str\n second set of sequences (ACGT)\n term_type : str (optional)\n type of the term to analyze for enrichment. can be:\n \"term\" : analyze the terms per annotation (not including parent terms)\n \"annotation\" : analyze the annotations associated with each sequence\n\n Returns\n -------\n err : str\n empty if ok, otherwise the error encountered\n term_list : list of str\n the terms which are enriched\n pvals : list of float\n the p-value for each term\n odif : list of float\n the effect size for each term\n \"\"\"\n import calour as ca\n db = ca.database._get_database_class('dbbact')\n np.random.seed(2018)\n all_seqs = set(seqs1).union(set(seqs2))\n seqs2 = list(all_seqs - set(seqs1))\n if len(seqs2) == 0:\n return (\n 'No sequences remaining in background fasta after removing the sequences of interest'\n , None, None, None)\n all_seqs = list(all_seqs)\n info = {}\n info['sequence_terms'], info['sequence_annotations'], info['annotations'\n ] = get_seq_annotations_fast(all_seqs)\n terms_df, resmat, features_df = db.db.term_enrichment(seqs1, seqs2,\n info['annotations'], info['sequence_annotations'], term_type=term_type)\n print(terms_df)\n return '', terms_df['feature'].values, terms_df['qval'], terms_df['odif']\n\n\ndef getannotationstrings2(cann):\n \"\"\"\n get a nice string summary of a curation\n\n input:\n cann : dict from /sequences/get_annotations (one from the list)\n output:\n cdesc : str\n a short summary of each annotation\n \"\"\"\n cdesc = ''\n if cann['description']:\n cdesc += cann['description'] + ' ('\n if cann['annotationtype'] == 'diffexp':\n chigh = []\n clow = []\n call = []\n for cdet in cann['details']:\n if cdet[0] == 'all':\n call.append(cdet[1])\n continue\n if cdet[0] == 'low':\n clow.append(cdet[1])\n continue\n if cdet[0] == 'high':\n chigh.append(cdet[1])\n continue\n cdesc += ' high in '\n for cval in chigh:\n cdesc += cval + ' '\n cdesc += ' compared to '\n for cval in clow:\n cdesc += cval + ' '\n cdesc += ' in '\n for cval in call:\n cdesc += cval + ' '\n elif cann['annotationtype'] == 'isa':\n cdesc += ' is a '\n for cdet in cann['details']:\n cdesc += 'cdet,'\n elif cann['annotationtype'] == 'contamination':\n cdesc += 'contamination'\n else:\n cdesc += cann['annotationtype'] + ' '\n for cdet in cann['details']:\n cdesc = cdesc + ' ' + cdet[1] + ','\n if len(cdesc) >= 1 and cdesc[-1] == ',':\n cdesc = cdesc[:-1]\n return cdesc\n\n\ndef get_seq_annotations_fast(sequences):\n debug(2, 'get_seq_annotations_fast for %d sequences' % len(sequences))\n rdata = {}\n rdata['sequences'] = sequences\n res = requests.get(get_dbbact_server_address() +\n '/sequences/get_fast_annotations', json=rdata)\n if res.status_code != 200:\n debug(5, 'error getting fast annotations for sequence list')\n return None, None, None\n res = res.json()\n debug(2, 'got %d total annotations' % len(res['annotations']))\n sequence_terms = {}\n sequence_annotations = {}\n for cseq in sequences:\n sequence_terms[cseq] = []\n sequence_annotations[cseq] = []\n for cseqannotation in res['seqannotations']:\n cpos = cseqannotation[0]\n cseq = sequences[cpos]\n sequence_annotations[cseq].extend(cseqannotation[1])\n for cannotation in cseqannotation[1]:\n for k, v in res['annotations'][str(cannotation)]['parents'].items(\n ):\n if k == 'high' or k == 'all':\n for cterm in v:\n sequence_terms[cseq].append(cterm)\n elif k == 'low':\n for cterm in v:\n sequence_terms[cseq].append('-' + cterm)\n annotations = res['annotations']\n keys = list(annotations.keys())\n for cid in keys:\n annotations[int(cid)] = annotations.pop(cid)\n total_annotations = 0\n for cseq_annotations in sequence_annotations.values():\n total_annotations += len(cseq_annotations)\n debug(2, 'Got %d associations' % total_annotations)\n return sequence_terms, sequence_annotations, annotations\n\n\n<function token>\n<function token>\n\n\ndef _get_all_annotation_string_counts(features, sequence_annotations,\n annotations):\n feature_annotations = {}\n for cseq, annotations_list in sequence_annotations.items():\n if cseq not in features:\n continue\n newdesc = []\n for cannotation in annotations_list:\n cdesc = getannotationstrings2(annotations[cannotation])\n newdesc.append((cdesc, 1))\n feature_annotations[cseq] = newdesc\n return feature_annotations\n\n\ndef _get_all_term_counts(features, feature_annotations, annotations):\n \"\"\"Get counts of all terms associated with each feature\n\n Parameters\n ----------\n features: list of str\n the sequences to get the terms for\n feature_annotations: dict of {feature (str): annotationIDs (list of int))\n the list of annotations each feature appears in\n annotations: dict of {annotationsid (int): annotation details (dict)}\n all the annotations in the experiment\n\n Returns\n -------\n dict of {feature (str): annotation counts (list of (term(str), count(int)))}\n \"\"\"\n feature_terms = {}\n for cfeature in features:\n annotation_list = [annotations[x] for x in feature_annotations[\n cfeature]]\n feature_terms[cfeature] = get_annotation_term_counts(annotation_list)\n return feature_terms\n\n\n<function token>\n\n\ndef enrichment(seqs1, seqs2, term_type='term'):\n \"\"\"\n Do dbbact term and annotation enrichment analysis for 2 lists of sequences (comparing first to second list of sequences)\n\n Parameters\n ----------\n seqs1:list of str\n first set of sequences (ACGT)\n seqs1:list of str\n second set of sequences (ACGT)\n term_type : str (optional)\n type of the term to analyze for enrichment. can be:\n \"term\" : analyze the terms per annotation (not including parent terms)\n \"annotation\" : analyze the annotations associated with each sequence\n\n\n Returns\n -------\n err : str\n empty if ok, otherwise the error encountered\n term_list : list of str\n the terms which are enriched\n pvals : list of float\n the p-value for each term\n odif : list of float\n the effect size for each term\n \"\"\"\n np.random.seed(2018)\n all_seqs = set(seqs1).union(set(seqs2))\n seqs2 = list(all_seqs - set(seqs1))\n if len(seqs2) == 0:\n return (\n 'No sequences remaining in background fasta after removing the sequences of interest'\n , None, None, None)\n all_seqs = list(all_seqs)\n info = {}\n info['sequence_terms'], info['sequence_annotations'], info['annotations'\n ] = get_seq_annotations_fast(all_seqs)\n if term_type == 'term':\n debug(2, 'getting all_term counts')\n feature_terms = _get_all_term_counts(all_seqs, info[\n 'sequence_annotations'], info['annotations'])\n elif term_type == 'annotation':\n debug(2, 'getting all_annotation string counts')\n feature_terms = _get_all_annotation_string_counts(all_seqs, info[\n 'sequence_annotations'], info['annotations'])\n else:\n debug(8, 'strange term_type encountered: %s' % term_type)\n all_terms_set = set()\n for cterms in feature_terms.values():\n for cterm, ccount in cterms:\n all_terms_set.add(cterm)\n debug(2, 'found %d terms associated with all sequences (%d)' % (len(\n all_terms_set), len(all_seqs)))\n debug(2, 'getting seqs1 feature array')\n feature_array, term_list = _get_term_features(seqs1, feature_terms)\n debug(2, 'getting seqs2 feature array')\n bg_array, term_list = _get_term_features(seqs2, feature_terms)\n debug(2, 'bgarray: %s, feature_array: %s' % (bg_array.shape,\n feature_array.shape))\n all_feature_array = np.hstack([feature_array, bg_array])\n labels = np.zeros(all_feature_array.shape[1])\n labels[:feature_array.shape[1]] = 1\n debug(2, 'starting dsfdr for enrichment')\n keep, odif, pvals = dsfdr(all_feature_array, labels, method='meandiff',\n transform_type=None, alpha=0.1, numperm=1000, fdr_method='dsfdr')\n keep = np.where(keep)[0]\n if len(keep) == 0:\n debug(2, 'no enriched terms found')\n term_list = np.array(term_list)[keep]\n odif = odif[keep]\n pvals = pvals[keep]\n si = np.argsort(odif)\n odif = odif[si]\n pvals = pvals[si]\n term_list = term_list[si]\n return '', term_list, pvals, odif\n",
"<import token>\n\n\ndef calour_enrichment(seqs1, seqs2, term_type='term'):\n \"\"\"\n Do dbbact term and annotation enrichment analysis for 2 lists of sequences (comparing first to second list of sequences)\n\n Parameters\n ----------\n seqs1:list of str\n first set of sequences (ACGT)\n seqs1:list of str\n second set of sequences (ACGT)\n term_type : str (optional)\n type of the term to analyze for enrichment. can be:\n \"term\" : analyze the terms per annotation (not including parent terms)\n \"annotation\" : analyze the annotations associated with each sequence\n\n Returns\n -------\n err : str\n empty if ok, otherwise the error encountered\n term_list : list of str\n the terms which are enriched\n pvals : list of float\n the p-value for each term\n odif : list of float\n the effect size for each term\n \"\"\"\n import calour as ca\n db = ca.database._get_database_class('dbbact')\n np.random.seed(2018)\n all_seqs = set(seqs1).union(set(seqs2))\n seqs2 = list(all_seqs - set(seqs1))\n if len(seqs2) == 0:\n return (\n 'No sequences remaining in background fasta after removing the sequences of interest'\n , None, None, None)\n all_seqs = list(all_seqs)\n info = {}\n info['sequence_terms'], info['sequence_annotations'], info['annotations'\n ] = get_seq_annotations_fast(all_seqs)\n terms_df, resmat, features_df = db.db.term_enrichment(seqs1, seqs2,\n info['annotations'], info['sequence_annotations'], term_type=term_type)\n print(terms_df)\n return '', terms_df['feature'].values, terms_df['qval'], terms_df['odif']\n\n\n<function token>\n\n\ndef get_seq_annotations_fast(sequences):\n debug(2, 'get_seq_annotations_fast for %d sequences' % len(sequences))\n rdata = {}\n rdata['sequences'] = sequences\n res = requests.get(get_dbbact_server_address() +\n '/sequences/get_fast_annotations', json=rdata)\n if res.status_code != 200:\n debug(5, 'error getting fast annotations for sequence list')\n return None, None, None\n res = res.json()\n debug(2, 'got %d total annotations' % len(res['annotations']))\n sequence_terms = {}\n sequence_annotations = {}\n for cseq in sequences:\n sequence_terms[cseq] = []\n sequence_annotations[cseq] = []\n for cseqannotation in res['seqannotations']:\n cpos = cseqannotation[0]\n cseq = sequences[cpos]\n sequence_annotations[cseq].extend(cseqannotation[1])\n for cannotation in cseqannotation[1]:\n for k, v in res['annotations'][str(cannotation)]['parents'].items(\n ):\n if k == 'high' or k == 'all':\n for cterm in v:\n sequence_terms[cseq].append(cterm)\n elif k == 'low':\n for cterm in v:\n sequence_terms[cseq].append('-' + cterm)\n annotations = res['annotations']\n keys = list(annotations.keys())\n for cid in keys:\n annotations[int(cid)] = annotations.pop(cid)\n total_annotations = 0\n for cseq_annotations in sequence_annotations.values():\n total_annotations += len(cseq_annotations)\n debug(2, 'Got %d associations' % total_annotations)\n return sequence_terms, sequence_annotations, annotations\n\n\n<function token>\n<function token>\n\n\ndef _get_all_annotation_string_counts(features, sequence_annotations,\n annotations):\n feature_annotations = {}\n for cseq, annotations_list in sequence_annotations.items():\n if cseq not in features:\n continue\n newdesc = []\n for cannotation in annotations_list:\n cdesc = getannotationstrings2(annotations[cannotation])\n newdesc.append((cdesc, 1))\n feature_annotations[cseq] = newdesc\n return feature_annotations\n\n\ndef _get_all_term_counts(features, feature_annotations, annotations):\n \"\"\"Get counts of all terms associated with each feature\n\n Parameters\n ----------\n features: list of str\n the sequences to get the terms for\n feature_annotations: dict of {feature (str): annotationIDs (list of int))\n the list of annotations each feature appears in\n annotations: dict of {annotationsid (int): annotation details (dict)}\n all the annotations in the experiment\n\n Returns\n -------\n dict of {feature (str): annotation counts (list of (term(str), count(int)))}\n \"\"\"\n feature_terms = {}\n for cfeature in features:\n annotation_list = [annotations[x] for x in feature_annotations[\n cfeature]]\n feature_terms[cfeature] = get_annotation_term_counts(annotation_list)\n return feature_terms\n\n\n<function token>\n\n\ndef enrichment(seqs1, seqs2, term_type='term'):\n \"\"\"\n Do dbbact term and annotation enrichment analysis for 2 lists of sequences (comparing first to second list of sequences)\n\n Parameters\n ----------\n seqs1:list of str\n first set of sequences (ACGT)\n seqs1:list of str\n second set of sequences (ACGT)\n term_type : str (optional)\n type of the term to analyze for enrichment. can be:\n \"term\" : analyze the terms per annotation (not including parent terms)\n \"annotation\" : analyze the annotations associated with each sequence\n\n\n Returns\n -------\n err : str\n empty if ok, otherwise the error encountered\n term_list : list of str\n the terms which are enriched\n pvals : list of float\n the p-value for each term\n odif : list of float\n the effect size for each term\n \"\"\"\n np.random.seed(2018)\n all_seqs = set(seqs1).union(set(seqs2))\n seqs2 = list(all_seqs - set(seqs1))\n if len(seqs2) == 0:\n return (\n 'No sequences remaining in background fasta after removing the sequences of interest'\n , None, None, None)\n all_seqs = list(all_seqs)\n info = {}\n info['sequence_terms'], info['sequence_annotations'], info['annotations'\n ] = get_seq_annotations_fast(all_seqs)\n if term_type == 'term':\n debug(2, 'getting all_term counts')\n feature_terms = _get_all_term_counts(all_seqs, info[\n 'sequence_annotations'], info['annotations'])\n elif term_type == 'annotation':\n debug(2, 'getting all_annotation string counts')\n feature_terms = _get_all_annotation_string_counts(all_seqs, info[\n 'sequence_annotations'], info['annotations'])\n else:\n debug(8, 'strange term_type encountered: %s' % term_type)\n all_terms_set = set()\n for cterms in feature_terms.values():\n for cterm, ccount in cterms:\n all_terms_set.add(cterm)\n debug(2, 'found %d terms associated with all sequences (%d)' % (len(\n all_terms_set), len(all_seqs)))\n debug(2, 'getting seqs1 feature array')\n feature_array, term_list = _get_term_features(seqs1, feature_terms)\n debug(2, 'getting seqs2 feature array')\n bg_array, term_list = _get_term_features(seqs2, feature_terms)\n debug(2, 'bgarray: %s, feature_array: %s' % (bg_array.shape,\n feature_array.shape))\n all_feature_array = np.hstack([feature_array, bg_array])\n labels = np.zeros(all_feature_array.shape[1])\n labels[:feature_array.shape[1]] = 1\n debug(2, 'starting dsfdr for enrichment')\n keep, odif, pvals = dsfdr(all_feature_array, labels, method='meandiff',\n transform_type=None, alpha=0.1, numperm=1000, fdr_method='dsfdr')\n keep = np.where(keep)[0]\n if len(keep) == 0:\n debug(2, 'no enriched terms found')\n term_list = np.array(term_list)[keep]\n odif = odif[keep]\n pvals = pvals[keep]\n si = np.argsort(odif)\n odif = odif[si]\n pvals = pvals[si]\n term_list = term_list[si]\n return '', term_list, pvals, odif\n",
"<import token>\n\n\ndef calour_enrichment(seqs1, seqs2, term_type='term'):\n \"\"\"\n Do dbbact term and annotation enrichment analysis for 2 lists of sequences (comparing first to second list of sequences)\n\n Parameters\n ----------\n seqs1:list of str\n first set of sequences (ACGT)\n seqs1:list of str\n second set of sequences (ACGT)\n term_type : str (optional)\n type of the term to analyze for enrichment. can be:\n \"term\" : analyze the terms per annotation (not including parent terms)\n \"annotation\" : analyze the annotations associated with each sequence\n\n Returns\n -------\n err : str\n empty if ok, otherwise the error encountered\n term_list : list of str\n the terms which are enriched\n pvals : list of float\n the p-value for each term\n odif : list of float\n the effect size for each term\n \"\"\"\n import calour as ca\n db = ca.database._get_database_class('dbbact')\n np.random.seed(2018)\n all_seqs = set(seqs1).union(set(seqs2))\n seqs2 = list(all_seqs - set(seqs1))\n if len(seqs2) == 0:\n return (\n 'No sequences remaining in background fasta after removing the sequences of interest'\n , None, None, None)\n all_seqs = list(all_seqs)\n info = {}\n info['sequence_terms'], info['sequence_annotations'], info['annotations'\n ] = get_seq_annotations_fast(all_seqs)\n terms_df, resmat, features_df = db.db.term_enrichment(seqs1, seqs2,\n info['annotations'], info['sequence_annotations'], term_type=term_type)\n print(terms_df)\n return '', terms_df['feature'].values, terms_df['qval'], terms_df['odif']\n\n\n<function token>\n\n\ndef get_seq_annotations_fast(sequences):\n debug(2, 'get_seq_annotations_fast for %d sequences' % len(sequences))\n rdata = {}\n rdata['sequences'] = sequences\n res = requests.get(get_dbbact_server_address() +\n '/sequences/get_fast_annotations', json=rdata)\n if res.status_code != 200:\n debug(5, 'error getting fast annotations for sequence list')\n return None, None, None\n res = res.json()\n debug(2, 'got %d total annotations' % len(res['annotations']))\n sequence_terms = {}\n sequence_annotations = {}\n for cseq in sequences:\n sequence_terms[cseq] = []\n sequence_annotations[cseq] = []\n for cseqannotation in res['seqannotations']:\n cpos = cseqannotation[0]\n cseq = sequences[cpos]\n sequence_annotations[cseq].extend(cseqannotation[1])\n for cannotation in cseqannotation[1]:\n for k, v in res['annotations'][str(cannotation)]['parents'].items(\n ):\n if k == 'high' or k == 'all':\n for cterm in v:\n sequence_terms[cseq].append(cterm)\n elif k == 'low':\n for cterm in v:\n sequence_terms[cseq].append('-' + cterm)\n annotations = res['annotations']\n keys = list(annotations.keys())\n for cid in keys:\n annotations[int(cid)] = annotations.pop(cid)\n total_annotations = 0\n for cseq_annotations in sequence_annotations.values():\n total_annotations += len(cseq_annotations)\n debug(2, 'Got %d associations' % total_annotations)\n return sequence_terms, sequence_annotations, annotations\n\n\n<function token>\n<function token>\n\n\ndef _get_all_annotation_string_counts(features, sequence_annotations,\n annotations):\n feature_annotations = {}\n for cseq, annotations_list in sequence_annotations.items():\n if cseq not in features:\n continue\n newdesc = []\n for cannotation in annotations_list:\n cdesc = getannotationstrings2(annotations[cannotation])\n newdesc.append((cdesc, 1))\n feature_annotations[cseq] = newdesc\n return feature_annotations\n\n\n<function token>\n<function token>\n\n\ndef enrichment(seqs1, seqs2, term_type='term'):\n \"\"\"\n Do dbbact term and annotation enrichment analysis for 2 lists of sequences (comparing first to second list of sequences)\n\n Parameters\n ----------\n seqs1:list of str\n first set of sequences (ACGT)\n seqs1:list of str\n second set of sequences (ACGT)\n term_type : str (optional)\n type of the term to analyze for enrichment. can be:\n \"term\" : analyze the terms per annotation (not including parent terms)\n \"annotation\" : analyze the annotations associated with each sequence\n\n\n Returns\n -------\n err : str\n empty if ok, otherwise the error encountered\n term_list : list of str\n the terms which are enriched\n pvals : list of float\n the p-value for each term\n odif : list of float\n the effect size for each term\n \"\"\"\n np.random.seed(2018)\n all_seqs = set(seqs1).union(set(seqs2))\n seqs2 = list(all_seqs - set(seqs1))\n if len(seqs2) == 0:\n return (\n 'No sequences remaining in background fasta after removing the sequences of interest'\n , None, None, None)\n all_seqs = list(all_seqs)\n info = {}\n info['sequence_terms'], info['sequence_annotations'], info['annotations'\n ] = get_seq_annotations_fast(all_seqs)\n if term_type == 'term':\n debug(2, 'getting all_term counts')\n feature_terms = _get_all_term_counts(all_seqs, info[\n 'sequence_annotations'], info['annotations'])\n elif term_type == 'annotation':\n debug(2, 'getting all_annotation string counts')\n feature_terms = _get_all_annotation_string_counts(all_seqs, info[\n 'sequence_annotations'], info['annotations'])\n else:\n debug(8, 'strange term_type encountered: %s' % term_type)\n all_terms_set = set()\n for cterms in feature_terms.values():\n for cterm, ccount in cterms:\n all_terms_set.add(cterm)\n debug(2, 'found %d terms associated with all sequences (%d)' % (len(\n all_terms_set), len(all_seqs)))\n debug(2, 'getting seqs1 feature array')\n feature_array, term_list = _get_term_features(seqs1, feature_terms)\n debug(2, 'getting seqs2 feature array')\n bg_array, term_list = _get_term_features(seqs2, feature_terms)\n debug(2, 'bgarray: %s, feature_array: %s' % (bg_array.shape,\n feature_array.shape))\n all_feature_array = np.hstack([feature_array, bg_array])\n labels = np.zeros(all_feature_array.shape[1])\n labels[:feature_array.shape[1]] = 1\n debug(2, 'starting dsfdr for enrichment')\n keep, odif, pvals = dsfdr(all_feature_array, labels, method='meandiff',\n transform_type=None, alpha=0.1, numperm=1000, fdr_method='dsfdr')\n keep = np.where(keep)[0]\n if len(keep) == 0:\n debug(2, 'no enriched terms found')\n term_list = np.array(term_list)[keep]\n odif = odif[keep]\n pvals = pvals[keep]\n si = np.argsort(odif)\n odif = odif[si]\n pvals = pvals[si]\n term_list = term_list[si]\n return '', term_list, pvals, odif\n",
"<import token>\n\n\ndef calour_enrichment(seqs1, seqs2, term_type='term'):\n \"\"\"\n Do dbbact term and annotation enrichment analysis for 2 lists of sequences (comparing first to second list of sequences)\n\n Parameters\n ----------\n seqs1:list of str\n first set of sequences (ACGT)\n seqs1:list of str\n second set of sequences (ACGT)\n term_type : str (optional)\n type of the term to analyze for enrichment. can be:\n \"term\" : analyze the terms per annotation (not including parent terms)\n \"annotation\" : analyze the annotations associated with each sequence\n\n Returns\n -------\n err : str\n empty if ok, otherwise the error encountered\n term_list : list of str\n the terms which are enriched\n pvals : list of float\n the p-value for each term\n odif : list of float\n the effect size for each term\n \"\"\"\n import calour as ca\n db = ca.database._get_database_class('dbbact')\n np.random.seed(2018)\n all_seqs = set(seqs1).union(set(seqs2))\n seqs2 = list(all_seqs - set(seqs1))\n if len(seqs2) == 0:\n return (\n 'No sequences remaining in background fasta after removing the sequences of interest'\n , None, None, None)\n all_seqs = list(all_seqs)\n info = {}\n info['sequence_terms'], info['sequence_annotations'], info['annotations'\n ] = get_seq_annotations_fast(all_seqs)\n terms_df, resmat, features_df = db.db.term_enrichment(seqs1, seqs2,\n info['annotations'], info['sequence_annotations'], term_type=term_type)\n print(terms_df)\n return '', terms_df['feature'].values, terms_df['qval'], terms_df['odif']\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef _get_all_annotation_string_counts(features, sequence_annotations,\n annotations):\n feature_annotations = {}\n for cseq, annotations_list in sequence_annotations.items():\n if cseq not in features:\n continue\n newdesc = []\n for cannotation in annotations_list:\n cdesc = getannotationstrings2(annotations[cannotation])\n newdesc.append((cdesc, 1))\n feature_annotations[cseq] = newdesc\n return feature_annotations\n\n\n<function token>\n<function token>\n\n\ndef enrichment(seqs1, seqs2, term_type='term'):\n \"\"\"\n Do dbbact term and annotation enrichment analysis for 2 lists of sequences (comparing first to second list of sequences)\n\n Parameters\n ----------\n seqs1:list of str\n first set of sequences (ACGT)\n seqs1:list of str\n second set of sequences (ACGT)\n term_type : str (optional)\n type of the term to analyze for enrichment. can be:\n \"term\" : analyze the terms per annotation (not including parent terms)\n \"annotation\" : analyze the annotations associated with each sequence\n\n\n Returns\n -------\n err : str\n empty if ok, otherwise the error encountered\n term_list : list of str\n the terms which are enriched\n pvals : list of float\n the p-value for each term\n odif : list of float\n the effect size for each term\n \"\"\"\n np.random.seed(2018)\n all_seqs = set(seqs1).union(set(seqs2))\n seqs2 = list(all_seqs - set(seqs1))\n if len(seqs2) == 0:\n return (\n 'No sequences remaining in background fasta after removing the sequences of interest'\n , None, None, None)\n all_seqs = list(all_seqs)\n info = {}\n info['sequence_terms'], info['sequence_annotations'], info['annotations'\n ] = get_seq_annotations_fast(all_seqs)\n if term_type == 'term':\n debug(2, 'getting all_term counts')\n feature_terms = _get_all_term_counts(all_seqs, info[\n 'sequence_annotations'], info['annotations'])\n elif term_type == 'annotation':\n debug(2, 'getting all_annotation string counts')\n feature_terms = _get_all_annotation_string_counts(all_seqs, info[\n 'sequence_annotations'], info['annotations'])\n else:\n debug(8, 'strange term_type encountered: %s' % term_type)\n all_terms_set = set()\n for cterms in feature_terms.values():\n for cterm, ccount in cterms:\n all_terms_set.add(cterm)\n debug(2, 'found %d terms associated with all sequences (%d)' % (len(\n all_terms_set), len(all_seqs)))\n debug(2, 'getting seqs1 feature array')\n feature_array, term_list = _get_term_features(seqs1, feature_terms)\n debug(2, 'getting seqs2 feature array')\n bg_array, term_list = _get_term_features(seqs2, feature_terms)\n debug(2, 'bgarray: %s, feature_array: %s' % (bg_array.shape,\n feature_array.shape))\n all_feature_array = np.hstack([feature_array, bg_array])\n labels = np.zeros(all_feature_array.shape[1])\n labels[:feature_array.shape[1]] = 1\n debug(2, 'starting dsfdr for enrichment')\n keep, odif, pvals = dsfdr(all_feature_array, labels, method='meandiff',\n transform_type=None, alpha=0.1, numperm=1000, fdr_method='dsfdr')\n keep = np.where(keep)[0]\n if len(keep) == 0:\n debug(2, 'no enriched terms found')\n term_list = np.array(term_list)[keep]\n odif = odif[keep]\n pvals = pvals[keep]\n si = np.argsort(odif)\n odif = odif[si]\n pvals = pvals[si]\n term_list = term_list[si]\n return '', term_list, pvals, odif\n",
"<import token>\n\n\ndef calour_enrichment(seqs1, seqs2, term_type='term'):\n \"\"\"\n Do dbbact term and annotation enrichment analysis for 2 lists of sequences (comparing first to second list of sequences)\n\n Parameters\n ----------\n seqs1:list of str\n first set of sequences (ACGT)\n seqs1:list of str\n second set of sequences (ACGT)\n term_type : str (optional)\n type of the term to analyze for enrichment. can be:\n \"term\" : analyze the terms per annotation (not including parent terms)\n \"annotation\" : analyze the annotations associated with each sequence\n\n Returns\n -------\n err : str\n empty if ok, otherwise the error encountered\n term_list : list of str\n the terms which are enriched\n pvals : list of float\n the p-value for each term\n odif : list of float\n the effect size for each term\n \"\"\"\n import calour as ca\n db = ca.database._get_database_class('dbbact')\n np.random.seed(2018)\n all_seqs = set(seqs1).union(set(seqs2))\n seqs2 = list(all_seqs - set(seqs1))\n if len(seqs2) == 0:\n return (\n 'No sequences remaining in background fasta after removing the sequences of interest'\n , None, None, None)\n all_seqs = list(all_seqs)\n info = {}\n info['sequence_terms'], info['sequence_annotations'], info['annotations'\n ] = get_seq_annotations_fast(all_seqs)\n terms_df, resmat, features_df = db.db.term_enrichment(seqs1, seqs2,\n info['annotations'], info['sequence_annotations'], term_type=term_type)\n print(terms_df)\n return '', terms_df['feature'].values, terms_df['qval'], terms_df['odif']\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef _get_all_annotation_string_counts(features, sequence_annotations,\n annotations):\n feature_annotations = {}\n for cseq, annotations_list in sequence_annotations.items():\n if cseq not in features:\n continue\n newdesc = []\n for cannotation in annotations_list:\n cdesc = getannotationstrings2(annotations[cannotation])\n newdesc.append((cdesc, 1))\n feature_annotations[cseq] = newdesc\n return feature_annotations\n\n\n<function token>\n<function token>\n<function token>\n",
"<import token>\n\n\ndef calour_enrichment(seqs1, seqs2, term_type='term'):\n \"\"\"\n Do dbbact term and annotation enrichment analysis for 2 lists of sequences (comparing first to second list of sequences)\n\n Parameters\n ----------\n seqs1:list of str\n first set of sequences (ACGT)\n seqs1:list of str\n second set of sequences (ACGT)\n term_type : str (optional)\n type of the term to analyze for enrichment. can be:\n \"term\" : analyze the terms per annotation (not including parent terms)\n \"annotation\" : analyze the annotations associated with each sequence\n\n Returns\n -------\n err : str\n empty if ok, otherwise the error encountered\n term_list : list of str\n the terms which are enriched\n pvals : list of float\n the p-value for each term\n odif : list of float\n the effect size for each term\n \"\"\"\n import calour as ca\n db = ca.database._get_database_class('dbbact')\n np.random.seed(2018)\n all_seqs = set(seqs1).union(set(seqs2))\n seqs2 = list(all_seqs - set(seqs1))\n if len(seqs2) == 0:\n return (\n 'No sequences remaining in background fasta after removing the sequences of interest'\n , None, None, None)\n all_seqs = list(all_seqs)\n info = {}\n info['sequence_terms'], info['sequence_annotations'], info['annotations'\n ] = get_seq_annotations_fast(all_seqs)\n terms_df, resmat, features_df = db.db.term_enrichment(seqs1, seqs2,\n info['annotations'], info['sequence_annotations'], term_type=term_type)\n print(terms_df)\n return '', terms_df['feature'].values, terms_df['qval'], terms_df['odif']\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n",
"<import token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n"
] | false |
98,314 |
975fc65f08368d59cb67e33f676f97842c7cec40
|
# importing files
from spy_details import spy
from start_chat import start_chat
from spy_detail import spy_detail
# program starts from here
print("***************\n welcome!!!!.... \n***************")
proceed = 1
while proceed:
# checking while user want to proceed with default user or not and taking action according to it
proceed = raw_input("\nwant to proceed with default user (y/n)")
if proceed.lower() == 'y':
proceed = 0
start_chat(spy)
elif proceed.lower() == 'n':
spy = spy_detail()
proceed = 0
start_chat(spy)
else:
print "\n!!!!!!!!!!!!!!!\nENTER CAREFULLY\n!!!!!!!!!!!!!!!"
|
[
"# importing files\nfrom spy_details import spy\nfrom start_chat import start_chat\nfrom spy_detail import spy_detail\n\n# program starts from here\nprint(\"***************\\n welcome!!!!.... \\n***************\")\n\nproceed = 1\nwhile proceed:\n # checking while user want to proceed with default user or not and taking action according to it\n proceed = raw_input(\"\\nwant to proceed with default user (y/n)\")\n if proceed.lower() == 'y':\n proceed = 0\n start_chat(spy)\n\n elif proceed.lower() == 'n':\n spy = spy_detail()\n proceed = 0\n start_chat(spy)\n else:\n print \"\\n!!!!!!!!!!!!!!!\\nENTER CAREFULLY\\n!!!!!!!!!!!!!!!\"\n"
] | true |
98,315 |
e13a47b45a00337ccf71a2e5bd7103e998839628
|
from django.apps import AppConfig
class UsersConfig(AppConfig):
name = 'users'
verbose_name = '书城用户管理'
verbose_plural = verbose_name
|
[
"from django.apps import AppConfig\n\n\nclass UsersConfig(AppConfig):\n name = 'users'\n verbose_name = '书城用户管理'\n verbose_plural = verbose_name\n",
"<import token>\n\n\nclass UsersConfig(AppConfig):\n name = 'users'\n verbose_name = '书城用户管理'\n verbose_plural = verbose_name\n",
"<import token>\n\n\nclass UsersConfig(AppConfig):\n <assignment token>\n <assignment token>\n <assignment token>\n",
"<import token>\n<class token>\n"
] | false |
98,316 |
5340e044a97f9734e923212fcd2bb9a0a9d81dd9
|
from __future__ import print_function
import pickle
import os.path
from googleapiclient.discovery import build
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
from googleapiclient.http import BatchHttpRequest
from pprint import pprint
from datetime import datetime
import time
import itertools
import pandas as pd
import logging
import os
def get_creds():
"""
Authenticates the user either by existing pickle token, or generate new one
// TODO add instruction on how to get the credentials.json
:return: creds
"""
# If modifying these scopes, delete the file token.pickle.
SCOPES = ['https://www.googleapis.com/auth/gmail.readonly']
creds = None
dir_pre = '../secrets'
# The file token.pickle stores the user's access and refresh tokens, and is
# created automatically when the authorization flow completes for the first
# time.
logging.debug(f"searching for creds in path: {os.getcwd()}")
if os.path.exists(os.path.join(dir_pre, 'token.pickle')):
with open(os.path.join(dir_pre, 'token.pickle'), 'rb') as token:
creds = pickle.load(token)
logging.info('opening token.pickle')
# If there are no (valid) credentials available, let the user log in.
if not creds or not creds.valid:
if creds and creds.expired and creds.refresh_token:
logging.info('token expired, refreshing token')
creds.refresh(Request())
else:
logging.info('token not found, re authenticating ')
flow = InstalledAppFlow.from_client_secrets_file(os.path.join(dir_pre, 'credentials.json'), SCOPES)
creds = flow.run_local_server(port=0)
# Save the credentials for the next run
with open(os.path.join(dir_pre, 'token.pickle'), 'wb') as token:
pickle.dump(creds, token)
return creds
def list_user_labels(service):
"""
:param service:
List all labels the user has, default + custom
"""
logging.info('getting labels')
labels = service.users().labels().list(userId='me').execute()
label_list = [label['name'] for label in labels['labels']]
return label_list
def list_email_ids_by_label(service, label, max_results=500):
"""
:param service:
:param label:
:param max_results:
:return:
"""
email_ids = set()
next_page_token = ''
try:
while True:
response = service.users().messages().list(userId='me', labelIds=label, maxResults=max_results,
pageToken=next_page_token).execute()
# extract id
for msg_id in response['messages']:
email_ids.add(msg_id['id'])
logging.debug(f"total message ids: {len(email_ids)}")
if 'nextPageToken' in response:
print('next page token:', response['nextPageToken'])
next_page_token = response['nextPageToken']
else:
break
# uncomment for testing cap 500
# break
except:
print('cannot get emails with this label', label)
return email_ids
def list_user_info(service):
"""
list basic user info
:param service:
:return:
"""
profile = service.users().getProfile(userId='me').execute()
return profile
|
[
"from __future__ import print_function\nimport pickle\nimport os.path\nfrom googleapiclient.discovery import build\nfrom google_auth_oauthlib.flow import InstalledAppFlow\nfrom google.auth.transport.requests import Request\nfrom googleapiclient.http import BatchHttpRequest\nfrom pprint import pprint\nfrom datetime import datetime\nimport time\nimport itertools\nimport pandas as pd\nimport logging\nimport os\n\n\ndef get_creds():\n \"\"\"\n Authenticates the user either by existing pickle token, or generate new one\n // TODO add instruction on how to get the credentials.json\n :return: creds\n \"\"\"\n # If modifying these scopes, delete the file token.pickle.\n SCOPES = ['https://www.googleapis.com/auth/gmail.readonly']\n creds = None\n dir_pre = '../secrets'\n # The file token.pickle stores the user's access and refresh tokens, and is\n # created automatically when the authorization flow completes for the first\n # time.\n logging.debug(f\"searching for creds in path: {os.getcwd()}\")\n if os.path.exists(os.path.join(dir_pre, 'token.pickle')):\n with open(os.path.join(dir_pre, 'token.pickle'), 'rb') as token:\n creds = pickle.load(token)\n logging.info('opening token.pickle')\n # If there are no (valid) credentials available, let the user log in.\n if not creds or not creds.valid:\n if creds and creds.expired and creds.refresh_token:\n logging.info('token expired, refreshing token')\n creds.refresh(Request())\n else:\n logging.info('token not found, re authenticating ')\n flow = InstalledAppFlow.from_client_secrets_file(os.path.join(dir_pre, 'credentials.json'), SCOPES)\n creds = flow.run_local_server(port=0)\n # Save the credentials for the next run\n with open(os.path.join(dir_pre, 'token.pickle'), 'wb') as token:\n pickle.dump(creds, token)\n\n return creds\n\n\ndef list_user_labels(service):\n \"\"\"\n :param service:\n List all labels the user has, default + custom\n \"\"\"\n logging.info('getting labels')\n labels = service.users().labels().list(userId='me').execute()\n label_list = [label['name'] for label in labels['labels']]\n\n return label_list\n\n\ndef list_email_ids_by_label(service, label, max_results=500):\n \"\"\"\n :param service:\n :param label:\n :param max_results:\n :return:\n \"\"\"\n\n email_ids = set()\n next_page_token = ''\n\n try:\n while True:\n response = service.users().messages().list(userId='me', labelIds=label, maxResults=max_results,\n pageToken=next_page_token).execute()\n # extract id\n for msg_id in response['messages']:\n email_ids.add(msg_id['id'])\n logging.debug(f\"total message ids: {len(email_ids)}\")\n\n if 'nextPageToken' in response:\n print('next page token:', response['nextPageToken'])\n next_page_token = response['nextPageToken']\n else:\n break\n # uncomment for testing cap 500\n # break\n except:\n print('cannot get emails with this label', label)\n return email_ids\n\ndef list_user_info(service):\n \"\"\"\n list basic user info\n :param service:\n :return:\n \"\"\"\n profile = service.users().getProfile(userId='me').execute()\n return profile\n",
"from __future__ import print_function\nimport pickle\nimport os.path\nfrom googleapiclient.discovery import build\nfrom google_auth_oauthlib.flow import InstalledAppFlow\nfrom google.auth.transport.requests import Request\nfrom googleapiclient.http import BatchHttpRequest\nfrom pprint import pprint\nfrom datetime import datetime\nimport time\nimport itertools\nimport pandas as pd\nimport logging\nimport os\n\n\ndef get_creds():\n \"\"\"\n Authenticates the user either by existing pickle token, or generate new one\n // TODO add instruction on how to get the credentials.json\n :return: creds\n \"\"\"\n SCOPES = ['https://www.googleapis.com/auth/gmail.readonly']\n creds = None\n dir_pre = '../secrets'\n logging.debug(f'searching for creds in path: {os.getcwd()}')\n if os.path.exists(os.path.join(dir_pre, 'token.pickle')):\n with open(os.path.join(dir_pre, 'token.pickle'), 'rb') as token:\n creds = pickle.load(token)\n logging.info('opening token.pickle')\n if not creds or not creds.valid:\n if creds and creds.expired and creds.refresh_token:\n logging.info('token expired, refreshing token')\n creds.refresh(Request())\n else:\n logging.info('token not found, re authenticating ')\n flow = InstalledAppFlow.from_client_secrets_file(os.path.join(\n dir_pre, 'credentials.json'), SCOPES)\n creds = flow.run_local_server(port=0)\n with open(os.path.join(dir_pre, 'token.pickle'), 'wb') as token:\n pickle.dump(creds, token)\n return creds\n\n\ndef list_user_labels(service):\n \"\"\"\n :param service:\n List all labels the user has, default + custom\n \"\"\"\n logging.info('getting labels')\n labels = service.users().labels().list(userId='me').execute()\n label_list = [label['name'] for label in labels['labels']]\n return label_list\n\n\ndef list_email_ids_by_label(service, label, max_results=500):\n \"\"\"\n :param service:\n :param label:\n :param max_results:\n :return:\n \"\"\"\n email_ids = set()\n next_page_token = ''\n try:\n while True:\n response = service.users().messages().list(userId='me',\n labelIds=label, maxResults=max_results, pageToken=\n next_page_token).execute()\n for msg_id in response['messages']:\n email_ids.add(msg_id['id'])\n logging.debug(f'total message ids: {len(email_ids)}')\n if 'nextPageToken' in response:\n print('next page token:', response['nextPageToken'])\n next_page_token = response['nextPageToken']\n else:\n break\n except:\n print('cannot get emails with this label', label)\n return email_ids\n\n\ndef list_user_info(service):\n \"\"\"\n list basic user info\n :param service:\n :return:\n \"\"\"\n profile = service.users().getProfile(userId='me').execute()\n return profile\n",
"<import token>\n\n\ndef get_creds():\n \"\"\"\n Authenticates the user either by existing pickle token, or generate new one\n // TODO add instruction on how to get the credentials.json\n :return: creds\n \"\"\"\n SCOPES = ['https://www.googleapis.com/auth/gmail.readonly']\n creds = None\n dir_pre = '../secrets'\n logging.debug(f'searching for creds in path: {os.getcwd()}')\n if os.path.exists(os.path.join(dir_pre, 'token.pickle')):\n with open(os.path.join(dir_pre, 'token.pickle'), 'rb') as token:\n creds = pickle.load(token)\n logging.info('opening token.pickle')\n if not creds or not creds.valid:\n if creds and creds.expired and creds.refresh_token:\n logging.info('token expired, refreshing token')\n creds.refresh(Request())\n else:\n logging.info('token not found, re authenticating ')\n flow = InstalledAppFlow.from_client_secrets_file(os.path.join(\n dir_pre, 'credentials.json'), SCOPES)\n creds = flow.run_local_server(port=0)\n with open(os.path.join(dir_pre, 'token.pickle'), 'wb') as token:\n pickle.dump(creds, token)\n return creds\n\n\ndef list_user_labels(service):\n \"\"\"\n :param service:\n List all labels the user has, default + custom\n \"\"\"\n logging.info('getting labels')\n labels = service.users().labels().list(userId='me').execute()\n label_list = [label['name'] for label in labels['labels']]\n return label_list\n\n\ndef list_email_ids_by_label(service, label, max_results=500):\n \"\"\"\n :param service:\n :param label:\n :param max_results:\n :return:\n \"\"\"\n email_ids = set()\n next_page_token = ''\n try:\n while True:\n response = service.users().messages().list(userId='me',\n labelIds=label, maxResults=max_results, pageToken=\n next_page_token).execute()\n for msg_id in response['messages']:\n email_ids.add(msg_id['id'])\n logging.debug(f'total message ids: {len(email_ids)}')\n if 'nextPageToken' in response:\n print('next page token:', response['nextPageToken'])\n next_page_token = response['nextPageToken']\n else:\n break\n except:\n print('cannot get emails with this label', label)\n return email_ids\n\n\ndef list_user_info(service):\n \"\"\"\n list basic user info\n :param service:\n :return:\n \"\"\"\n profile = service.users().getProfile(userId='me').execute()\n return profile\n",
"<import token>\n\n\ndef get_creds():\n \"\"\"\n Authenticates the user either by existing pickle token, or generate new one\n // TODO add instruction on how to get the credentials.json\n :return: creds\n \"\"\"\n SCOPES = ['https://www.googleapis.com/auth/gmail.readonly']\n creds = None\n dir_pre = '../secrets'\n logging.debug(f'searching for creds in path: {os.getcwd()}')\n if os.path.exists(os.path.join(dir_pre, 'token.pickle')):\n with open(os.path.join(dir_pre, 'token.pickle'), 'rb') as token:\n creds = pickle.load(token)\n logging.info('opening token.pickle')\n if not creds or not creds.valid:\n if creds and creds.expired and creds.refresh_token:\n logging.info('token expired, refreshing token')\n creds.refresh(Request())\n else:\n logging.info('token not found, re authenticating ')\n flow = InstalledAppFlow.from_client_secrets_file(os.path.join(\n dir_pre, 'credentials.json'), SCOPES)\n creds = flow.run_local_server(port=0)\n with open(os.path.join(dir_pre, 'token.pickle'), 'wb') as token:\n pickle.dump(creds, token)\n return creds\n\n\ndef list_user_labels(service):\n \"\"\"\n :param service:\n List all labels the user has, default + custom\n \"\"\"\n logging.info('getting labels')\n labels = service.users().labels().list(userId='me').execute()\n label_list = [label['name'] for label in labels['labels']]\n return label_list\n\n\n<function token>\n\n\ndef list_user_info(service):\n \"\"\"\n list basic user info\n :param service:\n :return:\n \"\"\"\n profile = service.users().getProfile(userId='me').execute()\n return profile\n",
"<import token>\n\n\ndef get_creds():\n \"\"\"\n Authenticates the user either by existing pickle token, or generate new one\n // TODO add instruction on how to get the credentials.json\n :return: creds\n \"\"\"\n SCOPES = ['https://www.googleapis.com/auth/gmail.readonly']\n creds = None\n dir_pre = '../secrets'\n logging.debug(f'searching for creds in path: {os.getcwd()}')\n if os.path.exists(os.path.join(dir_pre, 'token.pickle')):\n with open(os.path.join(dir_pre, 'token.pickle'), 'rb') as token:\n creds = pickle.load(token)\n logging.info('opening token.pickle')\n if not creds or not creds.valid:\n if creds and creds.expired and creds.refresh_token:\n logging.info('token expired, refreshing token')\n creds.refresh(Request())\n else:\n logging.info('token not found, re authenticating ')\n flow = InstalledAppFlow.from_client_secrets_file(os.path.join(\n dir_pre, 'credentials.json'), SCOPES)\n creds = flow.run_local_server(port=0)\n with open(os.path.join(dir_pre, 'token.pickle'), 'wb') as token:\n pickle.dump(creds, token)\n return creds\n\n\n<function token>\n<function token>\n\n\ndef list_user_info(service):\n \"\"\"\n list basic user info\n :param service:\n :return:\n \"\"\"\n profile = service.users().getProfile(userId='me').execute()\n return profile\n",
"<import token>\n<function token>\n<function token>\n<function token>\n\n\ndef list_user_info(service):\n \"\"\"\n list basic user info\n :param service:\n :return:\n \"\"\"\n profile = service.users().getProfile(userId='me').execute()\n return profile\n",
"<import token>\n<function token>\n<function token>\n<function token>\n<function token>\n"
] | false |
98,317 |
d179834a2600b2489769f981943d0a71704bba27
|
import numpy as np
import pandas
from scipy import signal
from scipy.stats import kurtosis
from statsmodels.robust.scale import mad
import pywt
import pybursts
import matplotlib.pyplot as plt
class Kurtoburst(object):
""" Class to detect the peak using the kurtoburst method."""
def __init__(self,filename):
self.filename = filename
self.raw_data = pandas.read_csv(self.filename)
#self.colname = ' AU12_r'
#sself.colname = ' AU06_r'
#self.colname = ' AU07_r'
self.colname = ' AU02_r'
self.time = np.array(self.raw_data[' timestamp'][::10])
self.input = np.array(self.raw_data[self.colname][::10])
self.len = len(self.input)
self.nwin = 51
self.wave_type = 'sym3'
self.TK = 0.5
self.TT = 0.15
self.burst_s = 2 # burst s parameter
self.burst_gamma = 0.05 # burst gamma parameters
def remove_baseline(self):
"""Remove the base line using a Savitzky-Golay method"""
print(" \t Apply Savitzky-Golay filter \t %d" %self.nwin)
base_savgol = signal.savgol_filter(self.input, self.nwin, 1)
self.input_nobase = self.input - base_savgol
def denoise(self):
"""denoise the data using the 2stage kurtosis denoising"""
#make sure the data has a len dividible by 2^2
self.len_swt = self.len
while not (self.len_swt/4).is_integer():
self.len_swt -= 1
inp = self.input_nobase[:self.len_swt]
self.wave = pywt.Wavelet(self.wave_type)
nLevel = pywt.swt_max_level(self.len_swt)
self.coeffs = pywt.swt(inp,self.wave,level=2)
print(" \t Denoise STW coefficients \t %1.2f %1.2f" %(self.TK,self.TT))
(cA2, cD2), (cA1, cD1) = self.coeffs
# rolling kurtosis
k2 = self._rolling_kts(cD2,self.nwin)
k1 = self._rolling_kts(cD1,self.nwin)
# thresholding
cD2[k2<self.TK] = 0
cD1[k1<self.TK] = 0
cA2[k2<self.TK] = 0
cA1[k1<self.TK] = 0
# universal threshold
sigma_roll_1 = mad(cD1[cD1!=0])*np.ones(self.len_swt)
uthresh_roll_1 = self.TT * sigma_roll_1 * np.sqrt(2*np.log(self.len_swt))
cD1[abs(cD1)<uthresh_roll_1] = 0
# universal threshold
sigma_roll_2 = mad(cD2[cD2!=0])*np.ones(self.len_swt)
uthresh_roll_2 = self.TT * sigma_roll_2 * np.sqrt(2*np.log(self.len_swt))
cD2[abs(cD2)<uthresh_roll_2] = 0
# final threshold
cA1[cD1 == 0] = 0
cA2[cD2 == 0] = 0
self.denoised_coeffs = [(cA1,cD1),(cA2,cD2)]
# denoise the data
#self.input_denoised = self._iswt(self.denoised_coeffs,self.wave)
self.input_denoised = pywt.iswt(self.denoised_coeffs,self.wave)
def get_burst(self):
"""Detect bursts of activity."""
print('\t Detect bursts \t\t\t %d %1.2f' %(self.burst_s,self.burst_gamma))
# compute the cum sum of the positive values of the datan ...
_tmp = np.copy(self.input_denoised)
_tmp[_tmp<0] = 0
_tmp += 1E-12
self.input_cummulative = np.cumsum(_tmp)
# decimation ...
self.T_cummulative = np.copy(self.time[0:-1:10])
self.input_cummulative = self.input_cummulative[0:-1:10]
# burst calculation
self.burst = pybursts.kleinberg(self.input_cummulative,s=int(self.burst_s),gamma=self.burst_gamma)
Tbursts = []
for b in self.burst:
if b[0] == 1:
ti = self.T_cummulative[np.argwhere(self.input_cummulative==b[1])[0]]
tf = self.T_cummulative[np.argwhere(self.input_cummulative==b[2])[0]]
Tbursts.append([ti[0],tf[0]])
########################################
## detect the peaks
########################################
x_peak_bursts = []
y_peak_bursts = []
print(Tbursts)
if len(Tbursts)>0:
for i in range(len(Tbursts)-1):
ind_init = np.argmin(abs(self.time-Tbursts[i][1]))
ind_final = np.argmin(abs(self.time-Tbursts[i+1][0]))
x_peak_bursts.append( self.time[ ind_init + np.argmax(self.input_denoised[ind_init:ind_final])] )
y_peak_bursts.append( self.input[ind_init + np.argmax(self.input_denoised[ind_init:ind_final])] )
else:
print('\t no peaks found in the bursts')
self.xpeak = x_peak_bursts
self.ypeak = y_peak_bursts
@staticmethod
def _rolling_kts(y,N):
"""Compute the rolling kurtosis."""
# number of points
nPTS,N2 = len(y), int(N/2)
# define the out
kts = np.zeros(nPTS)
# for all points comopute snr
for i in range(nPTS):
s,e = i-N2, i+N2
if s<0:
s = 0
if s > nPTS-1:
s = nPTS-1
win = np.ones(len(y[s:e]))
kts[i] = kurtosis(win*y[s:e])
return kts
def plot(self):
plt.plot(self.time,self.input)
#plt.plot(self.time,self.input_nobase-1,linewidth=0.5)
plt.scatter(self.xpeak,self.ypeak,c='orange')
ypeak = np.zeros_like(self.time)
for p in self.xpeak:
ypeak[self.time==p] = 0.5
plt.plot(self.time,ypeak-1,c='orange')
#plt.plot(self.T_cummulative,self.input_cummulative)
#plt.plot(self.time[:self.len_swt],self.input_denoised,c='black')
plt.show()
if __name__ == '__main__':
filename = '003_VL.csv'
kb = Kurtoburst(filename)
kb.remove_baseline()
kb.denoise()
tb = kb.get_burst()
kb.plot()
print(tb)
|
[
"import numpy as np\nimport pandas\n\nfrom scipy import signal\nfrom scipy.stats import kurtosis\n\nfrom statsmodels.robust.scale import mad\n\nimport pywt\nimport pybursts\nimport matplotlib.pyplot as plt\n\nclass Kurtoburst(object):\n \"\"\" Class to detect the peak using the kurtoburst method.\"\"\"\n\n def __init__(self,filename):\n\n self.filename = filename\n self.raw_data = pandas.read_csv(self.filename)\n #self.colname = ' AU12_r'\n #sself.colname = ' AU06_r'\n #self.colname = ' AU07_r'\n self.colname = ' AU02_r'\n self.time = np.array(self.raw_data[' timestamp'][::10])\n self.input = np.array(self.raw_data[self.colname][::10])\n self.len = len(self.input)\n\n self.nwin = 51\n self.wave_type = 'sym3'\n self.TK = 0.5\n self.TT = 0.15\n\n self.burst_s = 2 # burst s parameter\n self.burst_gamma = 0.05 # burst gamma parameters\n\n def remove_baseline(self):\n \"\"\"Remove the base line using a Savitzky-Golay method\"\"\"\n\n print(\" \\t Apply Savitzky-Golay filter \\t %d\" %self.nwin)\n base_savgol = signal.savgol_filter(self.input, self.nwin, 1)\n self.input_nobase = self.input - base_savgol\n\n def denoise(self):\n \"\"\"denoise the data using the 2stage kurtosis denoising\"\"\"\n\n #make sure the data has a len dividible by 2^2\n self.len_swt = self.len\n while not (self.len_swt/4).is_integer():\n self.len_swt -= 1\n\n inp = self.input_nobase[:self.len_swt]\n self.wave = pywt.Wavelet(self.wave_type)\n nLevel = pywt.swt_max_level(self.len_swt)\n self.coeffs = pywt.swt(inp,self.wave,level=2)\n\n print(\" \\t Denoise STW coefficients \\t %1.2f %1.2f\" %(self.TK,self.TT))\n (cA2, cD2), (cA1, cD1) = self.coeffs\n\n # rolling kurtosis\n k2 = self._rolling_kts(cD2,self.nwin)\n k1 = self._rolling_kts(cD1,self.nwin)\n\n # thresholding\n cD2[k2<self.TK] = 0\n cD1[k1<self.TK] = 0\n\n cA2[k2<self.TK] = 0\n cA1[k1<self.TK] = 0\n\n # universal threshold\n sigma_roll_1 = mad(cD1[cD1!=0])*np.ones(self.len_swt)\n uthresh_roll_1 = self.TT * sigma_roll_1 * np.sqrt(2*np.log(self.len_swt))\n cD1[abs(cD1)<uthresh_roll_1] = 0\n\n # universal threshold\n sigma_roll_2 = mad(cD2[cD2!=0])*np.ones(self.len_swt)\n uthresh_roll_2 = self.TT * sigma_roll_2 * np.sqrt(2*np.log(self.len_swt))\n cD2[abs(cD2)<uthresh_roll_2] = 0\n\n # final threshold\n cA1[cD1 == 0] = 0\n cA2[cD2 == 0] = 0\n self.denoised_coeffs = [(cA1,cD1),(cA2,cD2)]\n\n # denoise the data\n #self.input_denoised = self._iswt(self.denoised_coeffs,self.wave)\n self.input_denoised = pywt.iswt(self.denoised_coeffs,self.wave)\n\n def get_burst(self):\n \"\"\"Detect bursts of activity.\"\"\"\n\n print('\\t Detect bursts \\t\\t\\t %d %1.2f' %(self.burst_s,self.burst_gamma))\n\n # compute the cum sum of the positive values of the datan ...\n _tmp = np.copy(self.input_denoised)\n _tmp[_tmp<0] = 0\n _tmp += 1E-12\n self.input_cummulative = np.cumsum(_tmp)\n\n # decimation ...\n self.T_cummulative = np.copy(self.time[0:-1:10])\n self.input_cummulative = self.input_cummulative[0:-1:10]\n\n # burst calculation\n self.burst = pybursts.kleinberg(self.input_cummulative,s=int(self.burst_s),gamma=self.burst_gamma)\n\n\n Tbursts = []\n for b in self.burst:\n if b[0] == 1:\n ti = self.T_cummulative[np.argwhere(self.input_cummulative==b[1])[0]]\n tf = self.T_cummulative[np.argwhere(self.input_cummulative==b[2])[0]]\n Tbursts.append([ti[0],tf[0]])\n\n ########################################\n ## detect the peaks\n ########################################\n x_peak_bursts = []\n y_peak_bursts = []\n print(Tbursts)\n if len(Tbursts)>0:\n for i in range(len(Tbursts)-1):\n ind_init = np.argmin(abs(self.time-Tbursts[i][1]))\n ind_final = np.argmin(abs(self.time-Tbursts[i+1][0]))\n\n x_peak_bursts.append( self.time[ ind_init + np.argmax(self.input_denoised[ind_init:ind_final])] )\n y_peak_bursts.append( self.input[ind_init + np.argmax(self.input_denoised[ind_init:ind_final])] )\n else:\n print('\\t no peaks found in the bursts')\n\n self.xpeak = x_peak_bursts\n self.ypeak = y_peak_bursts\n\n\n\n @staticmethod\n def _rolling_kts(y,N):\n \"\"\"Compute the rolling kurtosis.\"\"\"\n\n # number of points\n nPTS,N2 = len(y), int(N/2)\n\n # define the out\n kts = np.zeros(nPTS)\n\n # for all points comopute snr\n for i in range(nPTS):\n s,e = i-N2, i+N2\n if s<0:\n s = 0\n if s > nPTS-1:\n s = nPTS-1\n win = np.ones(len(y[s:e]))\n kts[i] = kurtosis(win*y[s:e])\n return kts\n\n def plot(self):\n plt.plot(self.time,self.input)\n #plt.plot(self.time,self.input_nobase-1,linewidth=0.5)\n plt.scatter(self.xpeak,self.ypeak,c='orange')\n ypeak = np.zeros_like(self.time)\n for p in self.xpeak:\n ypeak[self.time==p] = 0.5\n plt.plot(self.time,ypeak-1,c='orange')\n #plt.plot(self.T_cummulative,self.input_cummulative)\n #plt.plot(self.time[:self.len_swt],self.input_denoised,c='black')\n plt.show()\n\n\n\n\nif __name__ == '__main__':\n filename = '003_VL.csv'\n kb = Kurtoburst(filename)\n kb.remove_baseline()\n kb.denoise()\n tb = kb.get_burst()\n kb.plot()\n print(tb)",
"import numpy as np\nimport pandas\nfrom scipy import signal\nfrom scipy.stats import kurtosis\nfrom statsmodels.robust.scale import mad\nimport pywt\nimport pybursts\nimport matplotlib.pyplot as plt\n\n\nclass Kurtoburst(object):\n \"\"\" Class to detect the peak using the kurtoburst method.\"\"\"\n\n def __init__(self, filename):\n self.filename = filename\n self.raw_data = pandas.read_csv(self.filename)\n self.colname = ' AU02_r'\n self.time = np.array(self.raw_data[' timestamp'][::10])\n self.input = np.array(self.raw_data[self.colname][::10])\n self.len = len(self.input)\n self.nwin = 51\n self.wave_type = 'sym3'\n self.TK = 0.5\n self.TT = 0.15\n self.burst_s = 2\n self.burst_gamma = 0.05\n\n def remove_baseline(self):\n \"\"\"Remove the base line using a Savitzky-Golay method\"\"\"\n print(' \\t Apply Savitzky-Golay filter \\t %d' % self.nwin)\n base_savgol = signal.savgol_filter(self.input, self.nwin, 1)\n self.input_nobase = self.input - base_savgol\n\n def denoise(self):\n \"\"\"denoise the data using the 2stage kurtosis denoising\"\"\"\n self.len_swt = self.len\n while not (self.len_swt / 4).is_integer():\n self.len_swt -= 1\n inp = self.input_nobase[:self.len_swt]\n self.wave = pywt.Wavelet(self.wave_type)\n nLevel = pywt.swt_max_level(self.len_swt)\n self.coeffs = pywt.swt(inp, self.wave, level=2)\n print(' \\t Denoise STW coefficients \\t %1.2f %1.2f' % (self.TK,\n self.TT))\n (cA2, cD2), (cA1, cD1) = self.coeffs\n k2 = self._rolling_kts(cD2, self.nwin)\n k1 = self._rolling_kts(cD1, self.nwin)\n cD2[k2 < self.TK] = 0\n cD1[k1 < self.TK] = 0\n cA2[k2 < self.TK] = 0\n cA1[k1 < self.TK] = 0\n sigma_roll_1 = mad(cD1[cD1 != 0]) * np.ones(self.len_swt)\n uthresh_roll_1 = self.TT * sigma_roll_1 * np.sqrt(2 * np.log(self.\n len_swt))\n cD1[abs(cD1) < uthresh_roll_1] = 0\n sigma_roll_2 = mad(cD2[cD2 != 0]) * np.ones(self.len_swt)\n uthresh_roll_2 = self.TT * sigma_roll_2 * np.sqrt(2 * np.log(self.\n len_swt))\n cD2[abs(cD2) < uthresh_roll_2] = 0\n cA1[cD1 == 0] = 0\n cA2[cD2 == 0] = 0\n self.denoised_coeffs = [(cA1, cD1), (cA2, cD2)]\n self.input_denoised = pywt.iswt(self.denoised_coeffs, self.wave)\n\n def get_burst(self):\n \"\"\"Detect bursts of activity.\"\"\"\n print('\\t Detect bursts \\t\\t\\t %d %1.2f' % (self.burst_s, self.\n burst_gamma))\n _tmp = np.copy(self.input_denoised)\n _tmp[_tmp < 0] = 0\n _tmp += 1e-12\n self.input_cummulative = np.cumsum(_tmp)\n self.T_cummulative = np.copy(self.time[0:-1:10])\n self.input_cummulative = self.input_cummulative[0:-1:10]\n self.burst = pybursts.kleinberg(self.input_cummulative, s=int(self.\n burst_s), gamma=self.burst_gamma)\n Tbursts = []\n for b in self.burst:\n if b[0] == 1:\n ti = self.T_cummulative[np.argwhere(self.input_cummulative ==\n b[1])[0]]\n tf = self.T_cummulative[np.argwhere(self.input_cummulative ==\n b[2])[0]]\n Tbursts.append([ti[0], tf[0]])\n x_peak_bursts = []\n y_peak_bursts = []\n print(Tbursts)\n if len(Tbursts) > 0:\n for i in range(len(Tbursts) - 1):\n ind_init = np.argmin(abs(self.time - Tbursts[i][1]))\n ind_final = np.argmin(abs(self.time - Tbursts[i + 1][0]))\n x_peak_bursts.append(self.time[ind_init + np.argmax(self.\n input_denoised[ind_init:ind_final])])\n y_peak_bursts.append(self.input[ind_init + np.argmax(self.\n input_denoised[ind_init:ind_final])])\n else:\n print('\\t no peaks found in the bursts')\n self.xpeak = x_peak_bursts\n self.ypeak = y_peak_bursts\n\n @staticmethod\n def _rolling_kts(y, N):\n \"\"\"Compute the rolling kurtosis.\"\"\"\n nPTS, N2 = len(y), int(N / 2)\n kts = np.zeros(nPTS)\n for i in range(nPTS):\n s, e = i - N2, i + N2\n if s < 0:\n s = 0\n if s > nPTS - 1:\n s = nPTS - 1\n win = np.ones(len(y[s:e]))\n kts[i] = kurtosis(win * y[s:e])\n return kts\n\n def plot(self):\n plt.plot(self.time, self.input)\n plt.scatter(self.xpeak, self.ypeak, c='orange')\n ypeak = np.zeros_like(self.time)\n for p in self.xpeak:\n ypeak[self.time == p] = 0.5\n plt.plot(self.time, ypeak - 1, c='orange')\n plt.show()\n\n\nif __name__ == '__main__':\n filename = '003_VL.csv'\n kb = Kurtoburst(filename)\n kb.remove_baseline()\n kb.denoise()\n tb = kb.get_burst()\n kb.plot()\n print(tb)\n",
"<import token>\n\n\nclass Kurtoburst(object):\n \"\"\" Class to detect the peak using the kurtoburst method.\"\"\"\n\n def __init__(self, filename):\n self.filename = filename\n self.raw_data = pandas.read_csv(self.filename)\n self.colname = ' AU02_r'\n self.time = np.array(self.raw_data[' timestamp'][::10])\n self.input = np.array(self.raw_data[self.colname][::10])\n self.len = len(self.input)\n self.nwin = 51\n self.wave_type = 'sym3'\n self.TK = 0.5\n self.TT = 0.15\n self.burst_s = 2\n self.burst_gamma = 0.05\n\n def remove_baseline(self):\n \"\"\"Remove the base line using a Savitzky-Golay method\"\"\"\n print(' \\t Apply Savitzky-Golay filter \\t %d' % self.nwin)\n base_savgol = signal.savgol_filter(self.input, self.nwin, 1)\n self.input_nobase = self.input - base_savgol\n\n def denoise(self):\n \"\"\"denoise the data using the 2stage kurtosis denoising\"\"\"\n self.len_swt = self.len\n while not (self.len_swt / 4).is_integer():\n self.len_swt -= 1\n inp = self.input_nobase[:self.len_swt]\n self.wave = pywt.Wavelet(self.wave_type)\n nLevel = pywt.swt_max_level(self.len_swt)\n self.coeffs = pywt.swt(inp, self.wave, level=2)\n print(' \\t Denoise STW coefficients \\t %1.2f %1.2f' % (self.TK,\n self.TT))\n (cA2, cD2), (cA1, cD1) = self.coeffs\n k2 = self._rolling_kts(cD2, self.nwin)\n k1 = self._rolling_kts(cD1, self.nwin)\n cD2[k2 < self.TK] = 0\n cD1[k1 < self.TK] = 0\n cA2[k2 < self.TK] = 0\n cA1[k1 < self.TK] = 0\n sigma_roll_1 = mad(cD1[cD1 != 0]) * np.ones(self.len_swt)\n uthresh_roll_1 = self.TT * sigma_roll_1 * np.sqrt(2 * np.log(self.\n len_swt))\n cD1[abs(cD1) < uthresh_roll_1] = 0\n sigma_roll_2 = mad(cD2[cD2 != 0]) * np.ones(self.len_swt)\n uthresh_roll_2 = self.TT * sigma_roll_2 * np.sqrt(2 * np.log(self.\n len_swt))\n cD2[abs(cD2) < uthresh_roll_2] = 0\n cA1[cD1 == 0] = 0\n cA2[cD2 == 0] = 0\n self.denoised_coeffs = [(cA1, cD1), (cA2, cD2)]\n self.input_denoised = pywt.iswt(self.denoised_coeffs, self.wave)\n\n def get_burst(self):\n \"\"\"Detect bursts of activity.\"\"\"\n print('\\t Detect bursts \\t\\t\\t %d %1.2f' % (self.burst_s, self.\n burst_gamma))\n _tmp = np.copy(self.input_denoised)\n _tmp[_tmp < 0] = 0\n _tmp += 1e-12\n self.input_cummulative = np.cumsum(_tmp)\n self.T_cummulative = np.copy(self.time[0:-1:10])\n self.input_cummulative = self.input_cummulative[0:-1:10]\n self.burst = pybursts.kleinberg(self.input_cummulative, s=int(self.\n burst_s), gamma=self.burst_gamma)\n Tbursts = []\n for b in self.burst:\n if b[0] == 1:\n ti = self.T_cummulative[np.argwhere(self.input_cummulative ==\n b[1])[0]]\n tf = self.T_cummulative[np.argwhere(self.input_cummulative ==\n b[2])[0]]\n Tbursts.append([ti[0], tf[0]])\n x_peak_bursts = []\n y_peak_bursts = []\n print(Tbursts)\n if len(Tbursts) > 0:\n for i in range(len(Tbursts) - 1):\n ind_init = np.argmin(abs(self.time - Tbursts[i][1]))\n ind_final = np.argmin(abs(self.time - Tbursts[i + 1][0]))\n x_peak_bursts.append(self.time[ind_init + np.argmax(self.\n input_denoised[ind_init:ind_final])])\n y_peak_bursts.append(self.input[ind_init + np.argmax(self.\n input_denoised[ind_init:ind_final])])\n else:\n print('\\t no peaks found in the bursts')\n self.xpeak = x_peak_bursts\n self.ypeak = y_peak_bursts\n\n @staticmethod\n def _rolling_kts(y, N):\n \"\"\"Compute the rolling kurtosis.\"\"\"\n nPTS, N2 = len(y), int(N / 2)\n kts = np.zeros(nPTS)\n for i in range(nPTS):\n s, e = i - N2, i + N2\n if s < 0:\n s = 0\n if s > nPTS - 1:\n s = nPTS - 1\n win = np.ones(len(y[s:e]))\n kts[i] = kurtosis(win * y[s:e])\n return kts\n\n def plot(self):\n plt.plot(self.time, self.input)\n plt.scatter(self.xpeak, self.ypeak, c='orange')\n ypeak = np.zeros_like(self.time)\n for p in self.xpeak:\n ypeak[self.time == p] = 0.5\n plt.plot(self.time, ypeak - 1, c='orange')\n plt.show()\n\n\nif __name__ == '__main__':\n filename = '003_VL.csv'\n kb = Kurtoburst(filename)\n kb.remove_baseline()\n kb.denoise()\n tb = kb.get_burst()\n kb.plot()\n print(tb)\n",
"<import token>\n\n\nclass Kurtoburst(object):\n \"\"\" Class to detect the peak using the kurtoburst method.\"\"\"\n\n def __init__(self, filename):\n self.filename = filename\n self.raw_data = pandas.read_csv(self.filename)\n self.colname = ' AU02_r'\n self.time = np.array(self.raw_data[' timestamp'][::10])\n self.input = np.array(self.raw_data[self.colname][::10])\n self.len = len(self.input)\n self.nwin = 51\n self.wave_type = 'sym3'\n self.TK = 0.5\n self.TT = 0.15\n self.burst_s = 2\n self.burst_gamma = 0.05\n\n def remove_baseline(self):\n \"\"\"Remove the base line using a Savitzky-Golay method\"\"\"\n print(' \\t Apply Savitzky-Golay filter \\t %d' % self.nwin)\n base_savgol = signal.savgol_filter(self.input, self.nwin, 1)\n self.input_nobase = self.input - base_savgol\n\n def denoise(self):\n \"\"\"denoise the data using the 2stage kurtosis denoising\"\"\"\n self.len_swt = self.len\n while not (self.len_swt / 4).is_integer():\n self.len_swt -= 1\n inp = self.input_nobase[:self.len_swt]\n self.wave = pywt.Wavelet(self.wave_type)\n nLevel = pywt.swt_max_level(self.len_swt)\n self.coeffs = pywt.swt(inp, self.wave, level=2)\n print(' \\t Denoise STW coefficients \\t %1.2f %1.2f' % (self.TK,\n self.TT))\n (cA2, cD2), (cA1, cD1) = self.coeffs\n k2 = self._rolling_kts(cD2, self.nwin)\n k1 = self._rolling_kts(cD1, self.nwin)\n cD2[k2 < self.TK] = 0\n cD1[k1 < self.TK] = 0\n cA2[k2 < self.TK] = 0\n cA1[k1 < self.TK] = 0\n sigma_roll_1 = mad(cD1[cD1 != 0]) * np.ones(self.len_swt)\n uthresh_roll_1 = self.TT * sigma_roll_1 * np.sqrt(2 * np.log(self.\n len_swt))\n cD1[abs(cD1) < uthresh_roll_1] = 0\n sigma_roll_2 = mad(cD2[cD2 != 0]) * np.ones(self.len_swt)\n uthresh_roll_2 = self.TT * sigma_roll_2 * np.sqrt(2 * np.log(self.\n len_swt))\n cD2[abs(cD2) < uthresh_roll_2] = 0\n cA1[cD1 == 0] = 0\n cA2[cD2 == 0] = 0\n self.denoised_coeffs = [(cA1, cD1), (cA2, cD2)]\n self.input_denoised = pywt.iswt(self.denoised_coeffs, self.wave)\n\n def get_burst(self):\n \"\"\"Detect bursts of activity.\"\"\"\n print('\\t Detect bursts \\t\\t\\t %d %1.2f' % (self.burst_s, self.\n burst_gamma))\n _tmp = np.copy(self.input_denoised)\n _tmp[_tmp < 0] = 0\n _tmp += 1e-12\n self.input_cummulative = np.cumsum(_tmp)\n self.T_cummulative = np.copy(self.time[0:-1:10])\n self.input_cummulative = self.input_cummulative[0:-1:10]\n self.burst = pybursts.kleinberg(self.input_cummulative, s=int(self.\n burst_s), gamma=self.burst_gamma)\n Tbursts = []\n for b in self.burst:\n if b[0] == 1:\n ti = self.T_cummulative[np.argwhere(self.input_cummulative ==\n b[1])[0]]\n tf = self.T_cummulative[np.argwhere(self.input_cummulative ==\n b[2])[0]]\n Tbursts.append([ti[0], tf[0]])\n x_peak_bursts = []\n y_peak_bursts = []\n print(Tbursts)\n if len(Tbursts) > 0:\n for i in range(len(Tbursts) - 1):\n ind_init = np.argmin(abs(self.time - Tbursts[i][1]))\n ind_final = np.argmin(abs(self.time - Tbursts[i + 1][0]))\n x_peak_bursts.append(self.time[ind_init + np.argmax(self.\n input_denoised[ind_init:ind_final])])\n y_peak_bursts.append(self.input[ind_init + np.argmax(self.\n input_denoised[ind_init:ind_final])])\n else:\n print('\\t no peaks found in the bursts')\n self.xpeak = x_peak_bursts\n self.ypeak = y_peak_bursts\n\n @staticmethod\n def _rolling_kts(y, N):\n \"\"\"Compute the rolling kurtosis.\"\"\"\n nPTS, N2 = len(y), int(N / 2)\n kts = np.zeros(nPTS)\n for i in range(nPTS):\n s, e = i - N2, i + N2\n if s < 0:\n s = 0\n if s > nPTS - 1:\n s = nPTS - 1\n win = np.ones(len(y[s:e]))\n kts[i] = kurtosis(win * y[s:e])\n return kts\n\n def plot(self):\n plt.plot(self.time, self.input)\n plt.scatter(self.xpeak, self.ypeak, c='orange')\n ypeak = np.zeros_like(self.time)\n for p in self.xpeak:\n ypeak[self.time == p] = 0.5\n plt.plot(self.time, ypeak - 1, c='orange')\n plt.show()\n\n\n<code token>\n",
"<import token>\n\n\nclass Kurtoburst(object):\n <docstring token>\n\n def __init__(self, filename):\n self.filename = filename\n self.raw_data = pandas.read_csv(self.filename)\n self.colname = ' AU02_r'\n self.time = np.array(self.raw_data[' timestamp'][::10])\n self.input = np.array(self.raw_data[self.colname][::10])\n self.len = len(self.input)\n self.nwin = 51\n self.wave_type = 'sym3'\n self.TK = 0.5\n self.TT = 0.15\n self.burst_s = 2\n self.burst_gamma = 0.05\n\n def remove_baseline(self):\n \"\"\"Remove the base line using a Savitzky-Golay method\"\"\"\n print(' \\t Apply Savitzky-Golay filter \\t %d' % self.nwin)\n base_savgol = signal.savgol_filter(self.input, self.nwin, 1)\n self.input_nobase = self.input - base_savgol\n\n def denoise(self):\n \"\"\"denoise the data using the 2stage kurtosis denoising\"\"\"\n self.len_swt = self.len\n while not (self.len_swt / 4).is_integer():\n self.len_swt -= 1\n inp = self.input_nobase[:self.len_swt]\n self.wave = pywt.Wavelet(self.wave_type)\n nLevel = pywt.swt_max_level(self.len_swt)\n self.coeffs = pywt.swt(inp, self.wave, level=2)\n print(' \\t Denoise STW coefficients \\t %1.2f %1.2f' % (self.TK,\n self.TT))\n (cA2, cD2), (cA1, cD1) = self.coeffs\n k2 = self._rolling_kts(cD2, self.nwin)\n k1 = self._rolling_kts(cD1, self.nwin)\n cD2[k2 < self.TK] = 0\n cD1[k1 < self.TK] = 0\n cA2[k2 < self.TK] = 0\n cA1[k1 < self.TK] = 0\n sigma_roll_1 = mad(cD1[cD1 != 0]) * np.ones(self.len_swt)\n uthresh_roll_1 = self.TT * sigma_roll_1 * np.sqrt(2 * np.log(self.\n len_swt))\n cD1[abs(cD1) < uthresh_roll_1] = 0\n sigma_roll_2 = mad(cD2[cD2 != 0]) * np.ones(self.len_swt)\n uthresh_roll_2 = self.TT * sigma_roll_2 * np.sqrt(2 * np.log(self.\n len_swt))\n cD2[abs(cD2) < uthresh_roll_2] = 0\n cA1[cD1 == 0] = 0\n cA2[cD2 == 0] = 0\n self.denoised_coeffs = [(cA1, cD1), (cA2, cD2)]\n self.input_denoised = pywt.iswt(self.denoised_coeffs, self.wave)\n\n def get_burst(self):\n \"\"\"Detect bursts of activity.\"\"\"\n print('\\t Detect bursts \\t\\t\\t %d %1.2f' % (self.burst_s, self.\n burst_gamma))\n _tmp = np.copy(self.input_denoised)\n _tmp[_tmp < 0] = 0\n _tmp += 1e-12\n self.input_cummulative = np.cumsum(_tmp)\n self.T_cummulative = np.copy(self.time[0:-1:10])\n self.input_cummulative = self.input_cummulative[0:-1:10]\n self.burst = pybursts.kleinberg(self.input_cummulative, s=int(self.\n burst_s), gamma=self.burst_gamma)\n Tbursts = []\n for b in self.burst:\n if b[0] == 1:\n ti = self.T_cummulative[np.argwhere(self.input_cummulative ==\n b[1])[0]]\n tf = self.T_cummulative[np.argwhere(self.input_cummulative ==\n b[2])[0]]\n Tbursts.append([ti[0], tf[0]])\n x_peak_bursts = []\n y_peak_bursts = []\n print(Tbursts)\n if len(Tbursts) > 0:\n for i in range(len(Tbursts) - 1):\n ind_init = np.argmin(abs(self.time - Tbursts[i][1]))\n ind_final = np.argmin(abs(self.time - Tbursts[i + 1][0]))\n x_peak_bursts.append(self.time[ind_init + np.argmax(self.\n input_denoised[ind_init:ind_final])])\n y_peak_bursts.append(self.input[ind_init + np.argmax(self.\n input_denoised[ind_init:ind_final])])\n else:\n print('\\t no peaks found in the bursts')\n self.xpeak = x_peak_bursts\n self.ypeak = y_peak_bursts\n\n @staticmethod\n def _rolling_kts(y, N):\n \"\"\"Compute the rolling kurtosis.\"\"\"\n nPTS, N2 = len(y), int(N / 2)\n kts = np.zeros(nPTS)\n for i in range(nPTS):\n s, e = i - N2, i + N2\n if s < 0:\n s = 0\n if s > nPTS - 1:\n s = nPTS - 1\n win = np.ones(len(y[s:e]))\n kts[i] = kurtosis(win * y[s:e])\n return kts\n\n def plot(self):\n plt.plot(self.time, self.input)\n plt.scatter(self.xpeak, self.ypeak, c='orange')\n ypeak = np.zeros_like(self.time)\n for p in self.xpeak:\n ypeak[self.time == p] = 0.5\n plt.plot(self.time, ypeak - 1, c='orange')\n plt.show()\n\n\n<code token>\n",
"<import token>\n\n\nclass Kurtoburst(object):\n <docstring token>\n\n def __init__(self, filename):\n self.filename = filename\n self.raw_data = pandas.read_csv(self.filename)\n self.colname = ' AU02_r'\n self.time = np.array(self.raw_data[' timestamp'][::10])\n self.input = np.array(self.raw_data[self.colname][::10])\n self.len = len(self.input)\n self.nwin = 51\n self.wave_type = 'sym3'\n self.TK = 0.5\n self.TT = 0.15\n self.burst_s = 2\n self.burst_gamma = 0.05\n\n def remove_baseline(self):\n \"\"\"Remove the base line using a Savitzky-Golay method\"\"\"\n print(' \\t Apply Savitzky-Golay filter \\t %d' % self.nwin)\n base_savgol = signal.savgol_filter(self.input, self.nwin, 1)\n self.input_nobase = self.input - base_savgol\n\n def denoise(self):\n \"\"\"denoise the data using the 2stage kurtosis denoising\"\"\"\n self.len_swt = self.len\n while not (self.len_swt / 4).is_integer():\n self.len_swt -= 1\n inp = self.input_nobase[:self.len_swt]\n self.wave = pywt.Wavelet(self.wave_type)\n nLevel = pywt.swt_max_level(self.len_swt)\n self.coeffs = pywt.swt(inp, self.wave, level=2)\n print(' \\t Denoise STW coefficients \\t %1.2f %1.2f' % (self.TK,\n self.TT))\n (cA2, cD2), (cA1, cD1) = self.coeffs\n k2 = self._rolling_kts(cD2, self.nwin)\n k1 = self._rolling_kts(cD1, self.nwin)\n cD2[k2 < self.TK] = 0\n cD1[k1 < self.TK] = 0\n cA2[k2 < self.TK] = 0\n cA1[k1 < self.TK] = 0\n sigma_roll_1 = mad(cD1[cD1 != 0]) * np.ones(self.len_swt)\n uthresh_roll_1 = self.TT * sigma_roll_1 * np.sqrt(2 * np.log(self.\n len_swt))\n cD1[abs(cD1) < uthresh_roll_1] = 0\n sigma_roll_2 = mad(cD2[cD2 != 0]) * np.ones(self.len_swt)\n uthresh_roll_2 = self.TT * sigma_roll_2 * np.sqrt(2 * np.log(self.\n len_swt))\n cD2[abs(cD2) < uthresh_roll_2] = 0\n cA1[cD1 == 0] = 0\n cA2[cD2 == 0] = 0\n self.denoised_coeffs = [(cA1, cD1), (cA2, cD2)]\n self.input_denoised = pywt.iswt(self.denoised_coeffs, self.wave)\n\n def get_burst(self):\n \"\"\"Detect bursts of activity.\"\"\"\n print('\\t Detect bursts \\t\\t\\t %d %1.2f' % (self.burst_s, self.\n burst_gamma))\n _tmp = np.copy(self.input_denoised)\n _tmp[_tmp < 0] = 0\n _tmp += 1e-12\n self.input_cummulative = np.cumsum(_tmp)\n self.T_cummulative = np.copy(self.time[0:-1:10])\n self.input_cummulative = self.input_cummulative[0:-1:10]\n self.burst = pybursts.kleinberg(self.input_cummulative, s=int(self.\n burst_s), gamma=self.burst_gamma)\n Tbursts = []\n for b in self.burst:\n if b[0] == 1:\n ti = self.T_cummulative[np.argwhere(self.input_cummulative ==\n b[1])[0]]\n tf = self.T_cummulative[np.argwhere(self.input_cummulative ==\n b[2])[0]]\n Tbursts.append([ti[0], tf[0]])\n x_peak_bursts = []\n y_peak_bursts = []\n print(Tbursts)\n if len(Tbursts) > 0:\n for i in range(len(Tbursts) - 1):\n ind_init = np.argmin(abs(self.time - Tbursts[i][1]))\n ind_final = np.argmin(abs(self.time - Tbursts[i + 1][0]))\n x_peak_bursts.append(self.time[ind_init + np.argmax(self.\n input_denoised[ind_init:ind_final])])\n y_peak_bursts.append(self.input[ind_init + np.argmax(self.\n input_denoised[ind_init:ind_final])])\n else:\n print('\\t no peaks found in the bursts')\n self.xpeak = x_peak_bursts\n self.ypeak = y_peak_bursts\n <function token>\n\n def plot(self):\n plt.plot(self.time, self.input)\n plt.scatter(self.xpeak, self.ypeak, c='orange')\n ypeak = np.zeros_like(self.time)\n for p in self.xpeak:\n ypeak[self.time == p] = 0.5\n plt.plot(self.time, ypeak - 1, c='orange')\n plt.show()\n\n\n<code token>\n",
"<import token>\n\n\nclass Kurtoburst(object):\n <docstring token>\n\n def __init__(self, filename):\n self.filename = filename\n self.raw_data = pandas.read_csv(self.filename)\n self.colname = ' AU02_r'\n self.time = np.array(self.raw_data[' timestamp'][::10])\n self.input = np.array(self.raw_data[self.colname][::10])\n self.len = len(self.input)\n self.nwin = 51\n self.wave_type = 'sym3'\n self.TK = 0.5\n self.TT = 0.15\n self.burst_s = 2\n self.burst_gamma = 0.05\n\n def remove_baseline(self):\n \"\"\"Remove the base line using a Savitzky-Golay method\"\"\"\n print(' \\t Apply Savitzky-Golay filter \\t %d' % self.nwin)\n base_savgol = signal.savgol_filter(self.input, self.nwin, 1)\n self.input_nobase = self.input - base_savgol\n\n def denoise(self):\n \"\"\"denoise the data using the 2stage kurtosis denoising\"\"\"\n self.len_swt = self.len\n while not (self.len_swt / 4).is_integer():\n self.len_swt -= 1\n inp = self.input_nobase[:self.len_swt]\n self.wave = pywt.Wavelet(self.wave_type)\n nLevel = pywt.swt_max_level(self.len_swt)\n self.coeffs = pywt.swt(inp, self.wave, level=2)\n print(' \\t Denoise STW coefficients \\t %1.2f %1.2f' % (self.TK,\n self.TT))\n (cA2, cD2), (cA1, cD1) = self.coeffs\n k2 = self._rolling_kts(cD2, self.nwin)\n k1 = self._rolling_kts(cD1, self.nwin)\n cD2[k2 < self.TK] = 0\n cD1[k1 < self.TK] = 0\n cA2[k2 < self.TK] = 0\n cA1[k1 < self.TK] = 0\n sigma_roll_1 = mad(cD1[cD1 != 0]) * np.ones(self.len_swt)\n uthresh_roll_1 = self.TT * sigma_roll_1 * np.sqrt(2 * np.log(self.\n len_swt))\n cD1[abs(cD1) < uthresh_roll_1] = 0\n sigma_roll_2 = mad(cD2[cD2 != 0]) * np.ones(self.len_swt)\n uthresh_roll_2 = self.TT * sigma_roll_2 * np.sqrt(2 * np.log(self.\n len_swt))\n cD2[abs(cD2) < uthresh_roll_2] = 0\n cA1[cD1 == 0] = 0\n cA2[cD2 == 0] = 0\n self.denoised_coeffs = [(cA1, cD1), (cA2, cD2)]\n self.input_denoised = pywt.iswt(self.denoised_coeffs, self.wave)\n\n def get_burst(self):\n \"\"\"Detect bursts of activity.\"\"\"\n print('\\t Detect bursts \\t\\t\\t %d %1.2f' % (self.burst_s, self.\n burst_gamma))\n _tmp = np.copy(self.input_denoised)\n _tmp[_tmp < 0] = 0\n _tmp += 1e-12\n self.input_cummulative = np.cumsum(_tmp)\n self.T_cummulative = np.copy(self.time[0:-1:10])\n self.input_cummulative = self.input_cummulative[0:-1:10]\n self.burst = pybursts.kleinberg(self.input_cummulative, s=int(self.\n burst_s), gamma=self.burst_gamma)\n Tbursts = []\n for b in self.burst:\n if b[0] == 1:\n ti = self.T_cummulative[np.argwhere(self.input_cummulative ==\n b[1])[0]]\n tf = self.T_cummulative[np.argwhere(self.input_cummulative ==\n b[2])[0]]\n Tbursts.append([ti[0], tf[0]])\n x_peak_bursts = []\n y_peak_bursts = []\n print(Tbursts)\n if len(Tbursts) > 0:\n for i in range(len(Tbursts) - 1):\n ind_init = np.argmin(abs(self.time - Tbursts[i][1]))\n ind_final = np.argmin(abs(self.time - Tbursts[i + 1][0]))\n x_peak_bursts.append(self.time[ind_init + np.argmax(self.\n input_denoised[ind_init:ind_final])])\n y_peak_bursts.append(self.input[ind_init + np.argmax(self.\n input_denoised[ind_init:ind_final])])\n else:\n print('\\t no peaks found in the bursts')\n self.xpeak = x_peak_bursts\n self.ypeak = y_peak_bursts\n <function token>\n <function token>\n\n\n<code token>\n",
"<import token>\n\n\nclass Kurtoburst(object):\n <docstring token>\n\n def __init__(self, filename):\n self.filename = filename\n self.raw_data = pandas.read_csv(self.filename)\n self.colname = ' AU02_r'\n self.time = np.array(self.raw_data[' timestamp'][::10])\n self.input = np.array(self.raw_data[self.colname][::10])\n self.len = len(self.input)\n self.nwin = 51\n self.wave_type = 'sym3'\n self.TK = 0.5\n self.TT = 0.15\n self.burst_s = 2\n self.burst_gamma = 0.05\n\n def remove_baseline(self):\n \"\"\"Remove the base line using a Savitzky-Golay method\"\"\"\n print(' \\t Apply Savitzky-Golay filter \\t %d' % self.nwin)\n base_savgol = signal.savgol_filter(self.input, self.nwin, 1)\n self.input_nobase = self.input - base_savgol\n <function token>\n\n def get_burst(self):\n \"\"\"Detect bursts of activity.\"\"\"\n print('\\t Detect bursts \\t\\t\\t %d %1.2f' % (self.burst_s, self.\n burst_gamma))\n _tmp = np.copy(self.input_denoised)\n _tmp[_tmp < 0] = 0\n _tmp += 1e-12\n self.input_cummulative = np.cumsum(_tmp)\n self.T_cummulative = np.copy(self.time[0:-1:10])\n self.input_cummulative = self.input_cummulative[0:-1:10]\n self.burst = pybursts.kleinberg(self.input_cummulative, s=int(self.\n burst_s), gamma=self.burst_gamma)\n Tbursts = []\n for b in self.burst:\n if b[0] == 1:\n ti = self.T_cummulative[np.argwhere(self.input_cummulative ==\n b[1])[0]]\n tf = self.T_cummulative[np.argwhere(self.input_cummulative ==\n b[2])[0]]\n Tbursts.append([ti[0], tf[0]])\n x_peak_bursts = []\n y_peak_bursts = []\n print(Tbursts)\n if len(Tbursts) > 0:\n for i in range(len(Tbursts) - 1):\n ind_init = np.argmin(abs(self.time - Tbursts[i][1]))\n ind_final = np.argmin(abs(self.time - Tbursts[i + 1][0]))\n x_peak_bursts.append(self.time[ind_init + np.argmax(self.\n input_denoised[ind_init:ind_final])])\n y_peak_bursts.append(self.input[ind_init + np.argmax(self.\n input_denoised[ind_init:ind_final])])\n else:\n print('\\t no peaks found in the bursts')\n self.xpeak = x_peak_bursts\n self.ypeak = y_peak_bursts\n <function token>\n <function token>\n\n\n<code token>\n",
"<import token>\n\n\nclass Kurtoburst(object):\n <docstring token>\n\n def __init__(self, filename):\n self.filename = filename\n self.raw_data = pandas.read_csv(self.filename)\n self.colname = ' AU02_r'\n self.time = np.array(self.raw_data[' timestamp'][::10])\n self.input = np.array(self.raw_data[self.colname][::10])\n self.len = len(self.input)\n self.nwin = 51\n self.wave_type = 'sym3'\n self.TK = 0.5\n self.TT = 0.15\n self.burst_s = 2\n self.burst_gamma = 0.05\n\n def remove_baseline(self):\n \"\"\"Remove the base line using a Savitzky-Golay method\"\"\"\n print(' \\t Apply Savitzky-Golay filter \\t %d' % self.nwin)\n base_savgol = signal.savgol_filter(self.input, self.nwin, 1)\n self.input_nobase = self.input - base_savgol\n <function token>\n <function token>\n <function token>\n <function token>\n\n\n<code token>\n",
"<import token>\n\n\nclass Kurtoburst(object):\n <docstring token>\n\n def __init__(self, filename):\n self.filename = filename\n self.raw_data = pandas.read_csv(self.filename)\n self.colname = ' AU02_r'\n self.time = np.array(self.raw_data[' timestamp'][::10])\n self.input = np.array(self.raw_data[self.colname][::10])\n self.len = len(self.input)\n self.nwin = 51\n self.wave_type = 'sym3'\n self.TK = 0.5\n self.TT = 0.15\n self.burst_s = 2\n self.burst_gamma = 0.05\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n\n<code token>\n",
"<import token>\n\n\nclass Kurtoburst(object):\n <docstring token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n\n<code token>\n",
"<import token>\n<class token>\n<code token>\n"
] | false |
98,318 |
3103f21a8e33608dd0c8a3a5f9693ff3593ed4b3
|
import os
os.system("python random_walk.py --ZN 50 --HN 10 --S 200 --g 40 --ZK 0.4 --HI 0.3 --makeplot --savefile movies_KA/RW_ZN_50_HN_10.gif")
os.system("python random_walk.py --ZN 10 --HN 50 --S 200 --g 40 --ZK 0.4 --HI 0.3 --makeplot --savefile movies_KA/RW_ZN_10_HN_50.gif")
os.system("python random_walk.py --ZN 50 --HN 50 --S 200 --g 40 --ZK 0.4 --HI 0.3 --makeplot --savefile movies_KA/RW_ZN_50_HN_50.gif")
|
[
"import os\n\nos.system(\"python random_walk.py --ZN 50 --HN 10 --S 200 --g 40 --ZK 0.4 --HI 0.3 --makeplot --savefile movies_KA/RW_ZN_50_HN_10.gif\")\nos.system(\"python random_walk.py --ZN 10 --HN 50 --S 200 --g 40 --ZK 0.4 --HI 0.3 --makeplot --savefile movies_KA/RW_ZN_10_HN_50.gif\")\nos.system(\"python random_walk.py --ZN 50 --HN 50 --S 200 --g 40 --ZK 0.4 --HI 0.3 --makeplot --savefile movies_KA/RW_ZN_50_HN_50.gif\")\n",
"import os\nos.system(\n 'python random_walk.py --ZN 50 --HN 10 --S 200 --g 40 --ZK 0.4 --HI 0.3 --makeplot --savefile movies_KA/RW_ZN_50_HN_10.gif'\n )\nos.system(\n 'python random_walk.py --ZN 10 --HN 50 --S 200 --g 40 --ZK 0.4 --HI 0.3 --makeplot --savefile movies_KA/RW_ZN_10_HN_50.gif'\n )\nos.system(\n 'python random_walk.py --ZN 50 --HN 50 --S 200 --g 40 --ZK 0.4 --HI 0.3 --makeplot --savefile movies_KA/RW_ZN_50_HN_50.gif'\n )\n",
"<import token>\nos.system(\n 'python random_walk.py --ZN 50 --HN 10 --S 200 --g 40 --ZK 0.4 --HI 0.3 --makeplot --savefile movies_KA/RW_ZN_50_HN_10.gif'\n )\nos.system(\n 'python random_walk.py --ZN 10 --HN 50 --S 200 --g 40 --ZK 0.4 --HI 0.3 --makeplot --savefile movies_KA/RW_ZN_10_HN_50.gif'\n )\nos.system(\n 'python random_walk.py --ZN 50 --HN 50 --S 200 --g 40 --ZK 0.4 --HI 0.3 --makeplot --savefile movies_KA/RW_ZN_50_HN_50.gif'\n )\n",
"<import token>\n<code token>\n"
] | false |
98,319 |
7c7845e49c6f9618442e041650cef38ef81de042
|
__author__ = 'Dom4n'
import os
import ftputil
import glob
import sensitive as s
import time
import threading
def upload():
directory = 'F:/LOGS/html/'
os.chdir(directory)
nieudane = []
pool_sema = threading.BoundedSemaphore(4)
pliki = glob.glob('*.html')
if len(pliki) == 0:
raise FileNotFoundError('BRAK PLIKOW!!!')
tim = time.time()
print('FTP -> START')
with ftputil.FTPHost(s.ftp_host, s.ftp_login, s.ftp_pass) as ftp_host:
for x in pliki:
try:
isok = ftp_host.upload_if_newer(x, ftp_host.curdir+'/all/'+x)
if isok:
print('upload pliku: '+x+' zakonczony powodzeniem')
else:
print('upload pliku: '+x+' NIEUDANY!!!!')
nieudane.append(x)
except Exception as e:
print('nieudane przeslanie pliku: '+x)
if len(nieudane) > 0:
print('Nieprzeslane pliki:\n'+str(nieudane))
else:
print('Wszystkie pliki przeslane!')
tim = time.time() - tim
print('Czas: '+str(tim))
print('FTP -> KONIEC')
|
[
"__author__ = 'Dom4n'\n\nimport os\nimport ftputil\nimport glob\nimport sensitive as s\nimport time\nimport threading\n\n\ndef upload():\n directory = 'F:/LOGS/html/'\n os.chdir(directory)\n nieudane = []\n pool_sema = threading.BoundedSemaphore(4)\n\n pliki = glob.glob('*.html')\n if len(pliki) == 0:\n raise FileNotFoundError('BRAK PLIKOW!!!')\n\n tim = time.time()\n print('FTP -> START')\n\n with ftputil.FTPHost(s.ftp_host, s.ftp_login, s.ftp_pass) as ftp_host:\n for x in pliki:\n try:\n isok = ftp_host.upload_if_newer(x, ftp_host.curdir+'/all/'+x)\n if isok:\n print('upload pliku: '+x+' zakonczony powodzeniem')\n else:\n print('upload pliku: '+x+' NIEUDANY!!!!')\n nieudane.append(x)\n except Exception as e:\n print('nieudane przeslanie pliku: '+x)\n\n if len(nieudane) > 0:\n print('Nieprzeslane pliki:\\n'+str(nieudane))\n else:\n print('Wszystkie pliki przeslane!')\n\n tim = time.time() - tim\n print('Czas: '+str(tim))\n print('FTP -> KONIEC')",
"__author__ = 'Dom4n'\nimport os\nimport ftputil\nimport glob\nimport sensitive as s\nimport time\nimport threading\n\n\ndef upload():\n directory = 'F:/LOGS/html/'\n os.chdir(directory)\n nieudane = []\n pool_sema = threading.BoundedSemaphore(4)\n pliki = glob.glob('*.html')\n if len(pliki) == 0:\n raise FileNotFoundError('BRAK PLIKOW!!!')\n tim = time.time()\n print('FTP -> START')\n with ftputil.FTPHost(s.ftp_host, s.ftp_login, s.ftp_pass) as ftp_host:\n for x in pliki:\n try:\n isok = ftp_host.upload_if_newer(x, ftp_host.curdir +\n '/all/' + x)\n if isok:\n print('upload pliku: ' + x + ' zakonczony powodzeniem')\n else:\n print('upload pliku: ' + x + ' NIEUDANY!!!!')\n nieudane.append(x)\n except Exception as e:\n print('nieudane przeslanie pliku: ' + x)\n if len(nieudane) > 0:\n print('Nieprzeslane pliki:\\n' + str(nieudane))\n else:\n print('Wszystkie pliki przeslane!')\n tim = time.time() - tim\n print('Czas: ' + str(tim))\n print('FTP -> KONIEC')\n",
"__author__ = 'Dom4n'\n<import token>\n\n\ndef upload():\n directory = 'F:/LOGS/html/'\n os.chdir(directory)\n nieudane = []\n pool_sema = threading.BoundedSemaphore(4)\n pliki = glob.glob('*.html')\n if len(pliki) == 0:\n raise FileNotFoundError('BRAK PLIKOW!!!')\n tim = time.time()\n print('FTP -> START')\n with ftputil.FTPHost(s.ftp_host, s.ftp_login, s.ftp_pass) as ftp_host:\n for x in pliki:\n try:\n isok = ftp_host.upload_if_newer(x, ftp_host.curdir +\n '/all/' + x)\n if isok:\n print('upload pliku: ' + x + ' zakonczony powodzeniem')\n else:\n print('upload pliku: ' + x + ' NIEUDANY!!!!')\n nieudane.append(x)\n except Exception as e:\n print('nieudane przeslanie pliku: ' + x)\n if len(nieudane) > 0:\n print('Nieprzeslane pliki:\\n' + str(nieudane))\n else:\n print('Wszystkie pliki przeslane!')\n tim = time.time() - tim\n print('Czas: ' + str(tim))\n print('FTP -> KONIEC')\n",
"<assignment token>\n<import token>\n\n\ndef upload():\n directory = 'F:/LOGS/html/'\n os.chdir(directory)\n nieudane = []\n pool_sema = threading.BoundedSemaphore(4)\n pliki = glob.glob('*.html')\n if len(pliki) == 0:\n raise FileNotFoundError('BRAK PLIKOW!!!')\n tim = time.time()\n print('FTP -> START')\n with ftputil.FTPHost(s.ftp_host, s.ftp_login, s.ftp_pass) as ftp_host:\n for x in pliki:\n try:\n isok = ftp_host.upload_if_newer(x, ftp_host.curdir +\n '/all/' + x)\n if isok:\n print('upload pliku: ' + x + ' zakonczony powodzeniem')\n else:\n print('upload pliku: ' + x + ' NIEUDANY!!!!')\n nieudane.append(x)\n except Exception as e:\n print('nieudane przeslanie pliku: ' + x)\n if len(nieudane) > 0:\n print('Nieprzeslane pliki:\\n' + str(nieudane))\n else:\n print('Wszystkie pliki przeslane!')\n tim = time.time() - tim\n print('Czas: ' + str(tim))\n print('FTP -> KONIEC')\n",
"<assignment token>\n<import token>\n<function token>\n"
] | false |
98,320 |
fb20e3f4bc237c777ce45523b0ca1439e956df4c
|
from django.urls import path
from . import views
app_name = "slackapp"
urlpatterns = [
#path('', views.slackmessage, name = "slackapp"),
path('reminders/', views.get_reminders, name = "reminders"),
path('reminder/create/', views.create_reminder, name = "reminder_create"),
path('reminder/<str:id>/delete/', views.delete_reminder, name = "reminder_delete"),
path('users/', views.get_users, name='users'),
path('user/by_email/<str:email>/',views.user_by_email, name='user_by_email')
#path('slackappp/', views.SlackView.as_view(), name="slackapp_"),
#path('misreminderss/', views.MisReminders.as_view(), name="misreminderss")
]
|
[
"from django.urls import path\nfrom . import views\n\napp_name = \"slackapp\"\nurlpatterns = [\n #path('', views.slackmessage, name = \"slackapp\"),\n path('reminders/', views.get_reminders, name = \"reminders\"),\n path('reminder/create/', views.create_reminder, name = \"reminder_create\"),\n path('reminder/<str:id>/delete/', views.delete_reminder, name = \"reminder_delete\"),\n\n path('users/', views.get_users, name='users'),\n path('user/by_email/<str:email>/',views.user_by_email, name='user_by_email')\n \n #path('slackappp/', views.SlackView.as_view(), name=\"slackapp_\"),\n #path('misreminderss/', views.MisReminders.as_view(), name=\"misreminderss\")\n]",
"from django.urls import path\nfrom . import views\napp_name = 'slackapp'\nurlpatterns = [path('reminders/', views.get_reminders, name='reminders'),\n path('reminder/create/', views.create_reminder, name='reminder_create'),\n path('reminder/<str:id>/delete/', views.delete_reminder, name=\n 'reminder_delete'), path('users/', views.get_users, name='users'), path\n ('user/by_email/<str:email>/', views.user_by_email, name='user_by_email')]\n",
"<import token>\napp_name = 'slackapp'\nurlpatterns = [path('reminders/', views.get_reminders, name='reminders'),\n path('reminder/create/', views.create_reminder, name='reminder_create'),\n path('reminder/<str:id>/delete/', views.delete_reminder, name=\n 'reminder_delete'), path('users/', views.get_users, name='users'), path\n ('user/by_email/<str:email>/', views.user_by_email, name='user_by_email')]\n",
"<import token>\n<assignment token>\n"
] | false |
98,321 |
916e845454c378b9df5534b2e61b1dd7fe5566cc
|
import time
from deu_ruim.domain.entities.story import *
from deu_ruim.domain.value.location import *
class StoryService():
def __init__(self, story_repository):
self.story_repository = story_repository
def create_story(self, title, description, lat, lon, category, tags=[]):
story = Story(None, title, description, Location(lat, lon), category, tags)
return self.story_repository.persist_story(story)
def search_story(self, tags, time_max=None):
return self.story_repository.search_stories(set(tags), time_max or time.time())
def disqualify_story(self, story_id):
story = self.story_repository.find_story(story_id)
if story != None:
story.disqualify()
self.story_repository.persist_story(story)
return story
return None
def get_stories(self, time):
return self.story_repository.get_stories(time)
def get_all_stories(self):
return self.story_repository.get_all_stories()
|
[
"import time\nfrom deu_ruim.domain.entities.story import *\nfrom deu_ruim.domain.value.location import *\n\nclass StoryService():\n def __init__(self, story_repository):\n self.story_repository = story_repository\n\n def create_story(self, title, description, lat, lon, category, tags=[]):\n story = Story(None, title, description, Location(lat, lon), category, tags)\n return self.story_repository.persist_story(story)\n\n def search_story(self, tags, time_max=None):\n return self.story_repository.search_stories(set(tags), time_max or time.time())\n \n def disqualify_story(self, story_id):\n story = self.story_repository.find_story(story_id)\n if story != None:\n story.disqualify()\n self.story_repository.persist_story(story)\n return story\n return None\n\n def get_stories(self, time):\n return self.story_repository.get_stories(time)\n\n def get_all_stories(self):\n return self.story_repository.get_all_stories()\n",
"import time\nfrom deu_ruim.domain.entities.story import *\nfrom deu_ruim.domain.value.location import *\n\n\nclass StoryService:\n\n def __init__(self, story_repository):\n self.story_repository = story_repository\n\n def create_story(self, title, description, lat, lon, category, tags=[]):\n story = Story(None, title, description, Location(lat, lon),\n category, tags)\n return self.story_repository.persist_story(story)\n\n def search_story(self, tags, time_max=None):\n return self.story_repository.search_stories(set(tags), time_max or\n time.time())\n\n def disqualify_story(self, story_id):\n story = self.story_repository.find_story(story_id)\n if story != None:\n story.disqualify()\n self.story_repository.persist_story(story)\n return story\n return None\n\n def get_stories(self, time):\n return self.story_repository.get_stories(time)\n\n def get_all_stories(self):\n return self.story_repository.get_all_stories()\n",
"<import token>\n\n\nclass StoryService:\n\n def __init__(self, story_repository):\n self.story_repository = story_repository\n\n def create_story(self, title, description, lat, lon, category, tags=[]):\n story = Story(None, title, description, Location(lat, lon),\n category, tags)\n return self.story_repository.persist_story(story)\n\n def search_story(self, tags, time_max=None):\n return self.story_repository.search_stories(set(tags), time_max or\n time.time())\n\n def disqualify_story(self, story_id):\n story = self.story_repository.find_story(story_id)\n if story != None:\n story.disqualify()\n self.story_repository.persist_story(story)\n return story\n return None\n\n def get_stories(self, time):\n return self.story_repository.get_stories(time)\n\n def get_all_stories(self):\n return self.story_repository.get_all_stories()\n",
"<import token>\n\n\nclass StoryService:\n\n def __init__(self, story_repository):\n self.story_repository = story_repository\n <function token>\n\n def search_story(self, tags, time_max=None):\n return self.story_repository.search_stories(set(tags), time_max or\n time.time())\n\n def disqualify_story(self, story_id):\n story = self.story_repository.find_story(story_id)\n if story != None:\n story.disqualify()\n self.story_repository.persist_story(story)\n return story\n return None\n\n def get_stories(self, time):\n return self.story_repository.get_stories(time)\n\n def get_all_stories(self):\n return self.story_repository.get_all_stories()\n",
"<import token>\n\n\nclass StoryService:\n <function token>\n <function token>\n\n def search_story(self, tags, time_max=None):\n return self.story_repository.search_stories(set(tags), time_max or\n time.time())\n\n def disqualify_story(self, story_id):\n story = self.story_repository.find_story(story_id)\n if story != None:\n story.disqualify()\n self.story_repository.persist_story(story)\n return story\n return None\n\n def get_stories(self, time):\n return self.story_repository.get_stories(time)\n\n def get_all_stories(self):\n return self.story_repository.get_all_stories()\n",
"<import token>\n\n\nclass StoryService:\n <function token>\n <function token>\n <function token>\n\n def disqualify_story(self, story_id):\n story = self.story_repository.find_story(story_id)\n if story != None:\n story.disqualify()\n self.story_repository.persist_story(story)\n return story\n return None\n\n def get_stories(self, time):\n return self.story_repository.get_stories(time)\n\n def get_all_stories(self):\n return self.story_repository.get_all_stories()\n",
"<import token>\n\n\nclass StoryService:\n <function token>\n <function token>\n <function token>\n\n def disqualify_story(self, story_id):\n story = self.story_repository.find_story(story_id)\n if story != None:\n story.disqualify()\n self.story_repository.persist_story(story)\n return story\n return None\n\n def get_stories(self, time):\n return self.story_repository.get_stories(time)\n <function token>\n",
"<import token>\n\n\nclass StoryService:\n <function token>\n <function token>\n <function token>\n\n def disqualify_story(self, story_id):\n story = self.story_repository.find_story(story_id)\n if story != None:\n story.disqualify()\n self.story_repository.persist_story(story)\n return story\n return None\n <function token>\n <function token>\n",
"<import token>\n\n\nclass StoryService:\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n",
"<import token>\n<class token>\n"
] | false |
98,322 |
52dca1edcbb03d0c265e47bed04f825e91f4cf20
|
from .random_sampler import random_sampler
|
[
"from .random_sampler import random_sampler\n",
"<import token>\n"
] | false |
98,323 |
443bc435274aaa95cacd165d030fbd8bcc5c81dd
|
#encoding=utf8
from sklearn.ensemble import RandomForestClassifier
from model.base_model import Model
from sklearn.cross_validation import cross_val_predict
from sklearn.metrics import classification_report
import logging
class RandomForestClassification(Model):
def __init__(self):
Model.__init__(self)
self.model = RandomForestClassifier(n_jobs=-1, random_state=2016, verbose=1)
def fit(self, x_train, y_train, need_transform_label=False):
param_grid = {'n_estimators': [100, 200]}
self.model = self.grid_search_fit_(self.model, param_grid, x_train, y_train)
def predict(self, x_test, need_transform_label=False):
return self.model.predict(x_test)
|
[
"#encoding=utf8\n\nfrom sklearn.ensemble import RandomForestClassifier\nfrom model.base_model import Model\nfrom sklearn.cross_validation import cross_val_predict\nfrom sklearn.metrics import classification_report\nimport logging\n\n\n\nclass RandomForestClassification(Model):\n\n def __init__(self):\n Model.__init__(self)\n self.model = RandomForestClassifier(n_jobs=-1, random_state=2016, verbose=1)\n\n def fit(self, x_train, y_train, need_transform_label=False):\n param_grid = {'n_estimators': [100, 200]}\n self.model = self.grid_search_fit_(self.model, param_grid, x_train, y_train)\n\n def predict(self, x_test, need_transform_label=False):\n return self.model.predict(x_test)",
"from sklearn.ensemble import RandomForestClassifier\nfrom model.base_model import Model\nfrom sklearn.cross_validation import cross_val_predict\nfrom sklearn.metrics import classification_report\nimport logging\n\n\nclass RandomForestClassification(Model):\n\n def __init__(self):\n Model.__init__(self)\n self.model = RandomForestClassifier(n_jobs=-1, random_state=2016,\n verbose=1)\n\n def fit(self, x_train, y_train, need_transform_label=False):\n param_grid = {'n_estimators': [100, 200]}\n self.model = self.grid_search_fit_(self.model, param_grid, x_train,\n y_train)\n\n def predict(self, x_test, need_transform_label=False):\n return self.model.predict(x_test)\n",
"<import token>\n\n\nclass RandomForestClassification(Model):\n\n def __init__(self):\n Model.__init__(self)\n self.model = RandomForestClassifier(n_jobs=-1, random_state=2016,\n verbose=1)\n\n def fit(self, x_train, y_train, need_transform_label=False):\n param_grid = {'n_estimators': [100, 200]}\n self.model = self.grid_search_fit_(self.model, param_grid, x_train,\n y_train)\n\n def predict(self, x_test, need_transform_label=False):\n return self.model.predict(x_test)\n",
"<import token>\n\n\nclass RandomForestClassification(Model):\n <function token>\n\n def fit(self, x_train, y_train, need_transform_label=False):\n param_grid = {'n_estimators': [100, 200]}\n self.model = self.grid_search_fit_(self.model, param_grid, x_train,\n y_train)\n\n def predict(self, x_test, need_transform_label=False):\n return self.model.predict(x_test)\n",
"<import token>\n\n\nclass RandomForestClassification(Model):\n <function token>\n\n def fit(self, x_train, y_train, need_transform_label=False):\n param_grid = {'n_estimators': [100, 200]}\n self.model = self.grid_search_fit_(self.model, param_grid, x_train,\n y_train)\n <function token>\n",
"<import token>\n\n\nclass RandomForestClassification(Model):\n <function token>\n <function token>\n <function token>\n",
"<import token>\n<class token>\n"
] | false |
98,324 |
c02b365cce48770a0789adf15b346f7744de2e6f
|
def solution(strings, n):
return sorted(sorted(strings), key=lambda x: x[n])
# return sorted(strings, key=lambda x: x[n]+x[:]) # 이 방법도 있음.
strings = ["sun", "bed", "car"]
strings1 = ["abce", "abcd", "cdx"]
n = 1
print(solution(strings, n))
n1 = 2
print(solution(strings1, n1))
|
[
"def solution(strings, n):\n return sorted(sorted(strings), key=lambda x: x[n])\n # return sorted(strings, key=lambda x: x[n]+x[:]) # 이 방법도 있음.\n\n\nstrings = [\"sun\", \"bed\", \"car\"]\nstrings1 = [\"abce\", \"abcd\", \"cdx\"]\n\nn = 1\nprint(solution(strings, n))\nn1 = 2\nprint(solution(strings1, n1))\n",
"def solution(strings, n):\n return sorted(sorted(strings), key=lambda x: x[n])\n\n\nstrings = ['sun', 'bed', 'car']\nstrings1 = ['abce', 'abcd', 'cdx']\nn = 1\nprint(solution(strings, n))\nn1 = 2\nprint(solution(strings1, n1))\n",
"def solution(strings, n):\n return sorted(sorted(strings), key=lambda x: x[n])\n\n\n<assignment token>\nprint(solution(strings, n))\n<assignment token>\nprint(solution(strings1, n1))\n",
"def solution(strings, n):\n return sorted(sorted(strings), key=lambda x: x[n])\n\n\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n",
"<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n"
] | false |
98,325 |
0a4d4b6af406d2a520645e030ecee308594e1a4d
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2017-07-17 17:10
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('Dashboard', '0016_auto_20170717_1216'),
]
operations = [
migrations.RenameField(
model_name='change',
old_name='denial_exp',
new_name='customer_deny_exp',
),
migrations.AddField(
model_name='change',
name='internal_deny_exp',
field=models.CharField(max_length=1000, null=True),
),
]
|
[
"# -*- coding: utf-8 -*-\n# Generated by Django 1.9.1 on 2017-07-17 17:10\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('Dashboard', '0016_auto_20170717_1216'),\n ]\n\n operations = [\n migrations.RenameField(\n model_name='change',\n old_name='denial_exp',\n new_name='customer_deny_exp',\n ),\n migrations.AddField(\n model_name='change',\n name='internal_deny_exp',\n field=models.CharField(max_length=1000, null=True),\n ),\n ]\n",
"from __future__ import unicode_literals\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('Dashboard', '0016_auto_20170717_1216')]\n operations = [migrations.RenameField(model_name='change', old_name=\n 'denial_exp', new_name='customer_deny_exp'), migrations.AddField(\n model_name='change', name='internal_deny_exp', field=models.\n CharField(max_length=1000, null=True))]\n",
"<import token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('Dashboard', '0016_auto_20170717_1216')]\n operations = [migrations.RenameField(model_name='change', old_name=\n 'denial_exp', new_name='customer_deny_exp'), migrations.AddField(\n model_name='change', name='internal_deny_exp', field=models.\n CharField(max_length=1000, null=True))]\n",
"<import token>\n\n\nclass Migration(migrations.Migration):\n <assignment token>\n <assignment token>\n",
"<import token>\n<class token>\n"
] | false |
98,326 |
f1724358841e1913a3a601b4a2f25f056e34f683
|
import requests
print(requests.get("https://bbs.csdn.net/forums/ios").text)
|
[
"import requests\n\nprint(requests.get(\"https://bbs.csdn.net/forums/ios\").text)",
"import requests\nprint(requests.get('https://bbs.csdn.net/forums/ios').text)\n",
"<import token>\nprint(requests.get('https://bbs.csdn.net/forums/ios').text)\n",
"<import token>\n<code token>\n"
] | false |
98,327 |
675fa5291c6aecebba0f85c6e1f173a93f786954
|
import sys
import time
for r in range(0,20):
time.sleep(15)
print("doorStayedOpen", flush = True)
time.sleep(5)
print("doorClosed", flush = True)
|
[
"import sys\nimport time\n\n\nfor r in range(0,20):\n time.sleep(15)\n print(\"doorStayedOpen\", flush = True)\n time.sleep(5)\n print(\"doorClosed\", flush = True)\n\n\n \n\n",
"import sys\nimport time\nfor r in range(0, 20):\n time.sleep(15)\n print('doorStayedOpen', flush=True)\n time.sleep(5)\n print('doorClosed', flush=True)\n",
"<import token>\nfor r in range(0, 20):\n time.sleep(15)\n print('doorStayedOpen', flush=True)\n time.sleep(5)\n print('doorClosed', flush=True)\n",
"<import token>\n<code token>\n"
] | false |
98,328 |
cce35afc9f5fdc2197acdff8c23d89fac37ce954
|
#! /usr/bin/env python
from geometry_msgs.msg import PoseStamped
import rospy
def wait_for_time():
"""Wait for simulated time to begin.
"""
while rospy.Time().now().to_sec() == 0:
pass
class ArTagReader(object):
def __init__(self):
self.markers = []
def callback(self, msg):
self.markers = msg.markers
def main():
# wait_for_time()
start = PoseStamped()
start.header.frame_id = 'base_link'
start.pose.position.x = 0.5
start.pose.position.y = 0.5
start.pose.position.z = 0.75
reader = ArTagReader()
print reader
sub = rospy.Subscriber(reader.callback) # Subscribe to AR tag poses, use reader.callback
while len(reader.markers) == 0:
rospy.sleep(0.1)
for marker in reader.markers:
print reader.markers
# # error = arm.move_to_pose(???)
# if error is None:
# rospy.loginfo('Moved to marker {}'.format(marker.id))
# return
# else:
# rospy.logwarn('Failed to move to marker {}'.format(marker.id))
# rospy.logerr('Failed to move to any markers!')
if __name__ == '__main__':
main()
|
[
"#! /usr/bin/env python\n\nfrom geometry_msgs.msg import PoseStamped\nimport rospy\n\n\ndef wait_for_time():\n \"\"\"Wait for simulated time to begin.\n \"\"\"\n while rospy.Time().now().to_sec() == 0:\n pass\n\n\nclass ArTagReader(object):\n def __init__(self):\n self.markers = []\n\n def callback(self, msg):\n self.markers = msg.markers\n\n\ndef main():\n # wait_for_time()\n\n start = PoseStamped()\n start.header.frame_id = 'base_link'\n start.pose.position.x = 0.5\n start.pose.position.y = 0.5\n start.pose.position.z = 0.75\n\n \n reader = ArTagReader()\n print reader\n sub = rospy.Subscriber(reader.callback) # Subscribe to AR tag poses, use reader.callback\n \n while len(reader.markers) == 0:\n rospy.sleep(0.1)\n \n for marker in reader.markers:\n\n print reader.markers\n # # error = arm.move_to_pose(???)\n # if error is None:\n # rospy.loginfo('Moved to marker {}'.format(marker.id))\n # return\n # else:\n # rospy.logwarn('Failed to move to marker {}'.format(marker.id))\n # rospy.logerr('Failed to move to any markers!')\n\n\nif __name__ == '__main__':\n main()"
] | true |
98,329 |
9dcfe569da7a913d1350ce50dc3c73ab0a178fac
|
import shutil
import luigi
from qgreenland.constants.paths import FETCH_DATASETS_DIR, PRIVATE_ARCHIVE_DIR
from qgreenland.models.config.asset import (
CmrAsset,
HttpAsset,
ManualAsset,
RepositoryAsset,
)
from qgreenland.util.cmr import get_cmr_granule
from qgreenland.util.command import interpolate_args, run_qgr_command
from qgreenland.util.config.config import get_config
from qgreenland.util.edl import create_earthdata_authenticated_session as make_session
from qgreenland.util.layer import datasource_dirname
from qgreenland.util.luigi.target import temporary_path_dir
from qgreenland.util.request import fetch_and_write_file
# TODO: call this 'FetchDataset'? 'FetchAsset'?
class FetchTask(luigi.Task):
dataset_id = luigi.Parameter()
asset_id = luigi.Parameter()
@property
def output_name(self):
return datasource_dirname(
dataset_id=self.dataset_cfg.id,
asset_id=self.asset_cfg.id,
)
@property
def dataset_cfg(self):
config = get_config()
return config.datasets[self.dataset_id]
@property
def asset_cfg(self):
return self.dataset_cfg.assets[self.asset_id]
class FetchCmrGranule(FetchTask):
session = None
def output(self):
path = FETCH_DATASETS_DIR / self.output_name
return luigi.LocalTarget(path)
def run(self):
if type(self.asset_cfg) is not CmrAsset:
raise RuntimeError(f"Expected CMR asset. Received: {self.asset_cfg}")
granule = get_cmr_granule(
granule_ur=self.asset_cfg.granule_ur,
collection_concept_id=self.asset_cfg.collection_concept_id,
)
with temporary_path_dir(self.output()) as temp_path:
for url in granule.urls:
if not self.session:
self.session = make_session(hosts=[url], verify=True)
fetch_and_write_file(
url,
output_dir=temp_path,
session=self.session,
)
class FetchDataFiles(FetchTask):
def output(self):
return luigi.LocalTarget(
FETCH_DATASETS_DIR / self.output_name,
format=luigi.format.Nop,
)
def run(self):
if type(self.asset_cfg) is not HttpAsset:
raise RuntimeError(f"Expected HTTP asset. Received: {self.asset_cfg}")
with temporary_path_dir(self.output()) as temp_path:
for url in self.asset_cfg.urls:
fetch_and_write_file(
url,
output_dir=temp_path,
verify=self.asset_cfg.verify_tls,
)
class FetchLocalDataFiles(FetchTask):
"""Fetch data that's already on the local installation.
e.g. "Manual" assets which are downloaded by humans, "Repository" assets
which are present in this git repo.
"""
def output(self):
return luigi.LocalTarget(
FETCH_DATASETS_DIR / self.output_name,
format=luigi.format.Nop,
)
def run(self):
if isinstance(self.asset_cfg, RepositoryAsset):
with temporary_path_dir(self.output()) as temp_path:
evaluated_filepath = self.asset_cfg.filepath.eval()
out_path = temp_path / evaluated_filepath.name
shutil.copy2(evaluated_filepath, out_path)
elif isinstance(self.asset_cfg, ManualAsset):
local_dir = PRIVATE_ARCHIVE_DIR / self.dataset_cfg.id
with temporary_path_dir(self.output()) as temp_path:
shutil.copytree(local_dir, temp_path, dirs_exist_ok=True)
else:
raise RuntimeError(
"You selected an unsupported access_method:" f" {type(self.asset_cfg)}",
)
class FetchDataWithCommand(FetchTask):
"""Fetch data using a command, writing to '{output_dir}'."""
def output(self):
return luigi.LocalTarget(
FETCH_DATASETS_DIR / self.output_name,
format=luigi.format.Nop,
)
def run(self):
with temporary_path_dir(self.output()) as temp_path:
run_qgr_command(
interpolate_args(
self.asset_cfg.args,
output_dir=temp_path,
),
)
|
[
"import shutil\n\nimport luigi\n\nfrom qgreenland.constants.paths import FETCH_DATASETS_DIR, PRIVATE_ARCHIVE_DIR\nfrom qgreenland.models.config.asset import (\n CmrAsset,\n HttpAsset,\n ManualAsset,\n RepositoryAsset,\n)\nfrom qgreenland.util.cmr import get_cmr_granule\nfrom qgreenland.util.command import interpolate_args, run_qgr_command\nfrom qgreenland.util.config.config import get_config\nfrom qgreenland.util.edl import create_earthdata_authenticated_session as make_session\nfrom qgreenland.util.layer import datasource_dirname\nfrom qgreenland.util.luigi.target import temporary_path_dir\nfrom qgreenland.util.request import fetch_and_write_file\n\n\n# TODO: call this 'FetchDataset'? 'FetchAsset'?\nclass FetchTask(luigi.Task):\n dataset_id = luigi.Parameter()\n asset_id = luigi.Parameter()\n\n @property\n def output_name(self):\n return datasource_dirname(\n dataset_id=self.dataset_cfg.id,\n asset_id=self.asset_cfg.id,\n )\n\n @property\n def dataset_cfg(self):\n config = get_config()\n return config.datasets[self.dataset_id]\n\n @property\n def asset_cfg(self):\n return self.dataset_cfg.assets[self.asset_id]\n\n\nclass FetchCmrGranule(FetchTask):\n session = None\n\n def output(self):\n path = FETCH_DATASETS_DIR / self.output_name\n return luigi.LocalTarget(path)\n\n def run(self):\n if type(self.asset_cfg) is not CmrAsset:\n raise RuntimeError(f\"Expected CMR asset. Received: {self.asset_cfg}\")\n\n granule = get_cmr_granule(\n granule_ur=self.asset_cfg.granule_ur,\n collection_concept_id=self.asset_cfg.collection_concept_id,\n )\n\n with temporary_path_dir(self.output()) as temp_path:\n for url in granule.urls:\n if not self.session:\n self.session = make_session(hosts=[url], verify=True)\n\n fetch_and_write_file(\n url,\n output_dir=temp_path,\n session=self.session,\n )\n\n\nclass FetchDataFiles(FetchTask):\n def output(self):\n return luigi.LocalTarget(\n FETCH_DATASETS_DIR / self.output_name,\n format=luigi.format.Nop,\n )\n\n def run(self):\n if type(self.asset_cfg) is not HttpAsset:\n raise RuntimeError(f\"Expected HTTP asset. Received: {self.asset_cfg}\")\n\n with temporary_path_dir(self.output()) as temp_path:\n for url in self.asset_cfg.urls:\n fetch_and_write_file(\n url,\n output_dir=temp_path,\n verify=self.asset_cfg.verify_tls,\n )\n\n\nclass FetchLocalDataFiles(FetchTask):\n \"\"\"Fetch data that's already on the local installation.\n\n e.g. \"Manual\" assets which are downloaded by humans, \"Repository\" assets\n which are present in this git repo.\n \"\"\"\n\n def output(self):\n return luigi.LocalTarget(\n FETCH_DATASETS_DIR / self.output_name,\n format=luigi.format.Nop,\n )\n\n def run(self):\n if isinstance(self.asset_cfg, RepositoryAsset):\n with temporary_path_dir(self.output()) as temp_path:\n evaluated_filepath = self.asset_cfg.filepath.eval()\n\n out_path = temp_path / evaluated_filepath.name\n shutil.copy2(evaluated_filepath, out_path)\n\n elif isinstance(self.asset_cfg, ManualAsset):\n local_dir = PRIVATE_ARCHIVE_DIR / self.dataset_cfg.id\n with temporary_path_dir(self.output()) as temp_path:\n shutil.copytree(local_dir, temp_path, dirs_exist_ok=True)\n\n else:\n raise RuntimeError(\n \"You selected an unsupported access_method:\" f\" {type(self.asset_cfg)}\",\n )\n\n\nclass FetchDataWithCommand(FetchTask):\n \"\"\"Fetch data using a command, writing to '{output_dir}'.\"\"\"\n\n def output(self):\n return luigi.LocalTarget(\n FETCH_DATASETS_DIR / self.output_name,\n format=luigi.format.Nop,\n )\n\n def run(self):\n with temporary_path_dir(self.output()) as temp_path:\n run_qgr_command(\n interpolate_args(\n self.asset_cfg.args,\n output_dir=temp_path,\n ),\n )\n",
"import shutil\nimport luigi\nfrom qgreenland.constants.paths import FETCH_DATASETS_DIR, PRIVATE_ARCHIVE_DIR\nfrom qgreenland.models.config.asset import CmrAsset, HttpAsset, ManualAsset, RepositoryAsset\nfrom qgreenland.util.cmr import get_cmr_granule\nfrom qgreenland.util.command import interpolate_args, run_qgr_command\nfrom qgreenland.util.config.config import get_config\nfrom qgreenland.util.edl import create_earthdata_authenticated_session as make_session\nfrom qgreenland.util.layer import datasource_dirname\nfrom qgreenland.util.luigi.target import temporary_path_dir\nfrom qgreenland.util.request import fetch_and_write_file\n\n\nclass FetchTask(luigi.Task):\n dataset_id = luigi.Parameter()\n asset_id = luigi.Parameter()\n\n @property\n def output_name(self):\n return datasource_dirname(dataset_id=self.dataset_cfg.id, asset_id=\n self.asset_cfg.id)\n\n @property\n def dataset_cfg(self):\n config = get_config()\n return config.datasets[self.dataset_id]\n\n @property\n def asset_cfg(self):\n return self.dataset_cfg.assets[self.asset_id]\n\n\nclass FetchCmrGranule(FetchTask):\n session = None\n\n def output(self):\n path = FETCH_DATASETS_DIR / self.output_name\n return luigi.LocalTarget(path)\n\n def run(self):\n if type(self.asset_cfg) is not CmrAsset:\n raise RuntimeError(\n f'Expected CMR asset. Received: {self.asset_cfg}')\n granule = get_cmr_granule(granule_ur=self.asset_cfg.granule_ur,\n collection_concept_id=self.asset_cfg.collection_concept_id)\n with temporary_path_dir(self.output()) as temp_path:\n for url in granule.urls:\n if not self.session:\n self.session = make_session(hosts=[url], verify=True)\n fetch_and_write_file(url, output_dir=temp_path, session=\n self.session)\n\n\nclass FetchDataFiles(FetchTask):\n\n def output(self):\n return luigi.LocalTarget(FETCH_DATASETS_DIR / self.output_name,\n format=luigi.format.Nop)\n\n def run(self):\n if type(self.asset_cfg) is not HttpAsset:\n raise RuntimeError(\n f'Expected HTTP asset. Received: {self.asset_cfg}')\n with temporary_path_dir(self.output()) as temp_path:\n for url in self.asset_cfg.urls:\n fetch_and_write_file(url, output_dir=temp_path, verify=self\n .asset_cfg.verify_tls)\n\n\nclass FetchLocalDataFiles(FetchTask):\n \"\"\"Fetch data that's already on the local installation.\n\n e.g. \"Manual\" assets which are downloaded by humans, \"Repository\" assets\n which are present in this git repo.\n \"\"\"\n\n def output(self):\n return luigi.LocalTarget(FETCH_DATASETS_DIR / self.output_name,\n format=luigi.format.Nop)\n\n def run(self):\n if isinstance(self.asset_cfg, RepositoryAsset):\n with temporary_path_dir(self.output()) as temp_path:\n evaluated_filepath = self.asset_cfg.filepath.eval()\n out_path = temp_path / evaluated_filepath.name\n shutil.copy2(evaluated_filepath, out_path)\n elif isinstance(self.asset_cfg, ManualAsset):\n local_dir = PRIVATE_ARCHIVE_DIR / self.dataset_cfg.id\n with temporary_path_dir(self.output()) as temp_path:\n shutil.copytree(local_dir, temp_path, dirs_exist_ok=True)\n else:\n raise RuntimeError(\n f'You selected an unsupported access_method: {type(self.asset_cfg)}'\n )\n\n\nclass FetchDataWithCommand(FetchTask):\n \"\"\"Fetch data using a command, writing to '{output_dir}'.\"\"\"\n\n def output(self):\n return luigi.LocalTarget(FETCH_DATASETS_DIR / self.output_name,\n format=luigi.format.Nop)\n\n def run(self):\n with temporary_path_dir(self.output()) as temp_path:\n run_qgr_command(interpolate_args(self.asset_cfg.args,\n output_dir=temp_path))\n",
"<import token>\n\n\nclass FetchTask(luigi.Task):\n dataset_id = luigi.Parameter()\n asset_id = luigi.Parameter()\n\n @property\n def output_name(self):\n return datasource_dirname(dataset_id=self.dataset_cfg.id, asset_id=\n self.asset_cfg.id)\n\n @property\n def dataset_cfg(self):\n config = get_config()\n return config.datasets[self.dataset_id]\n\n @property\n def asset_cfg(self):\n return self.dataset_cfg.assets[self.asset_id]\n\n\nclass FetchCmrGranule(FetchTask):\n session = None\n\n def output(self):\n path = FETCH_DATASETS_DIR / self.output_name\n return luigi.LocalTarget(path)\n\n def run(self):\n if type(self.asset_cfg) is not CmrAsset:\n raise RuntimeError(\n f'Expected CMR asset. Received: {self.asset_cfg}')\n granule = get_cmr_granule(granule_ur=self.asset_cfg.granule_ur,\n collection_concept_id=self.asset_cfg.collection_concept_id)\n with temporary_path_dir(self.output()) as temp_path:\n for url in granule.urls:\n if not self.session:\n self.session = make_session(hosts=[url], verify=True)\n fetch_and_write_file(url, output_dir=temp_path, session=\n self.session)\n\n\nclass FetchDataFiles(FetchTask):\n\n def output(self):\n return luigi.LocalTarget(FETCH_DATASETS_DIR / self.output_name,\n format=luigi.format.Nop)\n\n def run(self):\n if type(self.asset_cfg) is not HttpAsset:\n raise RuntimeError(\n f'Expected HTTP asset. Received: {self.asset_cfg}')\n with temporary_path_dir(self.output()) as temp_path:\n for url in self.asset_cfg.urls:\n fetch_and_write_file(url, output_dir=temp_path, verify=self\n .asset_cfg.verify_tls)\n\n\nclass FetchLocalDataFiles(FetchTask):\n \"\"\"Fetch data that's already on the local installation.\n\n e.g. \"Manual\" assets which are downloaded by humans, \"Repository\" assets\n which are present in this git repo.\n \"\"\"\n\n def output(self):\n return luigi.LocalTarget(FETCH_DATASETS_DIR / self.output_name,\n format=luigi.format.Nop)\n\n def run(self):\n if isinstance(self.asset_cfg, RepositoryAsset):\n with temporary_path_dir(self.output()) as temp_path:\n evaluated_filepath = self.asset_cfg.filepath.eval()\n out_path = temp_path / evaluated_filepath.name\n shutil.copy2(evaluated_filepath, out_path)\n elif isinstance(self.asset_cfg, ManualAsset):\n local_dir = PRIVATE_ARCHIVE_DIR / self.dataset_cfg.id\n with temporary_path_dir(self.output()) as temp_path:\n shutil.copytree(local_dir, temp_path, dirs_exist_ok=True)\n else:\n raise RuntimeError(\n f'You selected an unsupported access_method: {type(self.asset_cfg)}'\n )\n\n\nclass FetchDataWithCommand(FetchTask):\n \"\"\"Fetch data using a command, writing to '{output_dir}'.\"\"\"\n\n def output(self):\n return luigi.LocalTarget(FETCH_DATASETS_DIR / self.output_name,\n format=luigi.format.Nop)\n\n def run(self):\n with temporary_path_dir(self.output()) as temp_path:\n run_qgr_command(interpolate_args(self.asset_cfg.args,\n output_dir=temp_path))\n",
"<import token>\n\n\nclass FetchTask(luigi.Task):\n <assignment token>\n <assignment token>\n\n @property\n def output_name(self):\n return datasource_dirname(dataset_id=self.dataset_cfg.id, asset_id=\n self.asset_cfg.id)\n\n @property\n def dataset_cfg(self):\n config = get_config()\n return config.datasets[self.dataset_id]\n\n @property\n def asset_cfg(self):\n return self.dataset_cfg.assets[self.asset_id]\n\n\nclass FetchCmrGranule(FetchTask):\n session = None\n\n def output(self):\n path = FETCH_DATASETS_DIR / self.output_name\n return luigi.LocalTarget(path)\n\n def run(self):\n if type(self.asset_cfg) is not CmrAsset:\n raise RuntimeError(\n f'Expected CMR asset. Received: {self.asset_cfg}')\n granule = get_cmr_granule(granule_ur=self.asset_cfg.granule_ur,\n collection_concept_id=self.asset_cfg.collection_concept_id)\n with temporary_path_dir(self.output()) as temp_path:\n for url in granule.urls:\n if not self.session:\n self.session = make_session(hosts=[url], verify=True)\n fetch_and_write_file(url, output_dir=temp_path, session=\n self.session)\n\n\nclass FetchDataFiles(FetchTask):\n\n def output(self):\n return luigi.LocalTarget(FETCH_DATASETS_DIR / self.output_name,\n format=luigi.format.Nop)\n\n def run(self):\n if type(self.asset_cfg) is not HttpAsset:\n raise RuntimeError(\n f'Expected HTTP asset. Received: {self.asset_cfg}')\n with temporary_path_dir(self.output()) as temp_path:\n for url in self.asset_cfg.urls:\n fetch_and_write_file(url, output_dir=temp_path, verify=self\n .asset_cfg.verify_tls)\n\n\nclass FetchLocalDataFiles(FetchTask):\n \"\"\"Fetch data that's already on the local installation.\n\n e.g. \"Manual\" assets which are downloaded by humans, \"Repository\" assets\n which are present in this git repo.\n \"\"\"\n\n def output(self):\n return luigi.LocalTarget(FETCH_DATASETS_DIR / self.output_name,\n format=luigi.format.Nop)\n\n def run(self):\n if isinstance(self.asset_cfg, RepositoryAsset):\n with temporary_path_dir(self.output()) as temp_path:\n evaluated_filepath = self.asset_cfg.filepath.eval()\n out_path = temp_path / evaluated_filepath.name\n shutil.copy2(evaluated_filepath, out_path)\n elif isinstance(self.asset_cfg, ManualAsset):\n local_dir = PRIVATE_ARCHIVE_DIR / self.dataset_cfg.id\n with temporary_path_dir(self.output()) as temp_path:\n shutil.copytree(local_dir, temp_path, dirs_exist_ok=True)\n else:\n raise RuntimeError(\n f'You selected an unsupported access_method: {type(self.asset_cfg)}'\n )\n\n\nclass FetchDataWithCommand(FetchTask):\n \"\"\"Fetch data using a command, writing to '{output_dir}'.\"\"\"\n\n def output(self):\n return luigi.LocalTarget(FETCH_DATASETS_DIR / self.output_name,\n format=luigi.format.Nop)\n\n def run(self):\n with temporary_path_dir(self.output()) as temp_path:\n run_qgr_command(interpolate_args(self.asset_cfg.args,\n output_dir=temp_path))\n",
"<import token>\n\n\nclass FetchTask(luigi.Task):\n <assignment token>\n <assignment token>\n\n @property\n def output_name(self):\n return datasource_dirname(dataset_id=self.dataset_cfg.id, asset_id=\n self.asset_cfg.id)\n\n @property\n def dataset_cfg(self):\n config = get_config()\n return config.datasets[self.dataset_id]\n <function token>\n\n\nclass FetchCmrGranule(FetchTask):\n session = None\n\n def output(self):\n path = FETCH_DATASETS_DIR / self.output_name\n return luigi.LocalTarget(path)\n\n def run(self):\n if type(self.asset_cfg) is not CmrAsset:\n raise RuntimeError(\n f'Expected CMR asset. Received: {self.asset_cfg}')\n granule = get_cmr_granule(granule_ur=self.asset_cfg.granule_ur,\n collection_concept_id=self.asset_cfg.collection_concept_id)\n with temporary_path_dir(self.output()) as temp_path:\n for url in granule.urls:\n if not self.session:\n self.session = make_session(hosts=[url], verify=True)\n fetch_and_write_file(url, output_dir=temp_path, session=\n self.session)\n\n\nclass FetchDataFiles(FetchTask):\n\n def output(self):\n return luigi.LocalTarget(FETCH_DATASETS_DIR / self.output_name,\n format=luigi.format.Nop)\n\n def run(self):\n if type(self.asset_cfg) is not HttpAsset:\n raise RuntimeError(\n f'Expected HTTP asset. Received: {self.asset_cfg}')\n with temporary_path_dir(self.output()) as temp_path:\n for url in self.asset_cfg.urls:\n fetch_and_write_file(url, output_dir=temp_path, verify=self\n .asset_cfg.verify_tls)\n\n\nclass FetchLocalDataFiles(FetchTask):\n \"\"\"Fetch data that's already on the local installation.\n\n e.g. \"Manual\" assets which are downloaded by humans, \"Repository\" assets\n which are present in this git repo.\n \"\"\"\n\n def output(self):\n return luigi.LocalTarget(FETCH_DATASETS_DIR / self.output_name,\n format=luigi.format.Nop)\n\n def run(self):\n if isinstance(self.asset_cfg, RepositoryAsset):\n with temporary_path_dir(self.output()) as temp_path:\n evaluated_filepath = self.asset_cfg.filepath.eval()\n out_path = temp_path / evaluated_filepath.name\n shutil.copy2(evaluated_filepath, out_path)\n elif isinstance(self.asset_cfg, ManualAsset):\n local_dir = PRIVATE_ARCHIVE_DIR / self.dataset_cfg.id\n with temporary_path_dir(self.output()) as temp_path:\n shutil.copytree(local_dir, temp_path, dirs_exist_ok=True)\n else:\n raise RuntimeError(\n f'You selected an unsupported access_method: {type(self.asset_cfg)}'\n )\n\n\nclass FetchDataWithCommand(FetchTask):\n \"\"\"Fetch data using a command, writing to '{output_dir}'.\"\"\"\n\n def output(self):\n return luigi.LocalTarget(FETCH_DATASETS_DIR / self.output_name,\n format=luigi.format.Nop)\n\n def run(self):\n with temporary_path_dir(self.output()) as temp_path:\n run_qgr_command(interpolate_args(self.asset_cfg.args,\n output_dir=temp_path))\n",
"<import token>\n\n\nclass FetchTask(luigi.Task):\n <assignment token>\n <assignment token>\n <function token>\n\n @property\n def dataset_cfg(self):\n config = get_config()\n return config.datasets[self.dataset_id]\n <function token>\n\n\nclass FetchCmrGranule(FetchTask):\n session = None\n\n def output(self):\n path = FETCH_DATASETS_DIR / self.output_name\n return luigi.LocalTarget(path)\n\n def run(self):\n if type(self.asset_cfg) is not CmrAsset:\n raise RuntimeError(\n f'Expected CMR asset. Received: {self.asset_cfg}')\n granule = get_cmr_granule(granule_ur=self.asset_cfg.granule_ur,\n collection_concept_id=self.asset_cfg.collection_concept_id)\n with temporary_path_dir(self.output()) as temp_path:\n for url in granule.urls:\n if not self.session:\n self.session = make_session(hosts=[url], verify=True)\n fetch_and_write_file(url, output_dir=temp_path, session=\n self.session)\n\n\nclass FetchDataFiles(FetchTask):\n\n def output(self):\n return luigi.LocalTarget(FETCH_DATASETS_DIR / self.output_name,\n format=luigi.format.Nop)\n\n def run(self):\n if type(self.asset_cfg) is not HttpAsset:\n raise RuntimeError(\n f'Expected HTTP asset. Received: {self.asset_cfg}')\n with temporary_path_dir(self.output()) as temp_path:\n for url in self.asset_cfg.urls:\n fetch_and_write_file(url, output_dir=temp_path, verify=self\n .asset_cfg.verify_tls)\n\n\nclass FetchLocalDataFiles(FetchTask):\n \"\"\"Fetch data that's already on the local installation.\n\n e.g. \"Manual\" assets which are downloaded by humans, \"Repository\" assets\n which are present in this git repo.\n \"\"\"\n\n def output(self):\n return luigi.LocalTarget(FETCH_DATASETS_DIR / self.output_name,\n format=luigi.format.Nop)\n\n def run(self):\n if isinstance(self.asset_cfg, RepositoryAsset):\n with temporary_path_dir(self.output()) as temp_path:\n evaluated_filepath = self.asset_cfg.filepath.eval()\n out_path = temp_path / evaluated_filepath.name\n shutil.copy2(evaluated_filepath, out_path)\n elif isinstance(self.asset_cfg, ManualAsset):\n local_dir = PRIVATE_ARCHIVE_DIR / self.dataset_cfg.id\n with temporary_path_dir(self.output()) as temp_path:\n shutil.copytree(local_dir, temp_path, dirs_exist_ok=True)\n else:\n raise RuntimeError(\n f'You selected an unsupported access_method: {type(self.asset_cfg)}'\n )\n\n\nclass FetchDataWithCommand(FetchTask):\n \"\"\"Fetch data using a command, writing to '{output_dir}'.\"\"\"\n\n def output(self):\n return luigi.LocalTarget(FETCH_DATASETS_DIR / self.output_name,\n format=luigi.format.Nop)\n\n def run(self):\n with temporary_path_dir(self.output()) as temp_path:\n run_qgr_command(interpolate_args(self.asset_cfg.args,\n output_dir=temp_path))\n",
"<import token>\n\n\nclass FetchTask(luigi.Task):\n <assignment token>\n <assignment token>\n <function token>\n <function token>\n <function token>\n\n\nclass FetchCmrGranule(FetchTask):\n session = None\n\n def output(self):\n path = FETCH_DATASETS_DIR / self.output_name\n return luigi.LocalTarget(path)\n\n def run(self):\n if type(self.asset_cfg) is not CmrAsset:\n raise RuntimeError(\n f'Expected CMR asset. Received: {self.asset_cfg}')\n granule = get_cmr_granule(granule_ur=self.asset_cfg.granule_ur,\n collection_concept_id=self.asset_cfg.collection_concept_id)\n with temporary_path_dir(self.output()) as temp_path:\n for url in granule.urls:\n if not self.session:\n self.session = make_session(hosts=[url], verify=True)\n fetch_and_write_file(url, output_dir=temp_path, session=\n self.session)\n\n\nclass FetchDataFiles(FetchTask):\n\n def output(self):\n return luigi.LocalTarget(FETCH_DATASETS_DIR / self.output_name,\n format=luigi.format.Nop)\n\n def run(self):\n if type(self.asset_cfg) is not HttpAsset:\n raise RuntimeError(\n f'Expected HTTP asset. Received: {self.asset_cfg}')\n with temporary_path_dir(self.output()) as temp_path:\n for url in self.asset_cfg.urls:\n fetch_and_write_file(url, output_dir=temp_path, verify=self\n .asset_cfg.verify_tls)\n\n\nclass FetchLocalDataFiles(FetchTask):\n \"\"\"Fetch data that's already on the local installation.\n\n e.g. \"Manual\" assets which are downloaded by humans, \"Repository\" assets\n which are present in this git repo.\n \"\"\"\n\n def output(self):\n return luigi.LocalTarget(FETCH_DATASETS_DIR / self.output_name,\n format=luigi.format.Nop)\n\n def run(self):\n if isinstance(self.asset_cfg, RepositoryAsset):\n with temporary_path_dir(self.output()) as temp_path:\n evaluated_filepath = self.asset_cfg.filepath.eval()\n out_path = temp_path / evaluated_filepath.name\n shutil.copy2(evaluated_filepath, out_path)\n elif isinstance(self.asset_cfg, ManualAsset):\n local_dir = PRIVATE_ARCHIVE_DIR / self.dataset_cfg.id\n with temporary_path_dir(self.output()) as temp_path:\n shutil.copytree(local_dir, temp_path, dirs_exist_ok=True)\n else:\n raise RuntimeError(\n f'You selected an unsupported access_method: {type(self.asset_cfg)}'\n )\n\n\nclass FetchDataWithCommand(FetchTask):\n \"\"\"Fetch data using a command, writing to '{output_dir}'.\"\"\"\n\n def output(self):\n return luigi.LocalTarget(FETCH_DATASETS_DIR / self.output_name,\n format=luigi.format.Nop)\n\n def run(self):\n with temporary_path_dir(self.output()) as temp_path:\n run_qgr_command(interpolate_args(self.asset_cfg.args,\n output_dir=temp_path))\n",
"<import token>\n<class token>\n\n\nclass FetchCmrGranule(FetchTask):\n session = None\n\n def output(self):\n path = FETCH_DATASETS_DIR / self.output_name\n return luigi.LocalTarget(path)\n\n def run(self):\n if type(self.asset_cfg) is not CmrAsset:\n raise RuntimeError(\n f'Expected CMR asset. Received: {self.asset_cfg}')\n granule = get_cmr_granule(granule_ur=self.asset_cfg.granule_ur,\n collection_concept_id=self.asset_cfg.collection_concept_id)\n with temporary_path_dir(self.output()) as temp_path:\n for url in granule.urls:\n if not self.session:\n self.session = make_session(hosts=[url], verify=True)\n fetch_and_write_file(url, output_dir=temp_path, session=\n self.session)\n\n\nclass FetchDataFiles(FetchTask):\n\n def output(self):\n return luigi.LocalTarget(FETCH_DATASETS_DIR / self.output_name,\n format=luigi.format.Nop)\n\n def run(self):\n if type(self.asset_cfg) is not HttpAsset:\n raise RuntimeError(\n f'Expected HTTP asset. Received: {self.asset_cfg}')\n with temporary_path_dir(self.output()) as temp_path:\n for url in self.asset_cfg.urls:\n fetch_and_write_file(url, output_dir=temp_path, verify=self\n .asset_cfg.verify_tls)\n\n\nclass FetchLocalDataFiles(FetchTask):\n \"\"\"Fetch data that's already on the local installation.\n\n e.g. \"Manual\" assets which are downloaded by humans, \"Repository\" assets\n which are present in this git repo.\n \"\"\"\n\n def output(self):\n return luigi.LocalTarget(FETCH_DATASETS_DIR / self.output_name,\n format=luigi.format.Nop)\n\n def run(self):\n if isinstance(self.asset_cfg, RepositoryAsset):\n with temporary_path_dir(self.output()) as temp_path:\n evaluated_filepath = self.asset_cfg.filepath.eval()\n out_path = temp_path / evaluated_filepath.name\n shutil.copy2(evaluated_filepath, out_path)\n elif isinstance(self.asset_cfg, ManualAsset):\n local_dir = PRIVATE_ARCHIVE_DIR / self.dataset_cfg.id\n with temporary_path_dir(self.output()) as temp_path:\n shutil.copytree(local_dir, temp_path, dirs_exist_ok=True)\n else:\n raise RuntimeError(\n f'You selected an unsupported access_method: {type(self.asset_cfg)}'\n )\n\n\nclass FetchDataWithCommand(FetchTask):\n \"\"\"Fetch data using a command, writing to '{output_dir}'.\"\"\"\n\n def output(self):\n return luigi.LocalTarget(FETCH_DATASETS_DIR / self.output_name,\n format=luigi.format.Nop)\n\n def run(self):\n with temporary_path_dir(self.output()) as temp_path:\n run_qgr_command(interpolate_args(self.asset_cfg.args,\n output_dir=temp_path))\n",
"<import token>\n<class token>\n\n\nclass FetchCmrGranule(FetchTask):\n <assignment token>\n\n def output(self):\n path = FETCH_DATASETS_DIR / self.output_name\n return luigi.LocalTarget(path)\n\n def run(self):\n if type(self.asset_cfg) is not CmrAsset:\n raise RuntimeError(\n f'Expected CMR asset. Received: {self.asset_cfg}')\n granule = get_cmr_granule(granule_ur=self.asset_cfg.granule_ur,\n collection_concept_id=self.asset_cfg.collection_concept_id)\n with temporary_path_dir(self.output()) as temp_path:\n for url in granule.urls:\n if not self.session:\n self.session = make_session(hosts=[url], verify=True)\n fetch_and_write_file(url, output_dir=temp_path, session=\n self.session)\n\n\nclass FetchDataFiles(FetchTask):\n\n def output(self):\n return luigi.LocalTarget(FETCH_DATASETS_DIR / self.output_name,\n format=luigi.format.Nop)\n\n def run(self):\n if type(self.asset_cfg) is not HttpAsset:\n raise RuntimeError(\n f'Expected HTTP asset. Received: {self.asset_cfg}')\n with temporary_path_dir(self.output()) as temp_path:\n for url in self.asset_cfg.urls:\n fetch_and_write_file(url, output_dir=temp_path, verify=self\n .asset_cfg.verify_tls)\n\n\nclass FetchLocalDataFiles(FetchTask):\n \"\"\"Fetch data that's already on the local installation.\n\n e.g. \"Manual\" assets which are downloaded by humans, \"Repository\" assets\n which are present in this git repo.\n \"\"\"\n\n def output(self):\n return luigi.LocalTarget(FETCH_DATASETS_DIR / self.output_name,\n format=luigi.format.Nop)\n\n def run(self):\n if isinstance(self.asset_cfg, RepositoryAsset):\n with temporary_path_dir(self.output()) as temp_path:\n evaluated_filepath = self.asset_cfg.filepath.eval()\n out_path = temp_path / evaluated_filepath.name\n shutil.copy2(evaluated_filepath, out_path)\n elif isinstance(self.asset_cfg, ManualAsset):\n local_dir = PRIVATE_ARCHIVE_DIR / self.dataset_cfg.id\n with temporary_path_dir(self.output()) as temp_path:\n shutil.copytree(local_dir, temp_path, dirs_exist_ok=True)\n else:\n raise RuntimeError(\n f'You selected an unsupported access_method: {type(self.asset_cfg)}'\n )\n\n\nclass FetchDataWithCommand(FetchTask):\n \"\"\"Fetch data using a command, writing to '{output_dir}'.\"\"\"\n\n def output(self):\n return luigi.LocalTarget(FETCH_DATASETS_DIR / self.output_name,\n format=luigi.format.Nop)\n\n def run(self):\n with temporary_path_dir(self.output()) as temp_path:\n run_qgr_command(interpolate_args(self.asset_cfg.args,\n output_dir=temp_path))\n",
"<import token>\n<class token>\n\n\nclass FetchCmrGranule(FetchTask):\n <assignment token>\n <function token>\n\n def run(self):\n if type(self.asset_cfg) is not CmrAsset:\n raise RuntimeError(\n f'Expected CMR asset. Received: {self.asset_cfg}')\n granule = get_cmr_granule(granule_ur=self.asset_cfg.granule_ur,\n collection_concept_id=self.asset_cfg.collection_concept_id)\n with temporary_path_dir(self.output()) as temp_path:\n for url in granule.urls:\n if not self.session:\n self.session = make_session(hosts=[url], verify=True)\n fetch_and_write_file(url, output_dir=temp_path, session=\n self.session)\n\n\nclass FetchDataFiles(FetchTask):\n\n def output(self):\n return luigi.LocalTarget(FETCH_DATASETS_DIR / self.output_name,\n format=luigi.format.Nop)\n\n def run(self):\n if type(self.asset_cfg) is not HttpAsset:\n raise RuntimeError(\n f'Expected HTTP asset. Received: {self.asset_cfg}')\n with temporary_path_dir(self.output()) as temp_path:\n for url in self.asset_cfg.urls:\n fetch_and_write_file(url, output_dir=temp_path, verify=self\n .asset_cfg.verify_tls)\n\n\nclass FetchLocalDataFiles(FetchTask):\n \"\"\"Fetch data that's already on the local installation.\n\n e.g. \"Manual\" assets which are downloaded by humans, \"Repository\" assets\n which are present in this git repo.\n \"\"\"\n\n def output(self):\n return luigi.LocalTarget(FETCH_DATASETS_DIR / self.output_name,\n format=luigi.format.Nop)\n\n def run(self):\n if isinstance(self.asset_cfg, RepositoryAsset):\n with temporary_path_dir(self.output()) as temp_path:\n evaluated_filepath = self.asset_cfg.filepath.eval()\n out_path = temp_path / evaluated_filepath.name\n shutil.copy2(evaluated_filepath, out_path)\n elif isinstance(self.asset_cfg, ManualAsset):\n local_dir = PRIVATE_ARCHIVE_DIR / self.dataset_cfg.id\n with temporary_path_dir(self.output()) as temp_path:\n shutil.copytree(local_dir, temp_path, dirs_exist_ok=True)\n else:\n raise RuntimeError(\n f'You selected an unsupported access_method: {type(self.asset_cfg)}'\n )\n\n\nclass FetchDataWithCommand(FetchTask):\n \"\"\"Fetch data using a command, writing to '{output_dir}'.\"\"\"\n\n def output(self):\n return luigi.LocalTarget(FETCH_DATASETS_DIR / self.output_name,\n format=luigi.format.Nop)\n\n def run(self):\n with temporary_path_dir(self.output()) as temp_path:\n run_qgr_command(interpolate_args(self.asset_cfg.args,\n output_dir=temp_path))\n",
"<import token>\n<class token>\n\n\nclass FetchCmrGranule(FetchTask):\n <assignment token>\n <function token>\n <function token>\n\n\nclass FetchDataFiles(FetchTask):\n\n def output(self):\n return luigi.LocalTarget(FETCH_DATASETS_DIR / self.output_name,\n format=luigi.format.Nop)\n\n def run(self):\n if type(self.asset_cfg) is not HttpAsset:\n raise RuntimeError(\n f'Expected HTTP asset. Received: {self.asset_cfg}')\n with temporary_path_dir(self.output()) as temp_path:\n for url in self.asset_cfg.urls:\n fetch_and_write_file(url, output_dir=temp_path, verify=self\n .asset_cfg.verify_tls)\n\n\nclass FetchLocalDataFiles(FetchTask):\n \"\"\"Fetch data that's already on the local installation.\n\n e.g. \"Manual\" assets which are downloaded by humans, \"Repository\" assets\n which are present in this git repo.\n \"\"\"\n\n def output(self):\n return luigi.LocalTarget(FETCH_DATASETS_DIR / self.output_name,\n format=luigi.format.Nop)\n\n def run(self):\n if isinstance(self.asset_cfg, RepositoryAsset):\n with temporary_path_dir(self.output()) as temp_path:\n evaluated_filepath = self.asset_cfg.filepath.eval()\n out_path = temp_path / evaluated_filepath.name\n shutil.copy2(evaluated_filepath, out_path)\n elif isinstance(self.asset_cfg, ManualAsset):\n local_dir = PRIVATE_ARCHIVE_DIR / self.dataset_cfg.id\n with temporary_path_dir(self.output()) as temp_path:\n shutil.copytree(local_dir, temp_path, dirs_exist_ok=True)\n else:\n raise RuntimeError(\n f'You selected an unsupported access_method: {type(self.asset_cfg)}'\n )\n\n\nclass FetchDataWithCommand(FetchTask):\n \"\"\"Fetch data using a command, writing to '{output_dir}'.\"\"\"\n\n def output(self):\n return luigi.LocalTarget(FETCH_DATASETS_DIR / self.output_name,\n format=luigi.format.Nop)\n\n def run(self):\n with temporary_path_dir(self.output()) as temp_path:\n run_qgr_command(interpolate_args(self.asset_cfg.args,\n output_dir=temp_path))\n",
"<import token>\n<class token>\n<class token>\n\n\nclass FetchDataFiles(FetchTask):\n\n def output(self):\n return luigi.LocalTarget(FETCH_DATASETS_DIR / self.output_name,\n format=luigi.format.Nop)\n\n def run(self):\n if type(self.asset_cfg) is not HttpAsset:\n raise RuntimeError(\n f'Expected HTTP asset. Received: {self.asset_cfg}')\n with temporary_path_dir(self.output()) as temp_path:\n for url in self.asset_cfg.urls:\n fetch_and_write_file(url, output_dir=temp_path, verify=self\n .asset_cfg.verify_tls)\n\n\nclass FetchLocalDataFiles(FetchTask):\n \"\"\"Fetch data that's already on the local installation.\n\n e.g. \"Manual\" assets which are downloaded by humans, \"Repository\" assets\n which are present in this git repo.\n \"\"\"\n\n def output(self):\n return luigi.LocalTarget(FETCH_DATASETS_DIR / self.output_name,\n format=luigi.format.Nop)\n\n def run(self):\n if isinstance(self.asset_cfg, RepositoryAsset):\n with temporary_path_dir(self.output()) as temp_path:\n evaluated_filepath = self.asset_cfg.filepath.eval()\n out_path = temp_path / evaluated_filepath.name\n shutil.copy2(evaluated_filepath, out_path)\n elif isinstance(self.asset_cfg, ManualAsset):\n local_dir = PRIVATE_ARCHIVE_DIR / self.dataset_cfg.id\n with temporary_path_dir(self.output()) as temp_path:\n shutil.copytree(local_dir, temp_path, dirs_exist_ok=True)\n else:\n raise RuntimeError(\n f'You selected an unsupported access_method: {type(self.asset_cfg)}'\n )\n\n\nclass FetchDataWithCommand(FetchTask):\n \"\"\"Fetch data using a command, writing to '{output_dir}'.\"\"\"\n\n def output(self):\n return luigi.LocalTarget(FETCH_DATASETS_DIR / self.output_name,\n format=luigi.format.Nop)\n\n def run(self):\n with temporary_path_dir(self.output()) as temp_path:\n run_qgr_command(interpolate_args(self.asset_cfg.args,\n output_dir=temp_path))\n",
"<import token>\n<class token>\n<class token>\n\n\nclass FetchDataFiles(FetchTask):\n\n def output(self):\n return luigi.LocalTarget(FETCH_DATASETS_DIR / self.output_name,\n format=luigi.format.Nop)\n <function token>\n\n\nclass FetchLocalDataFiles(FetchTask):\n \"\"\"Fetch data that's already on the local installation.\n\n e.g. \"Manual\" assets which are downloaded by humans, \"Repository\" assets\n which are present in this git repo.\n \"\"\"\n\n def output(self):\n return luigi.LocalTarget(FETCH_DATASETS_DIR / self.output_name,\n format=luigi.format.Nop)\n\n def run(self):\n if isinstance(self.asset_cfg, RepositoryAsset):\n with temporary_path_dir(self.output()) as temp_path:\n evaluated_filepath = self.asset_cfg.filepath.eval()\n out_path = temp_path / evaluated_filepath.name\n shutil.copy2(evaluated_filepath, out_path)\n elif isinstance(self.asset_cfg, ManualAsset):\n local_dir = PRIVATE_ARCHIVE_DIR / self.dataset_cfg.id\n with temporary_path_dir(self.output()) as temp_path:\n shutil.copytree(local_dir, temp_path, dirs_exist_ok=True)\n else:\n raise RuntimeError(\n f'You selected an unsupported access_method: {type(self.asset_cfg)}'\n )\n\n\nclass FetchDataWithCommand(FetchTask):\n \"\"\"Fetch data using a command, writing to '{output_dir}'.\"\"\"\n\n def output(self):\n return luigi.LocalTarget(FETCH_DATASETS_DIR / self.output_name,\n format=luigi.format.Nop)\n\n def run(self):\n with temporary_path_dir(self.output()) as temp_path:\n run_qgr_command(interpolate_args(self.asset_cfg.args,\n output_dir=temp_path))\n",
"<import token>\n<class token>\n<class token>\n\n\nclass FetchDataFiles(FetchTask):\n <function token>\n <function token>\n\n\nclass FetchLocalDataFiles(FetchTask):\n \"\"\"Fetch data that's already on the local installation.\n\n e.g. \"Manual\" assets which are downloaded by humans, \"Repository\" assets\n which are present in this git repo.\n \"\"\"\n\n def output(self):\n return luigi.LocalTarget(FETCH_DATASETS_DIR / self.output_name,\n format=luigi.format.Nop)\n\n def run(self):\n if isinstance(self.asset_cfg, RepositoryAsset):\n with temporary_path_dir(self.output()) as temp_path:\n evaluated_filepath = self.asset_cfg.filepath.eval()\n out_path = temp_path / evaluated_filepath.name\n shutil.copy2(evaluated_filepath, out_path)\n elif isinstance(self.asset_cfg, ManualAsset):\n local_dir = PRIVATE_ARCHIVE_DIR / self.dataset_cfg.id\n with temporary_path_dir(self.output()) as temp_path:\n shutil.copytree(local_dir, temp_path, dirs_exist_ok=True)\n else:\n raise RuntimeError(\n f'You selected an unsupported access_method: {type(self.asset_cfg)}'\n )\n\n\nclass FetchDataWithCommand(FetchTask):\n \"\"\"Fetch data using a command, writing to '{output_dir}'.\"\"\"\n\n def output(self):\n return luigi.LocalTarget(FETCH_DATASETS_DIR / self.output_name,\n format=luigi.format.Nop)\n\n def run(self):\n with temporary_path_dir(self.output()) as temp_path:\n run_qgr_command(interpolate_args(self.asset_cfg.args,\n output_dir=temp_path))\n",
"<import token>\n<class token>\n<class token>\n<class token>\n\n\nclass FetchLocalDataFiles(FetchTask):\n \"\"\"Fetch data that's already on the local installation.\n\n e.g. \"Manual\" assets which are downloaded by humans, \"Repository\" assets\n which are present in this git repo.\n \"\"\"\n\n def output(self):\n return luigi.LocalTarget(FETCH_DATASETS_DIR / self.output_name,\n format=luigi.format.Nop)\n\n def run(self):\n if isinstance(self.asset_cfg, RepositoryAsset):\n with temporary_path_dir(self.output()) as temp_path:\n evaluated_filepath = self.asset_cfg.filepath.eval()\n out_path = temp_path / evaluated_filepath.name\n shutil.copy2(evaluated_filepath, out_path)\n elif isinstance(self.asset_cfg, ManualAsset):\n local_dir = PRIVATE_ARCHIVE_DIR / self.dataset_cfg.id\n with temporary_path_dir(self.output()) as temp_path:\n shutil.copytree(local_dir, temp_path, dirs_exist_ok=True)\n else:\n raise RuntimeError(\n f'You selected an unsupported access_method: {type(self.asset_cfg)}'\n )\n\n\nclass FetchDataWithCommand(FetchTask):\n \"\"\"Fetch data using a command, writing to '{output_dir}'.\"\"\"\n\n def output(self):\n return luigi.LocalTarget(FETCH_DATASETS_DIR / self.output_name,\n format=luigi.format.Nop)\n\n def run(self):\n with temporary_path_dir(self.output()) as temp_path:\n run_qgr_command(interpolate_args(self.asset_cfg.args,\n output_dir=temp_path))\n",
"<import token>\n<class token>\n<class token>\n<class token>\n\n\nclass FetchLocalDataFiles(FetchTask):\n <docstring token>\n\n def output(self):\n return luigi.LocalTarget(FETCH_DATASETS_DIR / self.output_name,\n format=luigi.format.Nop)\n\n def run(self):\n if isinstance(self.asset_cfg, RepositoryAsset):\n with temporary_path_dir(self.output()) as temp_path:\n evaluated_filepath = self.asset_cfg.filepath.eval()\n out_path = temp_path / evaluated_filepath.name\n shutil.copy2(evaluated_filepath, out_path)\n elif isinstance(self.asset_cfg, ManualAsset):\n local_dir = PRIVATE_ARCHIVE_DIR / self.dataset_cfg.id\n with temporary_path_dir(self.output()) as temp_path:\n shutil.copytree(local_dir, temp_path, dirs_exist_ok=True)\n else:\n raise RuntimeError(\n f'You selected an unsupported access_method: {type(self.asset_cfg)}'\n )\n\n\nclass FetchDataWithCommand(FetchTask):\n \"\"\"Fetch data using a command, writing to '{output_dir}'.\"\"\"\n\n def output(self):\n return luigi.LocalTarget(FETCH_DATASETS_DIR / self.output_name,\n format=luigi.format.Nop)\n\n def run(self):\n with temporary_path_dir(self.output()) as temp_path:\n run_qgr_command(interpolate_args(self.asset_cfg.args,\n output_dir=temp_path))\n",
"<import token>\n<class token>\n<class token>\n<class token>\n\n\nclass FetchLocalDataFiles(FetchTask):\n <docstring token>\n\n def output(self):\n return luigi.LocalTarget(FETCH_DATASETS_DIR / self.output_name,\n format=luigi.format.Nop)\n <function token>\n\n\nclass FetchDataWithCommand(FetchTask):\n \"\"\"Fetch data using a command, writing to '{output_dir}'.\"\"\"\n\n def output(self):\n return luigi.LocalTarget(FETCH_DATASETS_DIR / self.output_name,\n format=luigi.format.Nop)\n\n def run(self):\n with temporary_path_dir(self.output()) as temp_path:\n run_qgr_command(interpolate_args(self.asset_cfg.args,\n output_dir=temp_path))\n",
"<import token>\n<class token>\n<class token>\n<class token>\n\n\nclass FetchLocalDataFiles(FetchTask):\n <docstring token>\n <function token>\n <function token>\n\n\nclass FetchDataWithCommand(FetchTask):\n \"\"\"Fetch data using a command, writing to '{output_dir}'.\"\"\"\n\n def output(self):\n return luigi.LocalTarget(FETCH_DATASETS_DIR / self.output_name,\n format=luigi.format.Nop)\n\n def run(self):\n with temporary_path_dir(self.output()) as temp_path:\n run_qgr_command(interpolate_args(self.asset_cfg.args,\n output_dir=temp_path))\n",
"<import token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass FetchDataWithCommand(FetchTask):\n \"\"\"Fetch data using a command, writing to '{output_dir}'.\"\"\"\n\n def output(self):\n return luigi.LocalTarget(FETCH_DATASETS_DIR / self.output_name,\n format=luigi.format.Nop)\n\n def run(self):\n with temporary_path_dir(self.output()) as temp_path:\n run_qgr_command(interpolate_args(self.asset_cfg.args,\n output_dir=temp_path))\n",
"<import token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass FetchDataWithCommand(FetchTask):\n <docstring token>\n\n def output(self):\n return luigi.LocalTarget(FETCH_DATASETS_DIR / self.output_name,\n format=luigi.format.Nop)\n\n def run(self):\n with temporary_path_dir(self.output()) as temp_path:\n run_qgr_command(interpolate_args(self.asset_cfg.args,\n output_dir=temp_path))\n",
"<import token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass FetchDataWithCommand(FetchTask):\n <docstring token>\n <function token>\n\n def run(self):\n with temporary_path_dir(self.output()) as temp_path:\n run_qgr_command(interpolate_args(self.asset_cfg.args,\n output_dir=temp_path))\n",
"<import token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass FetchDataWithCommand(FetchTask):\n <docstring token>\n <function token>\n <function token>\n",
"<import token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n"
] | false |
98,330 |
8f7b591fd1a87f5ba307d8b8b4d8652e2551ba66
|
import numpy as np
import pandas as pd
from sklearn.svm import SVR
import random
from sklearn import cross_validation
from sklearn.metrics import mean_squared_error
data2013 = pd.read_csv('expamles20_0.txt').ix[0:93]
data2014 = pd.read_csv('data_2014.txt')
features2013 = data2013.drop('odds',axis=1)
features2014 = data2014.drop('odds',axis=1)
target2013 = data2013['odds']
target2014 = data2014['odds']
class_ = []
for i in range(10):
class_.append(pd.read_csv('class_'+str(i)+'.txt')['name'].values)
svr_model = SVR(kernel='rbf', C=1e2, gamma=0.05)
i = random.randint(0,len(class_[0])-1)
j = random.randint(0,len(class_[1])-1)
k = random.randint(0,len(class_[2])-1)
l = random.randint(0,len(class_[3])-1)
m = random.randint(0,len(class_[4])-1)
n = random.randint(0,len(class_[5])-1)
p = random.randint(0,len(class_[6])-1)
q = random.randint(0,len(class_[7])-1)
s = random.randint(0,len(class_[8])-1)
t = random.randint(0,len(class_[9])-1)
cv_avr = []
for fold in range(10):
X_train, X_test, y_train, y_test = cross_validation.train_test_split(
features2013[[class_[0][i],class_[1][j],
class_[2][k],class_[3][l],
class_[4][m],class_[5][n],
class_[6][p],class_[7][q],
class_[8][s],class_[9][t]]],target2013, test_size=0.15, random_state=fold)
#evaluate fitness function
SVRmodel = SVR(C=1e2, gamma=0.05, kernel='rbf')
SVRmodel.fit(X_train,y_train)
cv_avr.append(SVRmodel.score(X_test,y_test))
R2 = 1.0*sum(cv_avr)/len(cv_avr)
f = open('KMdata10.txt','wb')
f.write('generation,train,validation,test,validmse,testmse\n')
f2 = open('features10.txt','wb')
iteration = 0
while (R2 < 0.9 and iteration < 500):
old = i,j,k,l,m,n,p,q,s,t
r = random.random()
if r < 0.2:
i = random.randint(0,len(class_[0])-1)
elif r < 0.3:
j = random.randint(0,len(class_[1])-1)
elif r < 0.5:
k = random.randint(0,len(class_[2])-1)
elif r < 0.55:
l = random.randint(0,len(class_[3])-1)
elif r < 0.65:
m = random.randint(0,len(class_[4])-1)
elif r < 0.7:
n = random.randint(0,len(class_[5])-1)
elif r < 0.75:
p = random.randint(0,len(class_[6])-1)
elif r < 0.85:
q = random.randint(0,len(class_[7])-1)
elif r < 0.9:
s = random.randint(0,len(class_[8])-1)
else:
t = random.randint(0,len(class_[9])-1)
r = random.random()
if r < 0.2:
i = random.randint(0,len(class_[0])-1)
elif r < 0.3:
j = random.randint(0,len(class_[1])-1)
elif r < 0.5:
k = random.randint(0,len(class_[2])-1)
elif r < 0.55:
l = random.randint(0,len(class_[3])-1)
elif r < 0.65:
m = random.randint(0,len(class_[4])-1)
elif r < 0.7:
n = random.randint(0,len(class_[5])-1)
elif r < 0.75:
p = random.randint(0,len(class_[6])-1)
elif r < 0.85:
q = random.randint(0,len(class_[7])-1)
elif r < 0.9:
s = random.randint(0,len(class_[8])-1)
else:
t = random.randint(0,len(class_[9])-1)
cv_avr = []
mse_avr = []
tr_avr = []
for fold in range(5):
X_train, X_test, y_train, y_test = cross_validation.train_test_split(
features2013[[class_[0][i],class_[1][j],
class_[2][k],class_[3][l],
class_[4][m],class_[5][n],
class_[6][p],class_[7][q],
class_[8][s],class_[9][t]]],target2013, test_size=0.15, random_state=fold)
#evaluate fitness function
SVRmodel = SVR(C=1e2,gamma=0.05, kernel='rbf')
SVRmodel.fit(X_train,y_train)
tr_avr.append(SVRmodel.score(X_train,y_train))
cv_avr.append(SVRmodel.score(X_test,y_test))
mse_avr.append(mean_squared_error(SVRmodel.predict(X_test),y_test))
tr_R2 = 1.0*sum(tr_avr)/len(tr_avr)
newR2 = 1.0*sum(cv_avr)/len(cv_avr)
mse = 1.0*sum(mse_avr)/len(mse_avr)
if newR2 <= R2:
i,j,k,l,m,n,p,q,s,t = old
print iteration
print i,j,k,l,m
# print features2013[[class_[0][i],class_[1][j],class_[2][k],class_[3][l],class_[4][m]]].columns
print newR2
SVRmodel = SVR(C=1e2, gamma=0.05, kernel='rbf')
SVRmodel.fit(features2013[[class_[0][i],class_[1][j],
class_[2][k],class_[3][l],
class_[4][m],class_[5][n],
class_[6][p],class_[7][q],
class_[8][s],class_[9][t]]],target2013)
test_R2 = SVRmodel.score(features2014[[class_[0][i],class_[1][j],
class_[2][k],class_[3][l],
class_[4][m],class_[5][n],
class_[6][p],class_[7][q],
class_[8][s],class_[9][t]]],target2014)
test_mse = mean_squared_error(SVRmodel.predict(features2014[[class_[0][i],class_[1][j],
class_[2][k],class_[3][l],
class_[4][m],class_[5][n],
class_[6][p],class_[7][q],
class_[8][s],class_[9][t]]]),target2014)
f.write(str(iteration)+','+str(tr_R2)+','+str(newR2)+','+str(test_R2)+','+str(mse)+','+str(test_mse)+'\n')
f2.write('iteration: '+str(iteration)+'\n')
f2.write('features: '+'\n')
for ii in range(10):
f2.write('\t'+str(features2013[[class_[0][i],class_[1][j],
class_[2][k],class_[3][l],
class_[4][m],class_[5][n],
class_[6][p],class_[7][q],
class_[8][s],class_[9][t]]].columns.values[ii])+'\n')
f2.write('\n')
R2 = newR2
iteration += 1
f.close()
f2.close()
|
[
"import numpy as np\nimport pandas as pd\nfrom sklearn.svm import SVR\nimport random\nfrom sklearn import cross_validation\nfrom sklearn.metrics import mean_squared_error\n\ndata2013 = pd.read_csv('expamles20_0.txt').ix[0:93]\ndata2014 = pd.read_csv('data_2014.txt')\n\nfeatures2013 = data2013.drop('odds',axis=1)\nfeatures2014 = data2014.drop('odds',axis=1)\n\ntarget2013 = data2013['odds']\ntarget2014 = data2014['odds']\n\nclass_ = []\nfor i in range(10):\n\tclass_.append(pd.read_csv('class_'+str(i)+'.txt')['name'].values)\n\nsvr_model = SVR(kernel='rbf', C=1e2, gamma=0.05)\n\ni = random.randint(0,len(class_[0])-1)\nj = random.randint(0,len(class_[1])-1)\nk = random.randint(0,len(class_[2])-1)\nl = random.randint(0,len(class_[3])-1)\nm = random.randint(0,len(class_[4])-1)\nn = random.randint(0,len(class_[5])-1)\np = random.randint(0,len(class_[6])-1)\nq = random.randint(0,len(class_[7])-1)\ns = random.randint(0,len(class_[8])-1)\nt = random.randint(0,len(class_[9])-1)\n\ncv_avr = []\nfor fold in range(10):\n\tX_train, X_test, y_train, y_test = cross_validation.train_test_split(\n \tfeatures2013[[class_[0][i],class_[1][j],\n\t\tclass_[2][k],class_[3][l],\n\t\tclass_[4][m],class_[5][n],\n\t\tclass_[6][p],class_[7][q],\n\t\tclass_[8][s],class_[9][t]]],target2013, test_size=0.15, random_state=fold)\n\t#evaluate fitness function\n\tSVRmodel = SVR(C=1e2, gamma=0.05, kernel='rbf')\n\tSVRmodel.fit(X_train,y_train)\n\tcv_avr.append(SVRmodel.score(X_test,y_test))\nR2 = 1.0*sum(cv_avr)/len(cv_avr)\n\nf = open('KMdata10.txt','wb')\nf.write('generation,train,validation,test,validmse,testmse\\n')\nf2 = open('features10.txt','wb')\niteration = 0\n\nwhile (R2 < 0.9 and iteration < 500):\n\told = i,j,k,l,m,n,p,q,s,t\n\tr = random.random()\n\tif r < 0.2:\n\t\ti = random.randint(0,len(class_[0])-1)\n\telif r < 0.3:\n\t\tj = random.randint(0,len(class_[1])-1)\n\telif r < 0.5:\n\t\tk = random.randint(0,len(class_[2])-1)\n\telif r < 0.55:\n\t\tl = random.randint(0,len(class_[3])-1)\n\telif r < 0.65:\n\t\tm = random.randint(0,len(class_[4])-1)\n\telif r < 0.7:\n\t\tn = random.randint(0,len(class_[5])-1)\n\telif r < 0.75:\n\t\tp = random.randint(0,len(class_[6])-1)\n\telif r < 0.85:\n\t\tq = random.randint(0,len(class_[7])-1)\n\telif r < 0.9:\n\t\ts = random.randint(0,len(class_[8])-1)\n\telse:\n\t\tt = random.randint(0,len(class_[9])-1)\n\n\tr = random.random()\n\tif r < 0.2:\n\t\ti = random.randint(0,len(class_[0])-1)\n\telif r < 0.3:\n\t\tj = random.randint(0,len(class_[1])-1)\n\telif r < 0.5:\n\t\tk = random.randint(0,len(class_[2])-1)\n\telif r < 0.55:\n\t\tl = random.randint(0,len(class_[3])-1)\n\telif r < 0.65:\n\t\tm = random.randint(0,len(class_[4])-1)\n\telif r < 0.7:\n\t\tn = random.randint(0,len(class_[5])-1)\n\telif r < 0.75:\n\t\tp = random.randint(0,len(class_[6])-1)\n\telif r < 0.85:\n\t\tq = random.randint(0,len(class_[7])-1)\n\telif r < 0.9:\n\t\ts = random.randint(0,len(class_[8])-1)\n\telse:\n\t\tt = random.randint(0,len(class_[9])-1)\n\n\tcv_avr = []\n\tmse_avr = []\n\ttr_avr = []\n\tfor fold in range(5):\n\t\tX_train, X_test, y_train, y_test = cross_validation.train_test_split(\n\t \tfeatures2013[[class_[0][i],class_[1][j],\n\t\t\tclass_[2][k],class_[3][l],\n\t\t\tclass_[4][m],class_[5][n],\n\t\t\tclass_[6][p],class_[7][q],\n\t\t\tclass_[8][s],class_[9][t]]],target2013, test_size=0.15, random_state=fold)\n\t\t#evaluate fitness function\n\t\tSVRmodel = SVR(C=1e2,gamma=0.05, kernel='rbf')\n\t\tSVRmodel.fit(X_train,y_train)\n\t\ttr_avr.append(SVRmodel.score(X_train,y_train))\n\t\tcv_avr.append(SVRmodel.score(X_test,y_test))\n\t\tmse_avr.append(mean_squared_error(SVRmodel.predict(X_test),y_test))\n\ttr_R2 = 1.0*sum(tr_avr)/len(tr_avr)\n\tnewR2 = 1.0*sum(cv_avr)/len(cv_avr)\n\tmse = 1.0*sum(mse_avr)/len(mse_avr)\n\n\tif newR2 <= R2:\n\t\ti,j,k,l,m,n,p,q,s,t = old\n\t\n\tprint iteration\n\tprint i,j,k,l,m\n\t# print features2013[[class_[0][i],class_[1][j],class_[2][k],class_[3][l],class_[4][m]]].columns\n\tprint newR2\n\n\tSVRmodel = SVR(C=1e2, gamma=0.05, kernel='rbf')\n\tSVRmodel.fit(features2013[[class_[0][i],class_[1][j],\n\t\t\tclass_[2][k],class_[3][l],\n\t\t\tclass_[4][m],class_[5][n],\n\t\t\tclass_[6][p],class_[7][q],\n\t\t\tclass_[8][s],class_[9][t]]],target2013)\n\ttest_R2 = SVRmodel.score(features2014[[class_[0][i],class_[1][j],\n\t\t\tclass_[2][k],class_[3][l],\n\t\t\tclass_[4][m],class_[5][n],\n\t\t\tclass_[6][p],class_[7][q],\n\t\t\tclass_[8][s],class_[9][t]]],target2014)\n\ttest_mse = mean_squared_error(SVRmodel.predict(features2014[[class_[0][i],class_[1][j],\n\t\t\tclass_[2][k],class_[3][l],\n\t\t\tclass_[4][m],class_[5][n],\n\t\t\tclass_[6][p],class_[7][q],\n\t\t\tclass_[8][s],class_[9][t]]]),target2014)\n\t\n\tf.write(str(iteration)+','+str(tr_R2)+','+str(newR2)+','+str(test_R2)+','+str(mse)+','+str(test_mse)+'\\n')\n\tf2.write('iteration: '+str(iteration)+'\\n')\n\tf2.write('features: '+'\\n')\n\tfor ii in range(10):\n\t\tf2.write('\\t'+str(features2013[[class_[0][i],class_[1][j],\n\t\t\tclass_[2][k],class_[3][l],\n\t\t\tclass_[4][m],class_[5][n],\n\t\t\tclass_[6][p],class_[7][q],\n\t\t\tclass_[8][s],class_[9][t]]].columns.values[ii])+'\\n')\n\tf2.write('\\n')\n\tR2 = newR2\n\n\titeration += 1\n\nf.close()\nf2.close()\n"
] | true |
98,331 |
acf756711d97764ebd226aeafb84db93a79e3d92
|
__author__ = 'Joseph Conlin'
"""
Tests for page objects
"""
from TestBrowser import TestBrowser
from HeaderPage import Header
from TheatersPage import Theaters
import TheaterDetailPage
from FileInput import ReadExcel
import unittest
from random import randint
# Setup some common test variables
_headerSearchText = "Provo, UT"
_headerSearchTextNoSpaces = "ABC123"
class HeaderTests(unittest.TestCase):
def setUp(self):
self.driver = TestBrowser().get_browser()
self.header = Header(self.driver)
def tearDown(self):
self.driver.quit()
def test_search(self):
currentPage = self.driver.current_url
self.header.do_search(_headerSearchTextNoSpaces)
newPage = self.driver.current_url
self.assertNotEqual(currentPage, newPage, "Searching did not navigate to a new page")
def test_search_random_input_from_excel(self):
# Get a random row greater than 0 to avoid the header and get that search data from the default input file
# [0] is the search string, [1] is the theater index, [2] is theater name, [3] is zip code
index = randint(1,6)
input = ReadExcel.get_sheet_values()
searchText = input[index][0]
expectedZip = str(input[index][3])
currentPage = self.driver.current_url
self.header.do_search(searchText)
newPage = self.driver.current_url
self.assertNotEqual(currentPage, newPage, "Searching did not navigate to a new page")
self.assertIn(expectedZip, self.driver.page_source, "Expected zip code not found in results page")
class TheatersTests(unittest.TestCase):
def setUp(self):
self.driver = TestBrowser().get_browser()
# For internal testing purposes, navigate to the theater search results page
self.header = Header(self.driver)
self.header.do_search(_headerSearchText)
self.theaters = Theaters(self.driver)
def tearDown(self):
self.driver.quit()
def test_theaters_list(self):
self.assertNotEqual(0, len(self.theaters.theatersList), "Did not create theaters list")
self.assertNotEqual(0, len(self.theaters.theatersList[0]), "Did not get a valid list of theaters")
class TheaterDetailTests(unittest.TestCase):
def setUp(self):
self.driver = TestBrowser().get_browser()
# For internal testing purposes, navigate to a theater details page
self.header = Header(self.driver)
self.header.do_search(_headerSearchText)
self.theaters = Theaters(self.driver)
self.theater = TheaterDetailPage.TheaterDetail(self.driver)
self.theaterCalendar = TheaterDetailPage.TheaterCalendar(self.driver)
def tearDown(self):
self.driver.quit()
def test_change_days(self):
currentSelectDate = self.theaterCalendar.selectedDate
self.theaterCalendar.click_date_by_index(2)
newTheaterCalendar = TheaterDetailPage.TheaterCalendar(self.driver)
newSelectDate = newTheaterCalendar.selectedDate
self.assertNotEqual(currentSelectDate, newSelectDate,
"Selecting a different day did not navigate to a new page")
def test_movies_list_different_days(self):
currentMovieList = self.theater.theaterMoviesList
currentSelectDate = self.theaterCalendar.selectedDate
self.theaterCalendar.click_date_by_index(1)
newTheater = TheaterDetailPage.TheaterDetail(self.driver)
newMovieList = newTheater.theaterMoviesList
newTheaterCalendar = TheaterDetailPage.TheaterCalendar(self.driver)
newSelectDate = newTheaterCalendar.selectedDate
self.assertNotEqual(currentSelectDate+currentMovieList[0].movieShowTimeList[0],
newSelectDate+newMovieList[0].movieShowTimeList[0],
"Movie date and time from today matches movie date and time from tomorrow")
def test_search_random_input_from_excel(self):
# Get a random row greater than 0 to avoid the header and get that search data from the default input file
# [0] is the search string, [1] is the theater index, [2] is theater name, [3] is zip code
index = randint(1,6)
input = ReadExcel.get_sheet_values()
searchText = input[index][0]
theaterIndex = input[index][1]
theaterText = input[index][2]
if(_headerSearchText != searchText):
# Setup did a different search than we want - redo the search and update the variables
self.header = Header(self.driver)
self.header.do_search(searchText)
self.theaters = Theaters(self.driver)
self.theater = TheaterDetailPage.TheaterDetail(self.driver, theaterIndex)
theaterName = self.theater.theaterName
self.assertIn(theaterText.lower(), theaterName.lower(),
"Did not end up on theater detail page for selected theater")
if __name__ == '__main__':
# suite = unittest.TestLoader().loadTestsFromTestCase(HeaderTests)
testsToRun = [
HeaderTests,
TheatersTests,
TheaterDetailTests,
]
suite = unittest.TestSuite([unittest.TestLoader().loadTestsFromTestCase(test) for test in testsToRun])
unittest.TextTestRunner(verbosity=2).run(suite)
|
[
"__author__ = 'Joseph Conlin'\n\"\"\"\nTests for page objects\n\"\"\"\nfrom TestBrowser import TestBrowser\nfrom HeaderPage import Header\nfrom TheatersPage import Theaters\nimport TheaterDetailPage\nfrom FileInput import ReadExcel\n\nimport unittest\nfrom random import randint\n\n# Setup some common test variables\n_headerSearchText = \"Provo, UT\"\n_headerSearchTextNoSpaces = \"ABC123\"\n\n\nclass HeaderTests(unittest.TestCase):\n def setUp(self):\n self.driver = TestBrowser().get_browser()\n self.header = Header(self.driver)\n\n def tearDown(self):\n self.driver.quit()\n\n def test_search(self):\n currentPage = self.driver.current_url\n self.header.do_search(_headerSearchTextNoSpaces)\n newPage = self.driver.current_url\n self.assertNotEqual(currentPage, newPage, \"Searching did not navigate to a new page\")\n\n def test_search_random_input_from_excel(self):\n # Get a random row greater than 0 to avoid the header and get that search data from the default input file\n # [0] is the search string, [1] is the theater index, [2] is theater name, [3] is zip code\n index = randint(1,6)\n input = ReadExcel.get_sheet_values()\n\n searchText = input[index][0]\n expectedZip = str(input[index][3])\n currentPage = self.driver.current_url\n self.header.do_search(searchText)\n newPage = self.driver.current_url\n self.assertNotEqual(currentPage, newPage, \"Searching did not navigate to a new page\")\n self.assertIn(expectedZip, self.driver.page_source, \"Expected zip code not found in results page\")\n\n\nclass TheatersTests(unittest.TestCase):\n def setUp(self):\n self.driver = TestBrowser().get_browser()\n # For internal testing purposes, navigate to the theater search results page\n self.header = Header(self.driver)\n self.header.do_search(_headerSearchText)\n self.theaters = Theaters(self.driver)\n\n def tearDown(self):\n self.driver.quit()\n\n def test_theaters_list(self):\n self.assertNotEqual(0, len(self.theaters.theatersList), \"Did not create theaters list\")\n self.assertNotEqual(0, len(self.theaters.theatersList[0]), \"Did not get a valid list of theaters\")\n\n\nclass TheaterDetailTests(unittest.TestCase):\n def setUp(self):\n self.driver = TestBrowser().get_browser()\n # For internal testing purposes, navigate to a theater details page\n self.header = Header(self.driver)\n self.header.do_search(_headerSearchText)\n self.theaters = Theaters(self.driver)\n self.theater = TheaterDetailPage.TheaterDetail(self.driver)\n self.theaterCalendar = TheaterDetailPage.TheaterCalendar(self.driver)\n\n def tearDown(self):\n self.driver.quit()\n\n def test_change_days(self):\n currentSelectDate = self.theaterCalendar.selectedDate\n self.theaterCalendar.click_date_by_index(2)\n newTheaterCalendar = TheaterDetailPage.TheaterCalendar(self.driver)\n newSelectDate = newTheaterCalendar.selectedDate\n self.assertNotEqual(currentSelectDate, newSelectDate,\n \"Selecting a different day did not navigate to a new page\")\n\n def test_movies_list_different_days(self):\n currentMovieList = self.theater.theaterMoviesList\n currentSelectDate = self.theaterCalendar.selectedDate\n self.theaterCalendar.click_date_by_index(1)\n newTheater = TheaterDetailPage.TheaterDetail(self.driver)\n newMovieList = newTheater.theaterMoviesList\n newTheaterCalendar = TheaterDetailPage.TheaterCalendar(self.driver)\n newSelectDate = newTheaterCalendar.selectedDate\n self.assertNotEqual(currentSelectDate+currentMovieList[0].movieShowTimeList[0],\n newSelectDate+newMovieList[0].movieShowTimeList[0],\n \"Movie date and time from today matches movie date and time from tomorrow\")\n\n def test_search_random_input_from_excel(self):\n # Get a random row greater than 0 to avoid the header and get that search data from the default input file\n # [0] is the search string, [1] is the theater index, [2] is theater name, [3] is zip code\n index = randint(1,6)\n input = ReadExcel.get_sheet_values()\n\n searchText = input[index][0]\n theaterIndex = input[index][1]\n theaterText = input[index][2]\n\n if(_headerSearchText != searchText):\n # Setup did a different search than we want - redo the search and update the variables\n self.header = Header(self.driver)\n self.header.do_search(searchText)\n self.theaters = Theaters(self.driver)\n self.theater = TheaterDetailPage.TheaterDetail(self.driver, theaterIndex)\n\n theaterName = self.theater.theaterName\n self.assertIn(theaterText.lower(), theaterName.lower(),\n \"Did not end up on theater detail page for selected theater\")\n\n\nif __name__ == '__main__':\n # suite = unittest.TestLoader().loadTestsFromTestCase(HeaderTests)\n testsToRun = [\n HeaderTests,\n TheatersTests,\n TheaterDetailTests,\n ]\n suite = unittest.TestSuite([unittest.TestLoader().loadTestsFromTestCase(test) for test in testsToRun])\n unittest.TextTestRunner(verbosity=2).run(suite)\n",
"__author__ = 'Joseph Conlin'\n<docstring token>\nfrom TestBrowser import TestBrowser\nfrom HeaderPage import Header\nfrom TheatersPage import Theaters\nimport TheaterDetailPage\nfrom FileInput import ReadExcel\nimport unittest\nfrom random import randint\n_headerSearchText = 'Provo, UT'\n_headerSearchTextNoSpaces = 'ABC123'\n\n\nclass HeaderTests(unittest.TestCase):\n\n def setUp(self):\n self.driver = TestBrowser().get_browser()\n self.header = Header(self.driver)\n\n def tearDown(self):\n self.driver.quit()\n\n def test_search(self):\n currentPage = self.driver.current_url\n self.header.do_search(_headerSearchTextNoSpaces)\n newPage = self.driver.current_url\n self.assertNotEqual(currentPage, newPage,\n 'Searching did not navigate to a new page')\n\n def test_search_random_input_from_excel(self):\n index = randint(1, 6)\n input = ReadExcel.get_sheet_values()\n searchText = input[index][0]\n expectedZip = str(input[index][3])\n currentPage = self.driver.current_url\n self.header.do_search(searchText)\n newPage = self.driver.current_url\n self.assertNotEqual(currentPage, newPage,\n 'Searching did not navigate to a new page')\n self.assertIn(expectedZip, self.driver.page_source,\n 'Expected zip code not found in results page')\n\n\nclass TheatersTests(unittest.TestCase):\n\n def setUp(self):\n self.driver = TestBrowser().get_browser()\n self.header = Header(self.driver)\n self.header.do_search(_headerSearchText)\n self.theaters = Theaters(self.driver)\n\n def tearDown(self):\n self.driver.quit()\n\n def test_theaters_list(self):\n self.assertNotEqual(0, len(self.theaters.theatersList),\n 'Did not create theaters list')\n self.assertNotEqual(0, len(self.theaters.theatersList[0]),\n 'Did not get a valid list of theaters')\n\n\nclass TheaterDetailTests(unittest.TestCase):\n\n def setUp(self):\n self.driver = TestBrowser().get_browser()\n self.header = Header(self.driver)\n self.header.do_search(_headerSearchText)\n self.theaters = Theaters(self.driver)\n self.theater = TheaterDetailPage.TheaterDetail(self.driver)\n self.theaterCalendar = TheaterDetailPage.TheaterCalendar(self.driver)\n\n def tearDown(self):\n self.driver.quit()\n\n def test_change_days(self):\n currentSelectDate = self.theaterCalendar.selectedDate\n self.theaterCalendar.click_date_by_index(2)\n newTheaterCalendar = TheaterDetailPage.TheaterCalendar(self.driver)\n newSelectDate = newTheaterCalendar.selectedDate\n self.assertNotEqual(currentSelectDate, newSelectDate,\n 'Selecting a different day did not navigate to a new page')\n\n def test_movies_list_different_days(self):\n currentMovieList = self.theater.theaterMoviesList\n currentSelectDate = self.theaterCalendar.selectedDate\n self.theaterCalendar.click_date_by_index(1)\n newTheater = TheaterDetailPage.TheaterDetail(self.driver)\n newMovieList = newTheater.theaterMoviesList\n newTheaterCalendar = TheaterDetailPage.TheaterCalendar(self.driver)\n newSelectDate = newTheaterCalendar.selectedDate\n self.assertNotEqual(currentSelectDate + currentMovieList[0].\n movieShowTimeList[0], newSelectDate + newMovieList[0].\n movieShowTimeList[0],\n 'Movie date and time from today matches movie date and time from tomorrow'\n )\n\n def test_search_random_input_from_excel(self):\n index = randint(1, 6)\n input = ReadExcel.get_sheet_values()\n searchText = input[index][0]\n theaterIndex = input[index][1]\n theaterText = input[index][2]\n if _headerSearchText != searchText:\n self.header = Header(self.driver)\n self.header.do_search(searchText)\n self.theaters = Theaters(self.driver)\n self.theater = TheaterDetailPage.TheaterDetail(self.driver,\n theaterIndex)\n theaterName = self.theater.theaterName\n self.assertIn(theaterText.lower(), theaterName.lower(),\n 'Did not end up on theater detail page for selected theater')\n\n\nif __name__ == '__main__':\n testsToRun = [HeaderTests, TheatersTests, TheaterDetailTests]\n suite = unittest.TestSuite([unittest.TestLoader().loadTestsFromTestCase\n (test) for test in testsToRun])\n unittest.TextTestRunner(verbosity=2).run(suite)\n",
"__author__ = 'Joseph Conlin'\n<docstring token>\n<import token>\n_headerSearchText = 'Provo, UT'\n_headerSearchTextNoSpaces = 'ABC123'\n\n\nclass HeaderTests(unittest.TestCase):\n\n def setUp(self):\n self.driver = TestBrowser().get_browser()\n self.header = Header(self.driver)\n\n def tearDown(self):\n self.driver.quit()\n\n def test_search(self):\n currentPage = self.driver.current_url\n self.header.do_search(_headerSearchTextNoSpaces)\n newPage = self.driver.current_url\n self.assertNotEqual(currentPage, newPage,\n 'Searching did not navigate to a new page')\n\n def test_search_random_input_from_excel(self):\n index = randint(1, 6)\n input = ReadExcel.get_sheet_values()\n searchText = input[index][0]\n expectedZip = str(input[index][3])\n currentPage = self.driver.current_url\n self.header.do_search(searchText)\n newPage = self.driver.current_url\n self.assertNotEqual(currentPage, newPage,\n 'Searching did not navigate to a new page')\n self.assertIn(expectedZip, self.driver.page_source,\n 'Expected zip code not found in results page')\n\n\nclass TheatersTests(unittest.TestCase):\n\n def setUp(self):\n self.driver = TestBrowser().get_browser()\n self.header = Header(self.driver)\n self.header.do_search(_headerSearchText)\n self.theaters = Theaters(self.driver)\n\n def tearDown(self):\n self.driver.quit()\n\n def test_theaters_list(self):\n self.assertNotEqual(0, len(self.theaters.theatersList),\n 'Did not create theaters list')\n self.assertNotEqual(0, len(self.theaters.theatersList[0]),\n 'Did not get a valid list of theaters')\n\n\nclass TheaterDetailTests(unittest.TestCase):\n\n def setUp(self):\n self.driver = TestBrowser().get_browser()\n self.header = Header(self.driver)\n self.header.do_search(_headerSearchText)\n self.theaters = Theaters(self.driver)\n self.theater = TheaterDetailPage.TheaterDetail(self.driver)\n self.theaterCalendar = TheaterDetailPage.TheaterCalendar(self.driver)\n\n def tearDown(self):\n self.driver.quit()\n\n def test_change_days(self):\n currentSelectDate = self.theaterCalendar.selectedDate\n self.theaterCalendar.click_date_by_index(2)\n newTheaterCalendar = TheaterDetailPage.TheaterCalendar(self.driver)\n newSelectDate = newTheaterCalendar.selectedDate\n self.assertNotEqual(currentSelectDate, newSelectDate,\n 'Selecting a different day did not navigate to a new page')\n\n def test_movies_list_different_days(self):\n currentMovieList = self.theater.theaterMoviesList\n currentSelectDate = self.theaterCalendar.selectedDate\n self.theaterCalendar.click_date_by_index(1)\n newTheater = TheaterDetailPage.TheaterDetail(self.driver)\n newMovieList = newTheater.theaterMoviesList\n newTheaterCalendar = TheaterDetailPage.TheaterCalendar(self.driver)\n newSelectDate = newTheaterCalendar.selectedDate\n self.assertNotEqual(currentSelectDate + currentMovieList[0].\n movieShowTimeList[0], newSelectDate + newMovieList[0].\n movieShowTimeList[0],\n 'Movie date and time from today matches movie date and time from tomorrow'\n )\n\n def test_search_random_input_from_excel(self):\n index = randint(1, 6)\n input = ReadExcel.get_sheet_values()\n searchText = input[index][0]\n theaterIndex = input[index][1]\n theaterText = input[index][2]\n if _headerSearchText != searchText:\n self.header = Header(self.driver)\n self.header.do_search(searchText)\n self.theaters = Theaters(self.driver)\n self.theater = TheaterDetailPage.TheaterDetail(self.driver,\n theaterIndex)\n theaterName = self.theater.theaterName\n self.assertIn(theaterText.lower(), theaterName.lower(),\n 'Did not end up on theater detail page for selected theater')\n\n\nif __name__ == '__main__':\n testsToRun = [HeaderTests, TheatersTests, TheaterDetailTests]\n suite = unittest.TestSuite([unittest.TestLoader().loadTestsFromTestCase\n (test) for test in testsToRun])\n unittest.TextTestRunner(verbosity=2).run(suite)\n",
"<assignment token>\n<docstring token>\n<import token>\n<assignment token>\n\n\nclass HeaderTests(unittest.TestCase):\n\n def setUp(self):\n self.driver = TestBrowser().get_browser()\n self.header = Header(self.driver)\n\n def tearDown(self):\n self.driver.quit()\n\n def test_search(self):\n currentPage = self.driver.current_url\n self.header.do_search(_headerSearchTextNoSpaces)\n newPage = self.driver.current_url\n self.assertNotEqual(currentPage, newPage,\n 'Searching did not navigate to a new page')\n\n def test_search_random_input_from_excel(self):\n index = randint(1, 6)\n input = ReadExcel.get_sheet_values()\n searchText = input[index][0]\n expectedZip = str(input[index][3])\n currentPage = self.driver.current_url\n self.header.do_search(searchText)\n newPage = self.driver.current_url\n self.assertNotEqual(currentPage, newPage,\n 'Searching did not navigate to a new page')\n self.assertIn(expectedZip, self.driver.page_source,\n 'Expected zip code not found in results page')\n\n\nclass TheatersTests(unittest.TestCase):\n\n def setUp(self):\n self.driver = TestBrowser().get_browser()\n self.header = Header(self.driver)\n self.header.do_search(_headerSearchText)\n self.theaters = Theaters(self.driver)\n\n def tearDown(self):\n self.driver.quit()\n\n def test_theaters_list(self):\n self.assertNotEqual(0, len(self.theaters.theatersList),\n 'Did not create theaters list')\n self.assertNotEqual(0, len(self.theaters.theatersList[0]),\n 'Did not get a valid list of theaters')\n\n\nclass TheaterDetailTests(unittest.TestCase):\n\n def setUp(self):\n self.driver = TestBrowser().get_browser()\n self.header = Header(self.driver)\n self.header.do_search(_headerSearchText)\n self.theaters = Theaters(self.driver)\n self.theater = TheaterDetailPage.TheaterDetail(self.driver)\n self.theaterCalendar = TheaterDetailPage.TheaterCalendar(self.driver)\n\n def tearDown(self):\n self.driver.quit()\n\n def test_change_days(self):\n currentSelectDate = self.theaterCalendar.selectedDate\n self.theaterCalendar.click_date_by_index(2)\n newTheaterCalendar = TheaterDetailPage.TheaterCalendar(self.driver)\n newSelectDate = newTheaterCalendar.selectedDate\n self.assertNotEqual(currentSelectDate, newSelectDate,\n 'Selecting a different day did not navigate to a new page')\n\n def test_movies_list_different_days(self):\n currentMovieList = self.theater.theaterMoviesList\n currentSelectDate = self.theaterCalendar.selectedDate\n self.theaterCalendar.click_date_by_index(1)\n newTheater = TheaterDetailPage.TheaterDetail(self.driver)\n newMovieList = newTheater.theaterMoviesList\n newTheaterCalendar = TheaterDetailPage.TheaterCalendar(self.driver)\n newSelectDate = newTheaterCalendar.selectedDate\n self.assertNotEqual(currentSelectDate + currentMovieList[0].\n movieShowTimeList[0], newSelectDate + newMovieList[0].\n movieShowTimeList[0],\n 'Movie date and time from today matches movie date and time from tomorrow'\n )\n\n def test_search_random_input_from_excel(self):\n index = randint(1, 6)\n input = ReadExcel.get_sheet_values()\n searchText = input[index][0]\n theaterIndex = input[index][1]\n theaterText = input[index][2]\n if _headerSearchText != searchText:\n self.header = Header(self.driver)\n self.header.do_search(searchText)\n self.theaters = Theaters(self.driver)\n self.theater = TheaterDetailPage.TheaterDetail(self.driver,\n theaterIndex)\n theaterName = self.theater.theaterName\n self.assertIn(theaterText.lower(), theaterName.lower(),\n 'Did not end up on theater detail page for selected theater')\n\n\nif __name__ == '__main__':\n testsToRun = [HeaderTests, TheatersTests, TheaterDetailTests]\n suite = unittest.TestSuite([unittest.TestLoader().loadTestsFromTestCase\n (test) for test in testsToRun])\n unittest.TextTestRunner(verbosity=2).run(suite)\n",
"<assignment token>\n<docstring token>\n<import token>\n<assignment token>\n\n\nclass HeaderTests(unittest.TestCase):\n\n def setUp(self):\n self.driver = TestBrowser().get_browser()\n self.header = Header(self.driver)\n\n def tearDown(self):\n self.driver.quit()\n\n def test_search(self):\n currentPage = self.driver.current_url\n self.header.do_search(_headerSearchTextNoSpaces)\n newPage = self.driver.current_url\n self.assertNotEqual(currentPage, newPage,\n 'Searching did not navigate to a new page')\n\n def test_search_random_input_from_excel(self):\n index = randint(1, 6)\n input = ReadExcel.get_sheet_values()\n searchText = input[index][0]\n expectedZip = str(input[index][3])\n currentPage = self.driver.current_url\n self.header.do_search(searchText)\n newPage = self.driver.current_url\n self.assertNotEqual(currentPage, newPage,\n 'Searching did not navigate to a new page')\n self.assertIn(expectedZip, self.driver.page_source,\n 'Expected zip code not found in results page')\n\n\nclass TheatersTests(unittest.TestCase):\n\n def setUp(self):\n self.driver = TestBrowser().get_browser()\n self.header = Header(self.driver)\n self.header.do_search(_headerSearchText)\n self.theaters = Theaters(self.driver)\n\n def tearDown(self):\n self.driver.quit()\n\n def test_theaters_list(self):\n self.assertNotEqual(0, len(self.theaters.theatersList),\n 'Did not create theaters list')\n self.assertNotEqual(0, len(self.theaters.theatersList[0]),\n 'Did not get a valid list of theaters')\n\n\nclass TheaterDetailTests(unittest.TestCase):\n\n def setUp(self):\n self.driver = TestBrowser().get_browser()\n self.header = Header(self.driver)\n self.header.do_search(_headerSearchText)\n self.theaters = Theaters(self.driver)\n self.theater = TheaterDetailPage.TheaterDetail(self.driver)\n self.theaterCalendar = TheaterDetailPage.TheaterCalendar(self.driver)\n\n def tearDown(self):\n self.driver.quit()\n\n def test_change_days(self):\n currentSelectDate = self.theaterCalendar.selectedDate\n self.theaterCalendar.click_date_by_index(2)\n newTheaterCalendar = TheaterDetailPage.TheaterCalendar(self.driver)\n newSelectDate = newTheaterCalendar.selectedDate\n self.assertNotEqual(currentSelectDate, newSelectDate,\n 'Selecting a different day did not navigate to a new page')\n\n def test_movies_list_different_days(self):\n currentMovieList = self.theater.theaterMoviesList\n currentSelectDate = self.theaterCalendar.selectedDate\n self.theaterCalendar.click_date_by_index(1)\n newTheater = TheaterDetailPage.TheaterDetail(self.driver)\n newMovieList = newTheater.theaterMoviesList\n newTheaterCalendar = TheaterDetailPage.TheaterCalendar(self.driver)\n newSelectDate = newTheaterCalendar.selectedDate\n self.assertNotEqual(currentSelectDate + currentMovieList[0].\n movieShowTimeList[0], newSelectDate + newMovieList[0].\n movieShowTimeList[0],\n 'Movie date and time from today matches movie date and time from tomorrow'\n )\n\n def test_search_random_input_from_excel(self):\n index = randint(1, 6)\n input = ReadExcel.get_sheet_values()\n searchText = input[index][0]\n theaterIndex = input[index][1]\n theaterText = input[index][2]\n if _headerSearchText != searchText:\n self.header = Header(self.driver)\n self.header.do_search(searchText)\n self.theaters = Theaters(self.driver)\n self.theater = TheaterDetailPage.TheaterDetail(self.driver,\n theaterIndex)\n theaterName = self.theater.theaterName\n self.assertIn(theaterText.lower(), theaterName.lower(),\n 'Did not end up on theater detail page for selected theater')\n\n\n<code token>\n",
"<assignment token>\n<docstring token>\n<import token>\n<assignment token>\n\n\nclass HeaderTests(unittest.TestCase):\n\n def setUp(self):\n self.driver = TestBrowser().get_browser()\n self.header = Header(self.driver)\n\n def tearDown(self):\n self.driver.quit()\n\n def test_search(self):\n currentPage = self.driver.current_url\n self.header.do_search(_headerSearchTextNoSpaces)\n newPage = self.driver.current_url\n self.assertNotEqual(currentPage, newPage,\n 'Searching did not navigate to a new page')\n <function token>\n\n\nclass TheatersTests(unittest.TestCase):\n\n def setUp(self):\n self.driver = TestBrowser().get_browser()\n self.header = Header(self.driver)\n self.header.do_search(_headerSearchText)\n self.theaters = Theaters(self.driver)\n\n def tearDown(self):\n self.driver.quit()\n\n def test_theaters_list(self):\n self.assertNotEqual(0, len(self.theaters.theatersList),\n 'Did not create theaters list')\n self.assertNotEqual(0, len(self.theaters.theatersList[0]),\n 'Did not get a valid list of theaters')\n\n\nclass TheaterDetailTests(unittest.TestCase):\n\n def setUp(self):\n self.driver = TestBrowser().get_browser()\n self.header = Header(self.driver)\n self.header.do_search(_headerSearchText)\n self.theaters = Theaters(self.driver)\n self.theater = TheaterDetailPage.TheaterDetail(self.driver)\n self.theaterCalendar = TheaterDetailPage.TheaterCalendar(self.driver)\n\n def tearDown(self):\n self.driver.quit()\n\n def test_change_days(self):\n currentSelectDate = self.theaterCalendar.selectedDate\n self.theaterCalendar.click_date_by_index(2)\n newTheaterCalendar = TheaterDetailPage.TheaterCalendar(self.driver)\n newSelectDate = newTheaterCalendar.selectedDate\n self.assertNotEqual(currentSelectDate, newSelectDate,\n 'Selecting a different day did not navigate to a new page')\n\n def test_movies_list_different_days(self):\n currentMovieList = self.theater.theaterMoviesList\n currentSelectDate = self.theaterCalendar.selectedDate\n self.theaterCalendar.click_date_by_index(1)\n newTheater = TheaterDetailPage.TheaterDetail(self.driver)\n newMovieList = newTheater.theaterMoviesList\n newTheaterCalendar = TheaterDetailPage.TheaterCalendar(self.driver)\n newSelectDate = newTheaterCalendar.selectedDate\n self.assertNotEqual(currentSelectDate + currentMovieList[0].\n movieShowTimeList[0], newSelectDate + newMovieList[0].\n movieShowTimeList[0],\n 'Movie date and time from today matches movie date and time from tomorrow'\n )\n\n def test_search_random_input_from_excel(self):\n index = randint(1, 6)\n input = ReadExcel.get_sheet_values()\n searchText = input[index][0]\n theaterIndex = input[index][1]\n theaterText = input[index][2]\n if _headerSearchText != searchText:\n self.header = Header(self.driver)\n self.header.do_search(searchText)\n self.theaters = Theaters(self.driver)\n self.theater = TheaterDetailPage.TheaterDetail(self.driver,\n theaterIndex)\n theaterName = self.theater.theaterName\n self.assertIn(theaterText.lower(), theaterName.lower(),\n 'Did not end up on theater detail page for selected theater')\n\n\n<code token>\n",
"<assignment token>\n<docstring token>\n<import token>\n<assignment token>\n\n\nclass HeaderTests(unittest.TestCase):\n\n def setUp(self):\n self.driver = TestBrowser().get_browser()\n self.header = Header(self.driver)\n <function token>\n\n def test_search(self):\n currentPage = self.driver.current_url\n self.header.do_search(_headerSearchTextNoSpaces)\n newPage = self.driver.current_url\n self.assertNotEqual(currentPage, newPage,\n 'Searching did not navigate to a new page')\n <function token>\n\n\nclass TheatersTests(unittest.TestCase):\n\n def setUp(self):\n self.driver = TestBrowser().get_browser()\n self.header = Header(self.driver)\n self.header.do_search(_headerSearchText)\n self.theaters = Theaters(self.driver)\n\n def tearDown(self):\n self.driver.quit()\n\n def test_theaters_list(self):\n self.assertNotEqual(0, len(self.theaters.theatersList),\n 'Did not create theaters list')\n self.assertNotEqual(0, len(self.theaters.theatersList[0]),\n 'Did not get a valid list of theaters')\n\n\nclass TheaterDetailTests(unittest.TestCase):\n\n def setUp(self):\n self.driver = TestBrowser().get_browser()\n self.header = Header(self.driver)\n self.header.do_search(_headerSearchText)\n self.theaters = Theaters(self.driver)\n self.theater = TheaterDetailPage.TheaterDetail(self.driver)\n self.theaterCalendar = TheaterDetailPage.TheaterCalendar(self.driver)\n\n def tearDown(self):\n self.driver.quit()\n\n def test_change_days(self):\n currentSelectDate = self.theaterCalendar.selectedDate\n self.theaterCalendar.click_date_by_index(2)\n newTheaterCalendar = TheaterDetailPage.TheaterCalendar(self.driver)\n newSelectDate = newTheaterCalendar.selectedDate\n self.assertNotEqual(currentSelectDate, newSelectDate,\n 'Selecting a different day did not navigate to a new page')\n\n def test_movies_list_different_days(self):\n currentMovieList = self.theater.theaterMoviesList\n currentSelectDate = self.theaterCalendar.selectedDate\n self.theaterCalendar.click_date_by_index(1)\n newTheater = TheaterDetailPage.TheaterDetail(self.driver)\n newMovieList = newTheater.theaterMoviesList\n newTheaterCalendar = TheaterDetailPage.TheaterCalendar(self.driver)\n newSelectDate = newTheaterCalendar.selectedDate\n self.assertNotEqual(currentSelectDate + currentMovieList[0].\n movieShowTimeList[0], newSelectDate + newMovieList[0].\n movieShowTimeList[0],\n 'Movie date and time from today matches movie date and time from tomorrow'\n )\n\n def test_search_random_input_from_excel(self):\n index = randint(1, 6)\n input = ReadExcel.get_sheet_values()\n searchText = input[index][0]\n theaterIndex = input[index][1]\n theaterText = input[index][2]\n if _headerSearchText != searchText:\n self.header = Header(self.driver)\n self.header.do_search(searchText)\n self.theaters = Theaters(self.driver)\n self.theater = TheaterDetailPage.TheaterDetail(self.driver,\n theaterIndex)\n theaterName = self.theater.theaterName\n self.assertIn(theaterText.lower(), theaterName.lower(),\n 'Did not end up on theater detail page for selected theater')\n\n\n<code token>\n",
"<assignment token>\n<docstring token>\n<import token>\n<assignment token>\n\n\nclass HeaderTests(unittest.TestCase):\n\n def setUp(self):\n self.driver = TestBrowser().get_browser()\n self.header = Header(self.driver)\n <function token>\n <function token>\n <function token>\n\n\nclass TheatersTests(unittest.TestCase):\n\n def setUp(self):\n self.driver = TestBrowser().get_browser()\n self.header = Header(self.driver)\n self.header.do_search(_headerSearchText)\n self.theaters = Theaters(self.driver)\n\n def tearDown(self):\n self.driver.quit()\n\n def test_theaters_list(self):\n self.assertNotEqual(0, len(self.theaters.theatersList),\n 'Did not create theaters list')\n self.assertNotEqual(0, len(self.theaters.theatersList[0]),\n 'Did not get a valid list of theaters')\n\n\nclass TheaterDetailTests(unittest.TestCase):\n\n def setUp(self):\n self.driver = TestBrowser().get_browser()\n self.header = Header(self.driver)\n self.header.do_search(_headerSearchText)\n self.theaters = Theaters(self.driver)\n self.theater = TheaterDetailPage.TheaterDetail(self.driver)\n self.theaterCalendar = TheaterDetailPage.TheaterCalendar(self.driver)\n\n def tearDown(self):\n self.driver.quit()\n\n def test_change_days(self):\n currentSelectDate = self.theaterCalendar.selectedDate\n self.theaterCalendar.click_date_by_index(2)\n newTheaterCalendar = TheaterDetailPage.TheaterCalendar(self.driver)\n newSelectDate = newTheaterCalendar.selectedDate\n self.assertNotEqual(currentSelectDate, newSelectDate,\n 'Selecting a different day did not navigate to a new page')\n\n def test_movies_list_different_days(self):\n currentMovieList = self.theater.theaterMoviesList\n currentSelectDate = self.theaterCalendar.selectedDate\n self.theaterCalendar.click_date_by_index(1)\n newTheater = TheaterDetailPage.TheaterDetail(self.driver)\n newMovieList = newTheater.theaterMoviesList\n newTheaterCalendar = TheaterDetailPage.TheaterCalendar(self.driver)\n newSelectDate = newTheaterCalendar.selectedDate\n self.assertNotEqual(currentSelectDate + currentMovieList[0].\n movieShowTimeList[0], newSelectDate + newMovieList[0].\n movieShowTimeList[0],\n 'Movie date and time from today matches movie date and time from tomorrow'\n )\n\n def test_search_random_input_from_excel(self):\n index = randint(1, 6)\n input = ReadExcel.get_sheet_values()\n searchText = input[index][0]\n theaterIndex = input[index][1]\n theaterText = input[index][2]\n if _headerSearchText != searchText:\n self.header = Header(self.driver)\n self.header.do_search(searchText)\n self.theaters = Theaters(self.driver)\n self.theater = TheaterDetailPage.TheaterDetail(self.driver,\n theaterIndex)\n theaterName = self.theater.theaterName\n self.assertIn(theaterText.lower(), theaterName.lower(),\n 'Did not end up on theater detail page for selected theater')\n\n\n<code token>\n",
"<assignment token>\n<docstring token>\n<import token>\n<assignment token>\n\n\nclass HeaderTests(unittest.TestCase):\n <function token>\n <function token>\n <function token>\n <function token>\n\n\nclass TheatersTests(unittest.TestCase):\n\n def setUp(self):\n self.driver = TestBrowser().get_browser()\n self.header = Header(self.driver)\n self.header.do_search(_headerSearchText)\n self.theaters = Theaters(self.driver)\n\n def tearDown(self):\n self.driver.quit()\n\n def test_theaters_list(self):\n self.assertNotEqual(0, len(self.theaters.theatersList),\n 'Did not create theaters list')\n self.assertNotEqual(0, len(self.theaters.theatersList[0]),\n 'Did not get a valid list of theaters')\n\n\nclass TheaterDetailTests(unittest.TestCase):\n\n def setUp(self):\n self.driver = TestBrowser().get_browser()\n self.header = Header(self.driver)\n self.header.do_search(_headerSearchText)\n self.theaters = Theaters(self.driver)\n self.theater = TheaterDetailPage.TheaterDetail(self.driver)\n self.theaterCalendar = TheaterDetailPage.TheaterCalendar(self.driver)\n\n def tearDown(self):\n self.driver.quit()\n\n def test_change_days(self):\n currentSelectDate = self.theaterCalendar.selectedDate\n self.theaterCalendar.click_date_by_index(2)\n newTheaterCalendar = TheaterDetailPage.TheaterCalendar(self.driver)\n newSelectDate = newTheaterCalendar.selectedDate\n self.assertNotEqual(currentSelectDate, newSelectDate,\n 'Selecting a different day did not navigate to a new page')\n\n def test_movies_list_different_days(self):\n currentMovieList = self.theater.theaterMoviesList\n currentSelectDate = self.theaterCalendar.selectedDate\n self.theaterCalendar.click_date_by_index(1)\n newTheater = TheaterDetailPage.TheaterDetail(self.driver)\n newMovieList = newTheater.theaterMoviesList\n newTheaterCalendar = TheaterDetailPage.TheaterCalendar(self.driver)\n newSelectDate = newTheaterCalendar.selectedDate\n self.assertNotEqual(currentSelectDate + currentMovieList[0].\n movieShowTimeList[0], newSelectDate + newMovieList[0].\n movieShowTimeList[0],\n 'Movie date and time from today matches movie date and time from tomorrow'\n )\n\n def test_search_random_input_from_excel(self):\n index = randint(1, 6)\n input = ReadExcel.get_sheet_values()\n searchText = input[index][0]\n theaterIndex = input[index][1]\n theaterText = input[index][2]\n if _headerSearchText != searchText:\n self.header = Header(self.driver)\n self.header.do_search(searchText)\n self.theaters = Theaters(self.driver)\n self.theater = TheaterDetailPage.TheaterDetail(self.driver,\n theaterIndex)\n theaterName = self.theater.theaterName\n self.assertIn(theaterText.lower(), theaterName.lower(),\n 'Did not end up on theater detail page for selected theater')\n\n\n<code token>\n",
"<assignment token>\n<docstring token>\n<import token>\n<assignment token>\n<class token>\n\n\nclass TheatersTests(unittest.TestCase):\n\n def setUp(self):\n self.driver = TestBrowser().get_browser()\n self.header = Header(self.driver)\n self.header.do_search(_headerSearchText)\n self.theaters = Theaters(self.driver)\n\n def tearDown(self):\n self.driver.quit()\n\n def test_theaters_list(self):\n self.assertNotEqual(0, len(self.theaters.theatersList),\n 'Did not create theaters list')\n self.assertNotEqual(0, len(self.theaters.theatersList[0]),\n 'Did not get a valid list of theaters')\n\n\nclass TheaterDetailTests(unittest.TestCase):\n\n def setUp(self):\n self.driver = TestBrowser().get_browser()\n self.header = Header(self.driver)\n self.header.do_search(_headerSearchText)\n self.theaters = Theaters(self.driver)\n self.theater = TheaterDetailPage.TheaterDetail(self.driver)\n self.theaterCalendar = TheaterDetailPage.TheaterCalendar(self.driver)\n\n def tearDown(self):\n self.driver.quit()\n\n def test_change_days(self):\n currentSelectDate = self.theaterCalendar.selectedDate\n self.theaterCalendar.click_date_by_index(2)\n newTheaterCalendar = TheaterDetailPage.TheaterCalendar(self.driver)\n newSelectDate = newTheaterCalendar.selectedDate\n self.assertNotEqual(currentSelectDate, newSelectDate,\n 'Selecting a different day did not navigate to a new page')\n\n def test_movies_list_different_days(self):\n currentMovieList = self.theater.theaterMoviesList\n currentSelectDate = self.theaterCalendar.selectedDate\n self.theaterCalendar.click_date_by_index(1)\n newTheater = TheaterDetailPage.TheaterDetail(self.driver)\n newMovieList = newTheater.theaterMoviesList\n newTheaterCalendar = TheaterDetailPage.TheaterCalendar(self.driver)\n newSelectDate = newTheaterCalendar.selectedDate\n self.assertNotEqual(currentSelectDate + currentMovieList[0].\n movieShowTimeList[0], newSelectDate + newMovieList[0].\n movieShowTimeList[0],\n 'Movie date and time from today matches movie date and time from tomorrow'\n )\n\n def test_search_random_input_from_excel(self):\n index = randint(1, 6)\n input = ReadExcel.get_sheet_values()\n searchText = input[index][0]\n theaterIndex = input[index][1]\n theaterText = input[index][2]\n if _headerSearchText != searchText:\n self.header = Header(self.driver)\n self.header.do_search(searchText)\n self.theaters = Theaters(self.driver)\n self.theater = TheaterDetailPage.TheaterDetail(self.driver,\n theaterIndex)\n theaterName = self.theater.theaterName\n self.assertIn(theaterText.lower(), theaterName.lower(),\n 'Did not end up on theater detail page for selected theater')\n\n\n<code token>\n",
"<assignment token>\n<docstring token>\n<import token>\n<assignment token>\n<class token>\n\n\nclass TheatersTests(unittest.TestCase):\n <function token>\n\n def tearDown(self):\n self.driver.quit()\n\n def test_theaters_list(self):\n self.assertNotEqual(0, len(self.theaters.theatersList),\n 'Did not create theaters list')\n self.assertNotEqual(0, len(self.theaters.theatersList[0]),\n 'Did not get a valid list of theaters')\n\n\nclass TheaterDetailTests(unittest.TestCase):\n\n def setUp(self):\n self.driver = TestBrowser().get_browser()\n self.header = Header(self.driver)\n self.header.do_search(_headerSearchText)\n self.theaters = Theaters(self.driver)\n self.theater = TheaterDetailPage.TheaterDetail(self.driver)\n self.theaterCalendar = TheaterDetailPage.TheaterCalendar(self.driver)\n\n def tearDown(self):\n self.driver.quit()\n\n def test_change_days(self):\n currentSelectDate = self.theaterCalendar.selectedDate\n self.theaterCalendar.click_date_by_index(2)\n newTheaterCalendar = TheaterDetailPage.TheaterCalendar(self.driver)\n newSelectDate = newTheaterCalendar.selectedDate\n self.assertNotEqual(currentSelectDate, newSelectDate,\n 'Selecting a different day did not navigate to a new page')\n\n def test_movies_list_different_days(self):\n currentMovieList = self.theater.theaterMoviesList\n currentSelectDate = self.theaterCalendar.selectedDate\n self.theaterCalendar.click_date_by_index(1)\n newTheater = TheaterDetailPage.TheaterDetail(self.driver)\n newMovieList = newTheater.theaterMoviesList\n newTheaterCalendar = TheaterDetailPage.TheaterCalendar(self.driver)\n newSelectDate = newTheaterCalendar.selectedDate\n self.assertNotEqual(currentSelectDate + currentMovieList[0].\n movieShowTimeList[0], newSelectDate + newMovieList[0].\n movieShowTimeList[0],\n 'Movie date and time from today matches movie date and time from tomorrow'\n )\n\n def test_search_random_input_from_excel(self):\n index = randint(1, 6)\n input = ReadExcel.get_sheet_values()\n searchText = input[index][0]\n theaterIndex = input[index][1]\n theaterText = input[index][2]\n if _headerSearchText != searchText:\n self.header = Header(self.driver)\n self.header.do_search(searchText)\n self.theaters = Theaters(self.driver)\n self.theater = TheaterDetailPage.TheaterDetail(self.driver,\n theaterIndex)\n theaterName = self.theater.theaterName\n self.assertIn(theaterText.lower(), theaterName.lower(),\n 'Did not end up on theater detail page for selected theater')\n\n\n<code token>\n",
"<assignment token>\n<docstring token>\n<import token>\n<assignment token>\n<class token>\n\n\nclass TheatersTests(unittest.TestCase):\n <function token>\n\n def tearDown(self):\n self.driver.quit()\n <function token>\n\n\nclass TheaterDetailTests(unittest.TestCase):\n\n def setUp(self):\n self.driver = TestBrowser().get_browser()\n self.header = Header(self.driver)\n self.header.do_search(_headerSearchText)\n self.theaters = Theaters(self.driver)\n self.theater = TheaterDetailPage.TheaterDetail(self.driver)\n self.theaterCalendar = TheaterDetailPage.TheaterCalendar(self.driver)\n\n def tearDown(self):\n self.driver.quit()\n\n def test_change_days(self):\n currentSelectDate = self.theaterCalendar.selectedDate\n self.theaterCalendar.click_date_by_index(2)\n newTheaterCalendar = TheaterDetailPage.TheaterCalendar(self.driver)\n newSelectDate = newTheaterCalendar.selectedDate\n self.assertNotEqual(currentSelectDate, newSelectDate,\n 'Selecting a different day did not navigate to a new page')\n\n def test_movies_list_different_days(self):\n currentMovieList = self.theater.theaterMoviesList\n currentSelectDate = self.theaterCalendar.selectedDate\n self.theaterCalendar.click_date_by_index(1)\n newTheater = TheaterDetailPage.TheaterDetail(self.driver)\n newMovieList = newTheater.theaterMoviesList\n newTheaterCalendar = TheaterDetailPage.TheaterCalendar(self.driver)\n newSelectDate = newTheaterCalendar.selectedDate\n self.assertNotEqual(currentSelectDate + currentMovieList[0].\n movieShowTimeList[0], newSelectDate + newMovieList[0].\n movieShowTimeList[0],\n 'Movie date and time from today matches movie date and time from tomorrow'\n )\n\n def test_search_random_input_from_excel(self):\n index = randint(1, 6)\n input = ReadExcel.get_sheet_values()\n searchText = input[index][0]\n theaterIndex = input[index][1]\n theaterText = input[index][2]\n if _headerSearchText != searchText:\n self.header = Header(self.driver)\n self.header.do_search(searchText)\n self.theaters = Theaters(self.driver)\n self.theater = TheaterDetailPage.TheaterDetail(self.driver,\n theaterIndex)\n theaterName = self.theater.theaterName\n self.assertIn(theaterText.lower(), theaterName.lower(),\n 'Did not end up on theater detail page for selected theater')\n\n\n<code token>\n",
"<assignment token>\n<docstring token>\n<import token>\n<assignment token>\n<class token>\n\n\nclass TheatersTests(unittest.TestCase):\n <function token>\n <function token>\n <function token>\n\n\nclass TheaterDetailTests(unittest.TestCase):\n\n def setUp(self):\n self.driver = TestBrowser().get_browser()\n self.header = Header(self.driver)\n self.header.do_search(_headerSearchText)\n self.theaters = Theaters(self.driver)\n self.theater = TheaterDetailPage.TheaterDetail(self.driver)\n self.theaterCalendar = TheaterDetailPage.TheaterCalendar(self.driver)\n\n def tearDown(self):\n self.driver.quit()\n\n def test_change_days(self):\n currentSelectDate = self.theaterCalendar.selectedDate\n self.theaterCalendar.click_date_by_index(2)\n newTheaterCalendar = TheaterDetailPage.TheaterCalendar(self.driver)\n newSelectDate = newTheaterCalendar.selectedDate\n self.assertNotEqual(currentSelectDate, newSelectDate,\n 'Selecting a different day did not navigate to a new page')\n\n def test_movies_list_different_days(self):\n currentMovieList = self.theater.theaterMoviesList\n currentSelectDate = self.theaterCalendar.selectedDate\n self.theaterCalendar.click_date_by_index(1)\n newTheater = TheaterDetailPage.TheaterDetail(self.driver)\n newMovieList = newTheater.theaterMoviesList\n newTheaterCalendar = TheaterDetailPage.TheaterCalendar(self.driver)\n newSelectDate = newTheaterCalendar.selectedDate\n self.assertNotEqual(currentSelectDate + currentMovieList[0].\n movieShowTimeList[0], newSelectDate + newMovieList[0].\n movieShowTimeList[0],\n 'Movie date and time from today matches movie date and time from tomorrow'\n )\n\n def test_search_random_input_from_excel(self):\n index = randint(1, 6)\n input = ReadExcel.get_sheet_values()\n searchText = input[index][0]\n theaterIndex = input[index][1]\n theaterText = input[index][2]\n if _headerSearchText != searchText:\n self.header = Header(self.driver)\n self.header.do_search(searchText)\n self.theaters = Theaters(self.driver)\n self.theater = TheaterDetailPage.TheaterDetail(self.driver,\n theaterIndex)\n theaterName = self.theater.theaterName\n self.assertIn(theaterText.lower(), theaterName.lower(),\n 'Did not end up on theater detail page for selected theater')\n\n\n<code token>\n",
"<assignment token>\n<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n\n\nclass TheaterDetailTests(unittest.TestCase):\n\n def setUp(self):\n self.driver = TestBrowser().get_browser()\n self.header = Header(self.driver)\n self.header.do_search(_headerSearchText)\n self.theaters = Theaters(self.driver)\n self.theater = TheaterDetailPage.TheaterDetail(self.driver)\n self.theaterCalendar = TheaterDetailPage.TheaterCalendar(self.driver)\n\n def tearDown(self):\n self.driver.quit()\n\n def test_change_days(self):\n currentSelectDate = self.theaterCalendar.selectedDate\n self.theaterCalendar.click_date_by_index(2)\n newTheaterCalendar = TheaterDetailPage.TheaterCalendar(self.driver)\n newSelectDate = newTheaterCalendar.selectedDate\n self.assertNotEqual(currentSelectDate, newSelectDate,\n 'Selecting a different day did not navigate to a new page')\n\n def test_movies_list_different_days(self):\n currentMovieList = self.theater.theaterMoviesList\n currentSelectDate = self.theaterCalendar.selectedDate\n self.theaterCalendar.click_date_by_index(1)\n newTheater = TheaterDetailPage.TheaterDetail(self.driver)\n newMovieList = newTheater.theaterMoviesList\n newTheaterCalendar = TheaterDetailPage.TheaterCalendar(self.driver)\n newSelectDate = newTheaterCalendar.selectedDate\n self.assertNotEqual(currentSelectDate + currentMovieList[0].\n movieShowTimeList[0], newSelectDate + newMovieList[0].\n movieShowTimeList[0],\n 'Movie date and time from today matches movie date and time from tomorrow'\n )\n\n def test_search_random_input_from_excel(self):\n index = randint(1, 6)\n input = ReadExcel.get_sheet_values()\n searchText = input[index][0]\n theaterIndex = input[index][1]\n theaterText = input[index][2]\n if _headerSearchText != searchText:\n self.header = Header(self.driver)\n self.header.do_search(searchText)\n self.theaters = Theaters(self.driver)\n self.theater = TheaterDetailPage.TheaterDetail(self.driver,\n theaterIndex)\n theaterName = self.theater.theaterName\n self.assertIn(theaterText.lower(), theaterName.lower(),\n 'Did not end up on theater detail page for selected theater')\n\n\n<code token>\n",
"<assignment token>\n<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n\n\nclass TheaterDetailTests(unittest.TestCase):\n <function token>\n\n def tearDown(self):\n self.driver.quit()\n\n def test_change_days(self):\n currentSelectDate = self.theaterCalendar.selectedDate\n self.theaterCalendar.click_date_by_index(2)\n newTheaterCalendar = TheaterDetailPage.TheaterCalendar(self.driver)\n newSelectDate = newTheaterCalendar.selectedDate\n self.assertNotEqual(currentSelectDate, newSelectDate,\n 'Selecting a different day did not navigate to a new page')\n\n def test_movies_list_different_days(self):\n currentMovieList = self.theater.theaterMoviesList\n currentSelectDate = self.theaterCalendar.selectedDate\n self.theaterCalendar.click_date_by_index(1)\n newTheater = TheaterDetailPage.TheaterDetail(self.driver)\n newMovieList = newTheater.theaterMoviesList\n newTheaterCalendar = TheaterDetailPage.TheaterCalendar(self.driver)\n newSelectDate = newTheaterCalendar.selectedDate\n self.assertNotEqual(currentSelectDate + currentMovieList[0].\n movieShowTimeList[0], newSelectDate + newMovieList[0].\n movieShowTimeList[0],\n 'Movie date and time from today matches movie date and time from tomorrow'\n )\n\n def test_search_random_input_from_excel(self):\n index = randint(1, 6)\n input = ReadExcel.get_sheet_values()\n searchText = input[index][0]\n theaterIndex = input[index][1]\n theaterText = input[index][2]\n if _headerSearchText != searchText:\n self.header = Header(self.driver)\n self.header.do_search(searchText)\n self.theaters = Theaters(self.driver)\n self.theater = TheaterDetailPage.TheaterDetail(self.driver,\n theaterIndex)\n theaterName = self.theater.theaterName\n self.assertIn(theaterText.lower(), theaterName.lower(),\n 'Did not end up on theater detail page for selected theater')\n\n\n<code token>\n",
"<assignment token>\n<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n\n\nclass TheaterDetailTests(unittest.TestCase):\n <function token>\n <function token>\n\n def test_change_days(self):\n currentSelectDate = self.theaterCalendar.selectedDate\n self.theaterCalendar.click_date_by_index(2)\n newTheaterCalendar = TheaterDetailPage.TheaterCalendar(self.driver)\n newSelectDate = newTheaterCalendar.selectedDate\n self.assertNotEqual(currentSelectDate, newSelectDate,\n 'Selecting a different day did not navigate to a new page')\n\n def test_movies_list_different_days(self):\n currentMovieList = self.theater.theaterMoviesList\n currentSelectDate = self.theaterCalendar.selectedDate\n self.theaterCalendar.click_date_by_index(1)\n newTheater = TheaterDetailPage.TheaterDetail(self.driver)\n newMovieList = newTheater.theaterMoviesList\n newTheaterCalendar = TheaterDetailPage.TheaterCalendar(self.driver)\n newSelectDate = newTheaterCalendar.selectedDate\n self.assertNotEqual(currentSelectDate + currentMovieList[0].\n movieShowTimeList[0], newSelectDate + newMovieList[0].\n movieShowTimeList[0],\n 'Movie date and time from today matches movie date and time from tomorrow'\n )\n\n def test_search_random_input_from_excel(self):\n index = randint(1, 6)\n input = ReadExcel.get_sheet_values()\n searchText = input[index][0]\n theaterIndex = input[index][1]\n theaterText = input[index][2]\n if _headerSearchText != searchText:\n self.header = Header(self.driver)\n self.header.do_search(searchText)\n self.theaters = Theaters(self.driver)\n self.theater = TheaterDetailPage.TheaterDetail(self.driver,\n theaterIndex)\n theaterName = self.theater.theaterName\n self.assertIn(theaterText.lower(), theaterName.lower(),\n 'Did not end up on theater detail page for selected theater')\n\n\n<code token>\n",
"<assignment token>\n<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n\n\nclass TheaterDetailTests(unittest.TestCase):\n <function token>\n <function token>\n\n def test_change_days(self):\n currentSelectDate = self.theaterCalendar.selectedDate\n self.theaterCalendar.click_date_by_index(2)\n newTheaterCalendar = TheaterDetailPage.TheaterCalendar(self.driver)\n newSelectDate = newTheaterCalendar.selectedDate\n self.assertNotEqual(currentSelectDate, newSelectDate,\n 'Selecting a different day did not navigate to a new page')\n\n def test_movies_list_different_days(self):\n currentMovieList = self.theater.theaterMoviesList\n currentSelectDate = self.theaterCalendar.selectedDate\n self.theaterCalendar.click_date_by_index(1)\n newTheater = TheaterDetailPage.TheaterDetail(self.driver)\n newMovieList = newTheater.theaterMoviesList\n newTheaterCalendar = TheaterDetailPage.TheaterCalendar(self.driver)\n newSelectDate = newTheaterCalendar.selectedDate\n self.assertNotEqual(currentSelectDate + currentMovieList[0].\n movieShowTimeList[0], newSelectDate + newMovieList[0].\n movieShowTimeList[0],\n 'Movie date and time from today matches movie date and time from tomorrow'\n )\n <function token>\n\n\n<code token>\n",
"<assignment token>\n<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n\n\nclass TheaterDetailTests(unittest.TestCase):\n <function token>\n <function token>\n\n def test_change_days(self):\n currentSelectDate = self.theaterCalendar.selectedDate\n self.theaterCalendar.click_date_by_index(2)\n newTheaterCalendar = TheaterDetailPage.TheaterCalendar(self.driver)\n newSelectDate = newTheaterCalendar.selectedDate\n self.assertNotEqual(currentSelectDate, newSelectDate,\n 'Selecting a different day did not navigate to a new page')\n <function token>\n <function token>\n\n\n<code token>\n",
"<assignment token>\n<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n\n\nclass TheaterDetailTests(unittest.TestCase):\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n\n<code token>\n",
"<assignment token>\n<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<code token>\n"
] | false |
98,332 |
f96dc14b831445b25ed320390de1cf841a43c39c
|
from twisted.internet import defer
from fluiddb.api.util import getCategoryAndAction, getOperation
from fluiddb.common.types_thrift.ttypes import (
TNonexistentTag, TBadRequest, TNonexistentNamespace, TPathPermissionDenied,
TPolicyAndExceptions, TInvalidPolicy, TNoSuchUser, TInvalidUsername)
from fluiddb.data.exceptions import UnknownUserError
from fluiddb.data.permission import Operation, Policy
from fluiddb.model.exceptions import (
UnknownPathError, UserNotAllowedInExceptionError)
from fluiddb.security.exceptions import PermissionDeniedError
from fluiddb.security.permission import SecurePermissionAPI
class FacadePermissionMixin(object):
def getPermission(self, session, category, action, path):
"""Get permissions for a given path.
@param session: The L{AuthenticatedSession} for the request.
@param category: A C{unicode} indicating the category of the
permission.
@param action: A C{unicode} indicating the action of the permission.
@param path: The L{Namespace.path} or L{Tag.path} to get permissions
from.
@raise TBadRequest: Raised if the given C{action} or C{category} are
invalid.
@raise TNonexistentNamespace: Raised if the given L{Namespace} path
does not exist.
@raise TNonexistentTag: Raised if the given L{Tag} path does not exist.
@raise TPathPermissionDenied: Raised if the user does not have
C{CONTROL} permissions on the given L{Namespace} or L{Tag}.
@return: A C{Deferred} that will fire with a L{TPolicyAndExceptions}
object containing the policy and exceptions list for the requested
permission.
"""
path = path.decode('utf-8')
try:
operation = getOperation(category, action)
except KeyError as error:
session.log.exception(error)
error = TBadRequest(
'Action %r not possible on category %r.' % (action, category))
return defer.fail(error)
def run():
permissions = SecurePermissionAPI(session.auth.user)
try:
result = permissions.get([(path, operation)])
except UnknownPathError as error:
session.log.exception(error)
unknownPath = error.paths[0]
if operation in Operation.TAG_OPERATIONS:
raise TNonexistentTag(unknownPath.encode('utf-8'))
if operation in Operation.NAMESPACE_OPERATIONS:
raise TNonexistentNamespace(unknownPath.encode('utf-8'))
raise
except PermissionDeniedError as error:
session.log.exception(error)
deniedPath, deniedOperation = error.pathsAndOperations[0]
deniedCategory, deniedAction = getCategoryAndAction(
deniedOperation)
raise TPathPermissionDenied(deniedPath, deniedCategory,
deniedAction)
policy, exceptions = result[(path, operation)]
policy = str(policy).lower()
return TPolicyAndExceptions(policy=policy, exceptions=exceptions)
return session.transact.run(run)
def updatePermission(self, session, category, action, path,
policyAndExceptions):
"""Update permissions for a given path.
@param session: The L{AuthenticatedSession} for the request.
@param category: A C{unicode} indicating the category of the
permission.
@param action: A C{unicode} indicating the action of the permission.
@param path: The L{Namespace.path} or L{Tag.path} to get permissions
from.
@param policyAndExceptions: A L{TPolicyAndExceptions} object containing
the policy and exceptions list for the permission.
@raise TBadRequest: Raised if the given C{action} or C{category} are
invalid.
@raise TInvalidPolicy: Raised if the policy given in
C{policyAndExceptions} is invalid.
@raise TNonexistentNamespace: Raised if the given L{Namespace} path
does not exist.
@raise TNonexistentTag: Raised if the given L{Tag} path does not exist.
@raise TPathPermissionDenied: Raised if the user does not have
C{CONTROL} permissions on the given L{Namespace} or L{Tag}.
@return: A C{Deferred} that will fire with a C{None} if the operation
was successful.
"""
path = path.decode('utf-8')
try:
operation = getOperation(category, action)
except KeyError as error:
session.log.exception(error)
error = TBadRequest(
'Action %r not possible on category %r.' % (action, category))
return defer.fail(error)
policy = policyAndExceptions.policy
if policy not in ('open', 'closed'):
return defer.fail(TInvalidPolicy())
policy = Policy.OPEN if policy == 'open' else Policy.CLOSED
exceptions = policyAndExceptions.exceptions
def run():
permissions = SecurePermissionAPI(session.auth.user)
try:
permissions.set([(path, operation, policy, exceptions)])
except UnknownPathError as error:
session.log.exception(error)
unknownPath = error.paths[0]
if operation in Operation.TAG_OPERATIONS:
raise TNonexistentTag(unknownPath.encode('utf-8'))
if operation in Operation.NAMESPACE_OPERATIONS:
raise TNonexistentNamespace(unknownPath.encode('utf-8'))
raise
except UnknownUserError as error:
# FIXME There could be more than one unknown username, but
# TNoSuchUser can only be passed a single username, so we'll
# only pass the first one. Ideally, we'd be able to pass all
# of them.
raise TNoSuchUser(error.usernames[0].encode('utf-8'))
except UserNotAllowedInExceptionError as error:
raise TInvalidUsername(str(error))
except PermissionDeniedError as error:
session.log.exception(error)
deniedPath, deniedOperation = error.pathsAndOperations[0]
deniedCategory, deniedAction = getCategoryAndAction(
deniedOperation)
raise TPathPermissionDenied(deniedPath, deniedCategory,
deniedAction)
return session.transact.run(run)
|
[
"from twisted.internet import defer\n\nfrom fluiddb.api.util import getCategoryAndAction, getOperation\nfrom fluiddb.common.types_thrift.ttypes import (\n TNonexistentTag, TBadRequest, TNonexistentNamespace, TPathPermissionDenied,\n TPolicyAndExceptions, TInvalidPolicy, TNoSuchUser, TInvalidUsername)\nfrom fluiddb.data.exceptions import UnknownUserError\nfrom fluiddb.data.permission import Operation, Policy\nfrom fluiddb.model.exceptions import (\n UnknownPathError, UserNotAllowedInExceptionError)\nfrom fluiddb.security.exceptions import PermissionDeniedError\nfrom fluiddb.security.permission import SecurePermissionAPI\n\n\nclass FacadePermissionMixin(object):\n\n def getPermission(self, session, category, action, path):\n \"\"\"Get permissions for a given path.\n\n @param session: The L{AuthenticatedSession} for the request.\n @param category: A C{unicode} indicating the category of the\n permission.\n @param action: A C{unicode} indicating the action of the permission.\n @param path: The L{Namespace.path} or L{Tag.path} to get permissions\n from.\n @raise TBadRequest: Raised if the given C{action} or C{category} are\n invalid.\n @raise TNonexistentNamespace: Raised if the given L{Namespace} path\n does not exist.\n @raise TNonexistentTag: Raised if the given L{Tag} path does not exist.\n @raise TPathPermissionDenied: Raised if the user does not have\n C{CONTROL} permissions on the given L{Namespace} or L{Tag}.\n @return: A C{Deferred} that will fire with a L{TPolicyAndExceptions}\n object containing the policy and exceptions list for the requested\n permission.\n \"\"\"\n path = path.decode('utf-8')\n\n try:\n operation = getOperation(category, action)\n except KeyError as error:\n session.log.exception(error)\n error = TBadRequest(\n 'Action %r not possible on category %r.' % (action, category))\n return defer.fail(error)\n\n def run():\n permissions = SecurePermissionAPI(session.auth.user)\n try:\n result = permissions.get([(path, operation)])\n except UnknownPathError as error:\n session.log.exception(error)\n unknownPath = error.paths[0]\n if operation in Operation.TAG_OPERATIONS:\n raise TNonexistentTag(unknownPath.encode('utf-8'))\n if operation in Operation.NAMESPACE_OPERATIONS:\n raise TNonexistentNamespace(unknownPath.encode('utf-8'))\n raise\n except PermissionDeniedError as error:\n session.log.exception(error)\n deniedPath, deniedOperation = error.pathsAndOperations[0]\n deniedCategory, deniedAction = getCategoryAndAction(\n deniedOperation)\n raise TPathPermissionDenied(deniedPath, deniedCategory,\n deniedAction)\n\n policy, exceptions = result[(path, operation)]\n policy = str(policy).lower()\n return TPolicyAndExceptions(policy=policy, exceptions=exceptions)\n\n return session.transact.run(run)\n\n def updatePermission(self, session, category, action, path,\n policyAndExceptions):\n \"\"\"Update permissions for a given path.\n\n @param session: The L{AuthenticatedSession} for the request.\n @param category: A C{unicode} indicating the category of the\n permission.\n @param action: A C{unicode} indicating the action of the permission.\n @param path: The L{Namespace.path} or L{Tag.path} to get permissions\n from.\n @param policyAndExceptions: A L{TPolicyAndExceptions} object containing\n the policy and exceptions list for the permission.\n @raise TBadRequest: Raised if the given C{action} or C{category} are\n invalid.\n @raise TInvalidPolicy: Raised if the policy given in\n C{policyAndExceptions} is invalid.\n @raise TNonexistentNamespace: Raised if the given L{Namespace} path\n does not exist.\n @raise TNonexistentTag: Raised if the given L{Tag} path does not exist.\n @raise TPathPermissionDenied: Raised if the user does not have\n C{CONTROL} permissions on the given L{Namespace} or L{Tag}.\n @return: A C{Deferred} that will fire with a C{None} if the operation\n was successful.\n \"\"\"\n path = path.decode('utf-8')\n\n try:\n operation = getOperation(category, action)\n except KeyError as error:\n session.log.exception(error)\n error = TBadRequest(\n 'Action %r not possible on category %r.' % (action, category))\n return defer.fail(error)\n\n policy = policyAndExceptions.policy\n if policy not in ('open', 'closed'):\n return defer.fail(TInvalidPolicy())\n policy = Policy.OPEN if policy == 'open' else Policy.CLOSED\n exceptions = policyAndExceptions.exceptions\n\n def run():\n permissions = SecurePermissionAPI(session.auth.user)\n try:\n permissions.set([(path, operation, policy, exceptions)])\n except UnknownPathError as error:\n session.log.exception(error)\n unknownPath = error.paths[0]\n if operation in Operation.TAG_OPERATIONS:\n raise TNonexistentTag(unknownPath.encode('utf-8'))\n if operation in Operation.NAMESPACE_OPERATIONS:\n raise TNonexistentNamespace(unknownPath.encode('utf-8'))\n raise\n except UnknownUserError as error:\n # FIXME There could be more than one unknown username, but\n # TNoSuchUser can only be passed a single username, so we'll\n # only pass the first one. Ideally, we'd be able to pass all\n # of them.\n raise TNoSuchUser(error.usernames[0].encode('utf-8'))\n except UserNotAllowedInExceptionError as error:\n raise TInvalidUsername(str(error))\n except PermissionDeniedError as error:\n session.log.exception(error)\n deniedPath, deniedOperation = error.pathsAndOperations[0]\n deniedCategory, deniedAction = getCategoryAndAction(\n deniedOperation)\n raise TPathPermissionDenied(deniedPath, deniedCategory,\n deniedAction)\n\n return session.transact.run(run)\n",
"from twisted.internet import defer\nfrom fluiddb.api.util import getCategoryAndAction, getOperation\nfrom fluiddb.common.types_thrift.ttypes import TNonexistentTag, TBadRequest, TNonexistentNamespace, TPathPermissionDenied, TPolicyAndExceptions, TInvalidPolicy, TNoSuchUser, TInvalidUsername\nfrom fluiddb.data.exceptions import UnknownUserError\nfrom fluiddb.data.permission import Operation, Policy\nfrom fluiddb.model.exceptions import UnknownPathError, UserNotAllowedInExceptionError\nfrom fluiddb.security.exceptions import PermissionDeniedError\nfrom fluiddb.security.permission import SecurePermissionAPI\n\n\nclass FacadePermissionMixin(object):\n\n def getPermission(self, session, category, action, path):\n \"\"\"Get permissions for a given path.\n\n @param session: The L{AuthenticatedSession} for the request.\n @param category: A C{unicode} indicating the category of the\n permission.\n @param action: A C{unicode} indicating the action of the permission.\n @param path: The L{Namespace.path} or L{Tag.path} to get permissions\n from.\n @raise TBadRequest: Raised if the given C{action} or C{category} are\n invalid.\n @raise TNonexistentNamespace: Raised if the given L{Namespace} path\n does not exist.\n @raise TNonexistentTag: Raised if the given L{Tag} path does not exist.\n @raise TPathPermissionDenied: Raised if the user does not have\n C{CONTROL} permissions on the given L{Namespace} or L{Tag}.\n @return: A C{Deferred} that will fire with a L{TPolicyAndExceptions}\n object containing the policy and exceptions list for the requested\n permission.\n \"\"\"\n path = path.decode('utf-8')\n try:\n operation = getOperation(category, action)\n except KeyError as error:\n session.log.exception(error)\n error = TBadRequest('Action %r not possible on category %r.' %\n (action, category))\n return defer.fail(error)\n\n def run():\n permissions = SecurePermissionAPI(session.auth.user)\n try:\n result = permissions.get([(path, operation)])\n except UnknownPathError as error:\n session.log.exception(error)\n unknownPath = error.paths[0]\n if operation in Operation.TAG_OPERATIONS:\n raise TNonexistentTag(unknownPath.encode('utf-8'))\n if operation in Operation.NAMESPACE_OPERATIONS:\n raise TNonexistentNamespace(unknownPath.encode('utf-8'))\n raise\n except PermissionDeniedError as error:\n session.log.exception(error)\n deniedPath, deniedOperation = error.pathsAndOperations[0]\n deniedCategory, deniedAction = getCategoryAndAction(\n deniedOperation)\n raise TPathPermissionDenied(deniedPath, deniedCategory,\n deniedAction)\n policy, exceptions = result[path, operation]\n policy = str(policy).lower()\n return TPolicyAndExceptions(policy=policy, exceptions=exceptions)\n return session.transact.run(run)\n\n def updatePermission(self, session, category, action, path,\n policyAndExceptions):\n \"\"\"Update permissions for a given path.\n\n @param session: The L{AuthenticatedSession} for the request.\n @param category: A C{unicode} indicating the category of the\n permission.\n @param action: A C{unicode} indicating the action of the permission.\n @param path: The L{Namespace.path} or L{Tag.path} to get permissions\n from.\n @param policyAndExceptions: A L{TPolicyAndExceptions} object containing\n the policy and exceptions list for the permission.\n @raise TBadRequest: Raised if the given C{action} or C{category} are\n invalid.\n @raise TInvalidPolicy: Raised if the policy given in\n C{policyAndExceptions} is invalid.\n @raise TNonexistentNamespace: Raised if the given L{Namespace} path\n does not exist.\n @raise TNonexistentTag: Raised if the given L{Tag} path does not exist.\n @raise TPathPermissionDenied: Raised if the user does not have\n C{CONTROL} permissions on the given L{Namespace} or L{Tag}.\n @return: A C{Deferred} that will fire with a C{None} if the operation\n was successful.\n \"\"\"\n path = path.decode('utf-8')\n try:\n operation = getOperation(category, action)\n except KeyError as error:\n session.log.exception(error)\n error = TBadRequest('Action %r not possible on category %r.' %\n (action, category))\n return defer.fail(error)\n policy = policyAndExceptions.policy\n if policy not in ('open', 'closed'):\n return defer.fail(TInvalidPolicy())\n policy = Policy.OPEN if policy == 'open' else Policy.CLOSED\n exceptions = policyAndExceptions.exceptions\n\n def run():\n permissions = SecurePermissionAPI(session.auth.user)\n try:\n permissions.set([(path, operation, policy, exceptions)])\n except UnknownPathError as error:\n session.log.exception(error)\n unknownPath = error.paths[0]\n if operation in Operation.TAG_OPERATIONS:\n raise TNonexistentTag(unknownPath.encode('utf-8'))\n if operation in Operation.NAMESPACE_OPERATIONS:\n raise TNonexistentNamespace(unknownPath.encode('utf-8'))\n raise\n except UnknownUserError as error:\n raise TNoSuchUser(error.usernames[0].encode('utf-8'))\n except UserNotAllowedInExceptionError as error:\n raise TInvalidUsername(str(error))\n except PermissionDeniedError as error:\n session.log.exception(error)\n deniedPath, deniedOperation = error.pathsAndOperations[0]\n deniedCategory, deniedAction = getCategoryAndAction(\n deniedOperation)\n raise TPathPermissionDenied(deniedPath, deniedCategory,\n deniedAction)\n return session.transact.run(run)\n",
"<import token>\n\n\nclass FacadePermissionMixin(object):\n\n def getPermission(self, session, category, action, path):\n \"\"\"Get permissions for a given path.\n\n @param session: The L{AuthenticatedSession} for the request.\n @param category: A C{unicode} indicating the category of the\n permission.\n @param action: A C{unicode} indicating the action of the permission.\n @param path: The L{Namespace.path} or L{Tag.path} to get permissions\n from.\n @raise TBadRequest: Raised if the given C{action} or C{category} are\n invalid.\n @raise TNonexistentNamespace: Raised if the given L{Namespace} path\n does not exist.\n @raise TNonexistentTag: Raised if the given L{Tag} path does not exist.\n @raise TPathPermissionDenied: Raised if the user does not have\n C{CONTROL} permissions on the given L{Namespace} or L{Tag}.\n @return: A C{Deferred} that will fire with a L{TPolicyAndExceptions}\n object containing the policy and exceptions list for the requested\n permission.\n \"\"\"\n path = path.decode('utf-8')\n try:\n operation = getOperation(category, action)\n except KeyError as error:\n session.log.exception(error)\n error = TBadRequest('Action %r not possible on category %r.' %\n (action, category))\n return defer.fail(error)\n\n def run():\n permissions = SecurePermissionAPI(session.auth.user)\n try:\n result = permissions.get([(path, operation)])\n except UnknownPathError as error:\n session.log.exception(error)\n unknownPath = error.paths[0]\n if operation in Operation.TAG_OPERATIONS:\n raise TNonexistentTag(unknownPath.encode('utf-8'))\n if operation in Operation.NAMESPACE_OPERATIONS:\n raise TNonexistentNamespace(unknownPath.encode('utf-8'))\n raise\n except PermissionDeniedError as error:\n session.log.exception(error)\n deniedPath, deniedOperation = error.pathsAndOperations[0]\n deniedCategory, deniedAction = getCategoryAndAction(\n deniedOperation)\n raise TPathPermissionDenied(deniedPath, deniedCategory,\n deniedAction)\n policy, exceptions = result[path, operation]\n policy = str(policy).lower()\n return TPolicyAndExceptions(policy=policy, exceptions=exceptions)\n return session.transact.run(run)\n\n def updatePermission(self, session, category, action, path,\n policyAndExceptions):\n \"\"\"Update permissions for a given path.\n\n @param session: The L{AuthenticatedSession} for the request.\n @param category: A C{unicode} indicating the category of the\n permission.\n @param action: A C{unicode} indicating the action of the permission.\n @param path: The L{Namespace.path} or L{Tag.path} to get permissions\n from.\n @param policyAndExceptions: A L{TPolicyAndExceptions} object containing\n the policy and exceptions list for the permission.\n @raise TBadRequest: Raised if the given C{action} or C{category} are\n invalid.\n @raise TInvalidPolicy: Raised if the policy given in\n C{policyAndExceptions} is invalid.\n @raise TNonexistentNamespace: Raised if the given L{Namespace} path\n does not exist.\n @raise TNonexistentTag: Raised if the given L{Tag} path does not exist.\n @raise TPathPermissionDenied: Raised if the user does not have\n C{CONTROL} permissions on the given L{Namespace} or L{Tag}.\n @return: A C{Deferred} that will fire with a C{None} if the operation\n was successful.\n \"\"\"\n path = path.decode('utf-8')\n try:\n operation = getOperation(category, action)\n except KeyError as error:\n session.log.exception(error)\n error = TBadRequest('Action %r not possible on category %r.' %\n (action, category))\n return defer.fail(error)\n policy = policyAndExceptions.policy\n if policy not in ('open', 'closed'):\n return defer.fail(TInvalidPolicy())\n policy = Policy.OPEN if policy == 'open' else Policy.CLOSED\n exceptions = policyAndExceptions.exceptions\n\n def run():\n permissions = SecurePermissionAPI(session.auth.user)\n try:\n permissions.set([(path, operation, policy, exceptions)])\n except UnknownPathError as error:\n session.log.exception(error)\n unknownPath = error.paths[0]\n if operation in Operation.TAG_OPERATIONS:\n raise TNonexistentTag(unknownPath.encode('utf-8'))\n if operation in Operation.NAMESPACE_OPERATIONS:\n raise TNonexistentNamespace(unknownPath.encode('utf-8'))\n raise\n except UnknownUserError as error:\n raise TNoSuchUser(error.usernames[0].encode('utf-8'))\n except UserNotAllowedInExceptionError as error:\n raise TInvalidUsername(str(error))\n except PermissionDeniedError as error:\n session.log.exception(error)\n deniedPath, deniedOperation = error.pathsAndOperations[0]\n deniedCategory, deniedAction = getCategoryAndAction(\n deniedOperation)\n raise TPathPermissionDenied(deniedPath, deniedCategory,\n deniedAction)\n return session.transact.run(run)\n",
"<import token>\n\n\nclass FacadePermissionMixin(object):\n <function token>\n\n def updatePermission(self, session, category, action, path,\n policyAndExceptions):\n \"\"\"Update permissions for a given path.\n\n @param session: The L{AuthenticatedSession} for the request.\n @param category: A C{unicode} indicating the category of the\n permission.\n @param action: A C{unicode} indicating the action of the permission.\n @param path: The L{Namespace.path} or L{Tag.path} to get permissions\n from.\n @param policyAndExceptions: A L{TPolicyAndExceptions} object containing\n the policy and exceptions list for the permission.\n @raise TBadRequest: Raised if the given C{action} or C{category} are\n invalid.\n @raise TInvalidPolicy: Raised if the policy given in\n C{policyAndExceptions} is invalid.\n @raise TNonexistentNamespace: Raised if the given L{Namespace} path\n does not exist.\n @raise TNonexistentTag: Raised if the given L{Tag} path does not exist.\n @raise TPathPermissionDenied: Raised if the user does not have\n C{CONTROL} permissions on the given L{Namespace} or L{Tag}.\n @return: A C{Deferred} that will fire with a C{None} if the operation\n was successful.\n \"\"\"\n path = path.decode('utf-8')\n try:\n operation = getOperation(category, action)\n except KeyError as error:\n session.log.exception(error)\n error = TBadRequest('Action %r not possible on category %r.' %\n (action, category))\n return defer.fail(error)\n policy = policyAndExceptions.policy\n if policy not in ('open', 'closed'):\n return defer.fail(TInvalidPolicy())\n policy = Policy.OPEN if policy == 'open' else Policy.CLOSED\n exceptions = policyAndExceptions.exceptions\n\n def run():\n permissions = SecurePermissionAPI(session.auth.user)\n try:\n permissions.set([(path, operation, policy, exceptions)])\n except UnknownPathError as error:\n session.log.exception(error)\n unknownPath = error.paths[0]\n if operation in Operation.TAG_OPERATIONS:\n raise TNonexistentTag(unknownPath.encode('utf-8'))\n if operation in Operation.NAMESPACE_OPERATIONS:\n raise TNonexistentNamespace(unknownPath.encode('utf-8'))\n raise\n except UnknownUserError as error:\n raise TNoSuchUser(error.usernames[0].encode('utf-8'))\n except UserNotAllowedInExceptionError as error:\n raise TInvalidUsername(str(error))\n except PermissionDeniedError as error:\n session.log.exception(error)\n deniedPath, deniedOperation = error.pathsAndOperations[0]\n deniedCategory, deniedAction = getCategoryAndAction(\n deniedOperation)\n raise TPathPermissionDenied(deniedPath, deniedCategory,\n deniedAction)\n return session.transact.run(run)\n",
"<import token>\n\n\nclass FacadePermissionMixin(object):\n <function token>\n <function token>\n",
"<import token>\n<class token>\n"
] | false |
98,333 |
cb95ef7cece27bd87aea1e8bb359f81d56fb662a
|
from xml.etree import ElementTree as et
import paxb as pb
def test_root_deserialization():
xml = '''<?xml version="1.0" encoding="utf-8"?>
<test_model>
<element1>value1</element1>
</test_model>
'''
@pb.model(name='test_model')
class TestModel:
element1 = pb.field()
model = pb.from_xml(TestModel, xml)
assert model.element1 == 'value1'
def test_attribute_deserialization():
xml = '''<?xml version="1.0" encoding="utf-8"?>
<TestModel attrib1="value1" attrib2="value2"/>
'''
@pb.model
class TestModel:
attrib1 = pb.attr()
attrib2 = pb.attr()
model = pb.from_xml(TestModel, xml)
assert model.attrib1 == 'value1'
assert model.attrib2 == 'value2'
def test_attribute_deserialization_with_name():
xml = '''<?xml version="1.0" encoding="utf-8"?>
<TestModel attribute1="value1" attribute2="value2"/>
'''
@pb.model
class TestModel:
attrib1 = pb.attr(name='attribute1')
attrib2 = pb.attr(name='attribute2')
model = pb.from_xml(TestModel, xml)
assert model.attrib1 == 'value1'
assert model.attrib2 == 'value2'
def test_element_deserialization():
xml = '''<?xml version="1.0" encoding="utf-8"?>
<TestModel>
<element1>value1</element1>
<element2>value2</element2>
</TestModel>
'''
@pb.model
class TestModel:
element1 = pb.field()
element2 = pb.field()
model = pb.from_xml(TestModel, xml)
assert model.element1 == 'value1'
assert model.element2 == 'value2'
def test_element_deserialization_with_name():
xml = '''<?xml version="1.0" encoding="utf-8"?>
<TestModel>
<element1>value1</element1>
<element2>value2</element2>
</TestModel>
'''
@pb.model
class TestModel:
elem1 = pb.field(name='element1')
elem2 = pb.field(name='element2')
model = pb.from_xml(TestModel, xml)
assert model.elem1 == 'value1'
assert model.elem2 == 'value2'
def test_wrapper_deserialization():
xml = '''<?xml version="1.0" encoding="utf-8"?>
<TestModel>
<wrapper1>
<wrapper2>
<element1>value1</element1>
</wrapper2>
</wrapper1>
</TestModel>
'''
@pb.model
class TestModel:
element1 = pb.wrap('wrapper1', pb.wrap('wrapper2', pb.field()))
model = pb.from_xml(TestModel, xml)
assert model.element1 == 'value1'
def test_wrapper_deserialization_with_path():
xml = '''<?xml version="1.0" encoding="utf-8"?>
<TestModel>
<wrapper1>
<wrapper2>
<element1>value1</element1>
</wrapper2>
</wrapper1>
</TestModel>
'''
@pb.model
class TestModel:
element1 = pb.wrap('wrapper1/wrapper2', pb.field())
model = pb.from_xml(TestModel, xml)
assert model.element1 == 'value1'
def test_inheritance_deserialization():
xml = '''<?xml version="1.0" encoding="utf-8"?>
<TestRootModel>
<TestBaseModel>
<element1>value1</element1>
</TestBaseModel>
<TestExtendedModel>
<element1>value2</element1>
<element2>value3</element2>
</TestExtendedModel>
</TestRootModel>
'''
@pb.model
class TestBaseModel:
element1 = pb.field()
@pb.model
class TestExtendedModel(TestBaseModel):
element2 = pb.field()
@pb.model
class TestRootModel:
model1 = pb.nested(TestBaseModel)
model2 = pb.nested(TestExtendedModel)
model = pb.from_xml(TestRootModel, xml)
assert model.model1.element1 == 'value1'
assert model.model2.element1 == 'value2'
assert model.model2.element2 == 'value3'
def test_nested_deserialization():
xml = '''<?xml version="1.0" encoding="utf-8"?>
<TestModel>
<NestedModel1>
<NestedModel2>
<element>value</element>
</NestedModel2>
</NestedModel1>
</TestModel>
'''
@pb.model
class NestedModel2:
element = pb.field()
@pb.model
class NestedModel1:
nested = pb.nested(NestedModel2)
@pb.model
class TestModel:
nested = pb.nested(NestedModel1)
model = pb.from_xml(TestModel, xml)
assert model.nested.nested.element == 'value'
def test_element_list_deserialization():
xml = '''<?xml version="1.0" encoding="utf-8"?>
<TestModel>
<element1>value1</element1>
<element1>value2</element1>
</TestModel>
'''
@pb.model
class TestModel:
element1 = pb.as_list(pb.field())
model = pb.from_xml(TestModel, xml)
assert model.element1 == ['value1', 'value2']
def test_wrapper_list_deserialization():
xml = '''<?xml version="1.0" encoding="utf-8"?>
<TestModel>
<wrapper>
<element>value1</element>
</wrapper>
<wrapper>
<element>value2</element>
</wrapper>
</TestModel>
'''
@pb.model
class TestModel:
elements = pb.as_list(pb.wrap('wrapper', pb.field('element')))
model = pb.from_xml(TestModel, xml)
assert model.elements == ['value1', 'value2']
def test_nested_list_deserialization():
xml = '''<?xml version="1.0" encoding="utf-8"?>
<TestModel>
<NestedModel>
<element>value1</element>
</NestedModel>
<NestedModel>
<element>value2</element>
</NestedModel>
</TestModel>
'''
@pb.model
class NestedModel:
element = pb.field()
@pb.model
class TestModel:
elements = pb.as_list(pb.nested(NestedModel))
model = pb.from_xml(TestModel, xml)
assert len(model.elements) == 2
assert model.elements[0].element == 'value1'
assert model.elements[1].element == 'value2'
def test_list_of_list_deserialization():
xml = '''<?xml version="1.0" encoding="utf-8"?>
<TestModel>
<element1>
<element2>value1</element2>
<element2>value2</element2>
</element1>
<element1>
<element2>value3</element2>
<element2>value4</element2>
</element1>
</TestModel>
'''
@pb.model
class TestModel:
elements = pb.as_list(pb.wrap('element1', pb.as_list(pb.field('element2'))))
model = pb.from_xml(TestModel, xml)
assert model.elements[0][0] == 'value1'
assert model.elements[0][1] == 'value2'
assert model.elements[1][0] == 'value3'
assert model.elements[1][1] == 'value4'
def test_namespaces_deserialization():
xml = '''<?xml version="1.0" encoding="utf-8"?>
<testns1:TestModel xmlns:testns1="http://www.test1.org"
xmlns:testns2="http://www.test2.org"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://www.test.com schema.xsd">
<element1>value1</element1>
<testns1:element2>value2</testns1:element2>
<testns2:element3>value3</testns2:element3>
<testns1:element4 xmlns:testns2="http://www.test22.org">
<element5>value5</element5>
<testns2:element6>value6</testns2:element6>
</testns1:element4>
</testns1:TestModel>
'''
@pb.model(ns='testns1', ns_map={'testns2': 'http://www.test2.org'})
class TestModel:
schema = pb.attribute('schemaLocation', ns='xsi')
element1 = pb.field(ns='')
element2 = pb.field()
element3 = pb.field(ns='testns2')
element5 = pb.wrap('element4', pb.field(ns=''))
element6 = pb.wrap('element4', pb.field(ns='testns2'), ns_map={'testns2': 'http://www.test22.org'})
model = pb.from_xml(TestModel, xml, ns_map={
'testns1': 'http://www.test1.org',
'xsi': 'http://www.w3.org/2001/XMLSchema-instance',
})
assert model.schema == 'http://www.test.com schema.xsd'
assert model.element1 == 'value1'
assert model.element2 == 'value2'
assert model.element3 == 'value3'
assert model.element5 == 'value5'
assert model.element6 == 'value6'
def test_complex_xml_deserialization():
xml = '''<?xml version="1.0" encoding="utf-8"?>
<envelope xmlns="http://www.test.org"
xmlns:doc="http://www.test1.org"
xmlns:data="http://www.test2.org">
<doc:user name="Alexey" surname="Ivanov" age="26">
<doc:contacts>
<doc:phone>+79204563539</doc:phone>
<doc:email>[email protected]</doc:email>
<doc:email>[email protected]</doc:email>
</doc:contacts>
<doc:documents>
<doc:passport series="3127" number="836815"/>
</doc:documents>
<data:occupations xmlns:data="http://www.test22.org">
<data:occupation title="yandex">
<data:address>Moscow</data:address>
<data:employees>8854</data:employees>
</data:occupation>
<data:occupation title="skbkontur">
<data:address>Yekaterinburg</data:address>
<data:employees>7742</data:employees>
</data:occupation>
</data:occupations>
</doc:user>
</envelope>
'''
@pb.model(name='occupation', ns='data', ns_map={'data': 'http://www.test22.org'})
class Occupation:
title = pb.attr()
address = pb.field()
employees = pb.field(converter=int)
@pb.model(name='user', ns='doc', ns_map={'doc': 'http://www.test1.org'})
class User:
name = pb.attr()
surname = pb.attr()
age = pb.attr(converter=int)
phone = pb.wrap('contacts', pb.field())
emails = pb.wrap('contacts', pb.as_list(pb.field(name='email')))
passport_series = pb.wrap('documents/passport', pb.attr('series'))
passport_number = pb.wrap('documents/passport', pb.attr('number'))
occupations = pb.wrap(
'occupations', pb.lst(pb.nested(Occupation)), ns='data', ns_map={'data': 'http://www.test22.org'}
)
citizenship = pb.field(default='RU')
xml = et.fromstring(xml)
user = pb.from_xml(User, xml)
assert user.name == 'Alexey'
assert user.surname == 'Ivanov'
assert user.age == 26
assert user.phone == '+79204563539'
assert user.emails == ['[email protected]', '[email protected]']
assert user.passport_series == '3127'
assert user.passport_number == '836815'
assert len(user.occupations) == 2
assert user.occupations[0].title == 'yandex'
assert user.occupations[0].address == 'Moscow'
assert user.occupations[0].employees == 8854
assert user.occupations[1].title == 'skbkontur'
assert user.occupations[1].address == 'Yekaterinburg'
assert user.occupations[1].employees == 7742
assert user.citizenship == 'RU'
def test_indexes_deserialization():
xml = '''<?xml version="1.0" encoding="utf-8"?>
<root>
<element>value1</element>
<element>value2</element>
<wrapper>
<element>value3</element>
</wrapper>
<wrapper>
<element>value4</element>
</wrapper>
<nested>
<element>value5</element>
</nested>
<nested>
<element>value6</element>
</nested>
</root>
'''
@pb.model(name='nested')
class Nested:
field = pb.field('element')
@pb.model(name='root')
class TestModel:
field1 = pb.field('element', idx=1)
field2 = pb.field('element', idx=2)
field3 = pb.wrap('wrapper', pb.field('element'), idx=1)
field4 = pb.wrap('wrapper', pb.field('element'), idx=2)
nested1 = pb.nested(Nested, idx=1)
nested2 = pb.nested(Nested, idx=2)
model = pb.from_xml(TestModel, xml)
assert model.field1 == 'value1'
assert model.field2 == 'value2'
assert model.field3 == 'value3'
assert model.field4 == 'value4'
assert model.nested1.field == 'value5'
assert model.nested2.field == 'value6'
def test_nested_default():
xml = '''<?xml version="1.0" encoding="utf-8"?>
<test_model>
</test_model>
'''
@pb.model(name='nested_model')
class NestedModel:
field = pb.field()
@pb.model(name='test_model')
class TestModel:
nested = pb.nested(NestedModel, default=None)
obj = pb.from_xml(TestModel, xml)
assert obj.nested is None
def test_field_default():
xml = '''<?xml version="1.0" encoding="utf-8"?>
<test_model>
</test_model>
'''
@pb.model(name='test_model')
class TestModel:
field = pb.field(default=None)
obj = pb.from_xml(TestModel, xml)
assert obj.field is None
def test_attribute_default():
xml = '''<?xml version="1.0" encoding="utf-8"?>
<test_model>
</test_model>
'''
@pb.model(name='test_model')
class TestModel:
attrib = pb.attr(default=None)
obj = pb.from_xml(TestModel, xml)
assert obj.attrib is None
def test_private_attributes():
xml = '''<?xml version="1.0" encoding="utf-8"?>
<TestModel>
<field1>value1</field1>
<field2>value2</field2>
</TestModel>
'''
@pb.model()
class TestModel:
_field1 = pb.field(name='field1')
__field2 = pb.field(name='field2')
obj = pb.from_xml(TestModel, xml)
assert obj._field1 == 'value1'
assert obj._TestModel__field2 == 'value2'
def test_dict_deserialization():
@pb.model
class Nested:
fields = pb.as_list(pb.field())
@pb.model
class TestModel:
field = pb.field()
nested = pb.as_list(pb.nested(Nested))
data = {
'field': 'value1',
'nested': [
{
'fields': ['value21', 'value22'],
},
{
'fields': ['value31', 'value32'],
},
]
}
obj = TestModel(**data)
assert obj.field == 'value1'
assert obj.nested == [Nested(fields=['value21', 'value22']), Nested(fields=['value31', 'value32'])]
|
[
"from xml.etree import ElementTree as et\nimport paxb as pb\n\n\ndef test_root_deserialization():\n xml = '''<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <test_model>\n <element1>value1</element1>\n </test_model>\n '''\n\n @pb.model(name='test_model')\n class TestModel:\n element1 = pb.field()\n\n model = pb.from_xml(TestModel, xml)\n\n assert model.element1 == 'value1'\n\n\ndef test_attribute_deserialization():\n xml = '''<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <TestModel attrib1=\"value1\" attrib2=\"value2\"/>\n '''\n\n @pb.model\n class TestModel:\n attrib1 = pb.attr()\n attrib2 = pb.attr()\n\n model = pb.from_xml(TestModel, xml)\n\n assert model.attrib1 == 'value1'\n assert model.attrib2 == 'value2'\n\n\ndef test_attribute_deserialization_with_name():\n xml = '''<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <TestModel attribute1=\"value1\" attribute2=\"value2\"/>\n '''\n\n @pb.model\n class TestModel:\n attrib1 = pb.attr(name='attribute1')\n attrib2 = pb.attr(name='attribute2')\n\n model = pb.from_xml(TestModel, xml)\n\n assert model.attrib1 == 'value1'\n assert model.attrib2 == 'value2'\n\n\ndef test_element_deserialization():\n xml = '''<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <TestModel>\n <element1>value1</element1>\n <element2>value2</element2>\n </TestModel>\n '''\n\n @pb.model\n class TestModel:\n element1 = pb.field()\n element2 = pb.field()\n\n model = pb.from_xml(TestModel, xml)\n\n assert model.element1 == 'value1'\n assert model.element2 == 'value2'\n\n\ndef test_element_deserialization_with_name():\n xml = '''<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <TestModel>\n <element1>value1</element1>\n <element2>value2</element2>\n </TestModel>\n '''\n\n @pb.model\n class TestModel:\n elem1 = pb.field(name='element1')\n elem2 = pb.field(name='element2')\n\n model = pb.from_xml(TestModel, xml)\n\n assert model.elem1 == 'value1'\n assert model.elem2 == 'value2'\n\n\ndef test_wrapper_deserialization():\n xml = '''<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <TestModel>\n <wrapper1>\n <wrapper2>\n <element1>value1</element1>\n </wrapper2>\n </wrapper1>\n </TestModel>\n '''\n\n @pb.model\n class TestModel:\n element1 = pb.wrap('wrapper1', pb.wrap('wrapper2', pb.field()))\n\n model = pb.from_xml(TestModel, xml)\n\n assert model.element1 == 'value1'\n\n\ndef test_wrapper_deserialization_with_path():\n xml = '''<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <TestModel>\n <wrapper1>\n <wrapper2>\n <element1>value1</element1>\n </wrapper2>\n </wrapper1>\n </TestModel>\n '''\n\n @pb.model\n class TestModel:\n element1 = pb.wrap('wrapper1/wrapper2', pb.field())\n\n model = pb.from_xml(TestModel, xml)\n\n assert model.element1 == 'value1'\n\n\ndef test_inheritance_deserialization():\n xml = '''<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <TestRootModel>\n <TestBaseModel>\n <element1>value1</element1>\n </TestBaseModel>\n <TestExtendedModel>\n <element1>value2</element1>\n <element2>value3</element2>\n </TestExtendedModel>\n </TestRootModel>\n '''\n\n @pb.model\n class TestBaseModel:\n element1 = pb.field()\n\n @pb.model\n class TestExtendedModel(TestBaseModel):\n element2 = pb.field()\n\n @pb.model\n class TestRootModel:\n model1 = pb.nested(TestBaseModel)\n model2 = pb.nested(TestExtendedModel)\n\n model = pb.from_xml(TestRootModel, xml)\n\n assert model.model1.element1 == 'value1'\n assert model.model2.element1 == 'value2'\n assert model.model2.element2 == 'value3'\n\n\ndef test_nested_deserialization():\n xml = '''<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <TestModel>\n <NestedModel1>\n <NestedModel2>\n <element>value</element>\n </NestedModel2>\n </NestedModel1>\n </TestModel>\n '''\n\n @pb.model\n class NestedModel2:\n element = pb.field()\n\n @pb.model\n class NestedModel1:\n nested = pb.nested(NestedModel2)\n\n @pb.model\n class TestModel:\n nested = pb.nested(NestedModel1)\n\n model = pb.from_xml(TestModel, xml)\n\n assert model.nested.nested.element == 'value'\n\n\ndef test_element_list_deserialization():\n xml = '''<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <TestModel>\n <element1>value1</element1>\n <element1>value2</element1>\n </TestModel>\n '''\n\n @pb.model\n class TestModel:\n element1 = pb.as_list(pb.field())\n\n model = pb.from_xml(TestModel, xml)\n\n assert model.element1 == ['value1', 'value2']\n\n\ndef test_wrapper_list_deserialization():\n xml = '''<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <TestModel>\n <wrapper>\n <element>value1</element>\n </wrapper>\n <wrapper>\n <element>value2</element>\n </wrapper>\n </TestModel>\n '''\n\n @pb.model\n class TestModel:\n elements = pb.as_list(pb.wrap('wrapper', pb.field('element')))\n\n model = pb.from_xml(TestModel, xml)\n\n assert model.elements == ['value1', 'value2']\n\n\ndef test_nested_list_deserialization():\n xml = '''<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <TestModel>\n <NestedModel>\n <element>value1</element>\n </NestedModel>\n <NestedModel>\n <element>value2</element>\n </NestedModel>\n </TestModel>\n '''\n\n @pb.model\n class NestedModel:\n element = pb.field()\n\n @pb.model\n class TestModel:\n elements = pb.as_list(pb.nested(NestedModel))\n\n model = pb.from_xml(TestModel, xml)\n\n assert len(model.elements) == 2\n assert model.elements[0].element == 'value1'\n assert model.elements[1].element == 'value2'\n\n\ndef test_list_of_list_deserialization():\n xml = '''<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <TestModel>\n <element1>\n <element2>value1</element2>\n <element2>value2</element2>\n </element1>\n <element1>\n <element2>value3</element2>\n <element2>value4</element2>\n </element1>\n </TestModel>\n '''\n\n @pb.model\n class TestModel:\n elements = pb.as_list(pb.wrap('element1', pb.as_list(pb.field('element2'))))\n\n model = pb.from_xml(TestModel, xml)\n\n assert model.elements[0][0] == 'value1'\n assert model.elements[0][1] == 'value2'\n assert model.elements[1][0] == 'value3'\n assert model.elements[1][1] == 'value4'\n\n\ndef test_namespaces_deserialization():\n xml = '''<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <testns1:TestModel xmlns:testns1=\"http://www.test1.org\"\n xmlns:testns2=\"http://www.test2.org\"\n xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\"\n xsi:schemaLocation=\"http://www.test.com schema.xsd\">\n <element1>value1</element1>\n <testns1:element2>value2</testns1:element2>\n <testns2:element3>value3</testns2:element3>\n <testns1:element4 xmlns:testns2=\"http://www.test22.org\">\n <element5>value5</element5>\n <testns2:element6>value6</testns2:element6>\n </testns1:element4>\n </testns1:TestModel>\n '''\n\n @pb.model(ns='testns1', ns_map={'testns2': 'http://www.test2.org'})\n class TestModel:\n schema = pb.attribute('schemaLocation', ns='xsi')\n element1 = pb.field(ns='')\n element2 = pb.field()\n element3 = pb.field(ns='testns2')\n element5 = pb.wrap('element4', pb.field(ns=''))\n element6 = pb.wrap('element4', pb.field(ns='testns2'), ns_map={'testns2': 'http://www.test22.org'})\n\n model = pb.from_xml(TestModel, xml, ns_map={\n 'testns1': 'http://www.test1.org',\n 'xsi': 'http://www.w3.org/2001/XMLSchema-instance',\n })\n\n assert model.schema == 'http://www.test.com schema.xsd'\n assert model.element1 == 'value1'\n assert model.element2 == 'value2'\n assert model.element3 == 'value3'\n assert model.element5 == 'value5'\n assert model.element6 == 'value6'\n\n\ndef test_complex_xml_deserialization():\n xml = '''<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <envelope xmlns=\"http://www.test.org\"\n xmlns:doc=\"http://www.test1.org\"\n xmlns:data=\"http://www.test2.org\">\n <doc:user name=\"Alexey\" surname=\"Ivanov\" age=\"26\">\n\n <doc:contacts>\n <doc:phone>+79204563539</doc:phone>\n <doc:email>[email protected]</doc:email>\n <doc:email>[email protected]</doc:email>\n </doc:contacts>\n\n <doc:documents>\n <doc:passport series=\"3127\" number=\"836815\"/>\n </doc:documents>\n\n <data:occupations xmlns:data=\"http://www.test22.org\">\n <data:occupation title=\"yandex\">\n <data:address>Moscow</data:address>\n <data:employees>8854</data:employees>\n </data:occupation>\n <data:occupation title=\"skbkontur\">\n <data:address>Yekaterinburg</data:address>\n <data:employees>7742</data:employees>\n </data:occupation>\n </data:occupations>\n\n </doc:user>\n </envelope>\n '''\n\n @pb.model(name='occupation', ns='data', ns_map={'data': 'http://www.test22.org'})\n class Occupation:\n title = pb.attr()\n address = pb.field()\n employees = pb.field(converter=int)\n\n @pb.model(name='user', ns='doc', ns_map={'doc': 'http://www.test1.org'})\n class User:\n name = pb.attr()\n surname = pb.attr()\n age = pb.attr(converter=int)\n\n phone = pb.wrap('contacts', pb.field())\n emails = pb.wrap('contacts', pb.as_list(pb.field(name='email')))\n\n passport_series = pb.wrap('documents/passport', pb.attr('series'))\n passport_number = pb.wrap('documents/passport', pb.attr('number'))\n\n occupations = pb.wrap(\n 'occupations', pb.lst(pb.nested(Occupation)), ns='data', ns_map={'data': 'http://www.test22.org'}\n )\n\n citizenship = pb.field(default='RU')\n\n xml = et.fromstring(xml)\n user = pb.from_xml(User, xml)\n\n assert user.name == 'Alexey'\n assert user.surname == 'Ivanov'\n assert user.age == 26\n\n assert user.phone == '+79204563539'\n assert user.emails == ['[email protected]', '[email protected]']\n\n assert user.passport_series == '3127'\n assert user.passport_number == '836815'\n\n assert len(user.occupations) == 2\n\n assert user.occupations[0].title == 'yandex'\n assert user.occupations[0].address == 'Moscow'\n assert user.occupations[0].employees == 8854\n\n assert user.occupations[1].title == 'skbkontur'\n assert user.occupations[1].address == 'Yekaterinburg'\n assert user.occupations[1].employees == 7742\n\n assert user.citizenship == 'RU'\n\n\ndef test_indexes_deserialization():\n xml = '''<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <root>\n <element>value1</element>\n <element>value2</element>\n\n <wrapper>\n <element>value3</element>\n </wrapper>\n <wrapper>\n <element>value4</element>\n </wrapper>\n\n <nested>\n <element>value5</element>\n </nested>\n <nested>\n <element>value6</element>\n </nested>\n </root>\n '''\n\n @pb.model(name='nested')\n class Nested:\n field = pb.field('element')\n\n @pb.model(name='root')\n class TestModel:\n field1 = pb.field('element', idx=1)\n field2 = pb.field('element', idx=2)\n\n field3 = pb.wrap('wrapper', pb.field('element'), idx=1)\n field4 = pb.wrap('wrapper', pb.field('element'), idx=2)\n\n nested1 = pb.nested(Nested, idx=1)\n nested2 = pb.nested(Nested, idx=2)\n\n model = pb.from_xml(TestModel, xml)\n\n assert model.field1 == 'value1'\n assert model.field2 == 'value2'\n assert model.field3 == 'value3'\n assert model.field4 == 'value4'\n assert model.nested1.field == 'value5'\n assert model.nested2.field == 'value6'\n\n\ndef test_nested_default():\n xml = '''<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <test_model>\n </test_model>\n '''\n\n @pb.model(name='nested_model')\n class NestedModel:\n field = pb.field()\n\n @pb.model(name='test_model')\n class TestModel:\n nested = pb.nested(NestedModel, default=None)\n\n obj = pb.from_xml(TestModel, xml)\n assert obj.nested is None\n\n\ndef test_field_default():\n xml = '''<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <test_model>\n </test_model>\n '''\n\n @pb.model(name='test_model')\n class TestModel:\n field = pb.field(default=None)\n\n obj = pb.from_xml(TestModel, xml)\n assert obj.field is None\n\n\ndef test_attribute_default():\n xml = '''<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <test_model>\n </test_model>\n '''\n\n @pb.model(name='test_model')\n class TestModel:\n attrib = pb.attr(default=None)\n\n obj = pb.from_xml(TestModel, xml)\n assert obj.attrib is None\n\n\ndef test_private_attributes():\n xml = '''<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <TestModel>\n <field1>value1</field1>\n <field2>value2</field2>\n </TestModel>\n '''\n\n @pb.model()\n class TestModel:\n _field1 = pb.field(name='field1')\n __field2 = pb.field(name='field2')\n\n obj = pb.from_xml(TestModel, xml)\n\n assert obj._field1 == 'value1'\n assert obj._TestModel__field2 == 'value2'\n\n\ndef test_dict_deserialization():\n\n @pb.model\n class Nested:\n fields = pb.as_list(pb.field())\n\n @pb.model\n class TestModel:\n field = pb.field()\n nested = pb.as_list(pb.nested(Nested))\n\n data = {\n 'field': 'value1',\n 'nested': [\n {\n 'fields': ['value21', 'value22'],\n },\n {\n 'fields': ['value31', 'value32'],\n },\n ]\n }\n\n obj = TestModel(**data)\n\n assert obj.field == 'value1'\n assert obj.nested == [Nested(fields=['value21', 'value22']), Nested(fields=['value31', 'value32'])]\n",
"from xml.etree import ElementTree as et\nimport paxb as pb\n\n\ndef test_root_deserialization():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <test_model>\n <element1>value1</element1>\n </test_model>\n \"\"\"\n\n\n @pb.model(name='test_model')\n class TestModel:\n element1 = pb.field()\n model = pb.from_xml(TestModel, xml)\n assert model.element1 == 'value1'\n\n\ndef test_attribute_deserialization():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <TestModel attrib1=\"value1\" attrib2=\"value2\"/>\n \"\"\"\n\n\n @pb.model\n class TestModel:\n attrib1 = pb.attr()\n attrib2 = pb.attr()\n model = pb.from_xml(TestModel, xml)\n assert model.attrib1 == 'value1'\n assert model.attrib2 == 'value2'\n\n\ndef test_attribute_deserialization_with_name():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <TestModel attribute1=\"value1\" attribute2=\"value2\"/>\n \"\"\"\n\n\n @pb.model\n class TestModel:\n attrib1 = pb.attr(name='attribute1')\n attrib2 = pb.attr(name='attribute2')\n model = pb.from_xml(TestModel, xml)\n assert model.attrib1 == 'value1'\n assert model.attrib2 == 'value2'\n\n\ndef test_element_deserialization():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <TestModel>\n <element1>value1</element1>\n <element2>value2</element2>\n </TestModel>\n \"\"\"\n\n\n @pb.model\n class TestModel:\n element1 = pb.field()\n element2 = pb.field()\n model = pb.from_xml(TestModel, xml)\n assert model.element1 == 'value1'\n assert model.element2 == 'value2'\n\n\ndef test_element_deserialization_with_name():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <TestModel>\n <element1>value1</element1>\n <element2>value2</element2>\n </TestModel>\n \"\"\"\n\n\n @pb.model\n class TestModel:\n elem1 = pb.field(name='element1')\n elem2 = pb.field(name='element2')\n model = pb.from_xml(TestModel, xml)\n assert model.elem1 == 'value1'\n assert model.elem2 == 'value2'\n\n\ndef test_wrapper_deserialization():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <TestModel>\n <wrapper1>\n <wrapper2>\n <element1>value1</element1>\n </wrapper2>\n </wrapper1>\n </TestModel>\n \"\"\"\n\n\n @pb.model\n class TestModel:\n element1 = pb.wrap('wrapper1', pb.wrap('wrapper2', pb.field()))\n model = pb.from_xml(TestModel, xml)\n assert model.element1 == 'value1'\n\n\ndef test_wrapper_deserialization_with_path():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <TestModel>\n <wrapper1>\n <wrapper2>\n <element1>value1</element1>\n </wrapper2>\n </wrapper1>\n </TestModel>\n \"\"\"\n\n\n @pb.model\n class TestModel:\n element1 = pb.wrap('wrapper1/wrapper2', pb.field())\n model = pb.from_xml(TestModel, xml)\n assert model.element1 == 'value1'\n\n\ndef test_inheritance_deserialization():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <TestRootModel>\n <TestBaseModel>\n <element1>value1</element1>\n </TestBaseModel>\n <TestExtendedModel>\n <element1>value2</element1>\n <element2>value3</element2>\n </TestExtendedModel>\n </TestRootModel>\n \"\"\"\n\n\n @pb.model\n class TestBaseModel:\n element1 = pb.field()\n\n\n @pb.model\n class TestExtendedModel(TestBaseModel):\n element2 = pb.field()\n\n\n @pb.model\n class TestRootModel:\n model1 = pb.nested(TestBaseModel)\n model2 = pb.nested(TestExtendedModel)\n model = pb.from_xml(TestRootModel, xml)\n assert model.model1.element1 == 'value1'\n assert model.model2.element1 == 'value2'\n assert model.model2.element2 == 'value3'\n\n\ndef test_nested_deserialization():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <TestModel>\n <NestedModel1>\n <NestedModel2>\n <element>value</element>\n </NestedModel2>\n </NestedModel1>\n </TestModel>\n \"\"\"\n\n\n @pb.model\n class NestedModel2:\n element = pb.field()\n\n\n @pb.model\n class NestedModel1:\n nested = pb.nested(NestedModel2)\n\n\n @pb.model\n class TestModel:\n nested = pb.nested(NestedModel1)\n model = pb.from_xml(TestModel, xml)\n assert model.nested.nested.element == 'value'\n\n\ndef test_element_list_deserialization():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <TestModel>\n <element1>value1</element1>\n <element1>value2</element1>\n </TestModel>\n \"\"\"\n\n\n @pb.model\n class TestModel:\n element1 = pb.as_list(pb.field())\n model = pb.from_xml(TestModel, xml)\n assert model.element1 == ['value1', 'value2']\n\n\ndef test_wrapper_list_deserialization():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <TestModel>\n <wrapper>\n <element>value1</element>\n </wrapper>\n <wrapper>\n <element>value2</element>\n </wrapper>\n </TestModel>\n \"\"\"\n\n\n @pb.model\n class TestModel:\n elements = pb.as_list(pb.wrap('wrapper', pb.field('element')))\n model = pb.from_xml(TestModel, xml)\n assert model.elements == ['value1', 'value2']\n\n\ndef test_nested_list_deserialization():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <TestModel>\n <NestedModel>\n <element>value1</element>\n </NestedModel>\n <NestedModel>\n <element>value2</element>\n </NestedModel>\n </TestModel>\n \"\"\"\n\n\n @pb.model\n class NestedModel:\n element = pb.field()\n\n\n @pb.model\n class TestModel:\n elements = pb.as_list(pb.nested(NestedModel))\n model = pb.from_xml(TestModel, xml)\n assert len(model.elements) == 2\n assert model.elements[0].element == 'value1'\n assert model.elements[1].element == 'value2'\n\n\ndef test_list_of_list_deserialization():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <TestModel>\n <element1>\n <element2>value1</element2>\n <element2>value2</element2>\n </element1>\n <element1>\n <element2>value3</element2>\n <element2>value4</element2>\n </element1>\n </TestModel>\n \"\"\"\n\n\n @pb.model\n class TestModel:\n elements = pb.as_list(pb.wrap('element1', pb.as_list(pb.field(\n 'element2'))))\n model = pb.from_xml(TestModel, xml)\n assert model.elements[0][0] == 'value1'\n assert model.elements[0][1] == 'value2'\n assert model.elements[1][0] == 'value3'\n assert model.elements[1][1] == 'value4'\n\n\ndef test_namespaces_deserialization():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <testns1:TestModel xmlns:testns1=\"http://www.test1.org\"\n xmlns:testns2=\"http://www.test2.org\"\n xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\"\n xsi:schemaLocation=\"http://www.test.com schema.xsd\">\n <element1>value1</element1>\n <testns1:element2>value2</testns1:element2>\n <testns2:element3>value3</testns2:element3>\n <testns1:element4 xmlns:testns2=\"http://www.test22.org\">\n <element5>value5</element5>\n <testns2:element6>value6</testns2:element6>\n </testns1:element4>\n </testns1:TestModel>\n \"\"\"\n\n\n @pb.model(ns='testns1', ns_map={'testns2': 'http://www.test2.org'})\n class TestModel:\n schema = pb.attribute('schemaLocation', ns='xsi')\n element1 = pb.field(ns='')\n element2 = pb.field()\n element3 = pb.field(ns='testns2')\n element5 = pb.wrap('element4', pb.field(ns=''))\n element6 = pb.wrap('element4', pb.field(ns='testns2'), ns_map={\n 'testns2': 'http://www.test22.org'})\n model = pb.from_xml(TestModel, xml, ns_map={'testns1':\n 'http://www.test1.org', 'xsi':\n 'http://www.w3.org/2001/XMLSchema-instance'})\n assert model.schema == 'http://www.test.com schema.xsd'\n assert model.element1 == 'value1'\n assert model.element2 == 'value2'\n assert model.element3 == 'value3'\n assert model.element5 == 'value5'\n assert model.element6 == 'value6'\n\n\ndef test_complex_xml_deserialization():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <envelope xmlns=\"http://www.test.org\"\n xmlns:doc=\"http://www.test1.org\"\n xmlns:data=\"http://www.test2.org\">\n <doc:user name=\"Alexey\" surname=\"Ivanov\" age=\"26\">\n\n <doc:contacts>\n <doc:phone>+79204563539</doc:phone>\n <doc:email>[email protected]</doc:email>\n <doc:email>[email protected]</doc:email>\n </doc:contacts>\n\n <doc:documents>\n <doc:passport series=\"3127\" number=\"836815\"/>\n </doc:documents>\n\n <data:occupations xmlns:data=\"http://www.test22.org\">\n <data:occupation title=\"yandex\">\n <data:address>Moscow</data:address>\n <data:employees>8854</data:employees>\n </data:occupation>\n <data:occupation title=\"skbkontur\">\n <data:address>Yekaterinburg</data:address>\n <data:employees>7742</data:employees>\n </data:occupation>\n </data:occupations>\n\n </doc:user>\n </envelope>\n \"\"\"\n\n\n @pb.model(name='occupation', ns='data', ns_map={'data':\n 'http://www.test22.org'})\n class Occupation:\n title = pb.attr()\n address = pb.field()\n employees = pb.field(converter=int)\n\n\n @pb.model(name='user', ns='doc', ns_map={'doc': 'http://www.test1.org'})\n class User:\n name = pb.attr()\n surname = pb.attr()\n age = pb.attr(converter=int)\n phone = pb.wrap('contacts', pb.field())\n emails = pb.wrap('contacts', pb.as_list(pb.field(name='email')))\n passport_series = pb.wrap('documents/passport', pb.attr('series'))\n passport_number = pb.wrap('documents/passport', pb.attr('number'))\n occupations = pb.wrap('occupations', pb.lst(pb.nested(Occupation)),\n ns='data', ns_map={'data': 'http://www.test22.org'})\n citizenship = pb.field(default='RU')\n xml = et.fromstring(xml)\n user = pb.from_xml(User, xml)\n assert user.name == 'Alexey'\n assert user.surname == 'Ivanov'\n assert user.age == 26\n assert user.phone == '+79204563539'\n assert user.emails == ['[email protected]', '[email protected]']\n assert user.passport_series == '3127'\n assert user.passport_number == '836815'\n assert len(user.occupations) == 2\n assert user.occupations[0].title == 'yandex'\n assert user.occupations[0].address == 'Moscow'\n assert user.occupations[0].employees == 8854\n assert user.occupations[1].title == 'skbkontur'\n assert user.occupations[1].address == 'Yekaterinburg'\n assert user.occupations[1].employees == 7742\n assert user.citizenship == 'RU'\n\n\ndef test_indexes_deserialization():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <root>\n <element>value1</element>\n <element>value2</element>\n\n <wrapper>\n <element>value3</element>\n </wrapper>\n <wrapper>\n <element>value4</element>\n </wrapper>\n\n <nested>\n <element>value5</element>\n </nested>\n <nested>\n <element>value6</element>\n </nested>\n </root>\n \"\"\"\n\n\n @pb.model(name='nested')\n class Nested:\n field = pb.field('element')\n\n\n @pb.model(name='root')\n class TestModel:\n field1 = pb.field('element', idx=1)\n field2 = pb.field('element', idx=2)\n field3 = pb.wrap('wrapper', pb.field('element'), idx=1)\n field4 = pb.wrap('wrapper', pb.field('element'), idx=2)\n nested1 = pb.nested(Nested, idx=1)\n nested2 = pb.nested(Nested, idx=2)\n model = pb.from_xml(TestModel, xml)\n assert model.field1 == 'value1'\n assert model.field2 == 'value2'\n assert model.field3 == 'value3'\n assert model.field4 == 'value4'\n assert model.nested1.field == 'value5'\n assert model.nested2.field == 'value6'\n\n\ndef test_nested_default():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <test_model>\n </test_model>\n \"\"\"\n\n\n @pb.model(name='nested_model')\n class NestedModel:\n field = pb.field()\n\n\n @pb.model(name='test_model')\n class TestModel:\n nested = pb.nested(NestedModel, default=None)\n obj = pb.from_xml(TestModel, xml)\n assert obj.nested is None\n\n\ndef test_field_default():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <test_model>\n </test_model>\n \"\"\"\n\n\n @pb.model(name='test_model')\n class TestModel:\n field = pb.field(default=None)\n obj = pb.from_xml(TestModel, xml)\n assert obj.field is None\n\n\ndef test_attribute_default():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <test_model>\n </test_model>\n \"\"\"\n\n\n @pb.model(name='test_model')\n class TestModel:\n attrib = pb.attr(default=None)\n obj = pb.from_xml(TestModel, xml)\n assert obj.attrib is None\n\n\ndef test_private_attributes():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <TestModel>\n <field1>value1</field1>\n <field2>value2</field2>\n </TestModel>\n \"\"\"\n\n\n @pb.model()\n class TestModel:\n _field1 = pb.field(name='field1')\n __field2 = pb.field(name='field2')\n obj = pb.from_xml(TestModel, xml)\n assert obj._field1 == 'value1'\n assert obj._TestModel__field2 == 'value2'\n\n\ndef test_dict_deserialization():\n\n\n @pb.model\n class Nested:\n fields = pb.as_list(pb.field())\n\n\n @pb.model\n class TestModel:\n field = pb.field()\n nested = pb.as_list(pb.nested(Nested))\n data = {'field': 'value1', 'nested': [{'fields': ['value21', 'value22']\n }, {'fields': ['value31', 'value32']}]}\n obj = TestModel(**data)\n assert obj.field == 'value1'\n assert obj.nested == [Nested(fields=['value21', 'value22']), Nested(\n fields=['value31', 'value32'])]\n",
"<import token>\n\n\ndef test_root_deserialization():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <test_model>\n <element1>value1</element1>\n </test_model>\n \"\"\"\n\n\n @pb.model(name='test_model')\n class TestModel:\n element1 = pb.field()\n model = pb.from_xml(TestModel, xml)\n assert model.element1 == 'value1'\n\n\ndef test_attribute_deserialization():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <TestModel attrib1=\"value1\" attrib2=\"value2\"/>\n \"\"\"\n\n\n @pb.model\n class TestModel:\n attrib1 = pb.attr()\n attrib2 = pb.attr()\n model = pb.from_xml(TestModel, xml)\n assert model.attrib1 == 'value1'\n assert model.attrib2 == 'value2'\n\n\ndef test_attribute_deserialization_with_name():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <TestModel attribute1=\"value1\" attribute2=\"value2\"/>\n \"\"\"\n\n\n @pb.model\n class TestModel:\n attrib1 = pb.attr(name='attribute1')\n attrib2 = pb.attr(name='attribute2')\n model = pb.from_xml(TestModel, xml)\n assert model.attrib1 == 'value1'\n assert model.attrib2 == 'value2'\n\n\ndef test_element_deserialization():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <TestModel>\n <element1>value1</element1>\n <element2>value2</element2>\n </TestModel>\n \"\"\"\n\n\n @pb.model\n class TestModel:\n element1 = pb.field()\n element2 = pb.field()\n model = pb.from_xml(TestModel, xml)\n assert model.element1 == 'value1'\n assert model.element2 == 'value2'\n\n\ndef test_element_deserialization_with_name():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <TestModel>\n <element1>value1</element1>\n <element2>value2</element2>\n </TestModel>\n \"\"\"\n\n\n @pb.model\n class TestModel:\n elem1 = pb.field(name='element1')\n elem2 = pb.field(name='element2')\n model = pb.from_xml(TestModel, xml)\n assert model.elem1 == 'value1'\n assert model.elem2 == 'value2'\n\n\ndef test_wrapper_deserialization():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <TestModel>\n <wrapper1>\n <wrapper2>\n <element1>value1</element1>\n </wrapper2>\n </wrapper1>\n </TestModel>\n \"\"\"\n\n\n @pb.model\n class TestModel:\n element1 = pb.wrap('wrapper1', pb.wrap('wrapper2', pb.field()))\n model = pb.from_xml(TestModel, xml)\n assert model.element1 == 'value1'\n\n\ndef test_wrapper_deserialization_with_path():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <TestModel>\n <wrapper1>\n <wrapper2>\n <element1>value1</element1>\n </wrapper2>\n </wrapper1>\n </TestModel>\n \"\"\"\n\n\n @pb.model\n class TestModel:\n element1 = pb.wrap('wrapper1/wrapper2', pb.field())\n model = pb.from_xml(TestModel, xml)\n assert model.element1 == 'value1'\n\n\ndef test_inheritance_deserialization():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <TestRootModel>\n <TestBaseModel>\n <element1>value1</element1>\n </TestBaseModel>\n <TestExtendedModel>\n <element1>value2</element1>\n <element2>value3</element2>\n </TestExtendedModel>\n </TestRootModel>\n \"\"\"\n\n\n @pb.model\n class TestBaseModel:\n element1 = pb.field()\n\n\n @pb.model\n class TestExtendedModel(TestBaseModel):\n element2 = pb.field()\n\n\n @pb.model\n class TestRootModel:\n model1 = pb.nested(TestBaseModel)\n model2 = pb.nested(TestExtendedModel)\n model = pb.from_xml(TestRootModel, xml)\n assert model.model1.element1 == 'value1'\n assert model.model2.element1 == 'value2'\n assert model.model2.element2 == 'value3'\n\n\ndef test_nested_deserialization():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <TestModel>\n <NestedModel1>\n <NestedModel2>\n <element>value</element>\n </NestedModel2>\n </NestedModel1>\n </TestModel>\n \"\"\"\n\n\n @pb.model\n class NestedModel2:\n element = pb.field()\n\n\n @pb.model\n class NestedModel1:\n nested = pb.nested(NestedModel2)\n\n\n @pb.model\n class TestModel:\n nested = pb.nested(NestedModel1)\n model = pb.from_xml(TestModel, xml)\n assert model.nested.nested.element == 'value'\n\n\ndef test_element_list_deserialization():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <TestModel>\n <element1>value1</element1>\n <element1>value2</element1>\n </TestModel>\n \"\"\"\n\n\n @pb.model\n class TestModel:\n element1 = pb.as_list(pb.field())\n model = pb.from_xml(TestModel, xml)\n assert model.element1 == ['value1', 'value2']\n\n\ndef test_wrapper_list_deserialization():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <TestModel>\n <wrapper>\n <element>value1</element>\n </wrapper>\n <wrapper>\n <element>value2</element>\n </wrapper>\n </TestModel>\n \"\"\"\n\n\n @pb.model\n class TestModel:\n elements = pb.as_list(pb.wrap('wrapper', pb.field('element')))\n model = pb.from_xml(TestModel, xml)\n assert model.elements == ['value1', 'value2']\n\n\ndef test_nested_list_deserialization():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <TestModel>\n <NestedModel>\n <element>value1</element>\n </NestedModel>\n <NestedModel>\n <element>value2</element>\n </NestedModel>\n </TestModel>\n \"\"\"\n\n\n @pb.model\n class NestedModel:\n element = pb.field()\n\n\n @pb.model\n class TestModel:\n elements = pb.as_list(pb.nested(NestedModel))\n model = pb.from_xml(TestModel, xml)\n assert len(model.elements) == 2\n assert model.elements[0].element == 'value1'\n assert model.elements[1].element == 'value2'\n\n\ndef test_list_of_list_deserialization():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <TestModel>\n <element1>\n <element2>value1</element2>\n <element2>value2</element2>\n </element1>\n <element1>\n <element2>value3</element2>\n <element2>value4</element2>\n </element1>\n </TestModel>\n \"\"\"\n\n\n @pb.model\n class TestModel:\n elements = pb.as_list(pb.wrap('element1', pb.as_list(pb.field(\n 'element2'))))\n model = pb.from_xml(TestModel, xml)\n assert model.elements[0][0] == 'value1'\n assert model.elements[0][1] == 'value2'\n assert model.elements[1][0] == 'value3'\n assert model.elements[1][1] == 'value4'\n\n\ndef test_namespaces_deserialization():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <testns1:TestModel xmlns:testns1=\"http://www.test1.org\"\n xmlns:testns2=\"http://www.test2.org\"\n xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\"\n xsi:schemaLocation=\"http://www.test.com schema.xsd\">\n <element1>value1</element1>\n <testns1:element2>value2</testns1:element2>\n <testns2:element3>value3</testns2:element3>\n <testns1:element4 xmlns:testns2=\"http://www.test22.org\">\n <element5>value5</element5>\n <testns2:element6>value6</testns2:element6>\n </testns1:element4>\n </testns1:TestModel>\n \"\"\"\n\n\n @pb.model(ns='testns1', ns_map={'testns2': 'http://www.test2.org'})\n class TestModel:\n schema = pb.attribute('schemaLocation', ns='xsi')\n element1 = pb.field(ns='')\n element2 = pb.field()\n element3 = pb.field(ns='testns2')\n element5 = pb.wrap('element4', pb.field(ns=''))\n element6 = pb.wrap('element4', pb.field(ns='testns2'), ns_map={\n 'testns2': 'http://www.test22.org'})\n model = pb.from_xml(TestModel, xml, ns_map={'testns1':\n 'http://www.test1.org', 'xsi':\n 'http://www.w3.org/2001/XMLSchema-instance'})\n assert model.schema == 'http://www.test.com schema.xsd'\n assert model.element1 == 'value1'\n assert model.element2 == 'value2'\n assert model.element3 == 'value3'\n assert model.element5 == 'value5'\n assert model.element6 == 'value6'\n\n\ndef test_complex_xml_deserialization():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <envelope xmlns=\"http://www.test.org\"\n xmlns:doc=\"http://www.test1.org\"\n xmlns:data=\"http://www.test2.org\">\n <doc:user name=\"Alexey\" surname=\"Ivanov\" age=\"26\">\n\n <doc:contacts>\n <doc:phone>+79204563539</doc:phone>\n <doc:email>[email protected]</doc:email>\n <doc:email>[email protected]</doc:email>\n </doc:contacts>\n\n <doc:documents>\n <doc:passport series=\"3127\" number=\"836815\"/>\n </doc:documents>\n\n <data:occupations xmlns:data=\"http://www.test22.org\">\n <data:occupation title=\"yandex\">\n <data:address>Moscow</data:address>\n <data:employees>8854</data:employees>\n </data:occupation>\n <data:occupation title=\"skbkontur\">\n <data:address>Yekaterinburg</data:address>\n <data:employees>7742</data:employees>\n </data:occupation>\n </data:occupations>\n\n </doc:user>\n </envelope>\n \"\"\"\n\n\n @pb.model(name='occupation', ns='data', ns_map={'data':\n 'http://www.test22.org'})\n class Occupation:\n title = pb.attr()\n address = pb.field()\n employees = pb.field(converter=int)\n\n\n @pb.model(name='user', ns='doc', ns_map={'doc': 'http://www.test1.org'})\n class User:\n name = pb.attr()\n surname = pb.attr()\n age = pb.attr(converter=int)\n phone = pb.wrap('contacts', pb.field())\n emails = pb.wrap('contacts', pb.as_list(pb.field(name='email')))\n passport_series = pb.wrap('documents/passport', pb.attr('series'))\n passport_number = pb.wrap('documents/passport', pb.attr('number'))\n occupations = pb.wrap('occupations', pb.lst(pb.nested(Occupation)),\n ns='data', ns_map={'data': 'http://www.test22.org'})\n citizenship = pb.field(default='RU')\n xml = et.fromstring(xml)\n user = pb.from_xml(User, xml)\n assert user.name == 'Alexey'\n assert user.surname == 'Ivanov'\n assert user.age == 26\n assert user.phone == '+79204563539'\n assert user.emails == ['[email protected]', '[email protected]']\n assert user.passport_series == '3127'\n assert user.passport_number == '836815'\n assert len(user.occupations) == 2\n assert user.occupations[0].title == 'yandex'\n assert user.occupations[0].address == 'Moscow'\n assert user.occupations[0].employees == 8854\n assert user.occupations[1].title == 'skbkontur'\n assert user.occupations[1].address == 'Yekaterinburg'\n assert user.occupations[1].employees == 7742\n assert user.citizenship == 'RU'\n\n\ndef test_indexes_deserialization():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <root>\n <element>value1</element>\n <element>value2</element>\n\n <wrapper>\n <element>value3</element>\n </wrapper>\n <wrapper>\n <element>value4</element>\n </wrapper>\n\n <nested>\n <element>value5</element>\n </nested>\n <nested>\n <element>value6</element>\n </nested>\n </root>\n \"\"\"\n\n\n @pb.model(name='nested')\n class Nested:\n field = pb.field('element')\n\n\n @pb.model(name='root')\n class TestModel:\n field1 = pb.field('element', idx=1)\n field2 = pb.field('element', idx=2)\n field3 = pb.wrap('wrapper', pb.field('element'), idx=1)\n field4 = pb.wrap('wrapper', pb.field('element'), idx=2)\n nested1 = pb.nested(Nested, idx=1)\n nested2 = pb.nested(Nested, idx=2)\n model = pb.from_xml(TestModel, xml)\n assert model.field1 == 'value1'\n assert model.field2 == 'value2'\n assert model.field3 == 'value3'\n assert model.field4 == 'value4'\n assert model.nested1.field == 'value5'\n assert model.nested2.field == 'value6'\n\n\ndef test_nested_default():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <test_model>\n </test_model>\n \"\"\"\n\n\n @pb.model(name='nested_model')\n class NestedModel:\n field = pb.field()\n\n\n @pb.model(name='test_model')\n class TestModel:\n nested = pb.nested(NestedModel, default=None)\n obj = pb.from_xml(TestModel, xml)\n assert obj.nested is None\n\n\ndef test_field_default():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <test_model>\n </test_model>\n \"\"\"\n\n\n @pb.model(name='test_model')\n class TestModel:\n field = pb.field(default=None)\n obj = pb.from_xml(TestModel, xml)\n assert obj.field is None\n\n\ndef test_attribute_default():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <test_model>\n </test_model>\n \"\"\"\n\n\n @pb.model(name='test_model')\n class TestModel:\n attrib = pb.attr(default=None)\n obj = pb.from_xml(TestModel, xml)\n assert obj.attrib is None\n\n\ndef test_private_attributes():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <TestModel>\n <field1>value1</field1>\n <field2>value2</field2>\n </TestModel>\n \"\"\"\n\n\n @pb.model()\n class TestModel:\n _field1 = pb.field(name='field1')\n __field2 = pb.field(name='field2')\n obj = pb.from_xml(TestModel, xml)\n assert obj._field1 == 'value1'\n assert obj._TestModel__field2 == 'value2'\n\n\ndef test_dict_deserialization():\n\n\n @pb.model\n class Nested:\n fields = pb.as_list(pb.field())\n\n\n @pb.model\n class TestModel:\n field = pb.field()\n nested = pb.as_list(pb.nested(Nested))\n data = {'field': 'value1', 'nested': [{'fields': ['value21', 'value22']\n }, {'fields': ['value31', 'value32']}]}\n obj = TestModel(**data)\n assert obj.field == 'value1'\n assert obj.nested == [Nested(fields=['value21', 'value22']), Nested(\n fields=['value31', 'value32'])]\n",
"<import token>\n\n\ndef test_root_deserialization():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <test_model>\n <element1>value1</element1>\n </test_model>\n \"\"\"\n\n\n @pb.model(name='test_model')\n class TestModel:\n element1 = pb.field()\n model = pb.from_xml(TestModel, xml)\n assert model.element1 == 'value1'\n\n\ndef test_attribute_deserialization():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <TestModel attrib1=\"value1\" attrib2=\"value2\"/>\n \"\"\"\n\n\n @pb.model\n class TestModel:\n attrib1 = pb.attr()\n attrib2 = pb.attr()\n model = pb.from_xml(TestModel, xml)\n assert model.attrib1 == 'value1'\n assert model.attrib2 == 'value2'\n\n\ndef test_attribute_deserialization_with_name():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <TestModel attribute1=\"value1\" attribute2=\"value2\"/>\n \"\"\"\n\n\n @pb.model\n class TestModel:\n attrib1 = pb.attr(name='attribute1')\n attrib2 = pb.attr(name='attribute2')\n model = pb.from_xml(TestModel, xml)\n assert model.attrib1 == 'value1'\n assert model.attrib2 == 'value2'\n\n\ndef test_element_deserialization():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <TestModel>\n <element1>value1</element1>\n <element2>value2</element2>\n </TestModel>\n \"\"\"\n\n\n @pb.model\n class TestModel:\n element1 = pb.field()\n element2 = pb.field()\n model = pb.from_xml(TestModel, xml)\n assert model.element1 == 'value1'\n assert model.element2 == 'value2'\n\n\ndef test_element_deserialization_with_name():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <TestModel>\n <element1>value1</element1>\n <element2>value2</element2>\n </TestModel>\n \"\"\"\n\n\n @pb.model\n class TestModel:\n elem1 = pb.field(name='element1')\n elem2 = pb.field(name='element2')\n model = pb.from_xml(TestModel, xml)\n assert model.elem1 == 'value1'\n assert model.elem2 == 'value2'\n\n\ndef test_wrapper_deserialization():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <TestModel>\n <wrapper1>\n <wrapper2>\n <element1>value1</element1>\n </wrapper2>\n </wrapper1>\n </TestModel>\n \"\"\"\n\n\n @pb.model\n class TestModel:\n element1 = pb.wrap('wrapper1', pb.wrap('wrapper2', pb.field()))\n model = pb.from_xml(TestModel, xml)\n assert model.element1 == 'value1'\n\n\ndef test_wrapper_deserialization_with_path():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <TestModel>\n <wrapper1>\n <wrapper2>\n <element1>value1</element1>\n </wrapper2>\n </wrapper1>\n </TestModel>\n \"\"\"\n\n\n @pb.model\n class TestModel:\n element1 = pb.wrap('wrapper1/wrapper2', pb.field())\n model = pb.from_xml(TestModel, xml)\n assert model.element1 == 'value1'\n\n\ndef test_inheritance_deserialization():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <TestRootModel>\n <TestBaseModel>\n <element1>value1</element1>\n </TestBaseModel>\n <TestExtendedModel>\n <element1>value2</element1>\n <element2>value3</element2>\n </TestExtendedModel>\n </TestRootModel>\n \"\"\"\n\n\n @pb.model\n class TestBaseModel:\n element1 = pb.field()\n\n\n @pb.model\n class TestExtendedModel(TestBaseModel):\n element2 = pb.field()\n\n\n @pb.model\n class TestRootModel:\n model1 = pb.nested(TestBaseModel)\n model2 = pb.nested(TestExtendedModel)\n model = pb.from_xml(TestRootModel, xml)\n assert model.model1.element1 == 'value1'\n assert model.model2.element1 == 'value2'\n assert model.model2.element2 == 'value3'\n\n\ndef test_nested_deserialization():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <TestModel>\n <NestedModel1>\n <NestedModel2>\n <element>value</element>\n </NestedModel2>\n </NestedModel1>\n </TestModel>\n \"\"\"\n\n\n @pb.model\n class NestedModel2:\n element = pb.field()\n\n\n @pb.model\n class NestedModel1:\n nested = pb.nested(NestedModel2)\n\n\n @pb.model\n class TestModel:\n nested = pb.nested(NestedModel1)\n model = pb.from_xml(TestModel, xml)\n assert model.nested.nested.element == 'value'\n\n\ndef test_element_list_deserialization():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <TestModel>\n <element1>value1</element1>\n <element1>value2</element1>\n </TestModel>\n \"\"\"\n\n\n @pb.model\n class TestModel:\n element1 = pb.as_list(pb.field())\n model = pb.from_xml(TestModel, xml)\n assert model.element1 == ['value1', 'value2']\n\n\ndef test_wrapper_list_deserialization():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <TestModel>\n <wrapper>\n <element>value1</element>\n </wrapper>\n <wrapper>\n <element>value2</element>\n </wrapper>\n </TestModel>\n \"\"\"\n\n\n @pb.model\n class TestModel:\n elements = pb.as_list(pb.wrap('wrapper', pb.field('element')))\n model = pb.from_xml(TestModel, xml)\n assert model.elements == ['value1', 'value2']\n\n\ndef test_nested_list_deserialization():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <TestModel>\n <NestedModel>\n <element>value1</element>\n </NestedModel>\n <NestedModel>\n <element>value2</element>\n </NestedModel>\n </TestModel>\n \"\"\"\n\n\n @pb.model\n class NestedModel:\n element = pb.field()\n\n\n @pb.model\n class TestModel:\n elements = pb.as_list(pb.nested(NestedModel))\n model = pb.from_xml(TestModel, xml)\n assert len(model.elements) == 2\n assert model.elements[0].element == 'value1'\n assert model.elements[1].element == 'value2'\n\n\ndef test_list_of_list_deserialization():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <TestModel>\n <element1>\n <element2>value1</element2>\n <element2>value2</element2>\n </element1>\n <element1>\n <element2>value3</element2>\n <element2>value4</element2>\n </element1>\n </TestModel>\n \"\"\"\n\n\n @pb.model\n class TestModel:\n elements = pb.as_list(pb.wrap('element1', pb.as_list(pb.field(\n 'element2'))))\n model = pb.from_xml(TestModel, xml)\n assert model.elements[0][0] == 'value1'\n assert model.elements[0][1] == 'value2'\n assert model.elements[1][0] == 'value3'\n assert model.elements[1][1] == 'value4'\n\n\ndef test_namespaces_deserialization():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <testns1:TestModel xmlns:testns1=\"http://www.test1.org\"\n xmlns:testns2=\"http://www.test2.org\"\n xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\"\n xsi:schemaLocation=\"http://www.test.com schema.xsd\">\n <element1>value1</element1>\n <testns1:element2>value2</testns1:element2>\n <testns2:element3>value3</testns2:element3>\n <testns1:element4 xmlns:testns2=\"http://www.test22.org\">\n <element5>value5</element5>\n <testns2:element6>value6</testns2:element6>\n </testns1:element4>\n </testns1:TestModel>\n \"\"\"\n\n\n @pb.model(ns='testns1', ns_map={'testns2': 'http://www.test2.org'})\n class TestModel:\n schema = pb.attribute('schemaLocation', ns='xsi')\n element1 = pb.field(ns='')\n element2 = pb.field()\n element3 = pb.field(ns='testns2')\n element5 = pb.wrap('element4', pb.field(ns=''))\n element6 = pb.wrap('element4', pb.field(ns='testns2'), ns_map={\n 'testns2': 'http://www.test22.org'})\n model = pb.from_xml(TestModel, xml, ns_map={'testns1':\n 'http://www.test1.org', 'xsi':\n 'http://www.w3.org/2001/XMLSchema-instance'})\n assert model.schema == 'http://www.test.com schema.xsd'\n assert model.element1 == 'value1'\n assert model.element2 == 'value2'\n assert model.element3 == 'value3'\n assert model.element5 == 'value5'\n assert model.element6 == 'value6'\n\n\ndef test_complex_xml_deserialization():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <envelope xmlns=\"http://www.test.org\"\n xmlns:doc=\"http://www.test1.org\"\n xmlns:data=\"http://www.test2.org\">\n <doc:user name=\"Alexey\" surname=\"Ivanov\" age=\"26\">\n\n <doc:contacts>\n <doc:phone>+79204563539</doc:phone>\n <doc:email>[email protected]</doc:email>\n <doc:email>[email protected]</doc:email>\n </doc:contacts>\n\n <doc:documents>\n <doc:passport series=\"3127\" number=\"836815\"/>\n </doc:documents>\n\n <data:occupations xmlns:data=\"http://www.test22.org\">\n <data:occupation title=\"yandex\">\n <data:address>Moscow</data:address>\n <data:employees>8854</data:employees>\n </data:occupation>\n <data:occupation title=\"skbkontur\">\n <data:address>Yekaterinburg</data:address>\n <data:employees>7742</data:employees>\n </data:occupation>\n </data:occupations>\n\n </doc:user>\n </envelope>\n \"\"\"\n\n\n @pb.model(name='occupation', ns='data', ns_map={'data':\n 'http://www.test22.org'})\n class Occupation:\n title = pb.attr()\n address = pb.field()\n employees = pb.field(converter=int)\n\n\n @pb.model(name='user', ns='doc', ns_map={'doc': 'http://www.test1.org'})\n class User:\n name = pb.attr()\n surname = pb.attr()\n age = pb.attr(converter=int)\n phone = pb.wrap('contacts', pb.field())\n emails = pb.wrap('contacts', pb.as_list(pb.field(name='email')))\n passport_series = pb.wrap('documents/passport', pb.attr('series'))\n passport_number = pb.wrap('documents/passport', pb.attr('number'))\n occupations = pb.wrap('occupations', pb.lst(pb.nested(Occupation)),\n ns='data', ns_map={'data': 'http://www.test22.org'})\n citizenship = pb.field(default='RU')\n xml = et.fromstring(xml)\n user = pb.from_xml(User, xml)\n assert user.name == 'Alexey'\n assert user.surname == 'Ivanov'\n assert user.age == 26\n assert user.phone == '+79204563539'\n assert user.emails == ['[email protected]', '[email protected]']\n assert user.passport_series == '3127'\n assert user.passport_number == '836815'\n assert len(user.occupations) == 2\n assert user.occupations[0].title == 'yandex'\n assert user.occupations[0].address == 'Moscow'\n assert user.occupations[0].employees == 8854\n assert user.occupations[1].title == 'skbkontur'\n assert user.occupations[1].address == 'Yekaterinburg'\n assert user.occupations[1].employees == 7742\n assert user.citizenship == 'RU'\n\n\n<function token>\n\n\ndef test_nested_default():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <test_model>\n </test_model>\n \"\"\"\n\n\n @pb.model(name='nested_model')\n class NestedModel:\n field = pb.field()\n\n\n @pb.model(name='test_model')\n class TestModel:\n nested = pb.nested(NestedModel, default=None)\n obj = pb.from_xml(TestModel, xml)\n assert obj.nested is None\n\n\ndef test_field_default():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <test_model>\n </test_model>\n \"\"\"\n\n\n @pb.model(name='test_model')\n class TestModel:\n field = pb.field(default=None)\n obj = pb.from_xml(TestModel, xml)\n assert obj.field is None\n\n\ndef test_attribute_default():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <test_model>\n </test_model>\n \"\"\"\n\n\n @pb.model(name='test_model')\n class TestModel:\n attrib = pb.attr(default=None)\n obj = pb.from_xml(TestModel, xml)\n assert obj.attrib is None\n\n\ndef test_private_attributes():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <TestModel>\n <field1>value1</field1>\n <field2>value2</field2>\n </TestModel>\n \"\"\"\n\n\n @pb.model()\n class TestModel:\n _field1 = pb.field(name='field1')\n __field2 = pb.field(name='field2')\n obj = pb.from_xml(TestModel, xml)\n assert obj._field1 == 'value1'\n assert obj._TestModel__field2 == 'value2'\n\n\ndef test_dict_deserialization():\n\n\n @pb.model\n class Nested:\n fields = pb.as_list(pb.field())\n\n\n @pb.model\n class TestModel:\n field = pb.field()\n nested = pb.as_list(pb.nested(Nested))\n data = {'field': 'value1', 'nested': [{'fields': ['value21', 'value22']\n }, {'fields': ['value31', 'value32']}]}\n obj = TestModel(**data)\n assert obj.field == 'value1'\n assert obj.nested == [Nested(fields=['value21', 'value22']), Nested(\n fields=['value31', 'value32'])]\n",
"<import token>\n\n\ndef test_root_deserialization():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <test_model>\n <element1>value1</element1>\n </test_model>\n \"\"\"\n\n\n @pb.model(name='test_model')\n class TestModel:\n element1 = pb.field()\n model = pb.from_xml(TestModel, xml)\n assert model.element1 == 'value1'\n\n\ndef test_attribute_deserialization():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <TestModel attrib1=\"value1\" attrib2=\"value2\"/>\n \"\"\"\n\n\n @pb.model\n class TestModel:\n attrib1 = pb.attr()\n attrib2 = pb.attr()\n model = pb.from_xml(TestModel, xml)\n assert model.attrib1 == 'value1'\n assert model.attrib2 == 'value2'\n\n\ndef test_attribute_deserialization_with_name():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <TestModel attribute1=\"value1\" attribute2=\"value2\"/>\n \"\"\"\n\n\n @pb.model\n class TestModel:\n attrib1 = pb.attr(name='attribute1')\n attrib2 = pb.attr(name='attribute2')\n model = pb.from_xml(TestModel, xml)\n assert model.attrib1 == 'value1'\n assert model.attrib2 == 'value2'\n\n\ndef test_element_deserialization():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <TestModel>\n <element1>value1</element1>\n <element2>value2</element2>\n </TestModel>\n \"\"\"\n\n\n @pb.model\n class TestModel:\n element1 = pb.field()\n element2 = pb.field()\n model = pb.from_xml(TestModel, xml)\n assert model.element1 == 'value1'\n assert model.element2 == 'value2'\n\n\ndef test_element_deserialization_with_name():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <TestModel>\n <element1>value1</element1>\n <element2>value2</element2>\n </TestModel>\n \"\"\"\n\n\n @pb.model\n class TestModel:\n elem1 = pb.field(name='element1')\n elem2 = pb.field(name='element2')\n model = pb.from_xml(TestModel, xml)\n assert model.elem1 == 'value1'\n assert model.elem2 == 'value2'\n\n\n<function token>\n\n\ndef test_wrapper_deserialization_with_path():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <TestModel>\n <wrapper1>\n <wrapper2>\n <element1>value1</element1>\n </wrapper2>\n </wrapper1>\n </TestModel>\n \"\"\"\n\n\n @pb.model\n class TestModel:\n element1 = pb.wrap('wrapper1/wrapper2', pb.field())\n model = pb.from_xml(TestModel, xml)\n assert model.element1 == 'value1'\n\n\ndef test_inheritance_deserialization():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <TestRootModel>\n <TestBaseModel>\n <element1>value1</element1>\n </TestBaseModel>\n <TestExtendedModel>\n <element1>value2</element1>\n <element2>value3</element2>\n </TestExtendedModel>\n </TestRootModel>\n \"\"\"\n\n\n @pb.model\n class TestBaseModel:\n element1 = pb.field()\n\n\n @pb.model\n class TestExtendedModel(TestBaseModel):\n element2 = pb.field()\n\n\n @pb.model\n class TestRootModel:\n model1 = pb.nested(TestBaseModel)\n model2 = pb.nested(TestExtendedModel)\n model = pb.from_xml(TestRootModel, xml)\n assert model.model1.element1 == 'value1'\n assert model.model2.element1 == 'value2'\n assert model.model2.element2 == 'value3'\n\n\ndef test_nested_deserialization():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <TestModel>\n <NestedModel1>\n <NestedModel2>\n <element>value</element>\n </NestedModel2>\n </NestedModel1>\n </TestModel>\n \"\"\"\n\n\n @pb.model\n class NestedModel2:\n element = pb.field()\n\n\n @pb.model\n class NestedModel1:\n nested = pb.nested(NestedModel2)\n\n\n @pb.model\n class TestModel:\n nested = pb.nested(NestedModel1)\n model = pb.from_xml(TestModel, xml)\n assert model.nested.nested.element == 'value'\n\n\ndef test_element_list_deserialization():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <TestModel>\n <element1>value1</element1>\n <element1>value2</element1>\n </TestModel>\n \"\"\"\n\n\n @pb.model\n class TestModel:\n element1 = pb.as_list(pb.field())\n model = pb.from_xml(TestModel, xml)\n assert model.element1 == ['value1', 'value2']\n\n\ndef test_wrapper_list_deserialization():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <TestModel>\n <wrapper>\n <element>value1</element>\n </wrapper>\n <wrapper>\n <element>value2</element>\n </wrapper>\n </TestModel>\n \"\"\"\n\n\n @pb.model\n class TestModel:\n elements = pb.as_list(pb.wrap('wrapper', pb.field('element')))\n model = pb.from_xml(TestModel, xml)\n assert model.elements == ['value1', 'value2']\n\n\ndef test_nested_list_deserialization():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <TestModel>\n <NestedModel>\n <element>value1</element>\n </NestedModel>\n <NestedModel>\n <element>value2</element>\n </NestedModel>\n </TestModel>\n \"\"\"\n\n\n @pb.model\n class NestedModel:\n element = pb.field()\n\n\n @pb.model\n class TestModel:\n elements = pb.as_list(pb.nested(NestedModel))\n model = pb.from_xml(TestModel, xml)\n assert len(model.elements) == 2\n assert model.elements[0].element == 'value1'\n assert model.elements[1].element == 'value2'\n\n\ndef test_list_of_list_deserialization():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <TestModel>\n <element1>\n <element2>value1</element2>\n <element2>value2</element2>\n </element1>\n <element1>\n <element2>value3</element2>\n <element2>value4</element2>\n </element1>\n </TestModel>\n \"\"\"\n\n\n @pb.model\n class TestModel:\n elements = pb.as_list(pb.wrap('element1', pb.as_list(pb.field(\n 'element2'))))\n model = pb.from_xml(TestModel, xml)\n assert model.elements[0][0] == 'value1'\n assert model.elements[0][1] == 'value2'\n assert model.elements[1][0] == 'value3'\n assert model.elements[1][1] == 'value4'\n\n\ndef test_namespaces_deserialization():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <testns1:TestModel xmlns:testns1=\"http://www.test1.org\"\n xmlns:testns2=\"http://www.test2.org\"\n xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\"\n xsi:schemaLocation=\"http://www.test.com schema.xsd\">\n <element1>value1</element1>\n <testns1:element2>value2</testns1:element2>\n <testns2:element3>value3</testns2:element3>\n <testns1:element4 xmlns:testns2=\"http://www.test22.org\">\n <element5>value5</element5>\n <testns2:element6>value6</testns2:element6>\n </testns1:element4>\n </testns1:TestModel>\n \"\"\"\n\n\n @pb.model(ns='testns1', ns_map={'testns2': 'http://www.test2.org'})\n class TestModel:\n schema = pb.attribute('schemaLocation', ns='xsi')\n element1 = pb.field(ns='')\n element2 = pb.field()\n element3 = pb.field(ns='testns2')\n element5 = pb.wrap('element4', pb.field(ns=''))\n element6 = pb.wrap('element4', pb.field(ns='testns2'), ns_map={\n 'testns2': 'http://www.test22.org'})\n model = pb.from_xml(TestModel, xml, ns_map={'testns1':\n 'http://www.test1.org', 'xsi':\n 'http://www.w3.org/2001/XMLSchema-instance'})\n assert model.schema == 'http://www.test.com schema.xsd'\n assert model.element1 == 'value1'\n assert model.element2 == 'value2'\n assert model.element3 == 'value3'\n assert model.element5 == 'value5'\n assert model.element6 == 'value6'\n\n\ndef test_complex_xml_deserialization():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <envelope xmlns=\"http://www.test.org\"\n xmlns:doc=\"http://www.test1.org\"\n xmlns:data=\"http://www.test2.org\">\n <doc:user name=\"Alexey\" surname=\"Ivanov\" age=\"26\">\n\n <doc:contacts>\n <doc:phone>+79204563539</doc:phone>\n <doc:email>[email protected]</doc:email>\n <doc:email>[email protected]</doc:email>\n </doc:contacts>\n\n <doc:documents>\n <doc:passport series=\"3127\" number=\"836815\"/>\n </doc:documents>\n\n <data:occupations xmlns:data=\"http://www.test22.org\">\n <data:occupation title=\"yandex\">\n <data:address>Moscow</data:address>\n <data:employees>8854</data:employees>\n </data:occupation>\n <data:occupation title=\"skbkontur\">\n <data:address>Yekaterinburg</data:address>\n <data:employees>7742</data:employees>\n </data:occupation>\n </data:occupations>\n\n </doc:user>\n </envelope>\n \"\"\"\n\n\n @pb.model(name='occupation', ns='data', ns_map={'data':\n 'http://www.test22.org'})\n class Occupation:\n title = pb.attr()\n address = pb.field()\n employees = pb.field(converter=int)\n\n\n @pb.model(name='user', ns='doc', ns_map={'doc': 'http://www.test1.org'})\n class User:\n name = pb.attr()\n surname = pb.attr()\n age = pb.attr(converter=int)\n phone = pb.wrap('contacts', pb.field())\n emails = pb.wrap('contacts', pb.as_list(pb.field(name='email')))\n passport_series = pb.wrap('documents/passport', pb.attr('series'))\n passport_number = pb.wrap('documents/passport', pb.attr('number'))\n occupations = pb.wrap('occupations', pb.lst(pb.nested(Occupation)),\n ns='data', ns_map={'data': 'http://www.test22.org'})\n citizenship = pb.field(default='RU')\n xml = et.fromstring(xml)\n user = pb.from_xml(User, xml)\n assert user.name == 'Alexey'\n assert user.surname == 'Ivanov'\n assert user.age == 26\n assert user.phone == '+79204563539'\n assert user.emails == ['[email protected]', '[email protected]']\n assert user.passport_series == '3127'\n assert user.passport_number == '836815'\n assert len(user.occupations) == 2\n assert user.occupations[0].title == 'yandex'\n assert user.occupations[0].address == 'Moscow'\n assert user.occupations[0].employees == 8854\n assert user.occupations[1].title == 'skbkontur'\n assert user.occupations[1].address == 'Yekaterinburg'\n assert user.occupations[1].employees == 7742\n assert user.citizenship == 'RU'\n\n\n<function token>\n\n\ndef test_nested_default():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <test_model>\n </test_model>\n \"\"\"\n\n\n @pb.model(name='nested_model')\n class NestedModel:\n field = pb.field()\n\n\n @pb.model(name='test_model')\n class TestModel:\n nested = pb.nested(NestedModel, default=None)\n obj = pb.from_xml(TestModel, xml)\n assert obj.nested is None\n\n\ndef test_field_default():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <test_model>\n </test_model>\n \"\"\"\n\n\n @pb.model(name='test_model')\n class TestModel:\n field = pb.field(default=None)\n obj = pb.from_xml(TestModel, xml)\n assert obj.field is None\n\n\ndef test_attribute_default():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <test_model>\n </test_model>\n \"\"\"\n\n\n @pb.model(name='test_model')\n class TestModel:\n attrib = pb.attr(default=None)\n obj = pb.from_xml(TestModel, xml)\n assert obj.attrib is None\n\n\ndef test_private_attributes():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <TestModel>\n <field1>value1</field1>\n <field2>value2</field2>\n </TestModel>\n \"\"\"\n\n\n @pb.model()\n class TestModel:\n _field1 = pb.field(name='field1')\n __field2 = pb.field(name='field2')\n obj = pb.from_xml(TestModel, xml)\n assert obj._field1 == 'value1'\n assert obj._TestModel__field2 == 'value2'\n\n\ndef test_dict_deserialization():\n\n\n @pb.model\n class Nested:\n fields = pb.as_list(pb.field())\n\n\n @pb.model\n class TestModel:\n field = pb.field()\n nested = pb.as_list(pb.nested(Nested))\n data = {'field': 'value1', 'nested': [{'fields': ['value21', 'value22']\n }, {'fields': ['value31', 'value32']}]}\n obj = TestModel(**data)\n assert obj.field == 'value1'\n assert obj.nested == [Nested(fields=['value21', 'value22']), Nested(\n fields=['value31', 'value32'])]\n",
"<import token>\n\n\ndef test_root_deserialization():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <test_model>\n <element1>value1</element1>\n </test_model>\n \"\"\"\n\n\n @pb.model(name='test_model')\n class TestModel:\n element1 = pb.field()\n model = pb.from_xml(TestModel, xml)\n assert model.element1 == 'value1'\n\n\ndef test_attribute_deserialization():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <TestModel attrib1=\"value1\" attrib2=\"value2\"/>\n \"\"\"\n\n\n @pb.model\n class TestModel:\n attrib1 = pb.attr()\n attrib2 = pb.attr()\n model = pb.from_xml(TestModel, xml)\n assert model.attrib1 == 'value1'\n assert model.attrib2 == 'value2'\n\n\ndef test_attribute_deserialization_with_name():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <TestModel attribute1=\"value1\" attribute2=\"value2\"/>\n \"\"\"\n\n\n @pb.model\n class TestModel:\n attrib1 = pb.attr(name='attribute1')\n attrib2 = pb.attr(name='attribute2')\n model = pb.from_xml(TestModel, xml)\n assert model.attrib1 == 'value1'\n assert model.attrib2 == 'value2'\n\n\n<function token>\n\n\ndef test_element_deserialization_with_name():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <TestModel>\n <element1>value1</element1>\n <element2>value2</element2>\n </TestModel>\n \"\"\"\n\n\n @pb.model\n class TestModel:\n elem1 = pb.field(name='element1')\n elem2 = pb.field(name='element2')\n model = pb.from_xml(TestModel, xml)\n assert model.elem1 == 'value1'\n assert model.elem2 == 'value2'\n\n\n<function token>\n\n\ndef test_wrapper_deserialization_with_path():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <TestModel>\n <wrapper1>\n <wrapper2>\n <element1>value1</element1>\n </wrapper2>\n </wrapper1>\n </TestModel>\n \"\"\"\n\n\n @pb.model\n class TestModel:\n element1 = pb.wrap('wrapper1/wrapper2', pb.field())\n model = pb.from_xml(TestModel, xml)\n assert model.element1 == 'value1'\n\n\ndef test_inheritance_deserialization():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <TestRootModel>\n <TestBaseModel>\n <element1>value1</element1>\n </TestBaseModel>\n <TestExtendedModel>\n <element1>value2</element1>\n <element2>value3</element2>\n </TestExtendedModel>\n </TestRootModel>\n \"\"\"\n\n\n @pb.model\n class TestBaseModel:\n element1 = pb.field()\n\n\n @pb.model\n class TestExtendedModel(TestBaseModel):\n element2 = pb.field()\n\n\n @pb.model\n class TestRootModel:\n model1 = pb.nested(TestBaseModel)\n model2 = pb.nested(TestExtendedModel)\n model = pb.from_xml(TestRootModel, xml)\n assert model.model1.element1 == 'value1'\n assert model.model2.element1 == 'value2'\n assert model.model2.element2 == 'value3'\n\n\ndef test_nested_deserialization():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <TestModel>\n <NestedModel1>\n <NestedModel2>\n <element>value</element>\n </NestedModel2>\n </NestedModel1>\n </TestModel>\n \"\"\"\n\n\n @pb.model\n class NestedModel2:\n element = pb.field()\n\n\n @pb.model\n class NestedModel1:\n nested = pb.nested(NestedModel2)\n\n\n @pb.model\n class TestModel:\n nested = pb.nested(NestedModel1)\n model = pb.from_xml(TestModel, xml)\n assert model.nested.nested.element == 'value'\n\n\ndef test_element_list_deserialization():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <TestModel>\n <element1>value1</element1>\n <element1>value2</element1>\n </TestModel>\n \"\"\"\n\n\n @pb.model\n class TestModel:\n element1 = pb.as_list(pb.field())\n model = pb.from_xml(TestModel, xml)\n assert model.element1 == ['value1', 'value2']\n\n\ndef test_wrapper_list_deserialization():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <TestModel>\n <wrapper>\n <element>value1</element>\n </wrapper>\n <wrapper>\n <element>value2</element>\n </wrapper>\n </TestModel>\n \"\"\"\n\n\n @pb.model\n class TestModel:\n elements = pb.as_list(pb.wrap('wrapper', pb.field('element')))\n model = pb.from_xml(TestModel, xml)\n assert model.elements == ['value1', 'value2']\n\n\ndef test_nested_list_deserialization():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <TestModel>\n <NestedModel>\n <element>value1</element>\n </NestedModel>\n <NestedModel>\n <element>value2</element>\n </NestedModel>\n </TestModel>\n \"\"\"\n\n\n @pb.model\n class NestedModel:\n element = pb.field()\n\n\n @pb.model\n class TestModel:\n elements = pb.as_list(pb.nested(NestedModel))\n model = pb.from_xml(TestModel, xml)\n assert len(model.elements) == 2\n assert model.elements[0].element == 'value1'\n assert model.elements[1].element == 'value2'\n\n\ndef test_list_of_list_deserialization():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <TestModel>\n <element1>\n <element2>value1</element2>\n <element2>value2</element2>\n </element1>\n <element1>\n <element2>value3</element2>\n <element2>value4</element2>\n </element1>\n </TestModel>\n \"\"\"\n\n\n @pb.model\n class TestModel:\n elements = pb.as_list(pb.wrap('element1', pb.as_list(pb.field(\n 'element2'))))\n model = pb.from_xml(TestModel, xml)\n assert model.elements[0][0] == 'value1'\n assert model.elements[0][1] == 'value2'\n assert model.elements[1][0] == 'value3'\n assert model.elements[1][1] == 'value4'\n\n\ndef test_namespaces_deserialization():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <testns1:TestModel xmlns:testns1=\"http://www.test1.org\"\n xmlns:testns2=\"http://www.test2.org\"\n xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\"\n xsi:schemaLocation=\"http://www.test.com schema.xsd\">\n <element1>value1</element1>\n <testns1:element2>value2</testns1:element2>\n <testns2:element3>value3</testns2:element3>\n <testns1:element4 xmlns:testns2=\"http://www.test22.org\">\n <element5>value5</element5>\n <testns2:element6>value6</testns2:element6>\n </testns1:element4>\n </testns1:TestModel>\n \"\"\"\n\n\n @pb.model(ns='testns1', ns_map={'testns2': 'http://www.test2.org'})\n class TestModel:\n schema = pb.attribute('schemaLocation', ns='xsi')\n element1 = pb.field(ns='')\n element2 = pb.field()\n element3 = pb.field(ns='testns2')\n element5 = pb.wrap('element4', pb.field(ns=''))\n element6 = pb.wrap('element4', pb.field(ns='testns2'), ns_map={\n 'testns2': 'http://www.test22.org'})\n model = pb.from_xml(TestModel, xml, ns_map={'testns1':\n 'http://www.test1.org', 'xsi':\n 'http://www.w3.org/2001/XMLSchema-instance'})\n assert model.schema == 'http://www.test.com schema.xsd'\n assert model.element1 == 'value1'\n assert model.element2 == 'value2'\n assert model.element3 == 'value3'\n assert model.element5 == 'value5'\n assert model.element6 == 'value6'\n\n\ndef test_complex_xml_deserialization():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <envelope xmlns=\"http://www.test.org\"\n xmlns:doc=\"http://www.test1.org\"\n xmlns:data=\"http://www.test2.org\">\n <doc:user name=\"Alexey\" surname=\"Ivanov\" age=\"26\">\n\n <doc:contacts>\n <doc:phone>+79204563539</doc:phone>\n <doc:email>[email protected]</doc:email>\n <doc:email>[email protected]</doc:email>\n </doc:contacts>\n\n <doc:documents>\n <doc:passport series=\"3127\" number=\"836815\"/>\n </doc:documents>\n\n <data:occupations xmlns:data=\"http://www.test22.org\">\n <data:occupation title=\"yandex\">\n <data:address>Moscow</data:address>\n <data:employees>8854</data:employees>\n </data:occupation>\n <data:occupation title=\"skbkontur\">\n <data:address>Yekaterinburg</data:address>\n <data:employees>7742</data:employees>\n </data:occupation>\n </data:occupations>\n\n </doc:user>\n </envelope>\n \"\"\"\n\n\n @pb.model(name='occupation', ns='data', ns_map={'data':\n 'http://www.test22.org'})\n class Occupation:\n title = pb.attr()\n address = pb.field()\n employees = pb.field(converter=int)\n\n\n @pb.model(name='user', ns='doc', ns_map={'doc': 'http://www.test1.org'})\n class User:\n name = pb.attr()\n surname = pb.attr()\n age = pb.attr(converter=int)\n phone = pb.wrap('contacts', pb.field())\n emails = pb.wrap('contacts', pb.as_list(pb.field(name='email')))\n passport_series = pb.wrap('documents/passport', pb.attr('series'))\n passport_number = pb.wrap('documents/passport', pb.attr('number'))\n occupations = pb.wrap('occupations', pb.lst(pb.nested(Occupation)),\n ns='data', ns_map={'data': 'http://www.test22.org'})\n citizenship = pb.field(default='RU')\n xml = et.fromstring(xml)\n user = pb.from_xml(User, xml)\n assert user.name == 'Alexey'\n assert user.surname == 'Ivanov'\n assert user.age == 26\n assert user.phone == '+79204563539'\n assert user.emails == ['[email protected]', '[email protected]']\n assert user.passport_series == '3127'\n assert user.passport_number == '836815'\n assert len(user.occupations) == 2\n assert user.occupations[0].title == 'yandex'\n assert user.occupations[0].address == 'Moscow'\n assert user.occupations[0].employees == 8854\n assert user.occupations[1].title == 'skbkontur'\n assert user.occupations[1].address == 'Yekaterinburg'\n assert user.occupations[1].employees == 7742\n assert user.citizenship == 'RU'\n\n\n<function token>\n\n\ndef test_nested_default():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <test_model>\n </test_model>\n \"\"\"\n\n\n @pb.model(name='nested_model')\n class NestedModel:\n field = pb.field()\n\n\n @pb.model(name='test_model')\n class TestModel:\n nested = pb.nested(NestedModel, default=None)\n obj = pb.from_xml(TestModel, xml)\n assert obj.nested is None\n\n\ndef test_field_default():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <test_model>\n </test_model>\n \"\"\"\n\n\n @pb.model(name='test_model')\n class TestModel:\n field = pb.field(default=None)\n obj = pb.from_xml(TestModel, xml)\n assert obj.field is None\n\n\ndef test_attribute_default():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <test_model>\n </test_model>\n \"\"\"\n\n\n @pb.model(name='test_model')\n class TestModel:\n attrib = pb.attr(default=None)\n obj = pb.from_xml(TestModel, xml)\n assert obj.attrib is None\n\n\ndef test_private_attributes():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <TestModel>\n <field1>value1</field1>\n <field2>value2</field2>\n </TestModel>\n \"\"\"\n\n\n @pb.model()\n class TestModel:\n _field1 = pb.field(name='field1')\n __field2 = pb.field(name='field2')\n obj = pb.from_xml(TestModel, xml)\n assert obj._field1 == 'value1'\n assert obj._TestModel__field2 == 'value2'\n\n\ndef test_dict_deserialization():\n\n\n @pb.model\n class Nested:\n fields = pb.as_list(pb.field())\n\n\n @pb.model\n class TestModel:\n field = pb.field()\n nested = pb.as_list(pb.nested(Nested))\n data = {'field': 'value1', 'nested': [{'fields': ['value21', 'value22']\n }, {'fields': ['value31', 'value32']}]}\n obj = TestModel(**data)\n assert obj.field == 'value1'\n assert obj.nested == [Nested(fields=['value21', 'value22']), Nested(\n fields=['value31', 'value32'])]\n",
"<import token>\n\n\ndef test_root_deserialization():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <test_model>\n <element1>value1</element1>\n </test_model>\n \"\"\"\n\n\n @pb.model(name='test_model')\n class TestModel:\n element1 = pb.field()\n model = pb.from_xml(TestModel, xml)\n assert model.element1 == 'value1'\n\n\ndef test_attribute_deserialization():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <TestModel attrib1=\"value1\" attrib2=\"value2\"/>\n \"\"\"\n\n\n @pb.model\n class TestModel:\n attrib1 = pb.attr()\n attrib2 = pb.attr()\n model = pb.from_xml(TestModel, xml)\n assert model.attrib1 == 'value1'\n assert model.attrib2 == 'value2'\n\n\ndef test_attribute_deserialization_with_name():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <TestModel attribute1=\"value1\" attribute2=\"value2\"/>\n \"\"\"\n\n\n @pb.model\n class TestModel:\n attrib1 = pb.attr(name='attribute1')\n attrib2 = pb.attr(name='attribute2')\n model = pb.from_xml(TestModel, xml)\n assert model.attrib1 == 'value1'\n assert model.attrib2 == 'value2'\n\n\n<function token>\n\n\ndef test_element_deserialization_with_name():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <TestModel>\n <element1>value1</element1>\n <element2>value2</element2>\n </TestModel>\n \"\"\"\n\n\n @pb.model\n class TestModel:\n elem1 = pb.field(name='element1')\n elem2 = pb.field(name='element2')\n model = pb.from_xml(TestModel, xml)\n assert model.elem1 == 'value1'\n assert model.elem2 == 'value2'\n\n\n<function token>\n\n\ndef test_wrapper_deserialization_with_path():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <TestModel>\n <wrapper1>\n <wrapper2>\n <element1>value1</element1>\n </wrapper2>\n </wrapper1>\n </TestModel>\n \"\"\"\n\n\n @pb.model\n class TestModel:\n element1 = pb.wrap('wrapper1/wrapper2', pb.field())\n model = pb.from_xml(TestModel, xml)\n assert model.element1 == 'value1'\n\n\ndef test_inheritance_deserialization():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <TestRootModel>\n <TestBaseModel>\n <element1>value1</element1>\n </TestBaseModel>\n <TestExtendedModel>\n <element1>value2</element1>\n <element2>value3</element2>\n </TestExtendedModel>\n </TestRootModel>\n \"\"\"\n\n\n @pb.model\n class TestBaseModel:\n element1 = pb.field()\n\n\n @pb.model\n class TestExtendedModel(TestBaseModel):\n element2 = pb.field()\n\n\n @pb.model\n class TestRootModel:\n model1 = pb.nested(TestBaseModel)\n model2 = pb.nested(TestExtendedModel)\n model = pb.from_xml(TestRootModel, xml)\n assert model.model1.element1 == 'value1'\n assert model.model2.element1 == 'value2'\n assert model.model2.element2 == 'value3'\n\n\ndef test_nested_deserialization():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <TestModel>\n <NestedModel1>\n <NestedModel2>\n <element>value</element>\n </NestedModel2>\n </NestedModel1>\n </TestModel>\n \"\"\"\n\n\n @pb.model\n class NestedModel2:\n element = pb.field()\n\n\n @pb.model\n class NestedModel1:\n nested = pb.nested(NestedModel2)\n\n\n @pb.model\n class TestModel:\n nested = pb.nested(NestedModel1)\n model = pb.from_xml(TestModel, xml)\n assert model.nested.nested.element == 'value'\n\n\ndef test_element_list_deserialization():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <TestModel>\n <element1>value1</element1>\n <element1>value2</element1>\n </TestModel>\n \"\"\"\n\n\n @pb.model\n class TestModel:\n element1 = pb.as_list(pb.field())\n model = pb.from_xml(TestModel, xml)\n assert model.element1 == ['value1', 'value2']\n\n\ndef test_wrapper_list_deserialization():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <TestModel>\n <wrapper>\n <element>value1</element>\n </wrapper>\n <wrapper>\n <element>value2</element>\n </wrapper>\n </TestModel>\n \"\"\"\n\n\n @pb.model\n class TestModel:\n elements = pb.as_list(pb.wrap('wrapper', pb.field('element')))\n model = pb.from_xml(TestModel, xml)\n assert model.elements == ['value1', 'value2']\n\n\n<function token>\n\n\ndef test_list_of_list_deserialization():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <TestModel>\n <element1>\n <element2>value1</element2>\n <element2>value2</element2>\n </element1>\n <element1>\n <element2>value3</element2>\n <element2>value4</element2>\n </element1>\n </TestModel>\n \"\"\"\n\n\n @pb.model\n class TestModel:\n elements = pb.as_list(pb.wrap('element1', pb.as_list(pb.field(\n 'element2'))))\n model = pb.from_xml(TestModel, xml)\n assert model.elements[0][0] == 'value1'\n assert model.elements[0][1] == 'value2'\n assert model.elements[1][0] == 'value3'\n assert model.elements[1][1] == 'value4'\n\n\ndef test_namespaces_deserialization():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <testns1:TestModel xmlns:testns1=\"http://www.test1.org\"\n xmlns:testns2=\"http://www.test2.org\"\n xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\"\n xsi:schemaLocation=\"http://www.test.com schema.xsd\">\n <element1>value1</element1>\n <testns1:element2>value2</testns1:element2>\n <testns2:element3>value3</testns2:element3>\n <testns1:element4 xmlns:testns2=\"http://www.test22.org\">\n <element5>value5</element5>\n <testns2:element6>value6</testns2:element6>\n </testns1:element4>\n </testns1:TestModel>\n \"\"\"\n\n\n @pb.model(ns='testns1', ns_map={'testns2': 'http://www.test2.org'})\n class TestModel:\n schema = pb.attribute('schemaLocation', ns='xsi')\n element1 = pb.field(ns='')\n element2 = pb.field()\n element3 = pb.field(ns='testns2')\n element5 = pb.wrap('element4', pb.field(ns=''))\n element6 = pb.wrap('element4', pb.field(ns='testns2'), ns_map={\n 'testns2': 'http://www.test22.org'})\n model = pb.from_xml(TestModel, xml, ns_map={'testns1':\n 'http://www.test1.org', 'xsi':\n 'http://www.w3.org/2001/XMLSchema-instance'})\n assert model.schema == 'http://www.test.com schema.xsd'\n assert model.element1 == 'value1'\n assert model.element2 == 'value2'\n assert model.element3 == 'value3'\n assert model.element5 == 'value5'\n assert model.element6 == 'value6'\n\n\ndef test_complex_xml_deserialization():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <envelope xmlns=\"http://www.test.org\"\n xmlns:doc=\"http://www.test1.org\"\n xmlns:data=\"http://www.test2.org\">\n <doc:user name=\"Alexey\" surname=\"Ivanov\" age=\"26\">\n\n <doc:contacts>\n <doc:phone>+79204563539</doc:phone>\n <doc:email>[email protected]</doc:email>\n <doc:email>[email protected]</doc:email>\n </doc:contacts>\n\n <doc:documents>\n <doc:passport series=\"3127\" number=\"836815\"/>\n </doc:documents>\n\n <data:occupations xmlns:data=\"http://www.test22.org\">\n <data:occupation title=\"yandex\">\n <data:address>Moscow</data:address>\n <data:employees>8854</data:employees>\n </data:occupation>\n <data:occupation title=\"skbkontur\">\n <data:address>Yekaterinburg</data:address>\n <data:employees>7742</data:employees>\n </data:occupation>\n </data:occupations>\n\n </doc:user>\n </envelope>\n \"\"\"\n\n\n @pb.model(name='occupation', ns='data', ns_map={'data':\n 'http://www.test22.org'})\n class Occupation:\n title = pb.attr()\n address = pb.field()\n employees = pb.field(converter=int)\n\n\n @pb.model(name='user', ns='doc', ns_map={'doc': 'http://www.test1.org'})\n class User:\n name = pb.attr()\n surname = pb.attr()\n age = pb.attr(converter=int)\n phone = pb.wrap('contacts', pb.field())\n emails = pb.wrap('contacts', pb.as_list(pb.field(name='email')))\n passport_series = pb.wrap('documents/passport', pb.attr('series'))\n passport_number = pb.wrap('documents/passport', pb.attr('number'))\n occupations = pb.wrap('occupations', pb.lst(pb.nested(Occupation)),\n ns='data', ns_map={'data': 'http://www.test22.org'})\n citizenship = pb.field(default='RU')\n xml = et.fromstring(xml)\n user = pb.from_xml(User, xml)\n assert user.name == 'Alexey'\n assert user.surname == 'Ivanov'\n assert user.age == 26\n assert user.phone == '+79204563539'\n assert user.emails == ['[email protected]', '[email protected]']\n assert user.passport_series == '3127'\n assert user.passport_number == '836815'\n assert len(user.occupations) == 2\n assert user.occupations[0].title == 'yandex'\n assert user.occupations[0].address == 'Moscow'\n assert user.occupations[0].employees == 8854\n assert user.occupations[1].title == 'skbkontur'\n assert user.occupations[1].address == 'Yekaterinburg'\n assert user.occupations[1].employees == 7742\n assert user.citizenship == 'RU'\n\n\n<function token>\n\n\ndef test_nested_default():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <test_model>\n </test_model>\n \"\"\"\n\n\n @pb.model(name='nested_model')\n class NestedModel:\n field = pb.field()\n\n\n @pb.model(name='test_model')\n class TestModel:\n nested = pb.nested(NestedModel, default=None)\n obj = pb.from_xml(TestModel, xml)\n assert obj.nested is None\n\n\ndef test_field_default():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <test_model>\n </test_model>\n \"\"\"\n\n\n @pb.model(name='test_model')\n class TestModel:\n field = pb.field(default=None)\n obj = pb.from_xml(TestModel, xml)\n assert obj.field is None\n\n\ndef test_attribute_default():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <test_model>\n </test_model>\n \"\"\"\n\n\n @pb.model(name='test_model')\n class TestModel:\n attrib = pb.attr(default=None)\n obj = pb.from_xml(TestModel, xml)\n assert obj.attrib is None\n\n\ndef test_private_attributes():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <TestModel>\n <field1>value1</field1>\n <field2>value2</field2>\n </TestModel>\n \"\"\"\n\n\n @pb.model()\n class TestModel:\n _field1 = pb.field(name='field1')\n __field2 = pb.field(name='field2')\n obj = pb.from_xml(TestModel, xml)\n assert obj._field1 == 'value1'\n assert obj._TestModel__field2 == 'value2'\n\n\ndef test_dict_deserialization():\n\n\n @pb.model\n class Nested:\n fields = pb.as_list(pb.field())\n\n\n @pb.model\n class TestModel:\n field = pb.field()\n nested = pb.as_list(pb.nested(Nested))\n data = {'field': 'value1', 'nested': [{'fields': ['value21', 'value22']\n }, {'fields': ['value31', 'value32']}]}\n obj = TestModel(**data)\n assert obj.field == 'value1'\n assert obj.nested == [Nested(fields=['value21', 'value22']), Nested(\n fields=['value31', 'value32'])]\n",
"<import token>\n\n\ndef test_root_deserialization():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <test_model>\n <element1>value1</element1>\n </test_model>\n \"\"\"\n\n\n @pb.model(name='test_model')\n class TestModel:\n element1 = pb.field()\n model = pb.from_xml(TestModel, xml)\n assert model.element1 == 'value1'\n\n\ndef test_attribute_deserialization():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <TestModel attrib1=\"value1\" attrib2=\"value2\"/>\n \"\"\"\n\n\n @pb.model\n class TestModel:\n attrib1 = pb.attr()\n attrib2 = pb.attr()\n model = pb.from_xml(TestModel, xml)\n assert model.attrib1 == 'value1'\n assert model.attrib2 == 'value2'\n\n\n<function token>\n<function token>\n\n\ndef test_element_deserialization_with_name():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <TestModel>\n <element1>value1</element1>\n <element2>value2</element2>\n </TestModel>\n \"\"\"\n\n\n @pb.model\n class TestModel:\n elem1 = pb.field(name='element1')\n elem2 = pb.field(name='element2')\n model = pb.from_xml(TestModel, xml)\n assert model.elem1 == 'value1'\n assert model.elem2 == 'value2'\n\n\n<function token>\n\n\ndef test_wrapper_deserialization_with_path():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <TestModel>\n <wrapper1>\n <wrapper2>\n <element1>value1</element1>\n </wrapper2>\n </wrapper1>\n </TestModel>\n \"\"\"\n\n\n @pb.model\n class TestModel:\n element1 = pb.wrap('wrapper1/wrapper2', pb.field())\n model = pb.from_xml(TestModel, xml)\n assert model.element1 == 'value1'\n\n\ndef test_inheritance_deserialization():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <TestRootModel>\n <TestBaseModel>\n <element1>value1</element1>\n </TestBaseModel>\n <TestExtendedModel>\n <element1>value2</element1>\n <element2>value3</element2>\n </TestExtendedModel>\n </TestRootModel>\n \"\"\"\n\n\n @pb.model\n class TestBaseModel:\n element1 = pb.field()\n\n\n @pb.model\n class TestExtendedModel(TestBaseModel):\n element2 = pb.field()\n\n\n @pb.model\n class TestRootModel:\n model1 = pb.nested(TestBaseModel)\n model2 = pb.nested(TestExtendedModel)\n model = pb.from_xml(TestRootModel, xml)\n assert model.model1.element1 == 'value1'\n assert model.model2.element1 == 'value2'\n assert model.model2.element2 == 'value3'\n\n\ndef test_nested_deserialization():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <TestModel>\n <NestedModel1>\n <NestedModel2>\n <element>value</element>\n </NestedModel2>\n </NestedModel1>\n </TestModel>\n \"\"\"\n\n\n @pb.model\n class NestedModel2:\n element = pb.field()\n\n\n @pb.model\n class NestedModel1:\n nested = pb.nested(NestedModel2)\n\n\n @pb.model\n class TestModel:\n nested = pb.nested(NestedModel1)\n model = pb.from_xml(TestModel, xml)\n assert model.nested.nested.element == 'value'\n\n\ndef test_element_list_deserialization():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <TestModel>\n <element1>value1</element1>\n <element1>value2</element1>\n </TestModel>\n \"\"\"\n\n\n @pb.model\n class TestModel:\n element1 = pb.as_list(pb.field())\n model = pb.from_xml(TestModel, xml)\n assert model.element1 == ['value1', 'value2']\n\n\ndef test_wrapper_list_deserialization():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <TestModel>\n <wrapper>\n <element>value1</element>\n </wrapper>\n <wrapper>\n <element>value2</element>\n </wrapper>\n </TestModel>\n \"\"\"\n\n\n @pb.model\n class TestModel:\n elements = pb.as_list(pb.wrap('wrapper', pb.field('element')))\n model = pb.from_xml(TestModel, xml)\n assert model.elements == ['value1', 'value2']\n\n\n<function token>\n\n\ndef test_list_of_list_deserialization():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <TestModel>\n <element1>\n <element2>value1</element2>\n <element2>value2</element2>\n </element1>\n <element1>\n <element2>value3</element2>\n <element2>value4</element2>\n </element1>\n </TestModel>\n \"\"\"\n\n\n @pb.model\n class TestModel:\n elements = pb.as_list(pb.wrap('element1', pb.as_list(pb.field(\n 'element2'))))\n model = pb.from_xml(TestModel, xml)\n assert model.elements[0][0] == 'value1'\n assert model.elements[0][1] == 'value2'\n assert model.elements[1][0] == 'value3'\n assert model.elements[1][1] == 'value4'\n\n\ndef test_namespaces_deserialization():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <testns1:TestModel xmlns:testns1=\"http://www.test1.org\"\n xmlns:testns2=\"http://www.test2.org\"\n xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\"\n xsi:schemaLocation=\"http://www.test.com schema.xsd\">\n <element1>value1</element1>\n <testns1:element2>value2</testns1:element2>\n <testns2:element3>value3</testns2:element3>\n <testns1:element4 xmlns:testns2=\"http://www.test22.org\">\n <element5>value5</element5>\n <testns2:element6>value6</testns2:element6>\n </testns1:element4>\n </testns1:TestModel>\n \"\"\"\n\n\n @pb.model(ns='testns1', ns_map={'testns2': 'http://www.test2.org'})\n class TestModel:\n schema = pb.attribute('schemaLocation', ns='xsi')\n element1 = pb.field(ns='')\n element2 = pb.field()\n element3 = pb.field(ns='testns2')\n element5 = pb.wrap('element4', pb.field(ns=''))\n element6 = pb.wrap('element4', pb.field(ns='testns2'), ns_map={\n 'testns2': 'http://www.test22.org'})\n model = pb.from_xml(TestModel, xml, ns_map={'testns1':\n 'http://www.test1.org', 'xsi':\n 'http://www.w3.org/2001/XMLSchema-instance'})\n assert model.schema == 'http://www.test.com schema.xsd'\n assert model.element1 == 'value1'\n assert model.element2 == 'value2'\n assert model.element3 == 'value3'\n assert model.element5 == 'value5'\n assert model.element6 == 'value6'\n\n\ndef test_complex_xml_deserialization():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <envelope xmlns=\"http://www.test.org\"\n xmlns:doc=\"http://www.test1.org\"\n xmlns:data=\"http://www.test2.org\">\n <doc:user name=\"Alexey\" surname=\"Ivanov\" age=\"26\">\n\n <doc:contacts>\n <doc:phone>+79204563539</doc:phone>\n <doc:email>[email protected]</doc:email>\n <doc:email>[email protected]</doc:email>\n </doc:contacts>\n\n <doc:documents>\n <doc:passport series=\"3127\" number=\"836815\"/>\n </doc:documents>\n\n <data:occupations xmlns:data=\"http://www.test22.org\">\n <data:occupation title=\"yandex\">\n <data:address>Moscow</data:address>\n <data:employees>8854</data:employees>\n </data:occupation>\n <data:occupation title=\"skbkontur\">\n <data:address>Yekaterinburg</data:address>\n <data:employees>7742</data:employees>\n </data:occupation>\n </data:occupations>\n\n </doc:user>\n </envelope>\n \"\"\"\n\n\n @pb.model(name='occupation', ns='data', ns_map={'data':\n 'http://www.test22.org'})\n class Occupation:\n title = pb.attr()\n address = pb.field()\n employees = pb.field(converter=int)\n\n\n @pb.model(name='user', ns='doc', ns_map={'doc': 'http://www.test1.org'})\n class User:\n name = pb.attr()\n surname = pb.attr()\n age = pb.attr(converter=int)\n phone = pb.wrap('contacts', pb.field())\n emails = pb.wrap('contacts', pb.as_list(pb.field(name='email')))\n passport_series = pb.wrap('documents/passport', pb.attr('series'))\n passport_number = pb.wrap('documents/passport', pb.attr('number'))\n occupations = pb.wrap('occupations', pb.lst(pb.nested(Occupation)),\n ns='data', ns_map={'data': 'http://www.test22.org'})\n citizenship = pb.field(default='RU')\n xml = et.fromstring(xml)\n user = pb.from_xml(User, xml)\n assert user.name == 'Alexey'\n assert user.surname == 'Ivanov'\n assert user.age == 26\n assert user.phone == '+79204563539'\n assert user.emails == ['[email protected]', '[email protected]']\n assert user.passport_series == '3127'\n assert user.passport_number == '836815'\n assert len(user.occupations) == 2\n assert user.occupations[0].title == 'yandex'\n assert user.occupations[0].address == 'Moscow'\n assert user.occupations[0].employees == 8854\n assert user.occupations[1].title == 'skbkontur'\n assert user.occupations[1].address == 'Yekaterinburg'\n assert user.occupations[1].employees == 7742\n assert user.citizenship == 'RU'\n\n\n<function token>\n\n\ndef test_nested_default():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <test_model>\n </test_model>\n \"\"\"\n\n\n @pb.model(name='nested_model')\n class NestedModel:\n field = pb.field()\n\n\n @pb.model(name='test_model')\n class TestModel:\n nested = pb.nested(NestedModel, default=None)\n obj = pb.from_xml(TestModel, xml)\n assert obj.nested is None\n\n\ndef test_field_default():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <test_model>\n </test_model>\n \"\"\"\n\n\n @pb.model(name='test_model')\n class TestModel:\n field = pb.field(default=None)\n obj = pb.from_xml(TestModel, xml)\n assert obj.field is None\n\n\ndef test_attribute_default():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <test_model>\n </test_model>\n \"\"\"\n\n\n @pb.model(name='test_model')\n class TestModel:\n attrib = pb.attr(default=None)\n obj = pb.from_xml(TestModel, xml)\n assert obj.attrib is None\n\n\ndef test_private_attributes():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <TestModel>\n <field1>value1</field1>\n <field2>value2</field2>\n </TestModel>\n \"\"\"\n\n\n @pb.model()\n class TestModel:\n _field1 = pb.field(name='field1')\n __field2 = pb.field(name='field2')\n obj = pb.from_xml(TestModel, xml)\n assert obj._field1 == 'value1'\n assert obj._TestModel__field2 == 'value2'\n\n\ndef test_dict_deserialization():\n\n\n @pb.model\n class Nested:\n fields = pb.as_list(pb.field())\n\n\n @pb.model\n class TestModel:\n field = pb.field()\n nested = pb.as_list(pb.nested(Nested))\n data = {'field': 'value1', 'nested': [{'fields': ['value21', 'value22']\n }, {'fields': ['value31', 'value32']}]}\n obj = TestModel(**data)\n assert obj.field == 'value1'\n assert obj.nested == [Nested(fields=['value21', 'value22']), Nested(\n fields=['value31', 'value32'])]\n",
"<import token>\n\n\ndef test_root_deserialization():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <test_model>\n <element1>value1</element1>\n </test_model>\n \"\"\"\n\n\n @pb.model(name='test_model')\n class TestModel:\n element1 = pb.field()\n model = pb.from_xml(TestModel, xml)\n assert model.element1 == 'value1'\n\n\ndef test_attribute_deserialization():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <TestModel attrib1=\"value1\" attrib2=\"value2\"/>\n \"\"\"\n\n\n @pb.model\n class TestModel:\n attrib1 = pb.attr()\n attrib2 = pb.attr()\n model = pb.from_xml(TestModel, xml)\n assert model.attrib1 == 'value1'\n assert model.attrib2 == 'value2'\n\n\n<function token>\n<function token>\n\n\ndef test_element_deserialization_with_name():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <TestModel>\n <element1>value1</element1>\n <element2>value2</element2>\n </TestModel>\n \"\"\"\n\n\n @pb.model\n class TestModel:\n elem1 = pb.field(name='element1')\n elem2 = pb.field(name='element2')\n model = pb.from_xml(TestModel, xml)\n assert model.elem1 == 'value1'\n assert model.elem2 == 'value2'\n\n\n<function token>\n\n\ndef test_wrapper_deserialization_with_path():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <TestModel>\n <wrapper1>\n <wrapper2>\n <element1>value1</element1>\n </wrapper2>\n </wrapper1>\n </TestModel>\n \"\"\"\n\n\n @pb.model\n class TestModel:\n element1 = pb.wrap('wrapper1/wrapper2', pb.field())\n model = pb.from_xml(TestModel, xml)\n assert model.element1 == 'value1'\n\n\ndef test_inheritance_deserialization():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <TestRootModel>\n <TestBaseModel>\n <element1>value1</element1>\n </TestBaseModel>\n <TestExtendedModel>\n <element1>value2</element1>\n <element2>value3</element2>\n </TestExtendedModel>\n </TestRootModel>\n \"\"\"\n\n\n @pb.model\n class TestBaseModel:\n element1 = pb.field()\n\n\n @pb.model\n class TestExtendedModel(TestBaseModel):\n element2 = pb.field()\n\n\n @pb.model\n class TestRootModel:\n model1 = pb.nested(TestBaseModel)\n model2 = pb.nested(TestExtendedModel)\n model = pb.from_xml(TestRootModel, xml)\n assert model.model1.element1 == 'value1'\n assert model.model2.element1 == 'value2'\n assert model.model2.element2 == 'value3'\n\n\ndef test_nested_deserialization():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <TestModel>\n <NestedModel1>\n <NestedModel2>\n <element>value</element>\n </NestedModel2>\n </NestedModel1>\n </TestModel>\n \"\"\"\n\n\n @pb.model\n class NestedModel2:\n element = pb.field()\n\n\n @pb.model\n class NestedModel1:\n nested = pb.nested(NestedModel2)\n\n\n @pb.model\n class TestModel:\n nested = pb.nested(NestedModel1)\n model = pb.from_xml(TestModel, xml)\n assert model.nested.nested.element == 'value'\n\n\ndef test_element_list_deserialization():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <TestModel>\n <element1>value1</element1>\n <element1>value2</element1>\n </TestModel>\n \"\"\"\n\n\n @pb.model\n class TestModel:\n element1 = pb.as_list(pb.field())\n model = pb.from_xml(TestModel, xml)\n assert model.element1 == ['value1', 'value2']\n\n\ndef test_wrapper_list_deserialization():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <TestModel>\n <wrapper>\n <element>value1</element>\n </wrapper>\n <wrapper>\n <element>value2</element>\n </wrapper>\n </TestModel>\n \"\"\"\n\n\n @pb.model\n class TestModel:\n elements = pb.as_list(pb.wrap('wrapper', pb.field('element')))\n model = pb.from_xml(TestModel, xml)\n assert model.elements == ['value1', 'value2']\n\n\n<function token>\n\n\ndef test_list_of_list_deserialization():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <TestModel>\n <element1>\n <element2>value1</element2>\n <element2>value2</element2>\n </element1>\n <element1>\n <element2>value3</element2>\n <element2>value4</element2>\n </element1>\n </TestModel>\n \"\"\"\n\n\n @pb.model\n class TestModel:\n elements = pb.as_list(pb.wrap('element1', pb.as_list(pb.field(\n 'element2'))))\n model = pb.from_xml(TestModel, xml)\n assert model.elements[0][0] == 'value1'\n assert model.elements[0][1] == 'value2'\n assert model.elements[1][0] == 'value3'\n assert model.elements[1][1] == 'value4'\n\n\ndef test_namespaces_deserialization():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <testns1:TestModel xmlns:testns1=\"http://www.test1.org\"\n xmlns:testns2=\"http://www.test2.org\"\n xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\"\n xsi:schemaLocation=\"http://www.test.com schema.xsd\">\n <element1>value1</element1>\n <testns1:element2>value2</testns1:element2>\n <testns2:element3>value3</testns2:element3>\n <testns1:element4 xmlns:testns2=\"http://www.test22.org\">\n <element5>value5</element5>\n <testns2:element6>value6</testns2:element6>\n </testns1:element4>\n </testns1:TestModel>\n \"\"\"\n\n\n @pb.model(ns='testns1', ns_map={'testns2': 'http://www.test2.org'})\n class TestModel:\n schema = pb.attribute('schemaLocation', ns='xsi')\n element1 = pb.field(ns='')\n element2 = pb.field()\n element3 = pb.field(ns='testns2')\n element5 = pb.wrap('element4', pb.field(ns=''))\n element6 = pb.wrap('element4', pb.field(ns='testns2'), ns_map={\n 'testns2': 'http://www.test22.org'})\n model = pb.from_xml(TestModel, xml, ns_map={'testns1':\n 'http://www.test1.org', 'xsi':\n 'http://www.w3.org/2001/XMLSchema-instance'})\n assert model.schema == 'http://www.test.com schema.xsd'\n assert model.element1 == 'value1'\n assert model.element2 == 'value2'\n assert model.element3 == 'value3'\n assert model.element5 == 'value5'\n assert model.element6 == 'value6'\n\n\ndef test_complex_xml_deserialization():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <envelope xmlns=\"http://www.test.org\"\n xmlns:doc=\"http://www.test1.org\"\n xmlns:data=\"http://www.test2.org\">\n <doc:user name=\"Alexey\" surname=\"Ivanov\" age=\"26\">\n\n <doc:contacts>\n <doc:phone>+79204563539</doc:phone>\n <doc:email>[email protected]</doc:email>\n <doc:email>[email protected]</doc:email>\n </doc:contacts>\n\n <doc:documents>\n <doc:passport series=\"3127\" number=\"836815\"/>\n </doc:documents>\n\n <data:occupations xmlns:data=\"http://www.test22.org\">\n <data:occupation title=\"yandex\">\n <data:address>Moscow</data:address>\n <data:employees>8854</data:employees>\n </data:occupation>\n <data:occupation title=\"skbkontur\">\n <data:address>Yekaterinburg</data:address>\n <data:employees>7742</data:employees>\n </data:occupation>\n </data:occupations>\n\n </doc:user>\n </envelope>\n \"\"\"\n\n\n @pb.model(name='occupation', ns='data', ns_map={'data':\n 'http://www.test22.org'})\n class Occupation:\n title = pb.attr()\n address = pb.field()\n employees = pb.field(converter=int)\n\n\n @pb.model(name='user', ns='doc', ns_map={'doc': 'http://www.test1.org'})\n class User:\n name = pb.attr()\n surname = pb.attr()\n age = pb.attr(converter=int)\n phone = pb.wrap('contacts', pb.field())\n emails = pb.wrap('contacts', pb.as_list(pb.field(name='email')))\n passport_series = pb.wrap('documents/passport', pb.attr('series'))\n passport_number = pb.wrap('documents/passport', pb.attr('number'))\n occupations = pb.wrap('occupations', pb.lst(pb.nested(Occupation)),\n ns='data', ns_map={'data': 'http://www.test22.org'})\n citizenship = pb.field(default='RU')\n xml = et.fromstring(xml)\n user = pb.from_xml(User, xml)\n assert user.name == 'Alexey'\n assert user.surname == 'Ivanov'\n assert user.age == 26\n assert user.phone == '+79204563539'\n assert user.emails == ['[email protected]', '[email protected]']\n assert user.passport_series == '3127'\n assert user.passport_number == '836815'\n assert len(user.occupations) == 2\n assert user.occupations[0].title == 'yandex'\n assert user.occupations[0].address == 'Moscow'\n assert user.occupations[0].employees == 8854\n assert user.occupations[1].title == 'skbkontur'\n assert user.occupations[1].address == 'Yekaterinburg'\n assert user.occupations[1].employees == 7742\n assert user.citizenship == 'RU'\n\n\n<function token>\n\n\ndef test_nested_default():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <test_model>\n </test_model>\n \"\"\"\n\n\n @pb.model(name='nested_model')\n class NestedModel:\n field = pb.field()\n\n\n @pb.model(name='test_model')\n class TestModel:\n nested = pb.nested(NestedModel, default=None)\n obj = pb.from_xml(TestModel, xml)\n assert obj.nested is None\n\n\ndef test_field_default():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <test_model>\n </test_model>\n \"\"\"\n\n\n @pb.model(name='test_model')\n class TestModel:\n field = pb.field(default=None)\n obj = pb.from_xml(TestModel, xml)\n assert obj.field is None\n\n\ndef test_attribute_default():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <test_model>\n </test_model>\n \"\"\"\n\n\n @pb.model(name='test_model')\n class TestModel:\n attrib = pb.attr(default=None)\n obj = pb.from_xml(TestModel, xml)\n assert obj.attrib is None\n\n\n<function token>\n\n\ndef test_dict_deserialization():\n\n\n @pb.model\n class Nested:\n fields = pb.as_list(pb.field())\n\n\n @pb.model\n class TestModel:\n field = pb.field()\n nested = pb.as_list(pb.nested(Nested))\n data = {'field': 'value1', 'nested': [{'fields': ['value21', 'value22']\n }, {'fields': ['value31', 'value32']}]}\n obj = TestModel(**data)\n assert obj.field == 'value1'\n assert obj.nested == [Nested(fields=['value21', 'value22']), Nested(\n fields=['value31', 'value32'])]\n",
"<import token>\n\n\ndef test_root_deserialization():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <test_model>\n <element1>value1</element1>\n </test_model>\n \"\"\"\n\n\n @pb.model(name='test_model')\n class TestModel:\n element1 = pb.field()\n model = pb.from_xml(TestModel, xml)\n assert model.element1 == 'value1'\n\n\ndef test_attribute_deserialization():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <TestModel attrib1=\"value1\" attrib2=\"value2\"/>\n \"\"\"\n\n\n @pb.model\n class TestModel:\n attrib1 = pb.attr()\n attrib2 = pb.attr()\n model = pb.from_xml(TestModel, xml)\n assert model.attrib1 == 'value1'\n assert model.attrib2 == 'value2'\n\n\n<function token>\n<function token>\n\n\ndef test_element_deserialization_with_name():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <TestModel>\n <element1>value1</element1>\n <element2>value2</element2>\n </TestModel>\n \"\"\"\n\n\n @pb.model\n class TestModel:\n elem1 = pb.field(name='element1')\n elem2 = pb.field(name='element2')\n model = pb.from_xml(TestModel, xml)\n assert model.elem1 == 'value1'\n assert model.elem2 == 'value2'\n\n\n<function token>\n\n\ndef test_wrapper_deserialization_with_path():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <TestModel>\n <wrapper1>\n <wrapper2>\n <element1>value1</element1>\n </wrapper2>\n </wrapper1>\n </TestModel>\n \"\"\"\n\n\n @pb.model\n class TestModel:\n element1 = pb.wrap('wrapper1/wrapper2', pb.field())\n model = pb.from_xml(TestModel, xml)\n assert model.element1 == 'value1'\n\n\ndef test_inheritance_deserialization():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <TestRootModel>\n <TestBaseModel>\n <element1>value1</element1>\n </TestBaseModel>\n <TestExtendedModel>\n <element1>value2</element1>\n <element2>value3</element2>\n </TestExtendedModel>\n </TestRootModel>\n \"\"\"\n\n\n @pb.model\n class TestBaseModel:\n element1 = pb.field()\n\n\n @pb.model\n class TestExtendedModel(TestBaseModel):\n element2 = pb.field()\n\n\n @pb.model\n class TestRootModel:\n model1 = pb.nested(TestBaseModel)\n model2 = pb.nested(TestExtendedModel)\n model = pb.from_xml(TestRootModel, xml)\n assert model.model1.element1 == 'value1'\n assert model.model2.element1 == 'value2'\n assert model.model2.element2 == 'value3'\n\n\ndef test_nested_deserialization():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <TestModel>\n <NestedModel1>\n <NestedModel2>\n <element>value</element>\n </NestedModel2>\n </NestedModel1>\n </TestModel>\n \"\"\"\n\n\n @pb.model\n class NestedModel2:\n element = pb.field()\n\n\n @pb.model\n class NestedModel1:\n nested = pb.nested(NestedModel2)\n\n\n @pb.model\n class TestModel:\n nested = pb.nested(NestedModel1)\n model = pb.from_xml(TestModel, xml)\n assert model.nested.nested.element == 'value'\n\n\ndef test_element_list_deserialization():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <TestModel>\n <element1>value1</element1>\n <element1>value2</element1>\n </TestModel>\n \"\"\"\n\n\n @pb.model\n class TestModel:\n element1 = pb.as_list(pb.field())\n model = pb.from_xml(TestModel, xml)\n assert model.element1 == ['value1', 'value2']\n\n\ndef test_wrapper_list_deserialization():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <TestModel>\n <wrapper>\n <element>value1</element>\n </wrapper>\n <wrapper>\n <element>value2</element>\n </wrapper>\n </TestModel>\n \"\"\"\n\n\n @pb.model\n class TestModel:\n elements = pb.as_list(pb.wrap('wrapper', pb.field('element')))\n model = pb.from_xml(TestModel, xml)\n assert model.elements == ['value1', 'value2']\n\n\n<function token>\n\n\ndef test_list_of_list_deserialization():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <TestModel>\n <element1>\n <element2>value1</element2>\n <element2>value2</element2>\n </element1>\n <element1>\n <element2>value3</element2>\n <element2>value4</element2>\n </element1>\n </TestModel>\n \"\"\"\n\n\n @pb.model\n class TestModel:\n elements = pb.as_list(pb.wrap('element1', pb.as_list(pb.field(\n 'element2'))))\n model = pb.from_xml(TestModel, xml)\n assert model.elements[0][0] == 'value1'\n assert model.elements[0][1] == 'value2'\n assert model.elements[1][0] == 'value3'\n assert model.elements[1][1] == 'value4'\n\n\ndef test_namespaces_deserialization():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <testns1:TestModel xmlns:testns1=\"http://www.test1.org\"\n xmlns:testns2=\"http://www.test2.org\"\n xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\"\n xsi:schemaLocation=\"http://www.test.com schema.xsd\">\n <element1>value1</element1>\n <testns1:element2>value2</testns1:element2>\n <testns2:element3>value3</testns2:element3>\n <testns1:element4 xmlns:testns2=\"http://www.test22.org\">\n <element5>value5</element5>\n <testns2:element6>value6</testns2:element6>\n </testns1:element4>\n </testns1:TestModel>\n \"\"\"\n\n\n @pb.model(ns='testns1', ns_map={'testns2': 'http://www.test2.org'})\n class TestModel:\n schema = pb.attribute('schemaLocation', ns='xsi')\n element1 = pb.field(ns='')\n element2 = pb.field()\n element3 = pb.field(ns='testns2')\n element5 = pb.wrap('element4', pb.field(ns=''))\n element6 = pb.wrap('element4', pb.field(ns='testns2'), ns_map={\n 'testns2': 'http://www.test22.org'})\n model = pb.from_xml(TestModel, xml, ns_map={'testns1':\n 'http://www.test1.org', 'xsi':\n 'http://www.w3.org/2001/XMLSchema-instance'})\n assert model.schema == 'http://www.test.com schema.xsd'\n assert model.element1 == 'value1'\n assert model.element2 == 'value2'\n assert model.element3 == 'value3'\n assert model.element5 == 'value5'\n assert model.element6 == 'value6'\n\n\ndef test_complex_xml_deserialization():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <envelope xmlns=\"http://www.test.org\"\n xmlns:doc=\"http://www.test1.org\"\n xmlns:data=\"http://www.test2.org\">\n <doc:user name=\"Alexey\" surname=\"Ivanov\" age=\"26\">\n\n <doc:contacts>\n <doc:phone>+79204563539</doc:phone>\n <doc:email>[email protected]</doc:email>\n <doc:email>[email protected]</doc:email>\n </doc:contacts>\n\n <doc:documents>\n <doc:passport series=\"3127\" number=\"836815\"/>\n </doc:documents>\n\n <data:occupations xmlns:data=\"http://www.test22.org\">\n <data:occupation title=\"yandex\">\n <data:address>Moscow</data:address>\n <data:employees>8854</data:employees>\n </data:occupation>\n <data:occupation title=\"skbkontur\">\n <data:address>Yekaterinburg</data:address>\n <data:employees>7742</data:employees>\n </data:occupation>\n </data:occupations>\n\n </doc:user>\n </envelope>\n \"\"\"\n\n\n @pb.model(name='occupation', ns='data', ns_map={'data':\n 'http://www.test22.org'})\n class Occupation:\n title = pb.attr()\n address = pb.field()\n employees = pb.field(converter=int)\n\n\n @pb.model(name='user', ns='doc', ns_map={'doc': 'http://www.test1.org'})\n class User:\n name = pb.attr()\n surname = pb.attr()\n age = pb.attr(converter=int)\n phone = pb.wrap('contacts', pb.field())\n emails = pb.wrap('contacts', pb.as_list(pb.field(name='email')))\n passport_series = pb.wrap('documents/passport', pb.attr('series'))\n passport_number = pb.wrap('documents/passport', pb.attr('number'))\n occupations = pb.wrap('occupations', pb.lst(pb.nested(Occupation)),\n ns='data', ns_map={'data': 'http://www.test22.org'})\n citizenship = pb.field(default='RU')\n xml = et.fromstring(xml)\n user = pb.from_xml(User, xml)\n assert user.name == 'Alexey'\n assert user.surname == 'Ivanov'\n assert user.age == 26\n assert user.phone == '+79204563539'\n assert user.emails == ['[email protected]', '[email protected]']\n assert user.passport_series == '3127'\n assert user.passport_number == '836815'\n assert len(user.occupations) == 2\n assert user.occupations[0].title == 'yandex'\n assert user.occupations[0].address == 'Moscow'\n assert user.occupations[0].employees == 8854\n assert user.occupations[1].title == 'skbkontur'\n assert user.occupations[1].address == 'Yekaterinburg'\n assert user.occupations[1].employees == 7742\n assert user.citizenship == 'RU'\n\n\n<function token>\n\n\ndef test_nested_default():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <test_model>\n </test_model>\n \"\"\"\n\n\n @pb.model(name='nested_model')\n class NestedModel:\n field = pb.field()\n\n\n @pb.model(name='test_model')\n class TestModel:\n nested = pb.nested(NestedModel, default=None)\n obj = pb.from_xml(TestModel, xml)\n assert obj.nested is None\n\n\n<function token>\n\n\ndef test_attribute_default():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <test_model>\n </test_model>\n \"\"\"\n\n\n @pb.model(name='test_model')\n class TestModel:\n attrib = pb.attr(default=None)\n obj = pb.from_xml(TestModel, xml)\n assert obj.attrib is None\n\n\n<function token>\n\n\ndef test_dict_deserialization():\n\n\n @pb.model\n class Nested:\n fields = pb.as_list(pb.field())\n\n\n @pb.model\n class TestModel:\n field = pb.field()\n nested = pb.as_list(pb.nested(Nested))\n data = {'field': 'value1', 'nested': [{'fields': ['value21', 'value22']\n }, {'fields': ['value31', 'value32']}]}\n obj = TestModel(**data)\n assert obj.field == 'value1'\n assert obj.nested == [Nested(fields=['value21', 'value22']), Nested(\n fields=['value31', 'value32'])]\n",
"<import token>\n\n\ndef test_root_deserialization():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <test_model>\n <element1>value1</element1>\n </test_model>\n \"\"\"\n\n\n @pb.model(name='test_model')\n class TestModel:\n element1 = pb.field()\n model = pb.from_xml(TestModel, xml)\n assert model.element1 == 'value1'\n\n\ndef test_attribute_deserialization():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <TestModel attrib1=\"value1\" attrib2=\"value2\"/>\n \"\"\"\n\n\n @pb.model\n class TestModel:\n attrib1 = pb.attr()\n attrib2 = pb.attr()\n model = pb.from_xml(TestModel, xml)\n assert model.attrib1 == 'value1'\n assert model.attrib2 == 'value2'\n\n\n<function token>\n<function token>\n\n\ndef test_element_deserialization_with_name():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <TestModel>\n <element1>value1</element1>\n <element2>value2</element2>\n </TestModel>\n \"\"\"\n\n\n @pb.model\n class TestModel:\n elem1 = pb.field(name='element1')\n elem2 = pb.field(name='element2')\n model = pb.from_xml(TestModel, xml)\n assert model.elem1 == 'value1'\n assert model.elem2 == 'value2'\n\n\n<function token>\n\n\ndef test_wrapper_deserialization_with_path():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <TestModel>\n <wrapper1>\n <wrapper2>\n <element1>value1</element1>\n </wrapper2>\n </wrapper1>\n </TestModel>\n \"\"\"\n\n\n @pb.model\n class TestModel:\n element1 = pb.wrap('wrapper1/wrapper2', pb.field())\n model = pb.from_xml(TestModel, xml)\n assert model.element1 == 'value1'\n\n\ndef test_inheritance_deserialization():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <TestRootModel>\n <TestBaseModel>\n <element1>value1</element1>\n </TestBaseModel>\n <TestExtendedModel>\n <element1>value2</element1>\n <element2>value3</element2>\n </TestExtendedModel>\n </TestRootModel>\n \"\"\"\n\n\n @pb.model\n class TestBaseModel:\n element1 = pb.field()\n\n\n @pb.model\n class TestExtendedModel(TestBaseModel):\n element2 = pb.field()\n\n\n @pb.model\n class TestRootModel:\n model1 = pb.nested(TestBaseModel)\n model2 = pb.nested(TestExtendedModel)\n model = pb.from_xml(TestRootModel, xml)\n assert model.model1.element1 == 'value1'\n assert model.model2.element1 == 'value2'\n assert model.model2.element2 == 'value3'\n\n\ndef test_nested_deserialization():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <TestModel>\n <NestedModel1>\n <NestedModel2>\n <element>value</element>\n </NestedModel2>\n </NestedModel1>\n </TestModel>\n \"\"\"\n\n\n @pb.model\n class NestedModel2:\n element = pb.field()\n\n\n @pb.model\n class NestedModel1:\n nested = pb.nested(NestedModel2)\n\n\n @pb.model\n class TestModel:\n nested = pb.nested(NestedModel1)\n model = pb.from_xml(TestModel, xml)\n assert model.nested.nested.element == 'value'\n\n\ndef test_element_list_deserialization():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <TestModel>\n <element1>value1</element1>\n <element1>value2</element1>\n </TestModel>\n \"\"\"\n\n\n @pb.model\n class TestModel:\n element1 = pb.as_list(pb.field())\n model = pb.from_xml(TestModel, xml)\n assert model.element1 == ['value1', 'value2']\n\n\ndef test_wrapper_list_deserialization():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <TestModel>\n <wrapper>\n <element>value1</element>\n </wrapper>\n <wrapper>\n <element>value2</element>\n </wrapper>\n </TestModel>\n \"\"\"\n\n\n @pb.model\n class TestModel:\n elements = pb.as_list(pb.wrap('wrapper', pb.field('element')))\n model = pb.from_xml(TestModel, xml)\n assert model.elements == ['value1', 'value2']\n\n\n<function token>\n\n\ndef test_list_of_list_deserialization():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <TestModel>\n <element1>\n <element2>value1</element2>\n <element2>value2</element2>\n </element1>\n <element1>\n <element2>value3</element2>\n <element2>value4</element2>\n </element1>\n </TestModel>\n \"\"\"\n\n\n @pb.model\n class TestModel:\n elements = pb.as_list(pb.wrap('element1', pb.as_list(pb.field(\n 'element2'))))\n model = pb.from_xml(TestModel, xml)\n assert model.elements[0][0] == 'value1'\n assert model.elements[0][1] == 'value2'\n assert model.elements[1][0] == 'value3'\n assert model.elements[1][1] == 'value4'\n\n\ndef test_namespaces_deserialization():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <testns1:TestModel xmlns:testns1=\"http://www.test1.org\"\n xmlns:testns2=\"http://www.test2.org\"\n xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\"\n xsi:schemaLocation=\"http://www.test.com schema.xsd\">\n <element1>value1</element1>\n <testns1:element2>value2</testns1:element2>\n <testns2:element3>value3</testns2:element3>\n <testns1:element4 xmlns:testns2=\"http://www.test22.org\">\n <element5>value5</element5>\n <testns2:element6>value6</testns2:element6>\n </testns1:element4>\n </testns1:TestModel>\n \"\"\"\n\n\n @pb.model(ns='testns1', ns_map={'testns2': 'http://www.test2.org'})\n class TestModel:\n schema = pb.attribute('schemaLocation', ns='xsi')\n element1 = pb.field(ns='')\n element2 = pb.field()\n element3 = pb.field(ns='testns2')\n element5 = pb.wrap('element4', pb.field(ns=''))\n element6 = pb.wrap('element4', pb.field(ns='testns2'), ns_map={\n 'testns2': 'http://www.test22.org'})\n model = pb.from_xml(TestModel, xml, ns_map={'testns1':\n 'http://www.test1.org', 'xsi':\n 'http://www.w3.org/2001/XMLSchema-instance'})\n assert model.schema == 'http://www.test.com schema.xsd'\n assert model.element1 == 'value1'\n assert model.element2 == 'value2'\n assert model.element3 == 'value3'\n assert model.element5 == 'value5'\n assert model.element6 == 'value6'\n\n\ndef test_complex_xml_deserialization():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <envelope xmlns=\"http://www.test.org\"\n xmlns:doc=\"http://www.test1.org\"\n xmlns:data=\"http://www.test2.org\">\n <doc:user name=\"Alexey\" surname=\"Ivanov\" age=\"26\">\n\n <doc:contacts>\n <doc:phone>+79204563539</doc:phone>\n <doc:email>[email protected]</doc:email>\n <doc:email>[email protected]</doc:email>\n </doc:contacts>\n\n <doc:documents>\n <doc:passport series=\"3127\" number=\"836815\"/>\n </doc:documents>\n\n <data:occupations xmlns:data=\"http://www.test22.org\">\n <data:occupation title=\"yandex\">\n <data:address>Moscow</data:address>\n <data:employees>8854</data:employees>\n </data:occupation>\n <data:occupation title=\"skbkontur\">\n <data:address>Yekaterinburg</data:address>\n <data:employees>7742</data:employees>\n </data:occupation>\n </data:occupations>\n\n </doc:user>\n </envelope>\n \"\"\"\n\n\n @pb.model(name='occupation', ns='data', ns_map={'data':\n 'http://www.test22.org'})\n class Occupation:\n title = pb.attr()\n address = pb.field()\n employees = pb.field(converter=int)\n\n\n @pb.model(name='user', ns='doc', ns_map={'doc': 'http://www.test1.org'})\n class User:\n name = pb.attr()\n surname = pb.attr()\n age = pb.attr(converter=int)\n phone = pb.wrap('contacts', pb.field())\n emails = pb.wrap('contacts', pb.as_list(pb.field(name='email')))\n passport_series = pb.wrap('documents/passport', pb.attr('series'))\n passport_number = pb.wrap('documents/passport', pb.attr('number'))\n occupations = pb.wrap('occupations', pb.lst(pb.nested(Occupation)),\n ns='data', ns_map={'data': 'http://www.test22.org'})\n citizenship = pb.field(default='RU')\n xml = et.fromstring(xml)\n user = pb.from_xml(User, xml)\n assert user.name == 'Alexey'\n assert user.surname == 'Ivanov'\n assert user.age == 26\n assert user.phone == '+79204563539'\n assert user.emails == ['[email protected]', '[email protected]']\n assert user.passport_series == '3127'\n assert user.passport_number == '836815'\n assert len(user.occupations) == 2\n assert user.occupations[0].title == 'yandex'\n assert user.occupations[0].address == 'Moscow'\n assert user.occupations[0].employees == 8854\n assert user.occupations[1].title == 'skbkontur'\n assert user.occupations[1].address == 'Yekaterinburg'\n assert user.occupations[1].employees == 7742\n assert user.citizenship == 'RU'\n\n\n<function token>\n\n\ndef test_nested_default():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <test_model>\n </test_model>\n \"\"\"\n\n\n @pb.model(name='nested_model')\n class NestedModel:\n field = pb.field()\n\n\n @pb.model(name='test_model')\n class TestModel:\n nested = pb.nested(NestedModel, default=None)\n obj = pb.from_xml(TestModel, xml)\n assert obj.nested is None\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef test_dict_deserialization():\n\n\n @pb.model\n class Nested:\n fields = pb.as_list(pb.field())\n\n\n @pb.model\n class TestModel:\n field = pb.field()\n nested = pb.as_list(pb.nested(Nested))\n data = {'field': 'value1', 'nested': [{'fields': ['value21', 'value22']\n }, {'fields': ['value31', 'value32']}]}\n obj = TestModel(**data)\n assert obj.field == 'value1'\n assert obj.nested == [Nested(fields=['value21', 'value22']), Nested(\n fields=['value31', 'value32'])]\n",
"<import token>\n\n\ndef test_root_deserialization():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <test_model>\n <element1>value1</element1>\n </test_model>\n \"\"\"\n\n\n @pb.model(name='test_model')\n class TestModel:\n element1 = pb.field()\n model = pb.from_xml(TestModel, xml)\n assert model.element1 == 'value1'\n\n\ndef test_attribute_deserialization():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <TestModel attrib1=\"value1\" attrib2=\"value2\"/>\n \"\"\"\n\n\n @pb.model\n class TestModel:\n attrib1 = pb.attr()\n attrib2 = pb.attr()\n model = pb.from_xml(TestModel, xml)\n assert model.attrib1 == 'value1'\n assert model.attrib2 == 'value2'\n\n\n<function token>\n<function token>\n\n\ndef test_element_deserialization_with_name():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <TestModel>\n <element1>value1</element1>\n <element2>value2</element2>\n </TestModel>\n \"\"\"\n\n\n @pb.model\n class TestModel:\n elem1 = pb.field(name='element1')\n elem2 = pb.field(name='element2')\n model = pb.from_xml(TestModel, xml)\n assert model.elem1 == 'value1'\n assert model.elem2 == 'value2'\n\n\n<function token>\n\n\ndef test_wrapper_deserialization_with_path():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <TestModel>\n <wrapper1>\n <wrapper2>\n <element1>value1</element1>\n </wrapper2>\n </wrapper1>\n </TestModel>\n \"\"\"\n\n\n @pb.model\n class TestModel:\n element1 = pb.wrap('wrapper1/wrapper2', pb.field())\n model = pb.from_xml(TestModel, xml)\n assert model.element1 == 'value1'\n\n\n<function token>\n\n\ndef test_nested_deserialization():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <TestModel>\n <NestedModel1>\n <NestedModel2>\n <element>value</element>\n </NestedModel2>\n </NestedModel1>\n </TestModel>\n \"\"\"\n\n\n @pb.model\n class NestedModel2:\n element = pb.field()\n\n\n @pb.model\n class NestedModel1:\n nested = pb.nested(NestedModel2)\n\n\n @pb.model\n class TestModel:\n nested = pb.nested(NestedModel1)\n model = pb.from_xml(TestModel, xml)\n assert model.nested.nested.element == 'value'\n\n\ndef test_element_list_deserialization():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <TestModel>\n <element1>value1</element1>\n <element1>value2</element1>\n </TestModel>\n \"\"\"\n\n\n @pb.model\n class TestModel:\n element1 = pb.as_list(pb.field())\n model = pb.from_xml(TestModel, xml)\n assert model.element1 == ['value1', 'value2']\n\n\ndef test_wrapper_list_deserialization():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <TestModel>\n <wrapper>\n <element>value1</element>\n </wrapper>\n <wrapper>\n <element>value2</element>\n </wrapper>\n </TestModel>\n \"\"\"\n\n\n @pb.model\n class TestModel:\n elements = pb.as_list(pb.wrap('wrapper', pb.field('element')))\n model = pb.from_xml(TestModel, xml)\n assert model.elements == ['value1', 'value2']\n\n\n<function token>\n\n\ndef test_list_of_list_deserialization():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <TestModel>\n <element1>\n <element2>value1</element2>\n <element2>value2</element2>\n </element1>\n <element1>\n <element2>value3</element2>\n <element2>value4</element2>\n </element1>\n </TestModel>\n \"\"\"\n\n\n @pb.model\n class TestModel:\n elements = pb.as_list(pb.wrap('element1', pb.as_list(pb.field(\n 'element2'))))\n model = pb.from_xml(TestModel, xml)\n assert model.elements[0][0] == 'value1'\n assert model.elements[0][1] == 'value2'\n assert model.elements[1][0] == 'value3'\n assert model.elements[1][1] == 'value4'\n\n\ndef test_namespaces_deserialization():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <testns1:TestModel xmlns:testns1=\"http://www.test1.org\"\n xmlns:testns2=\"http://www.test2.org\"\n xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\"\n xsi:schemaLocation=\"http://www.test.com schema.xsd\">\n <element1>value1</element1>\n <testns1:element2>value2</testns1:element2>\n <testns2:element3>value3</testns2:element3>\n <testns1:element4 xmlns:testns2=\"http://www.test22.org\">\n <element5>value5</element5>\n <testns2:element6>value6</testns2:element6>\n </testns1:element4>\n </testns1:TestModel>\n \"\"\"\n\n\n @pb.model(ns='testns1', ns_map={'testns2': 'http://www.test2.org'})\n class TestModel:\n schema = pb.attribute('schemaLocation', ns='xsi')\n element1 = pb.field(ns='')\n element2 = pb.field()\n element3 = pb.field(ns='testns2')\n element5 = pb.wrap('element4', pb.field(ns=''))\n element6 = pb.wrap('element4', pb.field(ns='testns2'), ns_map={\n 'testns2': 'http://www.test22.org'})\n model = pb.from_xml(TestModel, xml, ns_map={'testns1':\n 'http://www.test1.org', 'xsi':\n 'http://www.w3.org/2001/XMLSchema-instance'})\n assert model.schema == 'http://www.test.com schema.xsd'\n assert model.element1 == 'value1'\n assert model.element2 == 'value2'\n assert model.element3 == 'value3'\n assert model.element5 == 'value5'\n assert model.element6 == 'value6'\n\n\ndef test_complex_xml_deserialization():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <envelope xmlns=\"http://www.test.org\"\n xmlns:doc=\"http://www.test1.org\"\n xmlns:data=\"http://www.test2.org\">\n <doc:user name=\"Alexey\" surname=\"Ivanov\" age=\"26\">\n\n <doc:contacts>\n <doc:phone>+79204563539</doc:phone>\n <doc:email>[email protected]</doc:email>\n <doc:email>[email protected]</doc:email>\n </doc:contacts>\n\n <doc:documents>\n <doc:passport series=\"3127\" number=\"836815\"/>\n </doc:documents>\n\n <data:occupations xmlns:data=\"http://www.test22.org\">\n <data:occupation title=\"yandex\">\n <data:address>Moscow</data:address>\n <data:employees>8854</data:employees>\n </data:occupation>\n <data:occupation title=\"skbkontur\">\n <data:address>Yekaterinburg</data:address>\n <data:employees>7742</data:employees>\n </data:occupation>\n </data:occupations>\n\n </doc:user>\n </envelope>\n \"\"\"\n\n\n @pb.model(name='occupation', ns='data', ns_map={'data':\n 'http://www.test22.org'})\n class Occupation:\n title = pb.attr()\n address = pb.field()\n employees = pb.field(converter=int)\n\n\n @pb.model(name='user', ns='doc', ns_map={'doc': 'http://www.test1.org'})\n class User:\n name = pb.attr()\n surname = pb.attr()\n age = pb.attr(converter=int)\n phone = pb.wrap('contacts', pb.field())\n emails = pb.wrap('contacts', pb.as_list(pb.field(name='email')))\n passport_series = pb.wrap('documents/passport', pb.attr('series'))\n passport_number = pb.wrap('documents/passport', pb.attr('number'))\n occupations = pb.wrap('occupations', pb.lst(pb.nested(Occupation)),\n ns='data', ns_map={'data': 'http://www.test22.org'})\n citizenship = pb.field(default='RU')\n xml = et.fromstring(xml)\n user = pb.from_xml(User, xml)\n assert user.name == 'Alexey'\n assert user.surname == 'Ivanov'\n assert user.age == 26\n assert user.phone == '+79204563539'\n assert user.emails == ['[email protected]', '[email protected]']\n assert user.passport_series == '3127'\n assert user.passport_number == '836815'\n assert len(user.occupations) == 2\n assert user.occupations[0].title == 'yandex'\n assert user.occupations[0].address == 'Moscow'\n assert user.occupations[0].employees == 8854\n assert user.occupations[1].title == 'skbkontur'\n assert user.occupations[1].address == 'Yekaterinburg'\n assert user.occupations[1].employees == 7742\n assert user.citizenship == 'RU'\n\n\n<function token>\n\n\ndef test_nested_default():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <test_model>\n </test_model>\n \"\"\"\n\n\n @pb.model(name='nested_model')\n class NestedModel:\n field = pb.field()\n\n\n @pb.model(name='test_model')\n class TestModel:\n nested = pb.nested(NestedModel, default=None)\n obj = pb.from_xml(TestModel, xml)\n assert obj.nested is None\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef test_dict_deserialization():\n\n\n @pb.model\n class Nested:\n fields = pb.as_list(pb.field())\n\n\n @pb.model\n class TestModel:\n field = pb.field()\n nested = pb.as_list(pb.nested(Nested))\n data = {'field': 'value1', 'nested': [{'fields': ['value21', 'value22']\n }, {'fields': ['value31', 'value32']}]}\n obj = TestModel(**data)\n assert obj.field == 'value1'\n assert obj.nested == [Nested(fields=['value21', 'value22']), Nested(\n fields=['value31', 'value32'])]\n",
"<import token>\n\n\ndef test_root_deserialization():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <test_model>\n <element1>value1</element1>\n </test_model>\n \"\"\"\n\n\n @pb.model(name='test_model')\n class TestModel:\n element1 = pb.field()\n model = pb.from_xml(TestModel, xml)\n assert model.element1 == 'value1'\n\n\ndef test_attribute_deserialization():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <TestModel attrib1=\"value1\" attrib2=\"value2\"/>\n \"\"\"\n\n\n @pb.model\n class TestModel:\n attrib1 = pb.attr()\n attrib2 = pb.attr()\n model = pb.from_xml(TestModel, xml)\n assert model.attrib1 == 'value1'\n assert model.attrib2 == 'value2'\n\n\n<function token>\n<function token>\n\n\ndef test_element_deserialization_with_name():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <TestModel>\n <element1>value1</element1>\n <element2>value2</element2>\n </TestModel>\n \"\"\"\n\n\n @pb.model\n class TestModel:\n elem1 = pb.field(name='element1')\n elem2 = pb.field(name='element2')\n model = pb.from_xml(TestModel, xml)\n assert model.elem1 == 'value1'\n assert model.elem2 == 'value2'\n\n\n<function token>\n\n\ndef test_wrapper_deserialization_with_path():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <TestModel>\n <wrapper1>\n <wrapper2>\n <element1>value1</element1>\n </wrapper2>\n </wrapper1>\n </TestModel>\n \"\"\"\n\n\n @pb.model\n class TestModel:\n element1 = pb.wrap('wrapper1/wrapper2', pb.field())\n model = pb.from_xml(TestModel, xml)\n assert model.element1 == 'value1'\n\n\n<function token>\n\n\ndef test_nested_deserialization():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <TestModel>\n <NestedModel1>\n <NestedModel2>\n <element>value</element>\n </NestedModel2>\n </NestedModel1>\n </TestModel>\n \"\"\"\n\n\n @pb.model\n class NestedModel2:\n element = pb.field()\n\n\n @pb.model\n class NestedModel1:\n nested = pb.nested(NestedModel2)\n\n\n @pb.model\n class TestModel:\n nested = pb.nested(NestedModel1)\n model = pb.from_xml(TestModel, xml)\n assert model.nested.nested.element == 'value'\n\n\ndef test_element_list_deserialization():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <TestModel>\n <element1>value1</element1>\n <element1>value2</element1>\n </TestModel>\n \"\"\"\n\n\n @pb.model\n class TestModel:\n element1 = pb.as_list(pb.field())\n model = pb.from_xml(TestModel, xml)\n assert model.element1 == ['value1', 'value2']\n\n\n<function token>\n<function token>\n\n\ndef test_list_of_list_deserialization():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <TestModel>\n <element1>\n <element2>value1</element2>\n <element2>value2</element2>\n </element1>\n <element1>\n <element2>value3</element2>\n <element2>value4</element2>\n </element1>\n </TestModel>\n \"\"\"\n\n\n @pb.model\n class TestModel:\n elements = pb.as_list(pb.wrap('element1', pb.as_list(pb.field(\n 'element2'))))\n model = pb.from_xml(TestModel, xml)\n assert model.elements[0][0] == 'value1'\n assert model.elements[0][1] == 'value2'\n assert model.elements[1][0] == 'value3'\n assert model.elements[1][1] == 'value4'\n\n\ndef test_namespaces_deserialization():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <testns1:TestModel xmlns:testns1=\"http://www.test1.org\"\n xmlns:testns2=\"http://www.test2.org\"\n xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\"\n xsi:schemaLocation=\"http://www.test.com schema.xsd\">\n <element1>value1</element1>\n <testns1:element2>value2</testns1:element2>\n <testns2:element3>value3</testns2:element3>\n <testns1:element4 xmlns:testns2=\"http://www.test22.org\">\n <element5>value5</element5>\n <testns2:element6>value6</testns2:element6>\n </testns1:element4>\n </testns1:TestModel>\n \"\"\"\n\n\n @pb.model(ns='testns1', ns_map={'testns2': 'http://www.test2.org'})\n class TestModel:\n schema = pb.attribute('schemaLocation', ns='xsi')\n element1 = pb.field(ns='')\n element2 = pb.field()\n element3 = pb.field(ns='testns2')\n element5 = pb.wrap('element4', pb.field(ns=''))\n element6 = pb.wrap('element4', pb.field(ns='testns2'), ns_map={\n 'testns2': 'http://www.test22.org'})\n model = pb.from_xml(TestModel, xml, ns_map={'testns1':\n 'http://www.test1.org', 'xsi':\n 'http://www.w3.org/2001/XMLSchema-instance'})\n assert model.schema == 'http://www.test.com schema.xsd'\n assert model.element1 == 'value1'\n assert model.element2 == 'value2'\n assert model.element3 == 'value3'\n assert model.element5 == 'value5'\n assert model.element6 == 'value6'\n\n\ndef test_complex_xml_deserialization():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <envelope xmlns=\"http://www.test.org\"\n xmlns:doc=\"http://www.test1.org\"\n xmlns:data=\"http://www.test2.org\">\n <doc:user name=\"Alexey\" surname=\"Ivanov\" age=\"26\">\n\n <doc:contacts>\n <doc:phone>+79204563539</doc:phone>\n <doc:email>[email protected]</doc:email>\n <doc:email>[email protected]</doc:email>\n </doc:contacts>\n\n <doc:documents>\n <doc:passport series=\"3127\" number=\"836815\"/>\n </doc:documents>\n\n <data:occupations xmlns:data=\"http://www.test22.org\">\n <data:occupation title=\"yandex\">\n <data:address>Moscow</data:address>\n <data:employees>8854</data:employees>\n </data:occupation>\n <data:occupation title=\"skbkontur\">\n <data:address>Yekaterinburg</data:address>\n <data:employees>7742</data:employees>\n </data:occupation>\n </data:occupations>\n\n </doc:user>\n </envelope>\n \"\"\"\n\n\n @pb.model(name='occupation', ns='data', ns_map={'data':\n 'http://www.test22.org'})\n class Occupation:\n title = pb.attr()\n address = pb.field()\n employees = pb.field(converter=int)\n\n\n @pb.model(name='user', ns='doc', ns_map={'doc': 'http://www.test1.org'})\n class User:\n name = pb.attr()\n surname = pb.attr()\n age = pb.attr(converter=int)\n phone = pb.wrap('contacts', pb.field())\n emails = pb.wrap('contacts', pb.as_list(pb.field(name='email')))\n passport_series = pb.wrap('documents/passport', pb.attr('series'))\n passport_number = pb.wrap('documents/passport', pb.attr('number'))\n occupations = pb.wrap('occupations', pb.lst(pb.nested(Occupation)),\n ns='data', ns_map={'data': 'http://www.test22.org'})\n citizenship = pb.field(default='RU')\n xml = et.fromstring(xml)\n user = pb.from_xml(User, xml)\n assert user.name == 'Alexey'\n assert user.surname == 'Ivanov'\n assert user.age == 26\n assert user.phone == '+79204563539'\n assert user.emails == ['[email protected]', '[email protected]']\n assert user.passport_series == '3127'\n assert user.passport_number == '836815'\n assert len(user.occupations) == 2\n assert user.occupations[0].title == 'yandex'\n assert user.occupations[0].address == 'Moscow'\n assert user.occupations[0].employees == 8854\n assert user.occupations[1].title == 'skbkontur'\n assert user.occupations[1].address == 'Yekaterinburg'\n assert user.occupations[1].employees == 7742\n assert user.citizenship == 'RU'\n\n\n<function token>\n\n\ndef test_nested_default():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <test_model>\n </test_model>\n \"\"\"\n\n\n @pb.model(name='nested_model')\n class NestedModel:\n field = pb.field()\n\n\n @pb.model(name='test_model')\n class TestModel:\n nested = pb.nested(NestedModel, default=None)\n obj = pb.from_xml(TestModel, xml)\n assert obj.nested is None\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef test_dict_deserialization():\n\n\n @pb.model\n class Nested:\n fields = pb.as_list(pb.field())\n\n\n @pb.model\n class TestModel:\n field = pb.field()\n nested = pb.as_list(pb.nested(Nested))\n data = {'field': 'value1', 'nested': [{'fields': ['value21', 'value22']\n }, {'fields': ['value31', 'value32']}]}\n obj = TestModel(**data)\n assert obj.field == 'value1'\n assert obj.nested == [Nested(fields=['value21', 'value22']), Nested(\n fields=['value31', 'value32'])]\n",
"<import token>\n\n\ndef test_root_deserialization():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <test_model>\n <element1>value1</element1>\n </test_model>\n \"\"\"\n\n\n @pb.model(name='test_model')\n class TestModel:\n element1 = pb.field()\n model = pb.from_xml(TestModel, xml)\n assert model.element1 == 'value1'\n\n\ndef test_attribute_deserialization():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <TestModel attrib1=\"value1\" attrib2=\"value2\"/>\n \"\"\"\n\n\n @pb.model\n class TestModel:\n attrib1 = pb.attr()\n attrib2 = pb.attr()\n model = pb.from_xml(TestModel, xml)\n assert model.attrib1 == 'value1'\n assert model.attrib2 == 'value2'\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef test_wrapper_deserialization_with_path():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <TestModel>\n <wrapper1>\n <wrapper2>\n <element1>value1</element1>\n </wrapper2>\n </wrapper1>\n </TestModel>\n \"\"\"\n\n\n @pb.model\n class TestModel:\n element1 = pb.wrap('wrapper1/wrapper2', pb.field())\n model = pb.from_xml(TestModel, xml)\n assert model.element1 == 'value1'\n\n\n<function token>\n\n\ndef test_nested_deserialization():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <TestModel>\n <NestedModel1>\n <NestedModel2>\n <element>value</element>\n </NestedModel2>\n </NestedModel1>\n </TestModel>\n \"\"\"\n\n\n @pb.model\n class NestedModel2:\n element = pb.field()\n\n\n @pb.model\n class NestedModel1:\n nested = pb.nested(NestedModel2)\n\n\n @pb.model\n class TestModel:\n nested = pb.nested(NestedModel1)\n model = pb.from_xml(TestModel, xml)\n assert model.nested.nested.element == 'value'\n\n\ndef test_element_list_deserialization():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <TestModel>\n <element1>value1</element1>\n <element1>value2</element1>\n </TestModel>\n \"\"\"\n\n\n @pb.model\n class TestModel:\n element1 = pb.as_list(pb.field())\n model = pb.from_xml(TestModel, xml)\n assert model.element1 == ['value1', 'value2']\n\n\n<function token>\n<function token>\n\n\ndef test_list_of_list_deserialization():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <TestModel>\n <element1>\n <element2>value1</element2>\n <element2>value2</element2>\n </element1>\n <element1>\n <element2>value3</element2>\n <element2>value4</element2>\n </element1>\n </TestModel>\n \"\"\"\n\n\n @pb.model\n class TestModel:\n elements = pb.as_list(pb.wrap('element1', pb.as_list(pb.field(\n 'element2'))))\n model = pb.from_xml(TestModel, xml)\n assert model.elements[0][0] == 'value1'\n assert model.elements[0][1] == 'value2'\n assert model.elements[1][0] == 'value3'\n assert model.elements[1][1] == 'value4'\n\n\ndef test_namespaces_deserialization():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <testns1:TestModel xmlns:testns1=\"http://www.test1.org\"\n xmlns:testns2=\"http://www.test2.org\"\n xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\"\n xsi:schemaLocation=\"http://www.test.com schema.xsd\">\n <element1>value1</element1>\n <testns1:element2>value2</testns1:element2>\n <testns2:element3>value3</testns2:element3>\n <testns1:element4 xmlns:testns2=\"http://www.test22.org\">\n <element5>value5</element5>\n <testns2:element6>value6</testns2:element6>\n </testns1:element4>\n </testns1:TestModel>\n \"\"\"\n\n\n @pb.model(ns='testns1', ns_map={'testns2': 'http://www.test2.org'})\n class TestModel:\n schema = pb.attribute('schemaLocation', ns='xsi')\n element1 = pb.field(ns='')\n element2 = pb.field()\n element3 = pb.field(ns='testns2')\n element5 = pb.wrap('element4', pb.field(ns=''))\n element6 = pb.wrap('element4', pb.field(ns='testns2'), ns_map={\n 'testns2': 'http://www.test22.org'})\n model = pb.from_xml(TestModel, xml, ns_map={'testns1':\n 'http://www.test1.org', 'xsi':\n 'http://www.w3.org/2001/XMLSchema-instance'})\n assert model.schema == 'http://www.test.com schema.xsd'\n assert model.element1 == 'value1'\n assert model.element2 == 'value2'\n assert model.element3 == 'value3'\n assert model.element5 == 'value5'\n assert model.element6 == 'value6'\n\n\ndef test_complex_xml_deserialization():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <envelope xmlns=\"http://www.test.org\"\n xmlns:doc=\"http://www.test1.org\"\n xmlns:data=\"http://www.test2.org\">\n <doc:user name=\"Alexey\" surname=\"Ivanov\" age=\"26\">\n\n <doc:contacts>\n <doc:phone>+79204563539</doc:phone>\n <doc:email>[email protected]</doc:email>\n <doc:email>[email protected]</doc:email>\n </doc:contacts>\n\n <doc:documents>\n <doc:passport series=\"3127\" number=\"836815\"/>\n </doc:documents>\n\n <data:occupations xmlns:data=\"http://www.test22.org\">\n <data:occupation title=\"yandex\">\n <data:address>Moscow</data:address>\n <data:employees>8854</data:employees>\n </data:occupation>\n <data:occupation title=\"skbkontur\">\n <data:address>Yekaterinburg</data:address>\n <data:employees>7742</data:employees>\n </data:occupation>\n </data:occupations>\n\n </doc:user>\n </envelope>\n \"\"\"\n\n\n @pb.model(name='occupation', ns='data', ns_map={'data':\n 'http://www.test22.org'})\n class Occupation:\n title = pb.attr()\n address = pb.field()\n employees = pb.field(converter=int)\n\n\n @pb.model(name='user', ns='doc', ns_map={'doc': 'http://www.test1.org'})\n class User:\n name = pb.attr()\n surname = pb.attr()\n age = pb.attr(converter=int)\n phone = pb.wrap('contacts', pb.field())\n emails = pb.wrap('contacts', pb.as_list(pb.field(name='email')))\n passport_series = pb.wrap('documents/passport', pb.attr('series'))\n passport_number = pb.wrap('documents/passport', pb.attr('number'))\n occupations = pb.wrap('occupations', pb.lst(pb.nested(Occupation)),\n ns='data', ns_map={'data': 'http://www.test22.org'})\n citizenship = pb.field(default='RU')\n xml = et.fromstring(xml)\n user = pb.from_xml(User, xml)\n assert user.name == 'Alexey'\n assert user.surname == 'Ivanov'\n assert user.age == 26\n assert user.phone == '+79204563539'\n assert user.emails == ['[email protected]', '[email protected]']\n assert user.passport_series == '3127'\n assert user.passport_number == '836815'\n assert len(user.occupations) == 2\n assert user.occupations[0].title == 'yandex'\n assert user.occupations[0].address == 'Moscow'\n assert user.occupations[0].employees == 8854\n assert user.occupations[1].title == 'skbkontur'\n assert user.occupations[1].address == 'Yekaterinburg'\n assert user.occupations[1].employees == 7742\n assert user.citizenship == 'RU'\n\n\n<function token>\n\n\ndef test_nested_default():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <test_model>\n </test_model>\n \"\"\"\n\n\n @pb.model(name='nested_model')\n class NestedModel:\n field = pb.field()\n\n\n @pb.model(name='test_model')\n class TestModel:\n nested = pb.nested(NestedModel, default=None)\n obj = pb.from_xml(TestModel, xml)\n assert obj.nested is None\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef test_dict_deserialization():\n\n\n @pb.model\n class Nested:\n fields = pb.as_list(pb.field())\n\n\n @pb.model\n class TestModel:\n field = pb.field()\n nested = pb.as_list(pb.nested(Nested))\n data = {'field': 'value1', 'nested': [{'fields': ['value21', 'value22']\n }, {'fields': ['value31', 'value32']}]}\n obj = TestModel(**data)\n assert obj.field == 'value1'\n assert obj.nested == [Nested(fields=['value21', 'value22']), Nested(\n fields=['value31', 'value32'])]\n",
"<import token>\n\n\ndef test_root_deserialization():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <test_model>\n <element1>value1</element1>\n </test_model>\n \"\"\"\n\n\n @pb.model(name='test_model')\n class TestModel:\n element1 = pb.field()\n model = pb.from_xml(TestModel, xml)\n assert model.element1 == 'value1'\n\n\ndef test_attribute_deserialization():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <TestModel attrib1=\"value1\" attrib2=\"value2\"/>\n \"\"\"\n\n\n @pb.model\n class TestModel:\n attrib1 = pb.attr()\n attrib2 = pb.attr()\n model = pb.from_xml(TestModel, xml)\n assert model.attrib1 == 'value1'\n assert model.attrib2 == 'value2'\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef test_wrapper_deserialization_with_path():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <TestModel>\n <wrapper1>\n <wrapper2>\n <element1>value1</element1>\n </wrapper2>\n </wrapper1>\n </TestModel>\n \"\"\"\n\n\n @pb.model\n class TestModel:\n element1 = pb.wrap('wrapper1/wrapper2', pb.field())\n model = pb.from_xml(TestModel, xml)\n assert model.element1 == 'value1'\n\n\n<function token>\n\n\ndef test_nested_deserialization():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <TestModel>\n <NestedModel1>\n <NestedModel2>\n <element>value</element>\n </NestedModel2>\n </NestedModel1>\n </TestModel>\n \"\"\"\n\n\n @pb.model\n class NestedModel2:\n element = pb.field()\n\n\n @pb.model\n class NestedModel1:\n nested = pb.nested(NestedModel2)\n\n\n @pb.model\n class TestModel:\n nested = pb.nested(NestedModel1)\n model = pb.from_xml(TestModel, xml)\n assert model.nested.nested.element == 'value'\n\n\ndef test_element_list_deserialization():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <TestModel>\n <element1>value1</element1>\n <element1>value2</element1>\n </TestModel>\n \"\"\"\n\n\n @pb.model\n class TestModel:\n element1 = pb.as_list(pb.field())\n model = pb.from_xml(TestModel, xml)\n assert model.element1 == ['value1', 'value2']\n\n\n<function token>\n<function token>\n\n\ndef test_list_of_list_deserialization():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <TestModel>\n <element1>\n <element2>value1</element2>\n <element2>value2</element2>\n </element1>\n <element1>\n <element2>value3</element2>\n <element2>value4</element2>\n </element1>\n </TestModel>\n \"\"\"\n\n\n @pb.model\n class TestModel:\n elements = pb.as_list(pb.wrap('element1', pb.as_list(pb.field(\n 'element2'))))\n model = pb.from_xml(TestModel, xml)\n assert model.elements[0][0] == 'value1'\n assert model.elements[0][1] == 'value2'\n assert model.elements[1][0] == 'value3'\n assert model.elements[1][1] == 'value4'\n\n\ndef test_namespaces_deserialization():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <testns1:TestModel xmlns:testns1=\"http://www.test1.org\"\n xmlns:testns2=\"http://www.test2.org\"\n xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\"\n xsi:schemaLocation=\"http://www.test.com schema.xsd\">\n <element1>value1</element1>\n <testns1:element2>value2</testns1:element2>\n <testns2:element3>value3</testns2:element3>\n <testns1:element4 xmlns:testns2=\"http://www.test22.org\">\n <element5>value5</element5>\n <testns2:element6>value6</testns2:element6>\n </testns1:element4>\n </testns1:TestModel>\n \"\"\"\n\n\n @pb.model(ns='testns1', ns_map={'testns2': 'http://www.test2.org'})\n class TestModel:\n schema = pb.attribute('schemaLocation', ns='xsi')\n element1 = pb.field(ns='')\n element2 = pb.field()\n element3 = pb.field(ns='testns2')\n element5 = pb.wrap('element4', pb.field(ns=''))\n element6 = pb.wrap('element4', pb.field(ns='testns2'), ns_map={\n 'testns2': 'http://www.test22.org'})\n model = pb.from_xml(TestModel, xml, ns_map={'testns1':\n 'http://www.test1.org', 'xsi':\n 'http://www.w3.org/2001/XMLSchema-instance'})\n assert model.schema == 'http://www.test.com schema.xsd'\n assert model.element1 == 'value1'\n assert model.element2 == 'value2'\n assert model.element3 == 'value3'\n assert model.element5 == 'value5'\n assert model.element6 == 'value6'\n\n\ndef test_complex_xml_deserialization():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <envelope xmlns=\"http://www.test.org\"\n xmlns:doc=\"http://www.test1.org\"\n xmlns:data=\"http://www.test2.org\">\n <doc:user name=\"Alexey\" surname=\"Ivanov\" age=\"26\">\n\n <doc:contacts>\n <doc:phone>+79204563539</doc:phone>\n <doc:email>[email protected]</doc:email>\n <doc:email>[email protected]</doc:email>\n </doc:contacts>\n\n <doc:documents>\n <doc:passport series=\"3127\" number=\"836815\"/>\n </doc:documents>\n\n <data:occupations xmlns:data=\"http://www.test22.org\">\n <data:occupation title=\"yandex\">\n <data:address>Moscow</data:address>\n <data:employees>8854</data:employees>\n </data:occupation>\n <data:occupation title=\"skbkontur\">\n <data:address>Yekaterinburg</data:address>\n <data:employees>7742</data:employees>\n </data:occupation>\n </data:occupations>\n\n </doc:user>\n </envelope>\n \"\"\"\n\n\n @pb.model(name='occupation', ns='data', ns_map={'data':\n 'http://www.test22.org'})\n class Occupation:\n title = pb.attr()\n address = pb.field()\n employees = pb.field(converter=int)\n\n\n @pb.model(name='user', ns='doc', ns_map={'doc': 'http://www.test1.org'})\n class User:\n name = pb.attr()\n surname = pb.attr()\n age = pb.attr(converter=int)\n phone = pb.wrap('contacts', pb.field())\n emails = pb.wrap('contacts', pb.as_list(pb.field(name='email')))\n passport_series = pb.wrap('documents/passport', pb.attr('series'))\n passport_number = pb.wrap('documents/passport', pb.attr('number'))\n occupations = pb.wrap('occupations', pb.lst(pb.nested(Occupation)),\n ns='data', ns_map={'data': 'http://www.test22.org'})\n citizenship = pb.field(default='RU')\n xml = et.fromstring(xml)\n user = pb.from_xml(User, xml)\n assert user.name == 'Alexey'\n assert user.surname == 'Ivanov'\n assert user.age == 26\n assert user.phone == '+79204563539'\n assert user.emails == ['[email protected]', '[email protected]']\n assert user.passport_series == '3127'\n assert user.passport_number == '836815'\n assert len(user.occupations) == 2\n assert user.occupations[0].title == 'yandex'\n assert user.occupations[0].address == 'Moscow'\n assert user.occupations[0].employees == 8854\n assert user.occupations[1].title == 'skbkontur'\n assert user.occupations[1].address == 'Yekaterinburg'\n assert user.occupations[1].employees == 7742\n assert user.citizenship == 'RU'\n\n\n<function token>\n\n\ndef test_nested_default():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <test_model>\n </test_model>\n \"\"\"\n\n\n @pb.model(name='nested_model')\n class NestedModel:\n field = pb.field()\n\n\n @pb.model(name='test_model')\n class TestModel:\n nested = pb.nested(NestedModel, default=None)\n obj = pb.from_xml(TestModel, xml)\n assert obj.nested is None\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n",
"<import token>\n<function token>\n\n\ndef test_attribute_deserialization():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <TestModel attrib1=\"value1\" attrib2=\"value2\"/>\n \"\"\"\n\n\n @pb.model\n class TestModel:\n attrib1 = pb.attr()\n attrib2 = pb.attr()\n model = pb.from_xml(TestModel, xml)\n assert model.attrib1 == 'value1'\n assert model.attrib2 == 'value2'\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef test_wrapper_deserialization_with_path():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <TestModel>\n <wrapper1>\n <wrapper2>\n <element1>value1</element1>\n </wrapper2>\n </wrapper1>\n </TestModel>\n \"\"\"\n\n\n @pb.model\n class TestModel:\n element1 = pb.wrap('wrapper1/wrapper2', pb.field())\n model = pb.from_xml(TestModel, xml)\n assert model.element1 == 'value1'\n\n\n<function token>\n\n\ndef test_nested_deserialization():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <TestModel>\n <NestedModel1>\n <NestedModel2>\n <element>value</element>\n </NestedModel2>\n </NestedModel1>\n </TestModel>\n \"\"\"\n\n\n @pb.model\n class NestedModel2:\n element = pb.field()\n\n\n @pb.model\n class NestedModel1:\n nested = pb.nested(NestedModel2)\n\n\n @pb.model\n class TestModel:\n nested = pb.nested(NestedModel1)\n model = pb.from_xml(TestModel, xml)\n assert model.nested.nested.element == 'value'\n\n\ndef test_element_list_deserialization():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <TestModel>\n <element1>value1</element1>\n <element1>value2</element1>\n </TestModel>\n \"\"\"\n\n\n @pb.model\n class TestModel:\n element1 = pb.as_list(pb.field())\n model = pb.from_xml(TestModel, xml)\n assert model.element1 == ['value1', 'value2']\n\n\n<function token>\n<function token>\n\n\ndef test_list_of_list_deserialization():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <TestModel>\n <element1>\n <element2>value1</element2>\n <element2>value2</element2>\n </element1>\n <element1>\n <element2>value3</element2>\n <element2>value4</element2>\n </element1>\n </TestModel>\n \"\"\"\n\n\n @pb.model\n class TestModel:\n elements = pb.as_list(pb.wrap('element1', pb.as_list(pb.field(\n 'element2'))))\n model = pb.from_xml(TestModel, xml)\n assert model.elements[0][0] == 'value1'\n assert model.elements[0][1] == 'value2'\n assert model.elements[1][0] == 'value3'\n assert model.elements[1][1] == 'value4'\n\n\ndef test_namespaces_deserialization():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <testns1:TestModel xmlns:testns1=\"http://www.test1.org\"\n xmlns:testns2=\"http://www.test2.org\"\n xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\"\n xsi:schemaLocation=\"http://www.test.com schema.xsd\">\n <element1>value1</element1>\n <testns1:element2>value2</testns1:element2>\n <testns2:element3>value3</testns2:element3>\n <testns1:element4 xmlns:testns2=\"http://www.test22.org\">\n <element5>value5</element5>\n <testns2:element6>value6</testns2:element6>\n </testns1:element4>\n </testns1:TestModel>\n \"\"\"\n\n\n @pb.model(ns='testns1', ns_map={'testns2': 'http://www.test2.org'})\n class TestModel:\n schema = pb.attribute('schemaLocation', ns='xsi')\n element1 = pb.field(ns='')\n element2 = pb.field()\n element3 = pb.field(ns='testns2')\n element5 = pb.wrap('element4', pb.field(ns=''))\n element6 = pb.wrap('element4', pb.field(ns='testns2'), ns_map={\n 'testns2': 'http://www.test22.org'})\n model = pb.from_xml(TestModel, xml, ns_map={'testns1':\n 'http://www.test1.org', 'xsi':\n 'http://www.w3.org/2001/XMLSchema-instance'})\n assert model.schema == 'http://www.test.com schema.xsd'\n assert model.element1 == 'value1'\n assert model.element2 == 'value2'\n assert model.element3 == 'value3'\n assert model.element5 == 'value5'\n assert model.element6 == 'value6'\n\n\ndef test_complex_xml_deserialization():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <envelope xmlns=\"http://www.test.org\"\n xmlns:doc=\"http://www.test1.org\"\n xmlns:data=\"http://www.test2.org\">\n <doc:user name=\"Alexey\" surname=\"Ivanov\" age=\"26\">\n\n <doc:contacts>\n <doc:phone>+79204563539</doc:phone>\n <doc:email>[email protected]</doc:email>\n <doc:email>[email protected]</doc:email>\n </doc:contacts>\n\n <doc:documents>\n <doc:passport series=\"3127\" number=\"836815\"/>\n </doc:documents>\n\n <data:occupations xmlns:data=\"http://www.test22.org\">\n <data:occupation title=\"yandex\">\n <data:address>Moscow</data:address>\n <data:employees>8854</data:employees>\n </data:occupation>\n <data:occupation title=\"skbkontur\">\n <data:address>Yekaterinburg</data:address>\n <data:employees>7742</data:employees>\n </data:occupation>\n </data:occupations>\n\n </doc:user>\n </envelope>\n \"\"\"\n\n\n @pb.model(name='occupation', ns='data', ns_map={'data':\n 'http://www.test22.org'})\n class Occupation:\n title = pb.attr()\n address = pb.field()\n employees = pb.field(converter=int)\n\n\n @pb.model(name='user', ns='doc', ns_map={'doc': 'http://www.test1.org'})\n class User:\n name = pb.attr()\n surname = pb.attr()\n age = pb.attr(converter=int)\n phone = pb.wrap('contacts', pb.field())\n emails = pb.wrap('contacts', pb.as_list(pb.field(name='email')))\n passport_series = pb.wrap('documents/passport', pb.attr('series'))\n passport_number = pb.wrap('documents/passport', pb.attr('number'))\n occupations = pb.wrap('occupations', pb.lst(pb.nested(Occupation)),\n ns='data', ns_map={'data': 'http://www.test22.org'})\n citizenship = pb.field(default='RU')\n xml = et.fromstring(xml)\n user = pb.from_xml(User, xml)\n assert user.name == 'Alexey'\n assert user.surname == 'Ivanov'\n assert user.age == 26\n assert user.phone == '+79204563539'\n assert user.emails == ['[email protected]', '[email protected]']\n assert user.passport_series == '3127'\n assert user.passport_number == '836815'\n assert len(user.occupations) == 2\n assert user.occupations[0].title == 'yandex'\n assert user.occupations[0].address == 'Moscow'\n assert user.occupations[0].employees == 8854\n assert user.occupations[1].title == 'skbkontur'\n assert user.occupations[1].address == 'Yekaterinburg'\n assert user.occupations[1].employees == 7742\n assert user.citizenship == 'RU'\n\n\n<function token>\n\n\ndef test_nested_default():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <test_model>\n </test_model>\n \"\"\"\n\n\n @pb.model(name='nested_model')\n class NestedModel:\n field = pb.field()\n\n\n @pb.model(name='test_model')\n class TestModel:\n nested = pb.nested(NestedModel, default=None)\n obj = pb.from_xml(TestModel, xml)\n assert obj.nested is None\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n",
"<import token>\n<function token>\n\n\ndef test_attribute_deserialization():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <TestModel attrib1=\"value1\" attrib2=\"value2\"/>\n \"\"\"\n\n\n @pb.model\n class TestModel:\n attrib1 = pb.attr()\n attrib2 = pb.attr()\n model = pb.from_xml(TestModel, xml)\n assert model.attrib1 == 'value1'\n assert model.attrib2 == 'value2'\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef test_wrapper_deserialization_with_path():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <TestModel>\n <wrapper1>\n <wrapper2>\n <element1>value1</element1>\n </wrapper2>\n </wrapper1>\n </TestModel>\n \"\"\"\n\n\n @pb.model\n class TestModel:\n element1 = pb.wrap('wrapper1/wrapper2', pb.field())\n model = pb.from_xml(TestModel, xml)\n assert model.element1 == 'value1'\n\n\n<function token>\n\n\ndef test_nested_deserialization():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <TestModel>\n <NestedModel1>\n <NestedModel2>\n <element>value</element>\n </NestedModel2>\n </NestedModel1>\n </TestModel>\n \"\"\"\n\n\n @pb.model\n class NestedModel2:\n element = pb.field()\n\n\n @pb.model\n class NestedModel1:\n nested = pb.nested(NestedModel2)\n\n\n @pb.model\n class TestModel:\n nested = pb.nested(NestedModel1)\n model = pb.from_xml(TestModel, xml)\n assert model.nested.nested.element == 'value'\n\n\ndef test_element_list_deserialization():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <TestModel>\n <element1>value1</element1>\n <element1>value2</element1>\n </TestModel>\n \"\"\"\n\n\n @pb.model\n class TestModel:\n element1 = pb.as_list(pb.field())\n model = pb.from_xml(TestModel, xml)\n assert model.element1 == ['value1', 'value2']\n\n\n<function token>\n<function token>\n\n\ndef test_list_of_list_deserialization():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <TestModel>\n <element1>\n <element2>value1</element2>\n <element2>value2</element2>\n </element1>\n <element1>\n <element2>value3</element2>\n <element2>value4</element2>\n </element1>\n </TestModel>\n \"\"\"\n\n\n @pb.model\n class TestModel:\n elements = pb.as_list(pb.wrap('element1', pb.as_list(pb.field(\n 'element2'))))\n model = pb.from_xml(TestModel, xml)\n assert model.elements[0][0] == 'value1'\n assert model.elements[0][1] == 'value2'\n assert model.elements[1][0] == 'value3'\n assert model.elements[1][1] == 'value4'\n\n\ndef test_namespaces_deserialization():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <testns1:TestModel xmlns:testns1=\"http://www.test1.org\"\n xmlns:testns2=\"http://www.test2.org\"\n xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\"\n xsi:schemaLocation=\"http://www.test.com schema.xsd\">\n <element1>value1</element1>\n <testns1:element2>value2</testns1:element2>\n <testns2:element3>value3</testns2:element3>\n <testns1:element4 xmlns:testns2=\"http://www.test22.org\">\n <element5>value5</element5>\n <testns2:element6>value6</testns2:element6>\n </testns1:element4>\n </testns1:TestModel>\n \"\"\"\n\n\n @pb.model(ns='testns1', ns_map={'testns2': 'http://www.test2.org'})\n class TestModel:\n schema = pb.attribute('schemaLocation', ns='xsi')\n element1 = pb.field(ns='')\n element2 = pb.field()\n element3 = pb.field(ns='testns2')\n element5 = pb.wrap('element4', pb.field(ns=''))\n element6 = pb.wrap('element4', pb.field(ns='testns2'), ns_map={\n 'testns2': 'http://www.test22.org'})\n model = pb.from_xml(TestModel, xml, ns_map={'testns1':\n 'http://www.test1.org', 'xsi':\n 'http://www.w3.org/2001/XMLSchema-instance'})\n assert model.schema == 'http://www.test.com schema.xsd'\n assert model.element1 == 'value1'\n assert model.element2 == 'value2'\n assert model.element3 == 'value3'\n assert model.element5 == 'value5'\n assert model.element6 == 'value6'\n\n\ndef test_complex_xml_deserialization():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <envelope xmlns=\"http://www.test.org\"\n xmlns:doc=\"http://www.test1.org\"\n xmlns:data=\"http://www.test2.org\">\n <doc:user name=\"Alexey\" surname=\"Ivanov\" age=\"26\">\n\n <doc:contacts>\n <doc:phone>+79204563539</doc:phone>\n <doc:email>[email protected]</doc:email>\n <doc:email>[email protected]</doc:email>\n </doc:contacts>\n\n <doc:documents>\n <doc:passport series=\"3127\" number=\"836815\"/>\n </doc:documents>\n\n <data:occupations xmlns:data=\"http://www.test22.org\">\n <data:occupation title=\"yandex\">\n <data:address>Moscow</data:address>\n <data:employees>8854</data:employees>\n </data:occupation>\n <data:occupation title=\"skbkontur\">\n <data:address>Yekaterinburg</data:address>\n <data:employees>7742</data:employees>\n </data:occupation>\n </data:occupations>\n\n </doc:user>\n </envelope>\n \"\"\"\n\n\n @pb.model(name='occupation', ns='data', ns_map={'data':\n 'http://www.test22.org'})\n class Occupation:\n title = pb.attr()\n address = pb.field()\n employees = pb.field(converter=int)\n\n\n @pb.model(name='user', ns='doc', ns_map={'doc': 'http://www.test1.org'})\n class User:\n name = pb.attr()\n surname = pb.attr()\n age = pb.attr(converter=int)\n phone = pb.wrap('contacts', pb.field())\n emails = pb.wrap('contacts', pb.as_list(pb.field(name='email')))\n passport_series = pb.wrap('documents/passport', pb.attr('series'))\n passport_number = pb.wrap('documents/passport', pb.attr('number'))\n occupations = pb.wrap('occupations', pb.lst(pb.nested(Occupation)),\n ns='data', ns_map={'data': 'http://www.test22.org'})\n citizenship = pb.field(default='RU')\n xml = et.fromstring(xml)\n user = pb.from_xml(User, xml)\n assert user.name == 'Alexey'\n assert user.surname == 'Ivanov'\n assert user.age == 26\n assert user.phone == '+79204563539'\n assert user.emails == ['[email protected]', '[email protected]']\n assert user.passport_series == '3127'\n assert user.passport_number == '836815'\n assert len(user.occupations) == 2\n assert user.occupations[0].title == 'yandex'\n assert user.occupations[0].address == 'Moscow'\n assert user.occupations[0].employees == 8854\n assert user.occupations[1].title == 'skbkontur'\n assert user.occupations[1].address == 'Yekaterinburg'\n assert user.occupations[1].employees == 7742\n assert user.citizenship == 'RU'\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n",
"<import token>\n<function token>\n\n\ndef test_attribute_deserialization():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <TestModel attrib1=\"value1\" attrib2=\"value2\"/>\n \"\"\"\n\n\n @pb.model\n class TestModel:\n attrib1 = pb.attr()\n attrib2 = pb.attr()\n model = pb.from_xml(TestModel, xml)\n assert model.attrib1 == 'value1'\n assert model.attrib2 == 'value2'\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef test_wrapper_deserialization_with_path():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <TestModel>\n <wrapper1>\n <wrapper2>\n <element1>value1</element1>\n </wrapper2>\n </wrapper1>\n </TestModel>\n \"\"\"\n\n\n @pb.model\n class TestModel:\n element1 = pb.wrap('wrapper1/wrapper2', pb.field())\n model = pb.from_xml(TestModel, xml)\n assert model.element1 == 'value1'\n\n\n<function token>\n\n\ndef test_nested_deserialization():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <TestModel>\n <NestedModel1>\n <NestedModel2>\n <element>value</element>\n </NestedModel2>\n </NestedModel1>\n </TestModel>\n \"\"\"\n\n\n @pb.model\n class NestedModel2:\n element = pb.field()\n\n\n @pb.model\n class NestedModel1:\n nested = pb.nested(NestedModel2)\n\n\n @pb.model\n class TestModel:\n nested = pb.nested(NestedModel1)\n model = pb.from_xml(TestModel, xml)\n assert model.nested.nested.element == 'value'\n\n\ndef test_element_list_deserialization():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <TestModel>\n <element1>value1</element1>\n <element1>value2</element1>\n </TestModel>\n \"\"\"\n\n\n @pb.model\n class TestModel:\n element1 = pb.as_list(pb.field())\n model = pb.from_xml(TestModel, xml)\n assert model.element1 == ['value1', 'value2']\n\n\n<function token>\n<function token>\n\n\ndef test_list_of_list_deserialization():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <TestModel>\n <element1>\n <element2>value1</element2>\n <element2>value2</element2>\n </element1>\n <element1>\n <element2>value3</element2>\n <element2>value4</element2>\n </element1>\n </TestModel>\n \"\"\"\n\n\n @pb.model\n class TestModel:\n elements = pb.as_list(pb.wrap('element1', pb.as_list(pb.field(\n 'element2'))))\n model = pb.from_xml(TestModel, xml)\n assert model.elements[0][0] == 'value1'\n assert model.elements[0][1] == 'value2'\n assert model.elements[1][0] == 'value3'\n assert model.elements[1][1] == 'value4'\n\n\n<function token>\n\n\ndef test_complex_xml_deserialization():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <envelope xmlns=\"http://www.test.org\"\n xmlns:doc=\"http://www.test1.org\"\n xmlns:data=\"http://www.test2.org\">\n <doc:user name=\"Alexey\" surname=\"Ivanov\" age=\"26\">\n\n <doc:contacts>\n <doc:phone>+79204563539</doc:phone>\n <doc:email>[email protected]</doc:email>\n <doc:email>[email protected]</doc:email>\n </doc:contacts>\n\n <doc:documents>\n <doc:passport series=\"3127\" number=\"836815\"/>\n </doc:documents>\n\n <data:occupations xmlns:data=\"http://www.test22.org\">\n <data:occupation title=\"yandex\">\n <data:address>Moscow</data:address>\n <data:employees>8854</data:employees>\n </data:occupation>\n <data:occupation title=\"skbkontur\">\n <data:address>Yekaterinburg</data:address>\n <data:employees>7742</data:employees>\n </data:occupation>\n </data:occupations>\n\n </doc:user>\n </envelope>\n \"\"\"\n\n\n @pb.model(name='occupation', ns='data', ns_map={'data':\n 'http://www.test22.org'})\n class Occupation:\n title = pb.attr()\n address = pb.field()\n employees = pb.field(converter=int)\n\n\n @pb.model(name='user', ns='doc', ns_map={'doc': 'http://www.test1.org'})\n class User:\n name = pb.attr()\n surname = pb.attr()\n age = pb.attr(converter=int)\n phone = pb.wrap('contacts', pb.field())\n emails = pb.wrap('contacts', pb.as_list(pb.field(name='email')))\n passport_series = pb.wrap('documents/passport', pb.attr('series'))\n passport_number = pb.wrap('documents/passport', pb.attr('number'))\n occupations = pb.wrap('occupations', pb.lst(pb.nested(Occupation)),\n ns='data', ns_map={'data': 'http://www.test22.org'})\n citizenship = pb.field(default='RU')\n xml = et.fromstring(xml)\n user = pb.from_xml(User, xml)\n assert user.name == 'Alexey'\n assert user.surname == 'Ivanov'\n assert user.age == 26\n assert user.phone == '+79204563539'\n assert user.emails == ['[email protected]', '[email protected]']\n assert user.passport_series == '3127'\n assert user.passport_number == '836815'\n assert len(user.occupations) == 2\n assert user.occupations[0].title == 'yandex'\n assert user.occupations[0].address == 'Moscow'\n assert user.occupations[0].employees == 8854\n assert user.occupations[1].title == 'skbkontur'\n assert user.occupations[1].address == 'Yekaterinburg'\n assert user.occupations[1].employees == 7742\n assert user.citizenship == 'RU'\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n",
"<import token>\n<function token>\n\n\ndef test_attribute_deserialization():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <TestModel attrib1=\"value1\" attrib2=\"value2\"/>\n \"\"\"\n\n\n @pb.model\n class TestModel:\n attrib1 = pb.attr()\n attrib2 = pb.attr()\n model = pb.from_xml(TestModel, xml)\n assert model.attrib1 == 'value1'\n assert model.attrib2 == 'value2'\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef test_nested_deserialization():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <TestModel>\n <NestedModel1>\n <NestedModel2>\n <element>value</element>\n </NestedModel2>\n </NestedModel1>\n </TestModel>\n \"\"\"\n\n\n @pb.model\n class NestedModel2:\n element = pb.field()\n\n\n @pb.model\n class NestedModel1:\n nested = pb.nested(NestedModel2)\n\n\n @pb.model\n class TestModel:\n nested = pb.nested(NestedModel1)\n model = pb.from_xml(TestModel, xml)\n assert model.nested.nested.element == 'value'\n\n\ndef test_element_list_deserialization():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <TestModel>\n <element1>value1</element1>\n <element1>value2</element1>\n </TestModel>\n \"\"\"\n\n\n @pb.model\n class TestModel:\n element1 = pb.as_list(pb.field())\n model = pb.from_xml(TestModel, xml)\n assert model.element1 == ['value1', 'value2']\n\n\n<function token>\n<function token>\n\n\ndef test_list_of_list_deserialization():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <TestModel>\n <element1>\n <element2>value1</element2>\n <element2>value2</element2>\n </element1>\n <element1>\n <element2>value3</element2>\n <element2>value4</element2>\n </element1>\n </TestModel>\n \"\"\"\n\n\n @pb.model\n class TestModel:\n elements = pb.as_list(pb.wrap('element1', pb.as_list(pb.field(\n 'element2'))))\n model = pb.from_xml(TestModel, xml)\n assert model.elements[0][0] == 'value1'\n assert model.elements[0][1] == 'value2'\n assert model.elements[1][0] == 'value3'\n assert model.elements[1][1] == 'value4'\n\n\n<function token>\n\n\ndef test_complex_xml_deserialization():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <envelope xmlns=\"http://www.test.org\"\n xmlns:doc=\"http://www.test1.org\"\n xmlns:data=\"http://www.test2.org\">\n <doc:user name=\"Alexey\" surname=\"Ivanov\" age=\"26\">\n\n <doc:contacts>\n <doc:phone>+79204563539</doc:phone>\n <doc:email>[email protected]</doc:email>\n <doc:email>[email protected]</doc:email>\n </doc:contacts>\n\n <doc:documents>\n <doc:passport series=\"3127\" number=\"836815\"/>\n </doc:documents>\n\n <data:occupations xmlns:data=\"http://www.test22.org\">\n <data:occupation title=\"yandex\">\n <data:address>Moscow</data:address>\n <data:employees>8854</data:employees>\n </data:occupation>\n <data:occupation title=\"skbkontur\">\n <data:address>Yekaterinburg</data:address>\n <data:employees>7742</data:employees>\n </data:occupation>\n </data:occupations>\n\n </doc:user>\n </envelope>\n \"\"\"\n\n\n @pb.model(name='occupation', ns='data', ns_map={'data':\n 'http://www.test22.org'})\n class Occupation:\n title = pb.attr()\n address = pb.field()\n employees = pb.field(converter=int)\n\n\n @pb.model(name='user', ns='doc', ns_map={'doc': 'http://www.test1.org'})\n class User:\n name = pb.attr()\n surname = pb.attr()\n age = pb.attr(converter=int)\n phone = pb.wrap('contacts', pb.field())\n emails = pb.wrap('contacts', pb.as_list(pb.field(name='email')))\n passport_series = pb.wrap('documents/passport', pb.attr('series'))\n passport_number = pb.wrap('documents/passport', pb.attr('number'))\n occupations = pb.wrap('occupations', pb.lst(pb.nested(Occupation)),\n ns='data', ns_map={'data': 'http://www.test22.org'})\n citizenship = pb.field(default='RU')\n xml = et.fromstring(xml)\n user = pb.from_xml(User, xml)\n assert user.name == 'Alexey'\n assert user.surname == 'Ivanov'\n assert user.age == 26\n assert user.phone == '+79204563539'\n assert user.emails == ['[email protected]', '[email protected]']\n assert user.passport_series == '3127'\n assert user.passport_number == '836815'\n assert len(user.occupations) == 2\n assert user.occupations[0].title == 'yandex'\n assert user.occupations[0].address == 'Moscow'\n assert user.occupations[0].employees == 8854\n assert user.occupations[1].title == 'skbkontur'\n assert user.occupations[1].address == 'Yekaterinburg'\n assert user.occupations[1].employees == 7742\n assert user.citizenship == 'RU'\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n",
"<import token>\n<function token>\n\n\ndef test_attribute_deserialization():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <TestModel attrib1=\"value1\" attrib2=\"value2\"/>\n \"\"\"\n\n\n @pb.model\n class TestModel:\n attrib1 = pb.attr()\n attrib2 = pb.attr()\n model = pb.from_xml(TestModel, xml)\n assert model.attrib1 == 'value1'\n assert model.attrib2 == 'value2'\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef test_nested_deserialization():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <TestModel>\n <NestedModel1>\n <NestedModel2>\n <element>value</element>\n </NestedModel2>\n </NestedModel1>\n </TestModel>\n \"\"\"\n\n\n @pb.model\n class NestedModel2:\n element = pb.field()\n\n\n @pb.model\n class NestedModel1:\n nested = pb.nested(NestedModel2)\n\n\n @pb.model\n class TestModel:\n nested = pb.nested(NestedModel1)\n model = pb.from_xml(TestModel, xml)\n assert model.nested.nested.element == 'value'\n\n\ndef test_element_list_deserialization():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <TestModel>\n <element1>value1</element1>\n <element1>value2</element1>\n </TestModel>\n \"\"\"\n\n\n @pb.model\n class TestModel:\n element1 = pb.as_list(pb.field())\n model = pb.from_xml(TestModel, xml)\n assert model.element1 == ['value1', 'value2']\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef test_complex_xml_deserialization():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <envelope xmlns=\"http://www.test.org\"\n xmlns:doc=\"http://www.test1.org\"\n xmlns:data=\"http://www.test2.org\">\n <doc:user name=\"Alexey\" surname=\"Ivanov\" age=\"26\">\n\n <doc:contacts>\n <doc:phone>+79204563539</doc:phone>\n <doc:email>[email protected]</doc:email>\n <doc:email>[email protected]</doc:email>\n </doc:contacts>\n\n <doc:documents>\n <doc:passport series=\"3127\" number=\"836815\"/>\n </doc:documents>\n\n <data:occupations xmlns:data=\"http://www.test22.org\">\n <data:occupation title=\"yandex\">\n <data:address>Moscow</data:address>\n <data:employees>8854</data:employees>\n </data:occupation>\n <data:occupation title=\"skbkontur\">\n <data:address>Yekaterinburg</data:address>\n <data:employees>7742</data:employees>\n </data:occupation>\n </data:occupations>\n\n </doc:user>\n </envelope>\n \"\"\"\n\n\n @pb.model(name='occupation', ns='data', ns_map={'data':\n 'http://www.test22.org'})\n class Occupation:\n title = pb.attr()\n address = pb.field()\n employees = pb.field(converter=int)\n\n\n @pb.model(name='user', ns='doc', ns_map={'doc': 'http://www.test1.org'})\n class User:\n name = pb.attr()\n surname = pb.attr()\n age = pb.attr(converter=int)\n phone = pb.wrap('contacts', pb.field())\n emails = pb.wrap('contacts', pb.as_list(pb.field(name='email')))\n passport_series = pb.wrap('documents/passport', pb.attr('series'))\n passport_number = pb.wrap('documents/passport', pb.attr('number'))\n occupations = pb.wrap('occupations', pb.lst(pb.nested(Occupation)),\n ns='data', ns_map={'data': 'http://www.test22.org'})\n citizenship = pb.field(default='RU')\n xml = et.fromstring(xml)\n user = pb.from_xml(User, xml)\n assert user.name == 'Alexey'\n assert user.surname == 'Ivanov'\n assert user.age == 26\n assert user.phone == '+79204563539'\n assert user.emails == ['[email protected]', '[email protected]']\n assert user.passport_series == '3127'\n assert user.passport_number == '836815'\n assert len(user.occupations) == 2\n assert user.occupations[0].title == 'yandex'\n assert user.occupations[0].address == 'Moscow'\n assert user.occupations[0].employees == 8854\n assert user.occupations[1].title == 'skbkontur'\n assert user.occupations[1].address == 'Yekaterinburg'\n assert user.occupations[1].employees == 7742\n assert user.citizenship == 'RU'\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n",
"<import token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef test_nested_deserialization():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <TestModel>\n <NestedModel1>\n <NestedModel2>\n <element>value</element>\n </NestedModel2>\n </NestedModel1>\n </TestModel>\n \"\"\"\n\n\n @pb.model\n class NestedModel2:\n element = pb.field()\n\n\n @pb.model\n class NestedModel1:\n nested = pb.nested(NestedModel2)\n\n\n @pb.model\n class TestModel:\n nested = pb.nested(NestedModel1)\n model = pb.from_xml(TestModel, xml)\n assert model.nested.nested.element == 'value'\n\n\ndef test_element_list_deserialization():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <TestModel>\n <element1>value1</element1>\n <element1>value2</element1>\n </TestModel>\n \"\"\"\n\n\n @pb.model\n class TestModel:\n element1 = pb.as_list(pb.field())\n model = pb.from_xml(TestModel, xml)\n assert model.element1 == ['value1', 'value2']\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef test_complex_xml_deserialization():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <envelope xmlns=\"http://www.test.org\"\n xmlns:doc=\"http://www.test1.org\"\n xmlns:data=\"http://www.test2.org\">\n <doc:user name=\"Alexey\" surname=\"Ivanov\" age=\"26\">\n\n <doc:contacts>\n <doc:phone>+79204563539</doc:phone>\n <doc:email>[email protected]</doc:email>\n <doc:email>[email protected]</doc:email>\n </doc:contacts>\n\n <doc:documents>\n <doc:passport series=\"3127\" number=\"836815\"/>\n </doc:documents>\n\n <data:occupations xmlns:data=\"http://www.test22.org\">\n <data:occupation title=\"yandex\">\n <data:address>Moscow</data:address>\n <data:employees>8854</data:employees>\n </data:occupation>\n <data:occupation title=\"skbkontur\">\n <data:address>Yekaterinburg</data:address>\n <data:employees>7742</data:employees>\n </data:occupation>\n </data:occupations>\n\n </doc:user>\n </envelope>\n \"\"\"\n\n\n @pb.model(name='occupation', ns='data', ns_map={'data':\n 'http://www.test22.org'})\n class Occupation:\n title = pb.attr()\n address = pb.field()\n employees = pb.field(converter=int)\n\n\n @pb.model(name='user', ns='doc', ns_map={'doc': 'http://www.test1.org'})\n class User:\n name = pb.attr()\n surname = pb.attr()\n age = pb.attr(converter=int)\n phone = pb.wrap('contacts', pb.field())\n emails = pb.wrap('contacts', pb.as_list(pb.field(name='email')))\n passport_series = pb.wrap('documents/passport', pb.attr('series'))\n passport_number = pb.wrap('documents/passport', pb.attr('number'))\n occupations = pb.wrap('occupations', pb.lst(pb.nested(Occupation)),\n ns='data', ns_map={'data': 'http://www.test22.org'})\n citizenship = pb.field(default='RU')\n xml = et.fromstring(xml)\n user = pb.from_xml(User, xml)\n assert user.name == 'Alexey'\n assert user.surname == 'Ivanov'\n assert user.age == 26\n assert user.phone == '+79204563539'\n assert user.emails == ['[email protected]', '[email protected]']\n assert user.passport_series == '3127'\n assert user.passport_number == '836815'\n assert len(user.occupations) == 2\n assert user.occupations[0].title == 'yandex'\n assert user.occupations[0].address == 'Moscow'\n assert user.occupations[0].employees == 8854\n assert user.occupations[1].title == 'skbkontur'\n assert user.occupations[1].address == 'Yekaterinburg'\n assert user.occupations[1].employees == 7742\n assert user.citizenship == 'RU'\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n",
"<import token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef test_nested_deserialization():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <TestModel>\n <NestedModel1>\n <NestedModel2>\n <element>value</element>\n </NestedModel2>\n </NestedModel1>\n </TestModel>\n \"\"\"\n\n\n @pb.model\n class NestedModel2:\n element = pb.field()\n\n\n @pb.model\n class NestedModel1:\n nested = pb.nested(NestedModel2)\n\n\n @pb.model\n class TestModel:\n nested = pb.nested(NestedModel1)\n model = pb.from_xml(TestModel, xml)\n assert model.nested.nested.element == 'value'\n\n\ndef test_element_list_deserialization():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <TestModel>\n <element1>value1</element1>\n <element1>value2</element1>\n </TestModel>\n \"\"\"\n\n\n @pb.model\n class TestModel:\n element1 = pb.as_list(pb.field())\n model = pb.from_xml(TestModel, xml)\n assert model.element1 == ['value1', 'value2']\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n",
"<import token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef test_element_list_deserialization():\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <TestModel>\n <element1>value1</element1>\n <element1>value2</element1>\n </TestModel>\n \"\"\"\n\n\n @pb.model\n class TestModel:\n element1 = pb.as_list(pb.field())\n model = pb.from_xml(TestModel, xml)\n assert model.element1 == ['value1', 'value2']\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n",
"<import token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n"
] | false |
98,334 |
d451e12cc8670e34afd4f081099cd18888bfb923
|
import random
import unittest
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from webdriver_manager.chrome import ChromeDriverManager
from time import sleep
from random import choice
from string import ascii_letters
# def phone():
# var_code = [+3896, +38050, +38097, +38098, +38073, +38099]
# code = random.choice(var_code)
# num = (random.randrange(10, 9000000, 5))
# result ='+'+str(code) + str(num)
# return result
mail_name = ['Olivia','Noah','Ethan','Mason','Logan','Lucas','Jacob','Jackson','Aiden',
'Jack',
'Luke',
'Elijah',
'Benjamin',
'James',
'William',
'Michael',
'Alexander',
'Oliver',
'Daniel',
'Henry',
'Owen',
'Gabriel',
'Matthew',
'Carter',
'Ryan',
'Wyatt',
'Andrew',
'Caleb',
'Jayden',
'Connor',
'Liam',
'Emma',
'Sophia',
'Ava',
'Isabella',
'Mia',
'Charlotte',
'Emily',
'Harper',
'Abigail',
'Madison',
'Avery',
'Ella',
'Madison',
'Lily',
'Chloe',
'Sofia',
'Evelyn',
'Hannah',
'Addison',
'Grace',
'Zoey',
'Aubrey',
'Aria',
'Zoe',
'Ellie',
'Audrey',
'Natalie',
'Elizabeth',
'Scarlett',
]
domains = ['@gmail.com', '@mail.ru','@I.ua','@yahoo.com','@meta.ua', '@icloud.com', '@ukr.net','@yandex.ru']
def e_mail():
first_email_part = (''.join(choice(ascii_letters) for i in range(4)))
second_email_part = random.choice(mail_name)
final_part = random.choice(domains)
result = str(first_email_part) + str(second_email_part)+ str(final_part)
return result
#_______________________EMAIL FUNCTION______________________________________
name_list = ['Иван', 'Гена', 'Кирилл', 'Валентина', 'Дима', 'Ашот', 'Андрей',
'Павел',
'Паисий',
'Пантелеймон',
'Парфений',
'Пафнутий',
'Пахомий',
'Пётр',
'Платон',
'Порфирий',
'Потап',
'Пров',
'Прокопий',
'Протасий',
'Прохор',
'Вазген', 'Эдурд', 'Виталик', 'Марина', 'Элона', 'Илья', 'Володя', 'Артем', 'Василий', 'Гриша',
'Леонид', 'Назар', 'Юрка', 'Алёна', 'Алина', 'Димас', 'Максим' ]
def char_name():
name = random.choice(name_list)
return name
def phone():
var_code = [96, 50, 97, 98, 73, 99]
code = random.choice(var_code)
num = (random.randrange(9000000))
result = str(code) + str(num)
return result
#_________________Phone_function_________________
# print(char_name())
# print(str(phone()))
#______________________________
site= 'xxx'
account = '/html/body/div[1]/div[2]/nonutch/div/div[2]/div[2]/div/div[2]/div/a[1]'
class some_site(unittest.TestCase):
def setUp(self):
self.driver = webdriver.Chrome()
def test_TT_signUP(self):
self.driver.get(site)
self.driver.maximize_window()
sleep(2)
self.driver.find_element_by_xpath('//*[@id="mdl-subcribe-uk"]/button').click()
self.driver.find_element_by_xpath(account).click()
name=self.driver.find_element_by_id('render_form_name')
name.send_keys(char_name())
sleep(3)
email = self.driver.find_element_by_id('render_form_email')
email.send_keys(e_mail())
sleep(5)
fone = self.driver.find_element_by_id('render_form_phone')
fone.send_keys(phone())
sleep(7)
#
# self.driver.find_element_by_class_name('select2-selection__rendered').click() #//*[@id="select2-render_form_office_id-container"]
# sleep(2)
# office = self.driver.find_element_by_class_name('select2-selection__rendered')
# office.send_keys(Keys.DOWN)
# sleep(3)
# office.send_keys(Keys.ENTER)
# sleep(2)
self.driver.find_element_by_id('render_form_submit').click()
sleep(50)
#
# def tearDown(self):
# self.driver.quit()
if __name__ == '__main__':
unittest.main()
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
|
[
"import random\nimport unittest\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nfrom webdriver_manager.chrome import ChromeDriverManager\nfrom time import sleep\nfrom random import choice\nfrom string import ascii_letters\n\n# def phone():\n# var_code = [+3896, +38050, +38097, +38098, +38073, +38099]\n# code = random.choice(var_code)\n# num = (random.randrange(10, 9000000, 5))\n# result ='+'+str(code) + str(num)\n# return result\nmail_name = ['Olivia','Noah','Ethan','Mason','Logan','Lucas','Jacob','Jackson','Aiden',\n'Jack',\n'Luke',\n'Elijah',\n'Benjamin',\n'James',\n'William',\n'Michael',\n'Alexander',\n'Oliver',\n'Daniel',\n'Henry',\n'Owen',\n'Gabriel',\n'Matthew',\n'Carter',\n'Ryan',\n'Wyatt',\n'Andrew',\n'Caleb',\n'Jayden',\n'Connor',\n'Liam',\n'Emma',\n'Sophia',\n'Ava',\n'Isabella',\n'Mia',\n'Charlotte',\n'Emily',\n'Harper',\n'Abigail',\n'Madison',\n'Avery',\n'Ella',\n'Madison',\n'Lily',\n'Chloe',\n'Sofia',\n'Evelyn',\n'Hannah',\n'Addison',\n'Grace',\n'Zoey',\n'Aubrey',\n'Aria',\n'Zoe',\n'Ellie',\n'Audrey',\n'Natalie',\n'Elizabeth',\n'Scarlett',\n]\ndomains = ['@gmail.com', '@mail.ru','@I.ua','@yahoo.com','@meta.ua', '@icloud.com', '@ukr.net','@yandex.ru']\ndef e_mail():\n first_email_part = (''.join(choice(ascii_letters) for i in range(4)))\n second_email_part = random.choice(mail_name)\n final_part = random.choice(domains)\n result = str(first_email_part) + str(second_email_part)+ str(final_part)\n return result\n#_______________________EMAIL FUNCTION______________________________________\n\n\n\nname_list = ['Иван', 'Гена', 'Кирилл', 'Валентина', 'Дима', 'Ашот', 'Андрей',\n'Павел',\n'Паисий',\n'Пантелеймон',\n'Парфений',\n'Пафнутий',\n'Пахомий',\n'Пётр',\n'Платон',\n'Порфирий',\n'Потап',\n'Пров',\n'Прокопий',\n'Протасий',\n'Прохор',\n\n'Вазген', 'Эдурд', 'Виталик', 'Марина', 'Элона', 'Илья', 'Володя', 'Артем', 'Василий', 'Гриша',\n'Леонид', 'Назар', 'Юрка', 'Алёна', 'Алина', 'Димас', 'Максим' ]\ndef char_name():\n name = random.choice(name_list)\n return name\n\n\n\n\n\n\n\ndef phone():\n var_code = [96, 50, 97, 98, 73, 99]\n code = random.choice(var_code)\n num = (random.randrange(9000000))\n result = str(code) + str(num)\n return result\n#_________________Phone_function_________________\n\n\n\n\n# print(char_name())\n# print(str(phone()))\n#______________________________\n\nsite= 'xxx'\naccount = '/html/body/div[1]/div[2]/nonutch/div/div[2]/div[2]/div/div[2]/div/a[1]'\n\nclass some_site(unittest.TestCase):\n def setUp(self):\n self.driver = webdriver.Chrome()\n\n def test_TT_signUP(self):\n self.driver.get(site)\n self.driver.maximize_window()\n sleep(2)\n self.driver.find_element_by_xpath('//*[@id=\"mdl-subcribe-uk\"]/button').click()\n\n\n self.driver.find_element_by_xpath(account).click()\n name=self.driver.find_element_by_id('render_form_name')\n name.send_keys(char_name())\n sleep(3)\n\n email = self.driver.find_element_by_id('render_form_email')\n email.send_keys(e_mail())\n sleep(5)\n\n fone = self.driver.find_element_by_id('render_form_phone')\n fone.send_keys(phone())\n\n sleep(7)\n #\n # self.driver.find_element_by_class_name('select2-selection__rendered').click() #//*[@id=\"select2-render_form_office_id-container\"]\n # sleep(2)\n # office = self.driver.find_element_by_class_name('select2-selection__rendered')\n # office.send_keys(Keys.DOWN)\n # sleep(3)\n # office.send_keys(Keys.ENTER)\n # sleep(2)\n self.driver.find_element_by_id('render_form_submit').click()\n sleep(50)\n#\n # def tearDown(self):\n # self.driver.quit()\n\nif __name__ == '__main__':\n unittest.main()\n\n#\n#\n#\n#\n#\n#\n#\n#\n#\n#\n#\n#\n#\n#\n#\n#\n#\n#\n#\n#\n",
"import random\nimport unittest\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nfrom webdriver_manager.chrome import ChromeDriverManager\nfrom time import sleep\nfrom random import choice\nfrom string import ascii_letters\nmail_name = ['Olivia', 'Noah', 'Ethan', 'Mason', 'Logan', 'Lucas', 'Jacob',\n 'Jackson', 'Aiden', 'Jack', 'Luke', 'Elijah', 'Benjamin', 'James',\n 'William', 'Michael', 'Alexander', 'Oliver', 'Daniel', 'Henry', 'Owen',\n 'Gabriel', 'Matthew', 'Carter', 'Ryan', 'Wyatt', 'Andrew', 'Caleb',\n 'Jayden', 'Connor', 'Liam', 'Emma', 'Sophia', 'Ava', 'Isabella', 'Mia',\n 'Charlotte', 'Emily', 'Harper', 'Abigail', 'Madison', 'Avery', 'Ella',\n 'Madison', 'Lily', 'Chloe', 'Sofia', 'Evelyn', 'Hannah', 'Addison',\n 'Grace', 'Zoey', 'Aubrey', 'Aria', 'Zoe', 'Ellie', 'Audrey', 'Natalie',\n 'Elizabeth', 'Scarlett']\ndomains = ['@gmail.com', '@mail.ru', '@I.ua', '@yahoo.com', '@meta.ua',\n '@icloud.com', '@ukr.net', '@yandex.ru']\n\n\ndef e_mail():\n first_email_part = ''.join(choice(ascii_letters) for i in range(4))\n second_email_part = random.choice(mail_name)\n final_part = random.choice(domains)\n result = str(first_email_part) + str(second_email_part) + str(final_part)\n return result\n\n\nname_list = ['Иван', 'Гена', 'Кирилл', 'Валентина', 'Дима', 'Ашот',\n 'Андрей', 'Павел', 'Паисий', 'Пантелеймон', 'Парфений', 'Пафнутий',\n 'Пахомий', 'Пётр', 'Платон', 'Порфирий', 'Потап', 'Пров', 'Прокопий',\n 'Протасий', 'Прохор', 'Вазген', 'Эдурд', 'Виталик', 'Марина', 'Элона',\n 'Илья', 'Володя', 'Артем', 'Василий', 'Гриша', 'Леонид', 'Назар',\n 'Юрка', 'Алёна', 'Алина', 'Димас', 'Максим']\n\n\ndef char_name():\n name = random.choice(name_list)\n return name\n\n\ndef phone():\n var_code = [96, 50, 97, 98, 73, 99]\n code = random.choice(var_code)\n num = random.randrange(9000000)\n result = str(code) + str(num)\n return result\n\n\nsite = 'xxx'\naccount = (\n '/html/body/div[1]/div[2]/nonutch/div/div[2]/div[2]/div/div[2]/div/a[1]')\n\n\nclass some_site(unittest.TestCase):\n\n def setUp(self):\n self.driver = webdriver.Chrome()\n\n def test_TT_signUP(self):\n self.driver.get(site)\n self.driver.maximize_window()\n sleep(2)\n self.driver.find_element_by_xpath('//*[@id=\"mdl-subcribe-uk\"]/button'\n ).click()\n self.driver.find_element_by_xpath(account).click()\n name = self.driver.find_element_by_id('render_form_name')\n name.send_keys(char_name())\n sleep(3)\n email = self.driver.find_element_by_id('render_form_email')\n email.send_keys(e_mail())\n sleep(5)\n fone = self.driver.find_element_by_id('render_form_phone')\n fone.send_keys(phone())\n sleep(7)\n self.driver.find_element_by_id('render_form_submit').click()\n sleep(50)\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"<import token>\nmail_name = ['Olivia', 'Noah', 'Ethan', 'Mason', 'Logan', 'Lucas', 'Jacob',\n 'Jackson', 'Aiden', 'Jack', 'Luke', 'Elijah', 'Benjamin', 'James',\n 'William', 'Michael', 'Alexander', 'Oliver', 'Daniel', 'Henry', 'Owen',\n 'Gabriel', 'Matthew', 'Carter', 'Ryan', 'Wyatt', 'Andrew', 'Caleb',\n 'Jayden', 'Connor', 'Liam', 'Emma', 'Sophia', 'Ava', 'Isabella', 'Mia',\n 'Charlotte', 'Emily', 'Harper', 'Abigail', 'Madison', 'Avery', 'Ella',\n 'Madison', 'Lily', 'Chloe', 'Sofia', 'Evelyn', 'Hannah', 'Addison',\n 'Grace', 'Zoey', 'Aubrey', 'Aria', 'Zoe', 'Ellie', 'Audrey', 'Natalie',\n 'Elizabeth', 'Scarlett']\ndomains = ['@gmail.com', '@mail.ru', '@I.ua', '@yahoo.com', '@meta.ua',\n '@icloud.com', '@ukr.net', '@yandex.ru']\n\n\ndef e_mail():\n first_email_part = ''.join(choice(ascii_letters) for i in range(4))\n second_email_part = random.choice(mail_name)\n final_part = random.choice(domains)\n result = str(first_email_part) + str(second_email_part) + str(final_part)\n return result\n\n\nname_list = ['Иван', 'Гена', 'Кирилл', 'Валентина', 'Дима', 'Ашот',\n 'Андрей', 'Павел', 'Паисий', 'Пантелеймон', 'Парфений', 'Пафнутий',\n 'Пахомий', 'Пётр', 'Платон', 'Порфирий', 'Потап', 'Пров', 'Прокопий',\n 'Протасий', 'Прохор', 'Вазген', 'Эдурд', 'Виталик', 'Марина', 'Элона',\n 'Илья', 'Володя', 'Артем', 'Василий', 'Гриша', 'Леонид', 'Назар',\n 'Юрка', 'Алёна', 'Алина', 'Димас', 'Максим']\n\n\ndef char_name():\n name = random.choice(name_list)\n return name\n\n\ndef phone():\n var_code = [96, 50, 97, 98, 73, 99]\n code = random.choice(var_code)\n num = random.randrange(9000000)\n result = str(code) + str(num)\n return result\n\n\nsite = 'xxx'\naccount = (\n '/html/body/div[1]/div[2]/nonutch/div/div[2]/div[2]/div/div[2]/div/a[1]')\n\n\nclass some_site(unittest.TestCase):\n\n def setUp(self):\n self.driver = webdriver.Chrome()\n\n def test_TT_signUP(self):\n self.driver.get(site)\n self.driver.maximize_window()\n sleep(2)\n self.driver.find_element_by_xpath('//*[@id=\"mdl-subcribe-uk\"]/button'\n ).click()\n self.driver.find_element_by_xpath(account).click()\n name = self.driver.find_element_by_id('render_form_name')\n name.send_keys(char_name())\n sleep(3)\n email = self.driver.find_element_by_id('render_form_email')\n email.send_keys(e_mail())\n sleep(5)\n fone = self.driver.find_element_by_id('render_form_phone')\n fone.send_keys(phone())\n sleep(7)\n self.driver.find_element_by_id('render_form_submit').click()\n sleep(50)\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"<import token>\n<assignment token>\n\n\ndef e_mail():\n first_email_part = ''.join(choice(ascii_letters) for i in range(4))\n second_email_part = random.choice(mail_name)\n final_part = random.choice(domains)\n result = str(first_email_part) + str(second_email_part) + str(final_part)\n return result\n\n\n<assignment token>\n\n\ndef char_name():\n name = random.choice(name_list)\n return name\n\n\ndef phone():\n var_code = [96, 50, 97, 98, 73, 99]\n code = random.choice(var_code)\n num = random.randrange(9000000)\n result = str(code) + str(num)\n return result\n\n\n<assignment token>\n\n\nclass some_site(unittest.TestCase):\n\n def setUp(self):\n self.driver = webdriver.Chrome()\n\n def test_TT_signUP(self):\n self.driver.get(site)\n self.driver.maximize_window()\n sleep(2)\n self.driver.find_element_by_xpath('//*[@id=\"mdl-subcribe-uk\"]/button'\n ).click()\n self.driver.find_element_by_xpath(account).click()\n name = self.driver.find_element_by_id('render_form_name')\n name.send_keys(char_name())\n sleep(3)\n email = self.driver.find_element_by_id('render_form_email')\n email.send_keys(e_mail())\n sleep(5)\n fone = self.driver.find_element_by_id('render_form_phone')\n fone.send_keys(phone())\n sleep(7)\n self.driver.find_element_by_id('render_form_submit').click()\n sleep(50)\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"<import token>\n<assignment token>\n\n\ndef e_mail():\n first_email_part = ''.join(choice(ascii_letters) for i in range(4))\n second_email_part = random.choice(mail_name)\n final_part = random.choice(domains)\n result = str(first_email_part) + str(second_email_part) + str(final_part)\n return result\n\n\n<assignment token>\n\n\ndef char_name():\n name = random.choice(name_list)\n return name\n\n\ndef phone():\n var_code = [96, 50, 97, 98, 73, 99]\n code = random.choice(var_code)\n num = random.randrange(9000000)\n result = str(code) + str(num)\n return result\n\n\n<assignment token>\n\n\nclass some_site(unittest.TestCase):\n\n def setUp(self):\n self.driver = webdriver.Chrome()\n\n def test_TT_signUP(self):\n self.driver.get(site)\n self.driver.maximize_window()\n sleep(2)\n self.driver.find_element_by_xpath('//*[@id=\"mdl-subcribe-uk\"]/button'\n ).click()\n self.driver.find_element_by_xpath(account).click()\n name = self.driver.find_element_by_id('render_form_name')\n name.send_keys(char_name())\n sleep(3)\n email = self.driver.find_element_by_id('render_form_email')\n email.send_keys(e_mail())\n sleep(5)\n fone = self.driver.find_element_by_id('render_form_phone')\n fone.send_keys(phone())\n sleep(7)\n self.driver.find_element_by_id('render_form_submit').click()\n sleep(50)\n\n\n<code token>\n",
"<import token>\n<assignment token>\n\n\ndef e_mail():\n first_email_part = ''.join(choice(ascii_letters) for i in range(4))\n second_email_part = random.choice(mail_name)\n final_part = random.choice(domains)\n result = str(first_email_part) + str(second_email_part) + str(final_part)\n return result\n\n\n<assignment token>\n\n\ndef char_name():\n name = random.choice(name_list)\n return name\n\n\n<function token>\n<assignment token>\n\n\nclass some_site(unittest.TestCase):\n\n def setUp(self):\n self.driver = webdriver.Chrome()\n\n def test_TT_signUP(self):\n self.driver.get(site)\n self.driver.maximize_window()\n sleep(2)\n self.driver.find_element_by_xpath('//*[@id=\"mdl-subcribe-uk\"]/button'\n ).click()\n self.driver.find_element_by_xpath(account).click()\n name = self.driver.find_element_by_id('render_form_name')\n name.send_keys(char_name())\n sleep(3)\n email = self.driver.find_element_by_id('render_form_email')\n email.send_keys(e_mail())\n sleep(5)\n fone = self.driver.find_element_by_id('render_form_phone')\n fone.send_keys(phone())\n sleep(7)\n self.driver.find_element_by_id('render_form_submit').click()\n sleep(50)\n\n\n<code token>\n",
"<import token>\n<assignment token>\n\n\ndef e_mail():\n first_email_part = ''.join(choice(ascii_letters) for i in range(4))\n second_email_part = random.choice(mail_name)\n final_part = random.choice(domains)\n result = str(first_email_part) + str(second_email_part) + str(final_part)\n return result\n\n\n<assignment token>\n<function token>\n<function token>\n<assignment token>\n\n\nclass some_site(unittest.TestCase):\n\n def setUp(self):\n self.driver = webdriver.Chrome()\n\n def test_TT_signUP(self):\n self.driver.get(site)\n self.driver.maximize_window()\n sleep(2)\n self.driver.find_element_by_xpath('//*[@id=\"mdl-subcribe-uk\"]/button'\n ).click()\n self.driver.find_element_by_xpath(account).click()\n name = self.driver.find_element_by_id('render_form_name')\n name.send_keys(char_name())\n sleep(3)\n email = self.driver.find_element_by_id('render_form_email')\n email.send_keys(e_mail())\n sleep(5)\n fone = self.driver.find_element_by_id('render_form_phone')\n fone.send_keys(phone())\n sleep(7)\n self.driver.find_element_by_id('render_form_submit').click()\n sleep(50)\n\n\n<code token>\n",
"<import token>\n<assignment token>\n<function token>\n<assignment token>\n<function token>\n<function token>\n<assignment token>\n\n\nclass some_site(unittest.TestCase):\n\n def setUp(self):\n self.driver = webdriver.Chrome()\n\n def test_TT_signUP(self):\n self.driver.get(site)\n self.driver.maximize_window()\n sleep(2)\n self.driver.find_element_by_xpath('//*[@id=\"mdl-subcribe-uk\"]/button'\n ).click()\n self.driver.find_element_by_xpath(account).click()\n name = self.driver.find_element_by_id('render_form_name')\n name.send_keys(char_name())\n sleep(3)\n email = self.driver.find_element_by_id('render_form_email')\n email.send_keys(e_mail())\n sleep(5)\n fone = self.driver.find_element_by_id('render_form_phone')\n fone.send_keys(phone())\n sleep(7)\n self.driver.find_element_by_id('render_form_submit').click()\n sleep(50)\n\n\n<code token>\n",
"<import token>\n<assignment token>\n<function token>\n<assignment token>\n<function token>\n<function token>\n<assignment token>\n\n\nclass some_site(unittest.TestCase):\n <function token>\n\n def test_TT_signUP(self):\n self.driver.get(site)\n self.driver.maximize_window()\n sleep(2)\n self.driver.find_element_by_xpath('//*[@id=\"mdl-subcribe-uk\"]/button'\n ).click()\n self.driver.find_element_by_xpath(account).click()\n name = self.driver.find_element_by_id('render_form_name')\n name.send_keys(char_name())\n sleep(3)\n email = self.driver.find_element_by_id('render_form_email')\n email.send_keys(e_mail())\n sleep(5)\n fone = self.driver.find_element_by_id('render_form_phone')\n fone.send_keys(phone())\n sleep(7)\n self.driver.find_element_by_id('render_form_submit').click()\n sleep(50)\n\n\n<code token>\n",
"<import token>\n<assignment token>\n<function token>\n<assignment token>\n<function token>\n<function token>\n<assignment token>\n\n\nclass some_site(unittest.TestCase):\n <function token>\n <function token>\n\n\n<code token>\n",
"<import token>\n<assignment token>\n<function token>\n<assignment token>\n<function token>\n<function token>\n<assignment token>\n<class token>\n<code token>\n"
] | false |
98,335 |
9b48631714f71592e5582792c24028bc28bf557f
|
from django.urls import path
from django.views.decorators.cache import never_cache
from django.conf.urls.static import static, serve
from django.urls import reverse_lazy
from django.contrib.auth.views import PasswordResetConfirmView
from bboard import settings
from .views import detail, detail1, profile_bb_detail
from .views import index, other_page, BBLoginView, profile, BBLogoutView, ChangeUserInfoView, BBPasswordChangeView, RegisterUserView, RegisterDoneView, by_rubric
from .views import user_activate, DeleteUserView, BBPasswordResetView, BBPasswordResetDoneView, BBPasswordResetCompleteView, profile_bb_add, profile_bb_change, profile_bb_delete
app_name = 'main'
urlpatterns = [
path('detail/<int:pk>/', detail1, name='detail1'),
path('<int:rubric_pk>/<int:pk>/', detail, name='detail'),
path('<int:pk>/', by_rubric, name='by_rubric'),
path('<str:page>/', other_page, name='other'),
path('', index, name='index'),
path('accounts/login/', BBLoginView.as_view(), name='login'),
path('accounts/logout/', BBLogoutView.as_view(), name='logout'),
path('accounts/profile/change/<int:pk>', profile_bb_change, name='profile_bb_change'),
path('accounts/profile/delete/<int:pk>', profile_bb_delete, name='profile_bb_delete'),
path('accounts/profile/add', profile_bb_add, name='profile_bb_add'),
path('accounts/profile/<int:pk>', profile_bb_detail, name='profile_bb_detail'),
path('accounts/profile/', profile, name='profile'),
path('accounts/profile/change/', ChangeUserInfoView.as_view(), name='profile_change'),
path('accounts/profile/delete/', DeleteUserView.as_view(), name='profile_delete'),
path('accounts/password/change', BBPasswordChangeView.as_view(), name='password_change'),
path('accounts/register/done', RegisterDoneView.as_view(), name='register_done'),
path('accounts/register/', RegisterUserView.as_view(), name='register'),
path('accounts/register/activate/<str:sign>/', user_activate, name='register_activate'),
path('accounts/password_reset/', BBPasswordResetView.as_view(), name='password_reset'),
path('accounts/password/reset/<uidb64>/<token>/', PasswordResetConfirmView.as_view(template_name = 'main/password_reset_confirm.html', post_reset_login = False,
success_url = reverse_lazy('main:password_reset_complete')),
name='password_reset_confirm'),
path('accounts/password_reset/done', BBPasswordResetDoneView.as_view(), name='password_reset_done'),
path('accounts/password/reset/done/', BBPasswordResetCompleteView.as_view(), name='password_reset_complete'),
]
if settings.DEBUG:
urlpatterns.append(path('static/<path:path>', never_cache(serve)))
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
[
"from django.urls import path\n\nfrom django.views.decorators.cache import never_cache\nfrom django.conf.urls.static import static, serve\nfrom django.urls import reverse_lazy\nfrom django.contrib.auth.views import PasswordResetConfirmView\nfrom bboard import settings\n\nfrom .views import detail, detail1, profile_bb_detail\nfrom .views import index, other_page, BBLoginView, profile, BBLogoutView, ChangeUserInfoView, BBPasswordChangeView, RegisterUserView, RegisterDoneView, by_rubric\nfrom .views import user_activate, DeleteUserView, BBPasswordResetView, BBPasswordResetDoneView, BBPasswordResetCompleteView, profile_bb_add, profile_bb_change, profile_bb_delete\n\n\napp_name = 'main'\nurlpatterns = [\n\n path('detail/<int:pk>/', detail1, name='detail1'),\n path('<int:rubric_pk>/<int:pk>/', detail, name='detail'),\n path('<int:pk>/', by_rubric, name='by_rubric'),\n path('<str:page>/', other_page, name='other'),\n\n path('', index, name='index'),\n\n path('accounts/login/', BBLoginView.as_view(), name='login'),\n\n path('accounts/logout/', BBLogoutView.as_view(), name='logout'),\n\n path('accounts/profile/change/<int:pk>', profile_bb_change, name='profile_bb_change'),\n path('accounts/profile/delete/<int:pk>', profile_bb_delete, name='profile_bb_delete'),\n path('accounts/profile/add', profile_bb_add, name='profile_bb_add'),\n path('accounts/profile/<int:pk>', profile_bb_detail, name='profile_bb_detail'),\n path('accounts/profile/', profile, name='profile'),\n path('accounts/profile/change/', ChangeUserInfoView.as_view(), name='profile_change'),\n path('accounts/profile/delete/', DeleteUserView.as_view(), name='profile_delete'),\n\n path('accounts/password/change', BBPasswordChangeView.as_view(), name='password_change'),\n\n path('accounts/register/done', RegisterDoneView.as_view(), name='register_done'),\n path('accounts/register/', RegisterUserView.as_view(), name='register'),\n path('accounts/register/activate/<str:sign>/', user_activate, name='register_activate'),\n\n\n path('accounts/password_reset/', BBPasswordResetView.as_view(), name='password_reset'),\n path('accounts/password/reset/<uidb64>/<token>/', PasswordResetConfirmView.as_view(template_name = 'main/password_reset_confirm.html', post_reset_login = False,\n success_url = reverse_lazy('main:password_reset_complete')),\n name='password_reset_confirm'),\n path('accounts/password_reset/done', BBPasswordResetDoneView.as_view(), name='password_reset_done'),\n path('accounts/password/reset/done/', BBPasswordResetCompleteView.as_view(), name='password_reset_complete'),\n\n\n\n\n]\n\nif settings.DEBUG:\n urlpatterns.append(path('static/<path:path>', never_cache(serve)))\n urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)",
"from django.urls import path\nfrom django.views.decorators.cache import never_cache\nfrom django.conf.urls.static import static, serve\nfrom django.urls import reverse_lazy\nfrom django.contrib.auth.views import PasswordResetConfirmView\nfrom bboard import settings\nfrom .views import detail, detail1, profile_bb_detail\nfrom .views import index, other_page, BBLoginView, profile, BBLogoutView, ChangeUserInfoView, BBPasswordChangeView, RegisterUserView, RegisterDoneView, by_rubric\nfrom .views import user_activate, DeleteUserView, BBPasswordResetView, BBPasswordResetDoneView, BBPasswordResetCompleteView, profile_bb_add, profile_bb_change, profile_bb_delete\napp_name = 'main'\nurlpatterns = [path('detail/<int:pk>/', detail1, name='detail1'), path(\n '<int:rubric_pk>/<int:pk>/', detail, name='detail'), path('<int:pk>/',\n by_rubric, name='by_rubric'), path('<str:page>/', other_page, name=\n 'other'), path('', index, name='index'), path('accounts/login/',\n BBLoginView.as_view(), name='login'), path('accounts/logout/',\n BBLogoutView.as_view(), name='logout'), path(\n 'accounts/profile/change/<int:pk>', profile_bb_change, name=\n 'profile_bb_change'), path('accounts/profile/delete/<int:pk>',\n profile_bb_delete, name='profile_bb_delete'), path(\n 'accounts/profile/add', profile_bb_add, name='profile_bb_add'), path(\n 'accounts/profile/<int:pk>', profile_bb_detail, name=\n 'profile_bb_detail'), path('accounts/profile/', profile, name='profile'\n ), path('accounts/profile/change/', ChangeUserInfoView.as_view(), name=\n 'profile_change'), path('accounts/profile/delete/', DeleteUserView.\n as_view(), name='profile_delete'), path('accounts/password/change',\n BBPasswordChangeView.as_view(), name='password_change'), path(\n 'accounts/register/done', RegisterDoneView.as_view(), name=\n 'register_done'), path('accounts/register/', RegisterUserView.as_view(),\n name='register'), path('accounts/register/activate/<str:sign>/',\n user_activate, name='register_activate'), path(\n 'accounts/password_reset/', BBPasswordResetView.as_view(), name=\n 'password_reset'), path('accounts/password/reset/<uidb64>/<token>/',\n PasswordResetConfirmView.as_view(template_name=\n 'main/password_reset_confirm.html', post_reset_login=False, success_url\n =reverse_lazy('main:password_reset_complete')), name=\n 'password_reset_confirm'), path('accounts/password_reset/done',\n BBPasswordResetDoneView.as_view(), name='password_reset_done'), path(\n 'accounts/password/reset/done/', BBPasswordResetCompleteView.as_view(),\n name='password_reset_complete')]\nif settings.DEBUG:\n urlpatterns.append(path('static/<path:path>', never_cache(serve)))\n urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT\n )\n",
"<import token>\napp_name = 'main'\nurlpatterns = [path('detail/<int:pk>/', detail1, name='detail1'), path(\n '<int:rubric_pk>/<int:pk>/', detail, name='detail'), path('<int:pk>/',\n by_rubric, name='by_rubric'), path('<str:page>/', other_page, name=\n 'other'), path('', index, name='index'), path('accounts/login/',\n BBLoginView.as_view(), name='login'), path('accounts/logout/',\n BBLogoutView.as_view(), name='logout'), path(\n 'accounts/profile/change/<int:pk>', profile_bb_change, name=\n 'profile_bb_change'), path('accounts/profile/delete/<int:pk>',\n profile_bb_delete, name='profile_bb_delete'), path(\n 'accounts/profile/add', profile_bb_add, name='profile_bb_add'), path(\n 'accounts/profile/<int:pk>', profile_bb_detail, name=\n 'profile_bb_detail'), path('accounts/profile/', profile, name='profile'\n ), path('accounts/profile/change/', ChangeUserInfoView.as_view(), name=\n 'profile_change'), path('accounts/profile/delete/', DeleteUserView.\n as_view(), name='profile_delete'), path('accounts/password/change',\n BBPasswordChangeView.as_view(), name='password_change'), path(\n 'accounts/register/done', RegisterDoneView.as_view(), name=\n 'register_done'), path('accounts/register/', RegisterUserView.as_view(),\n name='register'), path('accounts/register/activate/<str:sign>/',\n user_activate, name='register_activate'), path(\n 'accounts/password_reset/', BBPasswordResetView.as_view(), name=\n 'password_reset'), path('accounts/password/reset/<uidb64>/<token>/',\n PasswordResetConfirmView.as_view(template_name=\n 'main/password_reset_confirm.html', post_reset_login=False, success_url\n =reverse_lazy('main:password_reset_complete')), name=\n 'password_reset_confirm'), path('accounts/password_reset/done',\n BBPasswordResetDoneView.as_view(), name='password_reset_done'), path(\n 'accounts/password/reset/done/', BBPasswordResetCompleteView.as_view(),\n name='password_reset_complete')]\nif settings.DEBUG:\n urlpatterns.append(path('static/<path:path>', never_cache(serve)))\n urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT\n )\n",
"<import token>\n<assignment token>\nif settings.DEBUG:\n urlpatterns.append(path('static/<path:path>', never_cache(serve)))\n urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT\n )\n",
"<import token>\n<assignment token>\n<code token>\n"
] | false |
98,336 |
4b59012e05d2f8264a8668f204f0cee1f4534254
|
__author__ = 'djvdorp'
from collections import OrderedDict
from progressbar import *
import shapefile
import pyproj
import csv
import logging
import pandas
HECTOPUNTEN_OUTPUT_FIELDS = ['HECTOMTRNG', 'AFSTAND', 'WVK_ID', 'WVK_BEGDAT']
WEGVAKKEN_OUTPUT_FIELDS = ['WVK_ID', 'WVK_BEGDAT', 'JTE_ID_BEG', 'JTE_ID_END', 'WEGBEHSRT', 'WEGNUMMER', 'WEGDEELLTR', 'HECTO_LTTR', 'BAANSUBSRT', 'RPE_CODE', 'ADMRICHTNG', 'RIJRICHTNG', 'STT_NAAM', 'WPSNAAMNEN', 'GME_ID', 'GME_NAAM', 'HNRSTRLNKS', 'HNRSTRRHTS', 'E_HNR_LNKS', 'E_HNR_RHTS', 'L_HNR_LNKS', 'L_HNR_RHTS', 'BEGAFSTAND', 'ENDAFSTAND', 'BEGINKM', 'EINDKM', 'POS_TV_WOL']
MERGED_OUTPUT_FIELDS = ['ID', 'WEGNUMMER', 'HECTOMTRNG', 'LONGITUDE', 'LATITUDE', 'STT_NAAM', 'GME_NAAM', 'WEGBEHSRT', 'RPE_CODE', 'POS_TV_WOL', 'WEGDEELLTR', 'HECTO_LTTR', 'BAANSUBSRT']
MERGED_RENAME_FIELDS_MAPPING = {'ID': 'HP_ID', 'WEGNUMMER': 'WEGNR','HECTOMTRNG': 'HECTONR'}
logging.basicConfig(level=logging.INFO)
widgets = ['Processing: ', Percentage(), ' ', Bar(marker=RotatingMarker()), ' ', ETA()]
def shp_transform_to_different_projection(input_path, input_fields, src_projection, dest_projection, output_filename):
logging.info("START processing shapefile '{}' to '{}'".format(input_path, output_filename))
csv_file = open(output_filename, 'wb')
writer = csv.writer(csv_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_NONNUMERIC)
r = shapefile.Reader(input_path)
input_shapes = r.shapeRecords()
nr_of_shapes_in_file = len(input_shapes)
logging.info("{} shapes in file '{}' will be transformed".format(nr_of_shapes_in_file, input_path))
field_names = [str(i[0]) for i in r.fields]
field_names.remove('DeletionFlag') # of moet dit zijn: del field_names[0]
logging.info("fieldNames in shapefile: {}".format(field_names))
input_projection = pyproj.Proj(src_projection)
output_projection = pyproj.Proj(dest_projection)
# 3 = shapefile.POLYLINE = wegvakken
# 8 = shapefile.MULTIPOINT = hectopunten
logging.info("shapeType read: {}".format(r.shapeType))
counter = 0
pbar = ProgressBar(widgets=widgets, maxval=nr_of_shapes_in_file).start()
for input_shape in input_shapes:
nr_of_points_in_shape = len(input_shape.shape.points)
result_entry = OrderedDict()
for input_field in input_fields:
key = (field_names.index(input_field))
input_record = input_shape.record
input_entry = input_record[key]
# Lists (voor datum) platslaan tot een string
if isinstance(input_entry, list):
input_entry = int_array_to_string(input_entry)
# HECTOMTRNG in Hectopunten.shp moet gedeeld worden door 10
if input_field == 'HECTOMTRNG':
input_entry = (input_record[key] / 10.)
result_entry[input_field] = input_entry
if nr_of_points_in_shape == 1:
input_x = input_shape.shape.points[0][0]
input_y = input_shape.shape.points[0][1]
# Convert input_x, input_y from Rijksdriehoekstelsel_New to WGS84
x, y = pyproj.transform(input_projection, output_projection, input_x, input_y)
logging.debug(field_names)
logging.debug([str(i) for i in input_record])
logging.debug('Rijksdriehoekstelsel_New ({:-f}, {:-f}) becomes WGS84 ({:-f}, {:-f})'.format(input_x, input_y, x, y))
result_entry['LONGITUDE'] = x
result_entry['LATITUDE'] = y
else:
logging.debug("number of points for this shape was >1, it was: {}".format(nr_of_points_in_shape))
headers = result_entry.keys()
if counter == 0:
writer.writerow(headers)
line = []
for field in headers:
line.append(result_entry[field])
writer.writerow(line)
counter += 1
pbar.update(counter)
csv_file.close()
pbar.finish()
logging.info("FINISHED processing - saved file '{}'".format(output_filename))
def int_array_to_string(input_array):
return "-".join(str(i) for i in input_array)
def merge_shapefile_csvs(input_hectopunten, input_wegvakken, merge_on_field, fields_to_keep, fields_rename_mapping, output_filename):
logging.info("START merging csv files '{}' and '{}' to file '{}'".format(input_hectopunten, input_wegvakken, output_filename))
hectopunten_df = pandas.read_csv(input_hectopunten)
wegvakken_df = pandas.read_csv(input_wegvakken)
# Join de 2 input files samen, left=hectopunten en right=wegvakken
merged_df = pandas.merge(hectopunten_df, wegvakken_df, on=merge_on_field)
# Voeg een ID field toe per regel
merged_df['ID'] = merged_df.index
# Bewaar alleen de meegegeven velden om te bewaren
result_df = merged_df[fields_to_keep]
# Hernoem columns zodat deze af kunnen wijken van de input columns
result_df = result_df.rename(columns=fields_rename_mapping)
# Exporteer dit naar een merged csv
result_df.to_csv(output_filename, mode='wb', index=False, header=True, delimiter=',', quotechar='"', quoting=csv.QUOTE_NONNUMERIC)
logging.info("FINISHED merging csv files - saved file '{}'".format(output_filename))
# Real action here
input_projection_string = "+init=EPSG:28992" # Dit is Rijksdriehoekstelsel_New vanuit de .prj files, officieel EPSG:28992 Amersfoort / RD New
output_projection_string = "+init=EPSG:4326" # LatLon with WGS84 datum used by GPS units and Google Earth, officieel EPSG:4326
# Bestanden kunnen worden gevonden op: http://www.jigsaw.nl/nwb/downloads/NWB_01-07-2014.zip
shp_hectopunten = "input/Hectopunten/Hectopunten"
shp_wegvakken = "input/Wegvakken/Wegvakken"
# CSV files van de SHP files
csv_hectopunten = "output/Hectopunten.csv"
csv_wegvakken = "output/Wegvakken.csv"
# CSV output na mergen
csv_merged = "output/merged.csv"
shp_transform_to_different_projection(shp_hectopunten, HECTOPUNTEN_OUTPUT_FIELDS, input_projection_string, output_projection_string, csv_hectopunten)
shp_transform_to_different_projection(shp_wegvakken, WEGVAKKEN_OUTPUT_FIELDS, input_projection_string, output_projection_string, csv_wegvakken)
merge_shapefile_csvs(csv_hectopunten, csv_wegvakken, 'WVK_ID', MERGED_OUTPUT_FIELDS, MERGED_RENAME_FIELDS_MAPPING, csv_merged)
|
[
"__author__ = 'djvdorp'\nfrom collections import OrderedDict\nfrom progressbar import *\n\nimport shapefile\nimport pyproj\nimport csv\nimport logging\nimport pandas\n\nHECTOPUNTEN_OUTPUT_FIELDS = ['HECTOMTRNG', 'AFSTAND', 'WVK_ID', 'WVK_BEGDAT']\nWEGVAKKEN_OUTPUT_FIELDS = ['WVK_ID', 'WVK_BEGDAT', 'JTE_ID_BEG', 'JTE_ID_END', 'WEGBEHSRT', 'WEGNUMMER', 'WEGDEELLTR', 'HECTO_LTTR', 'BAANSUBSRT', 'RPE_CODE', 'ADMRICHTNG', 'RIJRICHTNG', 'STT_NAAM', 'WPSNAAMNEN', 'GME_ID', 'GME_NAAM', 'HNRSTRLNKS', 'HNRSTRRHTS', 'E_HNR_LNKS', 'E_HNR_RHTS', 'L_HNR_LNKS', 'L_HNR_RHTS', 'BEGAFSTAND', 'ENDAFSTAND', 'BEGINKM', 'EINDKM', 'POS_TV_WOL']\n\nMERGED_OUTPUT_FIELDS = ['ID', 'WEGNUMMER', 'HECTOMTRNG', 'LONGITUDE', 'LATITUDE', 'STT_NAAM', 'GME_NAAM', 'WEGBEHSRT', 'RPE_CODE', 'POS_TV_WOL', 'WEGDEELLTR', 'HECTO_LTTR', 'BAANSUBSRT']\nMERGED_RENAME_FIELDS_MAPPING = {'ID': 'HP_ID', 'WEGNUMMER': 'WEGNR','HECTOMTRNG': 'HECTONR'}\n\nlogging.basicConfig(level=logging.INFO)\n\nwidgets = ['Processing: ', Percentage(), ' ', Bar(marker=RotatingMarker()), ' ', ETA()]\n\ndef shp_transform_to_different_projection(input_path, input_fields, src_projection, dest_projection, output_filename):\n logging.info(\"START processing shapefile '{}' to '{}'\".format(input_path, output_filename))\n\n csv_file = open(output_filename, 'wb')\n writer = csv.writer(csv_file, delimiter=',', quotechar='\"', quoting=csv.QUOTE_NONNUMERIC)\n\n r = shapefile.Reader(input_path)\n input_shapes = r.shapeRecords()\n\n nr_of_shapes_in_file = len(input_shapes)\n logging.info(\"{} shapes in file '{}' will be transformed\".format(nr_of_shapes_in_file, input_path))\n\n field_names = [str(i[0]) for i in r.fields]\n field_names.remove('DeletionFlag') # of moet dit zijn: del field_names[0]\n logging.info(\"fieldNames in shapefile: {}\".format(field_names))\n\n input_projection = pyproj.Proj(src_projection)\n output_projection = pyproj.Proj(dest_projection)\n\n # 3 = shapefile.POLYLINE = wegvakken\n # 8 = shapefile.MULTIPOINT = hectopunten\n logging.info(\"shapeType read: {}\".format(r.shapeType))\n\n counter = 0\n pbar = ProgressBar(widgets=widgets, maxval=nr_of_shapes_in_file).start()\n\n for input_shape in input_shapes:\n nr_of_points_in_shape = len(input_shape.shape.points)\n\n result_entry = OrderedDict()\n for input_field in input_fields:\n key = (field_names.index(input_field))\n\n input_record = input_shape.record\n input_entry = input_record[key]\n\n # Lists (voor datum) platslaan tot een string\n if isinstance(input_entry, list):\n input_entry = int_array_to_string(input_entry)\n\n # HECTOMTRNG in Hectopunten.shp moet gedeeld worden door 10\n if input_field == 'HECTOMTRNG':\n input_entry = (input_record[key] / 10.)\n\n result_entry[input_field] = input_entry\n\n if nr_of_points_in_shape == 1:\n input_x = input_shape.shape.points[0][0]\n input_y = input_shape.shape.points[0][1]\n\n # Convert input_x, input_y from Rijksdriehoekstelsel_New to WGS84\n x, y = pyproj.transform(input_projection, output_projection, input_x, input_y)\n\n logging.debug(field_names)\n logging.debug([str(i) for i in input_record])\n logging.debug('Rijksdriehoekstelsel_New ({:-f}, {:-f}) becomes WGS84 ({:-f}, {:-f})'.format(input_x, input_y, x, y))\n\n result_entry['LONGITUDE'] = x\n result_entry['LATITUDE'] = y\n else:\n logging.debug(\"number of points for this shape was >1, it was: {}\".format(nr_of_points_in_shape))\n\n headers = result_entry.keys()\n if counter == 0:\n writer.writerow(headers)\n\n line = []\n for field in headers:\n line.append(result_entry[field])\n writer.writerow(line)\n\n counter += 1\n pbar.update(counter)\n\n csv_file.close()\n pbar.finish()\n logging.info(\"FINISHED processing - saved file '{}'\".format(output_filename))\n\n\ndef int_array_to_string(input_array):\n return \"-\".join(str(i) for i in input_array)\n\n\ndef merge_shapefile_csvs(input_hectopunten, input_wegvakken, merge_on_field, fields_to_keep, fields_rename_mapping, output_filename):\n logging.info(\"START merging csv files '{}' and '{}' to file '{}'\".format(input_hectopunten, input_wegvakken, output_filename))\n hectopunten_df = pandas.read_csv(input_hectopunten)\n wegvakken_df = pandas.read_csv(input_wegvakken)\n\n # Join de 2 input files samen, left=hectopunten en right=wegvakken\n merged_df = pandas.merge(hectopunten_df, wegvakken_df, on=merge_on_field)\n # Voeg een ID field toe per regel\n merged_df['ID'] = merged_df.index\n\n # Bewaar alleen de meegegeven velden om te bewaren\n result_df = merged_df[fields_to_keep]\n\n # Hernoem columns zodat deze af kunnen wijken van de input columns\n result_df = result_df.rename(columns=fields_rename_mapping)\n\n # Exporteer dit naar een merged csv\n result_df.to_csv(output_filename, mode='wb', index=False, header=True, delimiter=',', quotechar='\"', quoting=csv.QUOTE_NONNUMERIC)\n logging.info(\"FINISHED merging csv files - saved file '{}'\".format(output_filename))\n\n\n# Real action here\ninput_projection_string = \"+init=EPSG:28992\" # Dit is Rijksdriehoekstelsel_New vanuit de .prj files, officieel EPSG:28992 Amersfoort / RD New\noutput_projection_string = \"+init=EPSG:4326\" # LatLon with WGS84 datum used by GPS units and Google Earth, officieel EPSG:4326\n\n# Bestanden kunnen worden gevonden op: http://www.jigsaw.nl/nwb/downloads/NWB_01-07-2014.zip\nshp_hectopunten = \"input/Hectopunten/Hectopunten\"\nshp_wegvakken = \"input/Wegvakken/Wegvakken\"\n\n# CSV files van de SHP files\ncsv_hectopunten = \"output/Hectopunten.csv\"\ncsv_wegvakken = \"output/Wegvakken.csv\"\n\n# CSV output na mergen\ncsv_merged = \"output/merged.csv\"\n\nshp_transform_to_different_projection(shp_hectopunten, HECTOPUNTEN_OUTPUT_FIELDS, input_projection_string, output_projection_string, csv_hectopunten)\nshp_transform_to_different_projection(shp_wegvakken, WEGVAKKEN_OUTPUT_FIELDS, input_projection_string, output_projection_string, csv_wegvakken)\n\nmerge_shapefile_csvs(csv_hectopunten, csv_wegvakken, 'WVK_ID', MERGED_OUTPUT_FIELDS, MERGED_RENAME_FIELDS_MAPPING, csv_merged)",
"__author__ = 'djvdorp'\nfrom collections import OrderedDict\nfrom progressbar import *\nimport shapefile\nimport pyproj\nimport csv\nimport logging\nimport pandas\nHECTOPUNTEN_OUTPUT_FIELDS = ['HECTOMTRNG', 'AFSTAND', 'WVK_ID', 'WVK_BEGDAT']\nWEGVAKKEN_OUTPUT_FIELDS = ['WVK_ID', 'WVK_BEGDAT', 'JTE_ID_BEG',\n 'JTE_ID_END', 'WEGBEHSRT', 'WEGNUMMER', 'WEGDEELLTR', 'HECTO_LTTR',\n 'BAANSUBSRT', 'RPE_CODE', 'ADMRICHTNG', 'RIJRICHTNG', 'STT_NAAM',\n 'WPSNAAMNEN', 'GME_ID', 'GME_NAAM', 'HNRSTRLNKS', 'HNRSTRRHTS',\n 'E_HNR_LNKS', 'E_HNR_RHTS', 'L_HNR_LNKS', 'L_HNR_RHTS', 'BEGAFSTAND',\n 'ENDAFSTAND', 'BEGINKM', 'EINDKM', 'POS_TV_WOL']\nMERGED_OUTPUT_FIELDS = ['ID', 'WEGNUMMER', 'HECTOMTRNG', 'LONGITUDE',\n 'LATITUDE', 'STT_NAAM', 'GME_NAAM', 'WEGBEHSRT', 'RPE_CODE',\n 'POS_TV_WOL', 'WEGDEELLTR', 'HECTO_LTTR', 'BAANSUBSRT']\nMERGED_RENAME_FIELDS_MAPPING = {'ID': 'HP_ID', 'WEGNUMMER': 'WEGNR',\n 'HECTOMTRNG': 'HECTONR'}\nlogging.basicConfig(level=logging.INFO)\nwidgets = ['Processing: ', Percentage(), ' ', Bar(marker=RotatingMarker()),\n ' ', ETA()]\n\n\ndef shp_transform_to_different_projection(input_path, input_fields,\n src_projection, dest_projection, output_filename):\n logging.info(\"START processing shapefile '{}' to '{}'\".format(\n input_path, output_filename))\n csv_file = open(output_filename, 'wb')\n writer = csv.writer(csv_file, delimiter=',', quotechar='\"', quoting=csv\n .QUOTE_NONNUMERIC)\n r = shapefile.Reader(input_path)\n input_shapes = r.shapeRecords()\n nr_of_shapes_in_file = len(input_shapes)\n logging.info(\"{} shapes in file '{}' will be transformed\".format(\n nr_of_shapes_in_file, input_path))\n field_names = [str(i[0]) for i in r.fields]\n field_names.remove('DeletionFlag')\n logging.info('fieldNames in shapefile: {}'.format(field_names))\n input_projection = pyproj.Proj(src_projection)\n output_projection = pyproj.Proj(dest_projection)\n logging.info('shapeType read: {}'.format(r.shapeType))\n counter = 0\n pbar = ProgressBar(widgets=widgets, maxval=nr_of_shapes_in_file).start()\n for input_shape in input_shapes:\n nr_of_points_in_shape = len(input_shape.shape.points)\n result_entry = OrderedDict()\n for input_field in input_fields:\n key = field_names.index(input_field)\n input_record = input_shape.record\n input_entry = input_record[key]\n if isinstance(input_entry, list):\n input_entry = int_array_to_string(input_entry)\n if input_field == 'HECTOMTRNG':\n input_entry = input_record[key] / 10.0\n result_entry[input_field] = input_entry\n if nr_of_points_in_shape == 1:\n input_x = input_shape.shape.points[0][0]\n input_y = input_shape.shape.points[0][1]\n x, y = pyproj.transform(input_projection, output_projection,\n input_x, input_y)\n logging.debug(field_names)\n logging.debug([str(i) for i in input_record])\n logging.debug(\n 'Rijksdriehoekstelsel_New ({:-f}, {:-f}) becomes WGS84 ({:-f}, {:-f})'\n .format(input_x, input_y, x, y))\n result_entry['LONGITUDE'] = x\n result_entry['LATITUDE'] = y\n else:\n logging.debug('number of points for this shape was >1, it was: {}'\n .format(nr_of_points_in_shape))\n headers = result_entry.keys()\n if counter == 0:\n writer.writerow(headers)\n line = []\n for field in headers:\n line.append(result_entry[field])\n writer.writerow(line)\n counter += 1\n pbar.update(counter)\n csv_file.close()\n pbar.finish()\n logging.info(\"FINISHED processing - saved file '{}'\".format(\n output_filename))\n\n\ndef int_array_to_string(input_array):\n return '-'.join(str(i) for i in input_array)\n\n\ndef merge_shapefile_csvs(input_hectopunten, input_wegvakken, merge_on_field,\n fields_to_keep, fields_rename_mapping, output_filename):\n logging.info(\"START merging csv files '{}' and '{}' to file '{}'\".\n format(input_hectopunten, input_wegvakken, output_filename))\n hectopunten_df = pandas.read_csv(input_hectopunten)\n wegvakken_df = pandas.read_csv(input_wegvakken)\n merged_df = pandas.merge(hectopunten_df, wegvakken_df, on=merge_on_field)\n merged_df['ID'] = merged_df.index\n result_df = merged_df[fields_to_keep]\n result_df = result_df.rename(columns=fields_rename_mapping)\n result_df.to_csv(output_filename, mode='wb', index=False, header=True,\n delimiter=',', quotechar='\"', quoting=csv.QUOTE_NONNUMERIC)\n logging.info(\"FINISHED merging csv files - saved file '{}'\".format(\n output_filename))\n\n\ninput_projection_string = '+init=EPSG:28992'\noutput_projection_string = '+init=EPSG:4326'\nshp_hectopunten = 'input/Hectopunten/Hectopunten'\nshp_wegvakken = 'input/Wegvakken/Wegvakken'\ncsv_hectopunten = 'output/Hectopunten.csv'\ncsv_wegvakken = 'output/Wegvakken.csv'\ncsv_merged = 'output/merged.csv'\nshp_transform_to_different_projection(shp_hectopunten,\n HECTOPUNTEN_OUTPUT_FIELDS, input_projection_string,\n output_projection_string, csv_hectopunten)\nshp_transform_to_different_projection(shp_wegvakken,\n WEGVAKKEN_OUTPUT_FIELDS, input_projection_string,\n output_projection_string, csv_wegvakken)\nmerge_shapefile_csvs(csv_hectopunten, csv_wegvakken, 'WVK_ID',\n MERGED_OUTPUT_FIELDS, MERGED_RENAME_FIELDS_MAPPING, csv_merged)\n",
"__author__ = 'djvdorp'\n<import token>\nHECTOPUNTEN_OUTPUT_FIELDS = ['HECTOMTRNG', 'AFSTAND', 'WVK_ID', 'WVK_BEGDAT']\nWEGVAKKEN_OUTPUT_FIELDS = ['WVK_ID', 'WVK_BEGDAT', 'JTE_ID_BEG',\n 'JTE_ID_END', 'WEGBEHSRT', 'WEGNUMMER', 'WEGDEELLTR', 'HECTO_LTTR',\n 'BAANSUBSRT', 'RPE_CODE', 'ADMRICHTNG', 'RIJRICHTNG', 'STT_NAAM',\n 'WPSNAAMNEN', 'GME_ID', 'GME_NAAM', 'HNRSTRLNKS', 'HNRSTRRHTS',\n 'E_HNR_LNKS', 'E_HNR_RHTS', 'L_HNR_LNKS', 'L_HNR_RHTS', 'BEGAFSTAND',\n 'ENDAFSTAND', 'BEGINKM', 'EINDKM', 'POS_TV_WOL']\nMERGED_OUTPUT_FIELDS = ['ID', 'WEGNUMMER', 'HECTOMTRNG', 'LONGITUDE',\n 'LATITUDE', 'STT_NAAM', 'GME_NAAM', 'WEGBEHSRT', 'RPE_CODE',\n 'POS_TV_WOL', 'WEGDEELLTR', 'HECTO_LTTR', 'BAANSUBSRT']\nMERGED_RENAME_FIELDS_MAPPING = {'ID': 'HP_ID', 'WEGNUMMER': 'WEGNR',\n 'HECTOMTRNG': 'HECTONR'}\nlogging.basicConfig(level=logging.INFO)\nwidgets = ['Processing: ', Percentage(), ' ', Bar(marker=RotatingMarker()),\n ' ', ETA()]\n\n\ndef shp_transform_to_different_projection(input_path, input_fields,\n src_projection, dest_projection, output_filename):\n logging.info(\"START processing shapefile '{}' to '{}'\".format(\n input_path, output_filename))\n csv_file = open(output_filename, 'wb')\n writer = csv.writer(csv_file, delimiter=',', quotechar='\"', quoting=csv\n .QUOTE_NONNUMERIC)\n r = shapefile.Reader(input_path)\n input_shapes = r.shapeRecords()\n nr_of_shapes_in_file = len(input_shapes)\n logging.info(\"{} shapes in file '{}' will be transformed\".format(\n nr_of_shapes_in_file, input_path))\n field_names = [str(i[0]) for i in r.fields]\n field_names.remove('DeletionFlag')\n logging.info('fieldNames in shapefile: {}'.format(field_names))\n input_projection = pyproj.Proj(src_projection)\n output_projection = pyproj.Proj(dest_projection)\n logging.info('shapeType read: {}'.format(r.shapeType))\n counter = 0\n pbar = ProgressBar(widgets=widgets, maxval=nr_of_shapes_in_file).start()\n for input_shape in input_shapes:\n nr_of_points_in_shape = len(input_shape.shape.points)\n result_entry = OrderedDict()\n for input_field in input_fields:\n key = field_names.index(input_field)\n input_record = input_shape.record\n input_entry = input_record[key]\n if isinstance(input_entry, list):\n input_entry = int_array_to_string(input_entry)\n if input_field == 'HECTOMTRNG':\n input_entry = input_record[key] / 10.0\n result_entry[input_field] = input_entry\n if nr_of_points_in_shape == 1:\n input_x = input_shape.shape.points[0][0]\n input_y = input_shape.shape.points[0][1]\n x, y = pyproj.transform(input_projection, output_projection,\n input_x, input_y)\n logging.debug(field_names)\n logging.debug([str(i) for i in input_record])\n logging.debug(\n 'Rijksdriehoekstelsel_New ({:-f}, {:-f}) becomes WGS84 ({:-f}, {:-f})'\n .format(input_x, input_y, x, y))\n result_entry['LONGITUDE'] = x\n result_entry['LATITUDE'] = y\n else:\n logging.debug('number of points for this shape was >1, it was: {}'\n .format(nr_of_points_in_shape))\n headers = result_entry.keys()\n if counter == 0:\n writer.writerow(headers)\n line = []\n for field in headers:\n line.append(result_entry[field])\n writer.writerow(line)\n counter += 1\n pbar.update(counter)\n csv_file.close()\n pbar.finish()\n logging.info(\"FINISHED processing - saved file '{}'\".format(\n output_filename))\n\n\ndef int_array_to_string(input_array):\n return '-'.join(str(i) for i in input_array)\n\n\ndef merge_shapefile_csvs(input_hectopunten, input_wegvakken, merge_on_field,\n fields_to_keep, fields_rename_mapping, output_filename):\n logging.info(\"START merging csv files '{}' and '{}' to file '{}'\".\n format(input_hectopunten, input_wegvakken, output_filename))\n hectopunten_df = pandas.read_csv(input_hectopunten)\n wegvakken_df = pandas.read_csv(input_wegvakken)\n merged_df = pandas.merge(hectopunten_df, wegvakken_df, on=merge_on_field)\n merged_df['ID'] = merged_df.index\n result_df = merged_df[fields_to_keep]\n result_df = result_df.rename(columns=fields_rename_mapping)\n result_df.to_csv(output_filename, mode='wb', index=False, header=True,\n delimiter=',', quotechar='\"', quoting=csv.QUOTE_NONNUMERIC)\n logging.info(\"FINISHED merging csv files - saved file '{}'\".format(\n output_filename))\n\n\ninput_projection_string = '+init=EPSG:28992'\noutput_projection_string = '+init=EPSG:4326'\nshp_hectopunten = 'input/Hectopunten/Hectopunten'\nshp_wegvakken = 'input/Wegvakken/Wegvakken'\ncsv_hectopunten = 'output/Hectopunten.csv'\ncsv_wegvakken = 'output/Wegvakken.csv'\ncsv_merged = 'output/merged.csv'\nshp_transform_to_different_projection(shp_hectopunten,\n HECTOPUNTEN_OUTPUT_FIELDS, input_projection_string,\n output_projection_string, csv_hectopunten)\nshp_transform_to_different_projection(shp_wegvakken,\n WEGVAKKEN_OUTPUT_FIELDS, input_projection_string,\n output_projection_string, csv_wegvakken)\nmerge_shapefile_csvs(csv_hectopunten, csv_wegvakken, 'WVK_ID',\n MERGED_OUTPUT_FIELDS, MERGED_RENAME_FIELDS_MAPPING, csv_merged)\n",
"<assignment token>\n<import token>\n<assignment token>\nlogging.basicConfig(level=logging.INFO)\n<assignment token>\n\n\ndef shp_transform_to_different_projection(input_path, input_fields,\n src_projection, dest_projection, output_filename):\n logging.info(\"START processing shapefile '{}' to '{}'\".format(\n input_path, output_filename))\n csv_file = open(output_filename, 'wb')\n writer = csv.writer(csv_file, delimiter=',', quotechar='\"', quoting=csv\n .QUOTE_NONNUMERIC)\n r = shapefile.Reader(input_path)\n input_shapes = r.shapeRecords()\n nr_of_shapes_in_file = len(input_shapes)\n logging.info(\"{} shapes in file '{}' will be transformed\".format(\n nr_of_shapes_in_file, input_path))\n field_names = [str(i[0]) for i in r.fields]\n field_names.remove('DeletionFlag')\n logging.info('fieldNames in shapefile: {}'.format(field_names))\n input_projection = pyproj.Proj(src_projection)\n output_projection = pyproj.Proj(dest_projection)\n logging.info('shapeType read: {}'.format(r.shapeType))\n counter = 0\n pbar = ProgressBar(widgets=widgets, maxval=nr_of_shapes_in_file).start()\n for input_shape in input_shapes:\n nr_of_points_in_shape = len(input_shape.shape.points)\n result_entry = OrderedDict()\n for input_field in input_fields:\n key = field_names.index(input_field)\n input_record = input_shape.record\n input_entry = input_record[key]\n if isinstance(input_entry, list):\n input_entry = int_array_to_string(input_entry)\n if input_field == 'HECTOMTRNG':\n input_entry = input_record[key] / 10.0\n result_entry[input_field] = input_entry\n if nr_of_points_in_shape == 1:\n input_x = input_shape.shape.points[0][0]\n input_y = input_shape.shape.points[0][1]\n x, y = pyproj.transform(input_projection, output_projection,\n input_x, input_y)\n logging.debug(field_names)\n logging.debug([str(i) for i in input_record])\n logging.debug(\n 'Rijksdriehoekstelsel_New ({:-f}, {:-f}) becomes WGS84 ({:-f}, {:-f})'\n .format(input_x, input_y, x, y))\n result_entry['LONGITUDE'] = x\n result_entry['LATITUDE'] = y\n else:\n logging.debug('number of points for this shape was >1, it was: {}'\n .format(nr_of_points_in_shape))\n headers = result_entry.keys()\n if counter == 0:\n writer.writerow(headers)\n line = []\n for field in headers:\n line.append(result_entry[field])\n writer.writerow(line)\n counter += 1\n pbar.update(counter)\n csv_file.close()\n pbar.finish()\n logging.info(\"FINISHED processing - saved file '{}'\".format(\n output_filename))\n\n\ndef int_array_to_string(input_array):\n return '-'.join(str(i) for i in input_array)\n\n\ndef merge_shapefile_csvs(input_hectopunten, input_wegvakken, merge_on_field,\n fields_to_keep, fields_rename_mapping, output_filename):\n logging.info(\"START merging csv files '{}' and '{}' to file '{}'\".\n format(input_hectopunten, input_wegvakken, output_filename))\n hectopunten_df = pandas.read_csv(input_hectopunten)\n wegvakken_df = pandas.read_csv(input_wegvakken)\n merged_df = pandas.merge(hectopunten_df, wegvakken_df, on=merge_on_field)\n merged_df['ID'] = merged_df.index\n result_df = merged_df[fields_to_keep]\n result_df = result_df.rename(columns=fields_rename_mapping)\n result_df.to_csv(output_filename, mode='wb', index=False, header=True,\n delimiter=',', quotechar='\"', quoting=csv.QUOTE_NONNUMERIC)\n logging.info(\"FINISHED merging csv files - saved file '{}'\".format(\n output_filename))\n\n\n<assignment token>\nshp_transform_to_different_projection(shp_hectopunten,\n HECTOPUNTEN_OUTPUT_FIELDS, input_projection_string,\n output_projection_string, csv_hectopunten)\nshp_transform_to_different_projection(shp_wegvakken,\n WEGVAKKEN_OUTPUT_FIELDS, input_projection_string,\n output_projection_string, csv_wegvakken)\nmerge_shapefile_csvs(csv_hectopunten, csv_wegvakken, 'WVK_ID',\n MERGED_OUTPUT_FIELDS, MERGED_RENAME_FIELDS_MAPPING, csv_merged)\n",
"<assignment token>\n<import token>\n<assignment token>\n<code token>\n<assignment token>\n\n\ndef shp_transform_to_different_projection(input_path, input_fields,\n src_projection, dest_projection, output_filename):\n logging.info(\"START processing shapefile '{}' to '{}'\".format(\n input_path, output_filename))\n csv_file = open(output_filename, 'wb')\n writer = csv.writer(csv_file, delimiter=',', quotechar='\"', quoting=csv\n .QUOTE_NONNUMERIC)\n r = shapefile.Reader(input_path)\n input_shapes = r.shapeRecords()\n nr_of_shapes_in_file = len(input_shapes)\n logging.info(\"{} shapes in file '{}' will be transformed\".format(\n nr_of_shapes_in_file, input_path))\n field_names = [str(i[0]) for i in r.fields]\n field_names.remove('DeletionFlag')\n logging.info('fieldNames in shapefile: {}'.format(field_names))\n input_projection = pyproj.Proj(src_projection)\n output_projection = pyproj.Proj(dest_projection)\n logging.info('shapeType read: {}'.format(r.shapeType))\n counter = 0\n pbar = ProgressBar(widgets=widgets, maxval=nr_of_shapes_in_file).start()\n for input_shape in input_shapes:\n nr_of_points_in_shape = len(input_shape.shape.points)\n result_entry = OrderedDict()\n for input_field in input_fields:\n key = field_names.index(input_field)\n input_record = input_shape.record\n input_entry = input_record[key]\n if isinstance(input_entry, list):\n input_entry = int_array_to_string(input_entry)\n if input_field == 'HECTOMTRNG':\n input_entry = input_record[key] / 10.0\n result_entry[input_field] = input_entry\n if nr_of_points_in_shape == 1:\n input_x = input_shape.shape.points[0][0]\n input_y = input_shape.shape.points[0][1]\n x, y = pyproj.transform(input_projection, output_projection,\n input_x, input_y)\n logging.debug(field_names)\n logging.debug([str(i) for i in input_record])\n logging.debug(\n 'Rijksdriehoekstelsel_New ({:-f}, {:-f}) becomes WGS84 ({:-f}, {:-f})'\n .format(input_x, input_y, x, y))\n result_entry['LONGITUDE'] = x\n result_entry['LATITUDE'] = y\n else:\n logging.debug('number of points for this shape was >1, it was: {}'\n .format(nr_of_points_in_shape))\n headers = result_entry.keys()\n if counter == 0:\n writer.writerow(headers)\n line = []\n for field in headers:\n line.append(result_entry[field])\n writer.writerow(line)\n counter += 1\n pbar.update(counter)\n csv_file.close()\n pbar.finish()\n logging.info(\"FINISHED processing - saved file '{}'\".format(\n output_filename))\n\n\ndef int_array_to_string(input_array):\n return '-'.join(str(i) for i in input_array)\n\n\ndef merge_shapefile_csvs(input_hectopunten, input_wegvakken, merge_on_field,\n fields_to_keep, fields_rename_mapping, output_filename):\n logging.info(\"START merging csv files '{}' and '{}' to file '{}'\".\n format(input_hectopunten, input_wegvakken, output_filename))\n hectopunten_df = pandas.read_csv(input_hectopunten)\n wegvakken_df = pandas.read_csv(input_wegvakken)\n merged_df = pandas.merge(hectopunten_df, wegvakken_df, on=merge_on_field)\n merged_df['ID'] = merged_df.index\n result_df = merged_df[fields_to_keep]\n result_df = result_df.rename(columns=fields_rename_mapping)\n result_df.to_csv(output_filename, mode='wb', index=False, header=True,\n delimiter=',', quotechar='\"', quoting=csv.QUOTE_NONNUMERIC)\n logging.info(\"FINISHED merging csv files - saved file '{}'\".format(\n output_filename))\n\n\n<assignment token>\n<code token>\n",
"<assignment token>\n<import token>\n<assignment token>\n<code token>\n<assignment token>\n\n\ndef shp_transform_to_different_projection(input_path, input_fields,\n src_projection, dest_projection, output_filename):\n logging.info(\"START processing shapefile '{}' to '{}'\".format(\n input_path, output_filename))\n csv_file = open(output_filename, 'wb')\n writer = csv.writer(csv_file, delimiter=',', quotechar='\"', quoting=csv\n .QUOTE_NONNUMERIC)\n r = shapefile.Reader(input_path)\n input_shapes = r.shapeRecords()\n nr_of_shapes_in_file = len(input_shapes)\n logging.info(\"{} shapes in file '{}' will be transformed\".format(\n nr_of_shapes_in_file, input_path))\n field_names = [str(i[0]) for i in r.fields]\n field_names.remove('DeletionFlag')\n logging.info('fieldNames in shapefile: {}'.format(field_names))\n input_projection = pyproj.Proj(src_projection)\n output_projection = pyproj.Proj(dest_projection)\n logging.info('shapeType read: {}'.format(r.shapeType))\n counter = 0\n pbar = ProgressBar(widgets=widgets, maxval=nr_of_shapes_in_file).start()\n for input_shape in input_shapes:\n nr_of_points_in_shape = len(input_shape.shape.points)\n result_entry = OrderedDict()\n for input_field in input_fields:\n key = field_names.index(input_field)\n input_record = input_shape.record\n input_entry = input_record[key]\n if isinstance(input_entry, list):\n input_entry = int_array_to_string(input_entry)\n if input_field == 'HECTOMTRNG':\n input_entry = input_record[key] / 10.0\n result_entry[input_field] = input_entry\n if nr_of_points_in_shape == 1:\n input_x = input_shape.shape.points[0][0]\n input_y = input_shape.shape.points[0][1]\n x, y = pyproj.transform(input_projection, output_projection,\n input_x, input_y)\n logging.debug(field_names)\n logging.debug([str(i) for i in input_record])\n logging.debug(\n 'Rijksdriehoekstelsel_New ({:-f}, {:-f}) becomes WGS84 ({:-f}, {:-f})'\n .format(input_x, input_y, x, y))\n result_entry['LONGITUDE'] = x\n result_entry['LATITUDE'] = y\n else:\n logging.debug('number of points for this shape was >1, it was: {}'\n .format(nr_of_points_in_shape))\n headers = result_entry.keys()\n if counter == 0:\n writer.writerow(headers)\n line = []\n for field in headers:\n line.append(result_entry[field])\n writer.writerow(line)\n counter += 1\n pbar.update(counter)\n csv_file.close()\n pbar.finish()\n logging.info(\"FINISHED processing - saved file '{}'\".format(\n output_filename))\n\n\n<function token>\n\n\ndef merge_shapefile_csvs(input_hectopunten, input_wegvakken, merge_on_field,\n fields_to_keep, fields_rename_mapping, output_filename):\n logging.info(\"START merging csv files '{}' and '{}' to file '{}'\".\n format(input_hectopunten, input_wegvakken, output_filename))\n hectopunten_df = pandas.read_csv(input_hectopunten)\n wegvakken_df = pandas.read_csv(input_wegvakken)\n merged_df = pandas.merge(hectopunten_df, wegvakken_df, on=merge_on_field)\n merged_df['ID'] = merged_df.index\n result_df = merged_df[fields_to_keep]\n result_df = result_df.rename(columns=fields_rename_mapping)\n result_df.to_csv(output_filename, mode='wb', index=False, header=True,\n delimiter=',', quotechar='\"', quoting=csv.QUOTE_NONNUMERIC)\n logging.info(\"FINISHED merging csv files - saved file '{}'\".format(\n output_filename))\n\n\n<assignment token>\n<code token>\n",
"<assignment token>\n<import token>\n<assignment token>\n<code token>\n<assignment token>\n\n\ndef shp_transform_to_different_projection(input_path, input_fields,\n src_projection, dest_projection, output_filename):\n logging.info(\"START processing shapefile '{}' to '{}'\".format(\n input_path, output_filename))\n csv_file = open(output_filename, 'wb')\n writer = csv.writer(csv_file, delimiter=',', quotechar='\"', quoting=csv\n .QUOTE_NONNUMERIC)\n r = shapefile.Reader(input_path)\n input_shapes = r.shapeRecords()\n nr_of_shapes_in_file = len(input_shapes)\n logging.info(\"{} shapes in file '{}' will be transformed\".format(\n nr_of_shapes_in_file, input_path))\n field_names = [str(i[0]) for i in r.fields]\n field_names.remove('DeletionFlag')\n logging.info('fieldNames in shapefile: {}'.format(field_names))\n input_projection = pyproj.Proj(src_projection)\n output_projection = pyproj.Proj(dest_projection)\n logging.info('shapeType read: {}'.format(r.shapeType))\n counter = 0\n pbar = ProgressBar(widgets=widgets, maxval=nr_of_shapes_in_file).start()\n for input_shape in input_shapes:\n nr_of_points_in_shape = len(input_shape.shape.points)\n result_entry = OrderedDict()\n for input_field in input_fields:\n key = field_names.index(input_field)\n input_record = input_shape.record\n input_entry = input_record[key]\n if isinstance(input_entry, list):\n input_entry = int_array_to_string(input_entry)\n if input_field == 'HECTOMTRNG':\n input_entry = input_record[key] / 10.0\n result_entry[input_field] = input_entry\n if nr_of_points_in_shape == 1:\n input_x = input_shape.shape.points[0][0]\n input_y = input_shape.shape.points[0][1]\n x, y = pyproj.transform(input_projection, output_projection,\n input_x, input_y)\n logging.debug(field_names)\n logging.debug([str(i) for i in input_record])\n logging.debug(\n 'Rijksdriehoekstelsel_New ({:-f}, {:-f}) becomes WGS84 ({:-f}, {:-f})'\n .format(input_x, input_y, x, y))\n result_entry['LONGITUDE'] = x\n result_entry['LATITUDE'] = y\n else:\n logging.debug('number of points for this shape was >1, it was: {}'\n .format(nr_of_points_in_shape))\n headers = result_entry.keys()\n if counter == 0:\n writer.writerow(headers)\n line = []\n for field in headers:\n line.append(result_entry[field])\n writer.writerow(line)\n counter += 1\n pbar.update(counter)\n csv_file.close()\n pbar.finish()\n logging.info(\"FINISHED processing - saved file '{}'\".format(\n output_filename))\n\n\n<function token>\n<function token>\n<assignment token>\n<code token>\n",
"<assignment token>\n<import token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<code token>\n"
] | false |
98,337 |
5efe8e3660934c32f4185028efa188830319c336
|
# -*- coding: utf-8 -*-
"""
Created on Mon May 17 11:07:45 2021
@author: legon
"""
import sympy
print('Matrix([[a,b],[c,d]]')
a = int(input('a:'))
b = int(input('b:'))
c = int(input('c:'))
d = int(input('d:'))
mod = int(input('mod:'))
print()
A = sympy.Matrix([[a,b],[c,d]])
A2 = A.det()
# 最小正剰余
if A2 < 0:
A3 = A2 + mod
elif A2 < mod:
A3 = mod
else:
A3 = A2 % mod
print('A =',A) # 行列の表示
A_1 = A.inv()
print('A**(-1) =',A_1) # #逆行列の表示
print()
print('----------------------------------------')
print('#',A3,'x ≡ 1 (mod',mod,')を解く')
print('----------------------------------------')
print('|A| =',A2,'≡',A3,'(mod)',mod) #
print()
A4 = A2 * A_1
print('A**(-1) ≡',A4,'より')
print()
x,y,t = sympy.gcdex(A3,mod)
A5 = x * A4
a,b,c,d = A5
a %= mod
b %= mod
c %= mod
d %= mod
A6 = sympy.Matrix([[a,b],[c,d]])
print('A**(-1) ≡',x,'*',A4)
print('\t≡',A5)
print('\t≡',A6,'(mod',mod,')')
print()
E = A * A6
a,b,c,d = E
a %= mod
b %= mod
c %= mod
d %= mod
E = sympy.Matrix([[a,b],[c,d]])
print('検算:',A,'*',A6,'≡',E)
|
[
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon May 17 11:07:45 2021\n\n@author: legon\n\"\"\"\n\nimport sympy\n\nprint('Matrix([[a,b],[c,d]]')\n\na = int(input('a:'))\nb = int(input('b:'))\nc = int(input('c:'))\nd = int(input('d:'))\nmod = int(input('mod:'))\nprint()\n\nA = sympy.Matrix([[a,b],[c,d]])\nA2 = A.det()\n\n# 最小正剰余\nif A2 < 0:\n A3 = A2 + mod\nelif A2 < mod:\n A3 = mod\nelse:\n A3 = A2 % mod\n\nprint('A =',A) # 行列の表示\nA_1 = A.inv()\nprint('A**(-1) =',A_1) # #逆行列の表示\nprint()\n\nprint('----------------------------------------')\nprint('#',A3,'x ≡ 1 (mod',mod,')を解く')\nprint('----------------------------------------')\nprint('|A| =',A2,'≡',A3,'(mod)',mod) # \nprint()\n\nA4 = A2 * A_1\nprint('A**(-1) ≡',A4,'より')\nprint()\n\nx,y,t = sympy.gcdex(A3,mod)\nA5 = x * A4\na,b,c,d = A5\na %= mod\nb %= mod\nc %= mod\nd %= mod\nA6 = sympy.Matrix([[a,b],[c,d]])\nprint('A**(-1) ≡',x,'*',A4)\nprint('\\t≡',A5)\nprint('\\t≡',A6,'(mod',mod,')')\nprint()\n\nE = A * A6\na,b,c,d = E\na %= mod\nb %= mod\nc %= mod\nd %= mod\nE = sympy.Matrix([[a,b],[c,d]])\nprint('検算:',A,'*',A6,'≡',E)\n",
"<docstring token>\nimport sympy\nprint('Matrix([[a,b],[c,d]]')\na = int(input('a:'))\nb = int(input('b:'))\nc = int(input('c:'))\nd = int(input('d:'))\nmod = int(input('mod:'))\nprint()\nA = sympy.Matrix([[a, b], [c, d]])\nA2 = A.det()\nif A2 < 0:\n A3 = A2 + mod\nelif A2 < mod:\n A3 = mod\nelse:\n A3 = A2 % mod\nprint('A =', A)\nA_1 = A.inv()\nprint('A**(-1) =', A_1)\nprint()\nprint('----------------------------------------')\nprint('#', A3, 'x ≡ 1 (mod', mod, ')を解く')\nprint('----------------------------------------')\nprint('|A| =', A2, '≡', A3, '(mod)', mod)\nprint()\nA4 = A2 * A_1\nprint('A**(-1) ≡', A4, 'より')\nprint()\nx, y, t = sympy.gcdex(A3, mod)\nA5 = x * A4\na, b, c, d = A5\na %= mod\nb %= mod\nc %= mod\nd %= mod\nA6 = sympy.Matrix([[a, b], [c, d]])\nprint('A**(-1) ≡', x, '*', A4)\nprint('\\t≡', A5)\nprint('\\t≡', A6, '(mod', mod, ')')\nprint()\nE = A * A6\na, b, c, d = E\na %= mod\nb %= mod\nc %= mod\nd %= mod\nE = sympy.Matrix([[a, b], [c, d]])\nprint('検算:', A, '*', A6, '≡', E)\n",
"<docstring token>\n<import token>\nprint('Matrix([[a,b],[c,d]]')\na = int(input('a:'))\nb = int(input('b:'))\nc = int(input('c:'))\nd = int(input('d:'))\nmod = int(input('mod:'))\nprint()\nA = sympy.Matrix([[a, b], [c, d]])\nA2 = A.det()\nif A2 < 0:\n A3 = A2 + mod\nelif A2 < mod:\n A3 = mod\nelse:\n A3 = A2 % mod\nprint('A =', A)\nA_1 = A.inv()\nprint('A**(-1) =', A_1)\nprint()\nprint('----------------------------------------')\nprint('#', A3, 'x ≡ 1 (mod', mod, ')を解く')\nprint('----------------------------------------')\nprint('|A| =', A2, '≡', A3, '(mod)', mod)\nprint()\nA4 = A2 * A_1\nprint('A**(-1) ≡', A4, 'より')\nprint()\nx, y, t = sympy.gcdex(A3, mod)\nA5 = x * A4\na, b, c, d = A5\na %= mod\nb %= mod\nc %= mod\nd %= mod\nA6 = sympy.Matrix([[a, b], [c, d]])\nprint('A**(-1) ≡', x, '*', A4)\nprint('\\t≡', A5)\nprint('\\t≡', A6, '(mod', mod, ')')\nprint()\nE = A * A6\na, b, c, d = E\na %= mod\nb %= mod\nc %= mod\nd %= mod\nE = sympy.Matrix([[a, b], [c, d]])\nprint('検算:', A, '*', A6, '≡', E)\n",
"<docstring token>\n<import token>\nprint('Matrix([[a,b],[c,d]]')\n<assignment token>\nprint()\n<assignment token>\nif A2 < 0:\n A3 = A2 + mod\nelif A2 < mod:\n A3 = mod\nelse:\n A3 = A2 % mod\nprint('A =', A)\n<assignment token>\nprint('A**(-1) =', A_1)\nprint()\nprint('----------------------------------------')\nprint('#', A3, 'x ≡ 1 (mod', mod, ')を解く')\nprint('----------------------------------------')\nprint('|A| =', A2, '≡', A3, '(mod)', mod)\nprint()\n<assignment token>\nprint('A**(-1) ≡', A4, 'より')\nprint()\n<assignment token>\na %= mod\nb %= mod\nc %= mod\nd %= mod\n<assignment token>\nprint('A**(-1) ≡', x, '*', A4)\nprint('\\t≡', A5)\nprint('\\t≡', A6, '(mod', mod, ')')\nprint()\n<assignment token>\na %= mod\nb %= mod\nc %= mod\nd %= mod\n<assignment token>\nprint('検算:', A, '*', A6, '≡', E)\n",
"<docstring token>\n<import token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n"
] | false |
98,338 |
169a4529a6e249cbd506be9b16395bb3f43c23bf
|
from reader.lector import Reader
|
[
"from reader.lector import Reader",
"from reader.lector import Reader\n",
"<import token>\n"
] | false |
98,339 |
249fd3b9083be06b61482151f4f213c3a24bd01b
|
#!/usr/bin/env python
"""
Trivial parser to help with HL7 message debugging.
"""
import glob
from optparse import OptionParser
import sys
import os.path
usage = """%prog [options] segment sequence[,sequence]* file[s]ToParse
This will echo to stdout all the matches found for the given parameters.
Try `%prog --help` for additional information.
segment restricted to segments of this type, i.e. 'MSH' or 'PV1'
sequence parser splits each HL7 message on the '|' delimiter;
which sequence[s] to display, multiple sequences separated
by commas are supported, i.e. 6,12,44. NB, MSH counts
different from all other segments, as the field separator
'|' counts as sequence one.
file[s]ToParse one or more files to parse for matches; glob pattern
support included
"""
class Parser(object):
def __init__(self):
self.segments_of_interest = ""
self.sequences = []
self.filelist = []
self.show_ADT = False
self.show_file = False
self.show_time = False
self.show_visitID = False
self.show_pc = False
def processArgs(self, argv):
""" Process any optional arguments and possitional parameters
"""
parser = OptionParser(usage=usage)
parser.add_option("-a", "--show_ADT", action="store_true", dest="show_ADT",
default=self.show_ADT, help="Display ADT value if set")
parser.add_option("-f", "--show_file", action="store_true", dest="show_file",
default=self.show_file, help="Display matching filename if set")
parser.add_option("-t", "--show_time", action="store_true", dest="show_time",
default=self.show_time, help="Display message time")
parser.add_option("-v", "--show_visitID", action="store_true", dest="show_visitID",
default=self.show_visitID, help="Display visit ID")
parser.add_option("-p", "--show_pc",
action="store_true",
dest="show_pc",
default=self.show_pc,
help="Display patient class")
(options, pargs) = parser.parse_args()
if len(pargs) < 3:
parser.error("incorrect number of arguments")
self.show_ADT = parser.values.show_ADT
self.show_file = parser.values.show_file
self.show_time = parser.values.show_time
self.show_visitID = parser.values.show_visitID
self.show_pc = parser.values.show_pc
self.segments_of_interest = pargs.pop(0)
if len(self.segments_of_interest) != 3:
parser.error("segment '%s' looks incorrect, expected something like 'PV1'"
% self.segments_of_interest)
try:
nums = pargs.pop(0).split(",")
for num in nums:
if 'MSH' == self.segments_of_interest:
num = int(num) - 1
self.sequences.append(int(num))
except:
parser.error("sequence must be an integer, separate multiple w/ comma and no spaces")
for patternOrFile in pargs:
for file in glob.glob(patternOrFile):
if not os.path.isfile(file):
parser.error("can't open input file %s" % file)
self.filelist.append(file)
# Require at least one file
if not len(self.filelist):
parser.error("at least one input file is required")
def parse(self):
for filename in self.filelist:
if self.show_file:
print "READING FILE:",filename
FILE = open(filename, "r")
# by default, the files don't contain newlines. read the whole
# thing in and split on \r
raw = FILE.read()
# occasionally we have a newline type file from a direct
# SQL query or the like - convert back
if raw.find('\n') > 0:
raw = raw.replace('\n','\r')
if raw.find('\\r') > 0:
raw = raw.replace('\\r','\r')
for l in raw.split('\r'):
# hang onto useful message header info and purge
# potentials from the previous message
if 'MSH' == l[0:3]:
sequences = l.split('|')
TIME = sequences[6]
ADT = sequences[8]
PATIENTCLASS, VISITID = '', ''
# hang onto visit id if requested
if self.show_visitID and 'PID' == l[0:3]:
sequences = l.split('|')
VISITID = sequences[18]
# hang onto patient_class if requested
if self.show_pc and 'PV1' == l[0:3]:
sequences = l.split('|')
PATIENTCLASS = sequences[2].split('^')[0]
# print this out if it matches
if self.segments_of_interest == l[0:3]:
sequences = l.split('|')
out = "|".join(
[sequences[e] for e in self.sequences if e < len(sequences)])
# strip newlines
out = out.replace("\n","")
if out:
if self.show_time:
out = ":".join([TIME,out])
if self.show_ADT:
out = ":".join([ADT,out])
if self.show_pc:
out = ":".join([PATIENTCLASS,out])
if self.show_visitID:
out = ":".join([VISITID,out])
print out
def main():
parser = Parser()
parser.processArgs(sys.argv[1:])
parser.parse()
if __name__ == '__main__':
main()
|
[
"#!/usr/bin/env python\n\"\"\"\nTrivial parser to help with HL7 message debugging.\n\"\"\"\nimport glob\nfrom optparse import OptionParser\nimport sys\nimport os.path\n\nusage = \"\"\"%prog [options] segment sequence[,sequence]* file[s]ToParse\n\nThis will echo to stdout all the matches found for the given parameters.\nTry `%prog --help` for additional information.\n\n segment restricted to segments of this type, i.e. 'MSH' or 'PV1'\n sequence parser splits each HL7 message on the '|' delimiter;\n which sequence[s] to display, multiple sequences separated\n by commas are supported, i.e. 6,12,44. NB, MSH counts\n different from all other segments, as the field separator\n '|' counts as sequence one.\n file[s]ToParse one or more files to parse for matches; glob pattern\n support included\n\"\"\"\n\nclass Parser(object):\n \n def __init__(self):\n self.segments_of_interest = \"\"\n self.sequences = []\n self.filelist = []\n self.show_ADT = False\n self.show_file = False\n self.show_time = False\n self.show_visitID = False\n self.show_pc = False\n\n def processArgs(self, argv):\n \"\"\" Process any optional arguments and possitional parameters\n \"\"\"\n parser = OptionParser(usage=usage)\n parser.add_option(\"-a\", \"--show_ADT\", action=\"store_true\", dest=\"show_ADT\",\n default=self.show_ADT, help=\"Display ADT value if set\")\n parser.add_option(\"-f\", \"--show_file\", action=\"store_true\", dest=\"show_file\",\n default=self.show_file, help=\"Display matching filename if set\")\n parser.add_option(\"-t\", \"--show_time\", action=\"store_true\", dest=\"show_time\",\n default=self.show_time, help=\"Display message time\")\n parser.add_option(\"-v\", \"--show_visitID\", action=\"store_true\", dest=\"show_visitID\",\n default=self.show_visitID, help=\"Display visit ID\")\n parser.add_option(\"-p\", \"--show_pc\",\n action=\"store_true\",\n dest=\"show_pc\",\n default=self.show_pc,\n help=\"Display patient class\")\n\n (options, pargs) = parser.parse_args()\n if len(pargs) < 3:\n parser.error(\"incorrect number of arguments\")\n\n self.show_ADT = parser.values.show_ADT\n self.show_file = parser.values.show_file\n self.show_time = parser.values.show_time\n self.show_visitID = parser.values.show_visitID\n self.show_pc = parser.values.show_pc\n \n self.segments_of_interest = pargs.pop(0)\n if len(self.segments_of_interest) != 3:\n parser.error(\"segment '%s' looks incorrect, expected something like 'PV1'\"\n % self.segments_of_interest)\n\n try:\n nums = pargs.pop(0).split(\",\")\n for num in nums:\n if 'MSH' == self.segments_of_interest:\n num = int(num) - 1\n self.sequences.append(int(num))\n except:\n parser.error(\"sequence must be an integer, separate multiple w/ comma and no spaces\")\n\n for patternOrFile in pargs:\n for file in glob.glob(patternOrFile):\n if not os.path.isfile(file):\n parser.error(\"can't open input file %s\" % file)\n self.filelist.append(file)\n \n # Require at least one file\n if not len(self.filelist):\n parser.error(\"at least one input file is required\")\n\n def parse(self):\n for filename in self.filelist:\n if self.show_file:\n print \"READING FILE:\",filename\n\n FILE = open(filename, \"r\")\n # by default, the files don't contain newlines. read the whole\n # thing in and split on \\r\n raw = FILE.read()\n\n # occasionally we have a newline type file from a direct\n # SQL query or the like - convert back\n if raw.find('\\n') > 0:\n raw = raw.replace('\\n','\\r')\n if raw.find('\\\\r') > 0:\n raw = raw.replace('\\\\r','\\r')\n\n for l in raw.split('\\r'):\n # hang onto useful message header info and purge\n # potentials from the previous message\n if 'MSH' == l[0:3]:\n sequences = l.split('|')\n TIME = sequences[6]\n ADT = sequences[8]\n PATIENTCLASS, VISITID = '', ''\n\n # hang onto visit id if requested\n if self.show_visitID and 'PID' == l[0:3]:\n sequences = l.split('|')\n VISITID = sequences[18]\n\n # hang onto patient_class if requested\n if self.show_pc and 'PV1' == l[0:3]:\n sequences = l.split('|')\n PATIENTCLASS = sequences[2].split('^')[0]\n\n # print this out if it matches\n if self.segments_of_interest == l[0:3]:\n sequences = l.split('|')\n out = \"|\".join(\n [sequences[e] for e in self.sequences if e < len(sequences)])\n # strip newlines\n out = out.replace(\"\\n\",\"\")\n if out:\n if self.show_time:\n out = \":\".join([TIME,out])\n if self.show_ADT:\n out = \":\".join([ADT,out])\n if self.show_pc:\n out = \":\".join([PATIENTCLASS,out])\n if self.show_visitID:\n out = \":\".join([VISITID,out])\n\n print out\n\ndef main():\n parser = Parser()\n parser.processArgs(sys.argv[1:])\n parser.parse()\n\nif __name__ == '__main__':\n main()\n"
] | true |
98,340 |
2a921deb5dc67057a26fe9a4bee45b1538544cde
|
t = input()
a = 5*60
b = 60
c = 10
if t%10 != 0:
print -1
else:
a2 = t/a
b2 = t%a/b
c2 = t%b/c
print a2,b2,c2
|
[
"t = input()\na = 5*60\nb = 60\nc = 10\n\nif t%10 != 0: \n print -1\nelse:\n a2 = t/a\n b2 = t%a/b\n c2 = t%b/c \n print a2,b2,c2\n \n"
] | true |
98,341 |
69a94f2b753235455e254ce8d14149556962b6ae
|
from servidormodbus import ServidorMODBUS
s = ServidorMODBUS('localhost',502)
s.run()
|
[
"from servidormodbus import ServidorMODBUS\n\ns = ServidorMODBUS('localhost',502)\ns.run()",
"from servidormodbus import ServidorMODBUS\ns = ServidorMODBUS('localhost', 502)\ns.run()\n",
"<import token>\ns = ServidorMODBUS('localhost', 502)\ns.run()\n",
"<import token>\n<assignment token>\ns.run()\n",
"<import token>\n<assignment token>\n<code token>\n"
] | false |
98,342 |
b65721e2a28eaaf0435988c35ac945ff7c7855a0
|
"""
This package contains the code that you should also have when you followed
the Babel Tutorial (:doc:`/tutorials/babel`)
"""
|
[
"\"\"\"\nThis package contains the code that you should also have when you followed \nthe Babel Tutorial (:doc:`/tutorials/babel`)\n\"\"\"\n",
"<docstring token>\n"
] | false |
98,343 |
2e55849cbca7f3a0a90b7caa5ca7e9c1844a5bc5
|
from flask import Flask
from src.main.service.DataAccessService import DataAccessService
# The Data Transformation controller, transforms the data from various datasources into standardised JSON format
class DataTransformationController:
app = Flask(__name__)
@app.route("/")
def transform():
return DataAccessService.get_data()
if __name__ == "__main__":
app.run(debug=True)
|
[
"from flask import Flask\nfrom src.main.service.DataAccessService import DataAccessService\n# The Data Transformation controller, transforms the data from various datasources into standardised JSON format\n\n\nclass DataTransformationController:\n\n app = Flask(__name__)\n\n @app.route(\"/\")\n def transform():\n return DataAccessService.get_data()\n\n if __name__ == \"__main__\":\n app.run(debug=True)\n",
"from flask import Flask\nfrom src.main.service.DataAccessService import DataAccessService\n\n\nclass DataTransformationController:\n app = Flask(__name__)\n\n @app.route('/')\n def transform():\n return DataAccessService.get_data()\n if __name__ == '__main__':\n app.run(debug=True)\n",
"<import token>\n\n\nclass DataTransformationController:\n app = Flask(__name__)\n\n @app.route('/')\n def transform():\n return DataAccessService.get_data()\n if __name__ == '__main__':\n app.run(debug=True)\n",
"<import token>\n\n\nclass DataTransformationController:\n <assignment token>\n\n @app.route('/')\n def transform():\n return DataAccessService.get_data()\n if __name__ == '__main__':\n app.run(debug=True)\n",
"<import token>\n\n\nclass DataTransformationController:\n <assignment token>\n <function token>\n if __name__ == '__main__':\n app.run(debug=True)\n",
"<import token>\n<class token>\n"
] | false |
98,344 |
bfe446b0d244a04c9f78ecdd9c65528646b47b66
|
#no.1
print("hello world!")#hello worlde
'''zhushi'''
print"hello"
print 'hello'
|
[
"#no.1 \nprint(\"hello world!\")#hello worlde\n'''zhushi'''\nprint\"hello\"\nprint 'hello'\n"
] | true |
98,345 |
9eefc2856b8f4780dafb7878759d7c41c5e34d21
|
# -*- coding: utf-8 -*-
from gluon import current
from s3 import *
from s3layouts import *
try:
from .layouts import *
except ImportError:
pass
import s3menus as default
# =============================================================================
class S3MainMenu(default.S3MainMenu):
""" Custom Application Main Menu """
# -------------------------------------------------------------------------
@classmethod
def menu_modules(cls):
""" Custom Modules Menu """
menu= [MM("Call Logs", c="event", f="incident_report"),
MM("Incidents", c="event", f="incident", m="summary"),
MM("Scenarios", c="event", f="scenario"),
MM("more", link=False)(
MM("Documents", c="doc", f="document"),
MM("Events", c="event", f="event"),
MM("Staff", c="hrm", f="staff"),
MM("Volunteers", c="vol", f="volunteer"),
MM("Assets", c="asset", f="asset"),
MM("Organizations", c="org", f="organisation"),
MM("Facilities", c="org", f="facility"),
#MM("Hospitals", c="med", f="hospital", m="summary"),
MM("Shelters", c="cr", f="shelter"),
MM("Warehouses", c="inv", f="warehouse"),
MM("Item Catalog", c="supply", f="catalog_item"),
),
]
return menu
# -------------------------------------------------------------------------
@classmethod
def menu_auth(cls, **attr):
"""
Auth Menu
- switch Login to use OpenID Connect
"""
auth = current.auth
logged_in = auth.is_logged_in()
settings = current.deployment_settings
if not logged_in:
request = current.request
login_next = URL(args=request.args, vars=request.vars)
if request.controller == "default" and \
request.function == "user" and \
"_next" in request.get_vars:
login_next = request.get_vars["_next"]
self_registration = settings.get_security_registration_visible()
if self_registration == "index":
register = MM("Register", c="default", f="index", m="register",
vars=dict(_next=login_next),
check=self_registration)
else:
register = MM("Register", m="register",
vars=dict(_next=login_next),
check=self_registration)
if settings.get_auth_password_changes() and \
settings.get_auth_password_retrieval():
lost_pw = MM("Lost Password", m="retrieve_password")
else:
lost_pw = None
menu_auth = MM("Login", c="default", f="openid_connect", m="login",
_id="auth_menu_login",
vars=dict(_next=login_next), **attr)(
MM("Login", m="login",
vars=dict(_next=login_next)),
register,
lost_pw,
)
else:
# Logged-in
if settings.get_auth_password_changes():
change_pw = MM("Change Password", m="change_password")
else:
change_pw = None
menu_auth = MM(auth.user.email, c="default", f="user",
translate=False, link=False, _id="auth_menu_email",
**attr)(
MM("Logout", m="logout", _id="auth_menu_logout"),
MM("User Profile", m="profile"),
MM("Personal Data", c="default", f="person", m="update"),
MM("Contact Details", c="pr", f="person",
args="contact",
vars={"person.pe_id" : auth.user.pe_id}),
#MM("Subscriptions", c="pr", f="person",
# args="pe_subscription",
# vars={"person.pe_id" : auth.user.pe_id}),
change_pw,
SEP(),
MM({"name": current.T("Rapid Data Entry"),
"id": "rapid_toggle",
"value": current.session.s3.rapid_data_entry is True},
f="rapid"),
)
return menu_auth
# END =========================================================================
|
[
"# -*- coding: utf-8 -*-\n\nfrom gluon import current\nfrom s3 import *\nfrom s3layouts import *\ntry:\n from .layouts import *\nexcept ImportError:\n pass\nimport s3menus as default\n\n# =============================================================================\nclass S3MainMenu(default.S3MainMenu):\n \"\"\" Custom Application Main Menu \"\"\"\n\n # -------------------------------------------------------------------------\n @classmethod\n def menu_modules(cls):\n \"\"\" Custom Modules Menu \"\"\"\n\n menu= [MM(\"Call Logs\", c=\"event\", f=\"incident_report\"),\n MM(\"Incidents\", c=\"event\", f=\"incident\", m=\"summary\"),\n MM(\"Scenarios\", c=\"event\", f=\"scenario\"),\n MM(\"more\", link=False)(\n MM(\"Documents\", c=\"doc\", f=\"document\"),\n MM(\"Events\", c=\"event\", f=\"event\"),\n MM(\"Staff\", c=\"hrm\", f=\"staff\"),\n MM(\"Volunteers\", c=\"vol\", f=\"volunteer\"),\n MM(\"Assets\", c=\"asset\", f=\"asset\"),\n MM(\"Organizations\", c=\"org\", f=\"organisation\"),\n MM(\"Facilities\", c=\"org\", f=\"facility\"),\n #MM(\"Hospitals\", c=\"med\", f=\"hospital\", m=\"summary\"),\n MM(\"Shelters\", c=\"cr\", f=\"shelter\"),\n MM(\"Warehouses\", c=\"inv\", f=\"warehouse\"),\n MM(\"Item Catalog\", c=\"supply\", f=\"catalog_item\"),\n ),\n ]\n\n return menu\n\n # -------------------------------------------------------------------------\n @classmethod\n def menu_auth(cls, **attr):\n \"\"\"\n Auth Menu\n - switch Login to use OpenID Connect\n \"\"\"\n\n auth = current.auth\n logged_in = auth.is_logged_in()\n settings = current.deployment_settings\n\n if not logged_in:\n request = current.request\n login_next = URL(args=request.args, vars=request.vars)\n if request.controller == \"default\" and \\\n request.function == \"user\" and \\\n \"_next\" in request.get_vars:\n login_next = request.get_vars[\"_next\"]\n\n self_registration = settings.get_security_registration_visible()\n if self_registration == \"index\":\n register = MM(\"Register\", c=\"default\", f=\"index\", m=\"register\",\n vars=dict(_next=login_next),\n check=self_registration)\n else:\n register = MM(\"Register\", m=\"register\",\n vars=dict(_next=login_next),\n check=self_registration)\n\n if settings.get_auth_password_changes() and \\\n settings.get_auth_password_retrieval():\n lost_pw = MM(\"Lost Password\", m=\"retrieve_password\")\n else:\n lost_pw = None\n\n menu_auth = MM(\"Login\", c=\"default\", f=\"openid_connect\", m=\"login\",\n _id=\"auth_menu_login\",\n vars=dict(_next=login_next), **attr)(\n MM(\"Login\", m=\"login\",\n vars=dict(_next=login_next)),\n register,\n lost_pw,\n )\n else:\n # Logged-in\n\n if settings.get_auth_password_changes():\n change_pw = MM(\"Change Password\", m=\"change_password\")\n else:\n change_pw = None\n\n menu_auth = MM(auth.user.email, c=\"default\", f=\"user\",\n translate=False, link=False, _id=\"auth_menu_email\",\n **attr)(\n MM(\"Logout\", m=\"logout\", _id=\"auth_menu_logout\"),\n MM(\"User Profile\", m=\"profile\"),\n MM(\"Personal Data\", c=\"default\", f=\"person\", m=\"update\"),\n MM(\"Contact Details\", c=\"pr\", f=\"person\",\n args=\"contact\",\n vars={\"person.pe_id\" : auth.user.pe_id}),\n #MM(\"Subscriptions\", c=\"pr\", f=\"person\",\n # args=\"pe_subscription\",\n # vars={\"person.pe_id\" : auth.user.pe_id}),\n change_pw,\n SEP(),\n MM({\"name\": current.T(\"Rapid Data Entry\"),\n \"id\": \"rapid_toggle\",\n \"value\": current.session.s3.rapid_data_entry is True},\n f=\"rapid\"),\n )\n\n return menu_auth\n\n# END =========================================================================\n",
"from gluon import current\nfrom s3 import *\nfrom s3layouts import *\ntry:\n from .layouts import *\nexcept ImportError:\n pass\nimport s3menus as default\n\n\nclass S3MainMenu(default.S3MainMenu):\n \"\"\" Custom Application Main Menu \"\"\"\n\n @classmethod\n def menu_modules(cls):\n \"\"\" Custom Modules Menu \"\"\"\n menu = [MM('Call Logs', c='event', f='incident_report'), MM(\n 'Incidents', c='event', f='incident', m='summary'), MM(\n 'Scenarios', c='event', f='scenario'), MM('more', link=False)(\n MM('Documents', c='doc', f='document'), MM('Events', c='event',\n f='event'), MM('Staff', c='hrm', f='staff'), MM('Volunteers', c\n ='vol', f='volunteer'), MM('Assets', c='asset', f='asset'), MM(\n 'Organizations', c='org', f='organisation'), MM('Facilities', c\n ='org', f='facility'), MM('Shelters', c='cr', f='shelter'), MM(\n 'Warehouses', c='inv', f='warehouse'), MM('Item Catalog', c=\n 'supply', f='catalog_item'))]\n return menu\n\n @classmethod\n def menu_auth(cls, **attr):\n \"\"\"\n Auth Menu\n - switch Login to use OpenID Connect\n \"\"\"\n auth = current.auth\n logged_in = auth.is_logged_in()\n settings = current.deployment_settings\n if not logged_in:\n request = current.request\n login_next = URL(args=request.args, vars=request.vars)\n if (request.controller == 'default' and request.function ==\n 'user' and '_next' in request.get_vars):\n login_next = request.get_vars['_next']\n self_registration = settings.get_security_registration_visible()\n if self_registration == 'index':\n register = MM('Register', c='default', f='index', m=\n 'register', vars=dict(_next=login_next), check=\n self_registration)\n else:\n register = MM('Register', m='register', vars=dict(_next=\n login_next), check=self_registration)\n if settings.get_auth_password_changes(\n ) and settings.get_auth_password_retrieval():\n lost_pw = MM('Lost Password', m='retrieve_password')\n else:\n lost_pw = None\n menu_auth = MM('Login', c='default', f='openid_connect', m=\n 'login', _id='auth_menu_login', vars=dict(_next=login_next),\n **attr)(MM('Login', m='login', vars=dict(_next=login_next)),\n register, lost_pw)\n else:\n if settings.get_auth_password_changes():\n change_pw = MM('Change Password', m='change_password')\n else:\n change_pw = None\n menu_auth = MM(auth.user.email, c='default', f='user',\n translate=False, link=False, _id='auth_menu_email', **attr)(MM\n ('Logout', m='logout', _id='auth_menu_logout'), MM(\n 'User Profile', m='profile'), MM('Personal Data', c=\n 'default', f='person', m='update'), MM('Contact Details', c\n ='pr', f='person', args='contact', vars={'person.pe_id':\n auth.user.pe_id}), change_pw, SEP(), MM({'name': current.T(\n 'Rapid Data Entry'), 'id': 'rapid_toggle', 'value': current\n .session.s3.rapid_data_entry is True}, f='rapid'))\n return menu_auth\n",
"<import token>\ntry:\n from .layouts import *\nexcept ImportError:\n pass\n<import token>\n\n\nclass S3MainMenu(default.S3MainMenu):\n \"\"\" Custom Application Main Menu \"\"\"\n\n @classmethod\n def menu_modules(cls):\n \"\"\" Custom Modules Menu \"\"\"\n menu = [MM('Call Logs', c='event', f='incident_report'), MM(\n 'Incidents', c='event', f='incident', m='summary'), MM(\n 'Scenarios', c='event', f='scenario'), MM('more', link=False)(\n MM('Documents', c='doc', f='document'), MM('Events', c='event',\n f='event'), MM('Staff', c='hrm', f='staff'), MM('Volunteers', c\n ='vol', f='volunteer'), MM('Assets', c='asset', f='asset'), MM(\n 'Organizations', c='org', f='organisation'), MM('Facilities', c\n ='org', f='facility'), MM('Shelters', c='cr', f='shelter'), MM(\n 'Warehouses', c='inv', f='warehouse'), MM('Item Catalog', c=\n 'supply', f='catalog_item'))]\n return menu\n\n @classmethod\n def menu_auth(cls, **attr):\n \"\"\"\n Auth Menu\n - switch Login to use OpenID Connect\n \"\"\"\n auth = current.auth\n logged_in = auth.is_logged_in()\n settings = current.deployment_settings\n if not logged_in:\n request = current.request\n login_next = URL(args=request.args, vars=request.vars)\n if (request.controller == 'default' and request.function ==\n 'user' and '_next' in request.get_vars):\n login_next = request.get_vars['_next']\n self_registration = settings.get_security_registration_visible()\n if self_registration == 'index':\n register = MM('Register', c='default', f='index', m=\n 'register', vars=dict(_next=login_next), check=\n self_registration)\n else:\n register = MM('Register', m='register', vars=dict(_next=\n login_next), check=self_registration)\n if settings.get_auth_password_changes(\n ) and settings.get_auth_password_retrieval():\n lost_pw = MM('Lost Password', m='retrieve_password')\n else:\n lost_pw = None\n menu_auth = MM('Login', c='default', f='openid_connect', m=\n 'login', _id='auth_menu_login', vars=dict(_next=login_next),\n **attr)(MM('Login', m='login', vars=dict(_next=login_next)),\n register, lost_pw)\n else:\n if settings.get_auth_password_changes():\n change_pw = MM('Change Password', m='change_password')\n else:\n change_pw = None\n menu_auth = MM(auth.user.email, c='default', f='user',\n translate=False, link=False, _id='auth_menu_email', **attr)(MM\n ('Logout', m='logout', _id='auth_menu_logout'), MM(\n 'User Profile', m='profile'), MM('Personal Data', c=\n 'default', f='person', m='update'), MM('Contact Details', c\n ='pr', f='person', args='contact', vars={'person.pe_id':\n auth.user.pe_id}), change_pw, SEP(), MM({'name': current.T(\n 'Rapid Data Entry'), 'id': 'rapid_toggle', 'value': current\n .session.s3.rapid_data_entry is True}, f='rapid'))\n return menu_auth\n",
"<import token>\n<code token>\n<import token>\n\n\nclass S3MainMenu(default.S3MainMenu):\n \"\"\" Custom Application Main Menu \"\"\"\n\n @classmethod\n def menu_modules(cls):\n \"\"\" Custom Modules Menu \"\"\"\n menu = [MM('Call Logs', c='event', f='incident_report'), MM(\n 'Incidents', c='event', f='incident', m='summary'), MM(\n 'Scenarios', c='event', f='scenario'), MM('more', link=False)(\n MM('Documents', c='doc', f='document'), MM('Events', c='event',\n f='event'), MM('Staff', c='hrm', f='staff'), MM('Volunteers', c\n ='vol', f='volunteer'), MM('Assets', c='asset', f='asset'), MM(\n 'Organizations', c='org', f='organisation'), MM('Facilities', c\n ='org', f='facility'), MM('Shelters', c='cr', f='shelter'), MM(\n 'Warehouses', c='inv', f='warehouse'), MM('Item Catalog', c=\n 'supply', f='catalog_item'))]\n return menu\n\n @classmethod\n def menu_auth(cls, **attr):\n \"\"\"\n Auth Menu\n - switch Login to use OpenID Connect\n \"\"\"\n auth = current.auth\n logged_in = auth.is_logged_in()\n settings = current.deployment_settings\n if not logged_in:\n request = current.request\n login_next = URL(args=request.args, vars=request.vars)\n if (request.controller == 'default' and request.function ==\n 'user' and '_next' in request.get_vars):\n login_next = request.get_vars['_next']\n self_registration = settings.get_security_registration_visible()\n if self_registration == 'index':\n register = MM('Register', c='default', f='index', m=\n 'register', vars=dict(_next=login_next), check=\n self_registration)\n else:\n register = MM('Register', m='register', vars=dict(_next=\n login_next), check=self_registration)\n if settings.get_auth_password_changes(\n ) and settings.get_auth_password_retrieval():\n lost_pw = MM('Lost Password', m='retrieve_password')\n else:\n lost_pw = None\n menu_auth = MM('Login', c='default', f='openid_connect', m=\n 'login', _id='auth_menu_login', vars=dict(_next=login_next),\n **attr)(MM('Login', m='login', vars=dict(_next=login_next)),\n register, lost_pw)\n else:\n if settings.get_auth_password_changes():\n change_pw = MM('Change Password', m='change_password')\n else:\n change_pw = None\n menu_auth = MM(auth.user.email, c='default', f='user',\n translate=False, link=False, _id='auth_menu_email', **attr)(MM\n ('Logout', m='logout', _id='auth_menu_logout'), MM(\n 'User Profile', m='profile'), MM('Personal Data', c=\n 'default', f='person', m='update'), MM('Contact Details', c\n ='pr', f='person', args='contact', vars={'person.pe_id':\n auth.user.pe_id}), change_pw, SEP(), MM({'name': current.T(\n 'Rapid Data Entry'), 'id': 'rapid_toggle', 'value': current\n .session.s3.rapid_data_entry is True}, f='rapid'))\n return menu_auth\n",
"<import token>\n<code token>\n<import token>\n\n\nclass S3MainMenu(default.S3MainMenu):\n <docstring token>\n\n @classmethod\n def menu_modules(cls):\n \"\"\" Custom Modules Menu \"\"\"\n menu = [MM('Call Logs', c='event', f='incident_report'), MM(\n 'Incidents', c='event', f='incident', m='summary'), MM(\n 'Scenarios', c='event', f='scenario'), MM('more', link=False)(\n MM('Documents', c='doc', f='document'), MM('Events', c='event',\n f='event'), MM('Staff', c='hrm', f='staff'), MM('Volunteers', c\n ='vol', f='volunteer'), MM('Assets', c='asset', f='asset'), MM(\n 'Organizations', c='org', f='organisation'), MM('Facilities', c\n ='org', f='facility'), MM('Shelters', c='cr', f='shelter'), MM(\n 'Warehouses', c='inv', f='warehouse'), MM('Item Catalog', c=\n 'supply', f='catalog_item'))]\n return menu\n\n @classmethod\n def menu_auth(cls, **attr):\n \"\"\"\n Auth Menu\n - switch Login to use OpenID Connect\n \"\"\"\n auth = current.auth\n logged_in = auth.is_logged_in()\n settings = current.deployment_settings\n if not logged_in:\n request = current.request\n login_next = URL(args=request.args, vars=request.vars)\n if (request.controller == 'default' and request.function ==\n 'user' and '_next' in request.get_vars):\n login_next = request.get_vars['_next']\n self_registration = settings.get_security_registration_visible()\n if self_registration == 'index':\n register = MM('Register', c='default', f='index', m=\n 'register', vars=dict(_next=login_next), check=\n self_registration)\n else:\n register = MM('Register', m='register', vars=dict(_next=\n login_next), check=self_registration)\n if settings.get_auth_password_changes(\n ) and settings.get_auth_password_retrieval():\n lost_pw = MM('Lost Password', m='retrieve_password')\n else:\n lost_pw = None\n menu_auth = MM('Login', c='default', f='openid_connect', m=\n 'login', _id='auth_menu_login', vars=dict(_next=login_next),\n **attr)(MM('Login', m='login', vars=dict(_next=login_next)),\n register, lost_pw)\n else:\n if settings.get_auth_password_changes():\n change_pw = MM('Change Password', m='change_password')\n else:\n change_pw = None\n menu_auth = MM(auth.user.email, c='default', f='user',\n translate=False, link=False, _id='auth_menu_email', **attr)(MM\n ('Logout', m='logout', _id='auth_menu_logout'), MM(\n 'User Profile', m='profile'), MM('Personal Data', c=\n 'default', f='person', m='update'), MM('Contact Details', c\n ='pr', f='person', args='contact', vars={'person.pe_id':\n auth.user.pe_id}), change_pw, SEP(), MM({'name': current.T(\n 'Rapid Data Entry'), 'id': 'rapid_toggle', 'value': current\n .session.s3.rapid_data_entry is True}, f='rapid'))\n return menu_auth\n",
"<import token>\n<code token>\n<import token>\n\n\nclass S3MainMenu(default.S3MainMenu):\n <docstring token>\n <function token>\n\n @classmethod\n def menu_auth(cls, **attr):\n \"\"\"\n Auth Menu\n - switch Login to use OpenID Connect\n \"\"\"\n auth = current.auth\n logged_in = auth.is_logged_in()\n settings = current.deployment_settings\n if not logged_in:\n request = current.request\n login_next = URL(args=request.args, vars=request.vars)\n if (request.controller == 'default' and request.function ==\n 'user' and '_next' in request.get_vars):\n login_next = request.get_vars['_next']\n self_registration = settings.get_security_registration_visible()\n if self_registration == 'index':\n register = MM('Register', c='default', f='index', m=\n 'register', vars=dict(_next=login_next), check=\n self_registration)\n else:\n register = MM('Register', m='register', vars=dict(_next=\n login_next), check=self_registration)\n if settings.get_auth_password_changes(\n ) and settings.get_auth_password_retrieval():\n lost_pw = MM('Lost Password', m='retrieve_password')\n else:\n lost_pw = None\n menu_auth = MM('Login', c='default', f='openid_connect', m=\n 'login', _id='auth_menu_login', vars=dict(_next=login_next),\n **attr)(MM('Login', m='login', vars=dict(_next=login_next)),\n register, lost_pw)\n else:\n if settings.get_auth_password_changes():\n change_pw = MM('Change Password', m='change_password')\n else:\n change_pw = None\n menu_auth = MM(auth.user.email, c='default', f='user',\n translate=False, link=False, _id='auth_menu_email', **attr)(MM\n ('Logout', m='logout', _id='auth_menu_logout'), MM(\n 'User Profile', m='profile'), MM('Personal Data', c=\n 'default', f='person', m='update'), MM('Contact Details', c\n ='pr', f='person', args='contact', vars={'person.pe_id':\n auth.user.pe_id}), change_pw, SEP(), MM({'name': current.T(\n 'Rapid Data Entry'), 'id': 'rapid_toggle', 'value': current\n .session.s3.rapid_data_entry is True}, f='rapid'))\n return menu_auth\n",
"<import token>\n<code token>\n<import token>\n\n\nclass S3MainMenu(default.S3MainMenu):\n <docstring token>\n <function token>\n <function token>\n",
"<import token>\n<code token>\n<import token>\n<class token>\n"
] | false |
98,346 |
8ec82fc579e3fbf57f4414e7b1be715025ce4ca7
|
from pathlib import Path
from sqlalchemy import inspect
import pysodium
from environs import Env
def sa_to_dict(obj):
"""
Serialize SQLAlchemy object to dictionary.
- https://stackoverflow.com/a/37350445
- https://docs.sqlalchemy.org/en/14/core/inspection.html
:param obj:
:return:
"""
return {c.key: getattr(obj, c.key)
for c in inspect(obj).mapper.column_attrs}
def gen_keypair():
return pysodium.crypto_box_keypair()
def read_config():
here = Path(__file__).parent.parent.parent
env = Env(expand_vars=True)
env.read_env(here / 'etc/tunfish/config/.env', recurse=False)
|
[
"from pathlib import Path\n\nfrom sqlalchemy import inspect\nimport pysodium\n\nfrom environs import Env\n\n\ndef sa_to_dict(obj):\n \"\"\"\n Serialize SQLAlchemy object to dictionary.\n - https://stackoverflow.com/a/37350445\n - https://docs.sqlalchemy.org/en/14/core/inspection.html\n :param obj:\n :return:\n \"\"\"\n return {c.key: getattr(obj, c.key)\n for c in inspect(obj).mapper.column_attrs}\n\n\ndef gen_keypair():\n return pysodium.crypto_box_keypair()\n\n\ndef read_config():\n here = Path(__file__).parent.parent.parent\n\n env = Env(expand_vars=True)\n env.read_env(here / 'etc/tunfish/config/.env', recurse=False)\n",
"from pathlib import Path\nfrom sqlalchemy import inspect\nimport pysodium\nfrom environs import Env\n\n\ndef sa_to_dict(obj):\n \"\"\"\n Serialize SQLAlchemy object to dictionary.\n - https://stackoverflow.com/a/37350445\n - https://docs.sqlalchemy.org/en/14/core/inspection.html\n :param obj:\n :return:\n \"\"\"\n return {c.key: getattr(obj, c.key) for c in inspect(obj).mapper.\n column_attrs}\n\n\ndef gen_keypair():\n return pysodium.crypto_box_keypair()\n\n\ndef read_config():\n here = Path(__file__).parent.parent.parent\n env = Env(expand_vars=True)\n env.read_env(here / 'etc/tunfish/config/.env', recurse=False)\n",
"<import token>\n\n\ndef sa_to_dict(obj):\n \"\"\"\n Serialize SQLAlchemy object to dictionary.\n - https://stackoverflow.com/a/37350445\n - https://docs.sqlalchemy.org/en/14/core/inspection.html\n :param obj:\n :return:\n \"\"\"\n return {c.key: getattr(obj, c.key) for c in inspect(obj).mapper.\n column_attrs}\n\n\ndef gen_keypair():\n return pysodium.crypto_box_keypair()\n\n\ndef read_config():\n here = Path(__file__).parent.parent.parent\n env = Env(expand_vars=True)\n env.read_env(here / 'etc/tunfish/config/.env', recurse=False)\n",
"<import token>\n\n\ndef sa_to_dict(obj):\n \"\"\"\n Serialize SQLAlchemy object to dictionary.\n - https://stackoverflow.com/a/37350445\n - https://docs.sqlalchemy.org/en/14/core/inspection.html\n :param obj:\n :return:\n \"\"\"\n return {c.key: getattr(obj, c.key) for c in inspect(obj).mapper.\n column_attrs}\n\n\n<function token>\n\n\ndef read_config():\n here = Path(__file__).parent.parent.parent\n env = Env(expand_vars=True)\n env.read_env(here / 'etc/tunfish/config/.env', recurse=False)\n",
"<import token>\n<function token>\n<function token>\n\n\ndef read_config():\n here = Path(__file__).parent.parent.parent\n env = Env(expand_vars=True)\n env.read_env(here / 'etc/tunfish/config/.env', recurse=False)\n",
"<import token>\n<function token>\n<function token>\n<function token>\n"
] | false |
98,347 |
e6003b685275ffc5fb18cd9e1640ee79b3a5d546
|
# -*- coding: utf-8 -*-
import json
import logging
import requests
from pygns.exceptions import GNS3GenericError, GNS3ProjectExitsError
class GNS3Project:
"""
Create a new GNS3 Project
http://api.gns3.net/en/latest/curl.html#create-project
"""
def __init__(self, project_name, gns3server):
"""
:param project_name: Project name
:param gns3server: GNS3Server object
"""
self._project_name = project_name
self._api_endpoint = gns3server.api_endpoint()
self._url = '{}projects'.format(self._api_endpoint)
data = {"name": project_name}
self._response = requests.post(self._url, data=json.dumps(data))
status_code = self._response.status_code
if status_code == 409:
raise GNS3ProjectExitsError('File {}.gns3 already exists.'.format(project_name))
elif status_code == 404:
raise GNS3GenericError('This Error is not expected, please contact developer')
else:
params = self.get_project_params()
self.base_url = '{}projects/{}'.format(params['server_end_point'], params['project_id'])
self.nodes_url = self.base_url + '/nodes'
self.links_url = self.base_url + '/links'
def __repr__(self):
params = json.dumps(self.get_project_params(), indent=4, sort_keys=True)
return '{}: {}'.format(self.__class__.__name__, params)
def get_project_params(self):
"""
GNS3 Project params
:return:
"""
r = self._response.json()
params = {
'server_end_point': self._api_endpoint,
'project_id': r.get('project_id'),
'filename': r.get('filename'),
'path': r.get('path'),
'status': r.get('status'),
}
return params
def get_all_links(self):
"""
List all links in the project
:return:
"""
links_url = "{}/links".format(self._project_url)
print(links_url)
response = requests.get(links_url).json()
return json.dumps(response, indent=4, sort_keys=True)
|
[
"# -*- coding: utf-8 -*-\n\nimport json\nimport logging\nimport requests\nfrom pygns.exceptions import GNS3GenericError, GNS3ProjectExitsError\n\n\nclass GNS3Project:\n \"\"\"\n Create a new GNS3 Project\n http://api.gns3.net/en/latest/curl.html#create-project\n \"\"\"\n\n def __init__(self, project_name, gns3server):\n \"\"\"\n\n :param project_name: Project name\n :param gns3server: GNS3Server object\n \"\"\"\n self._project_name = project_name\n self._api_endpoint = gns3server.api_endpoint()\n self._url = '{}projects'.format(self._api_endpoint)\n data = {\"name\": project_name}\n self._response = requests.post(self._url, data=json.dumps(data))\n status_code = self._response.status_code\n if status_code == 409:\n raise GNS3ProjectExitsError('File {}.gns3 already exists.'.format(project_name))\n elif status_code == 404:\n raise GNS3GenericError('This Error is not expected, please contact developer')\n else:\n params = self.get_project_params()\n self.base_url = '{}projects/{}'.format(params['server_end_point'], params['project_id'])\n self.nodes_url = self.base_url + '/nodes'\n self.links_url = self.base_url + '/links'\n\n def __repr__(self):\n params = json.dumps(self.get_project_params(), indent=4, sort_keys=True)\n return '{}: {}'.format(self.__class__.__name__, params)\n\n def get_project_params(self):\n \"\"\"\n GNS3 Project params\n :return: \n \"\"\"\n r = self._response.json()\n params = {\n 'server_end_point': self._api_endpoint,\n 'project_id': r.get('project_id'),\n 'filename': r.get('filename'),\n 'path': r.get('path'),\n 'status': r.get('status'),\n }\n return params\n\n def get_all_links(self):\n \"\"\"\n List all links in the project\n :return: \n \"\"\"\n links_url = \"{}/links\".format(self._project_url)\n print(links_url)\n response = requests.get(links_url).json()\n return json.dumps(response, indent=4, sort_keys=True)\n",
"import json\nimport logging\nimport requests\nfrom pygns.exceptions import GNS3GenericError, GNS3ProjectExitsError\n\n\nclass GNS3Project:\n \"\"\"\n Create a new GNS3 Project\n http://api.gns3.net/en/latest/curl.html#create-project\n \"\"\"\n\n def __init__(self, project_name, gns3server):\n \"\"\"\n\n :param project_name: Project name\n :param gns3server: GNS3Server object\n \"\"\"\n self._project_name = project_name\n self._api_endpoint = gns3server.api_endpoint()\n self._url = '{}projects'.format(self._api_endpoint)\n data = {'name': project_name}\n self._response = requests.post(self._url, data=json.dumps(data))\n status_code = self._response.status_code\n if status_code == 409:\n raise GNS3ProjectExitsError('File {}.gns3 already exists.'.\n format(project_name))\n elif status_code == 404:\n raise GNS3GenericError(\n 'This Error is not expected, please contact developer')\n else:\n params = self.get_project_params()\n self.base_url = '{}projects/{}'.format(params[\n 'server_end_point'], params['project_id'])\n self.nodes_url = self.base_url + '/nodes'\n self.links_url = self.base_url + '/links'\n\n def __repr__(self):\n params = json.dumps(self.get_project_params(), indent=4, sort_keys=True\n )\n return '{}: {}'.format(self.__class__.__name__, params)\n\n def get_project_params(self):\n \"\"\"\n GNS3 Project params\n :return: \n \"\"\"\n r = self._response.json()\n params = {'server_end_point': self._api_endpoint, 'project_id': r.\n get('project_id'), 'filename': r.get('filename'), 'path': r.get\n ('path'), 'status': r.get('status')}\n return params\n\n def get_all_links(self):\n \"\"\"\n List all links in the project\n :return: \n \"\"\"\n links_url = '{}/links'.format(self._project_url)\n print(links_url)\n response = requests.get(links_url).json()\n return json.dumps(response, indent=4, sort_keys=True)\n",
"<import token>\n\n\nclass GNS3Project:\n \"\"\"\n Create a new GNS3 Project\n http://api.gns3.net/en/latest/curl.html#create-project\n \"\"\"\n\n def __init__(self, project_name, gns3server):\n \"\"\"\n\n :param project_name: Project name\n :param gns3server: GNS3Server object\n \"\"\"\n self._project_name = project_name\n self._api_endpoint = gns3server.api_endpoint()\n self._url = '{}projects'.format(self._api_endpoint)\n data = {'name': project_name}\n self._response = requests.post(self._url, data=json.dumps(data))\n status_code = self._response.status_code\n if status_code == 409:\n raise GNS3ProjectExitsError('File {}.gns3 already exists.'.\n format(project_name))\n elif status_code == 404:\n raise GNS3GenericError(\n 'This Error is not expected, please contact developer')\n else:\n params = self.get_project_params()\n self.base_url = '{}projects/{}'.format(params[\n 'server_end_point'], params['project_id'])\n self.nodes_url = self.base_url + '/nodes'\n self.links_url = self.base_url + '/links'\n\n def __repr__(self):\n params = json.dumps(self.get_project_params(), indent=4, sort_keys=True\n )\n return '{}: {}'.format(self.__class__.__name__, params)\n\n def get_project_params(self):\n \"\"\"\n GNS3 Project params\n :return: \n \"\"\"\n r = self._response.json()\n params = {'server_end_point': self._api_endpoint, 'project_id': r.\n get('project_id'), 'filename': r.get('filename'), 'path': r.get\n ('path'), 'status': r.get('status')}\n return params\n\n def get_all_links(self):\n \"\"\"\n List all links in the project\n :return: \n \"\"\"\n links_url = '{}/links'.format(self._project_url)\n print(links_url)\n response = requests.get(links_url).json()\n return json.dumps(response, indent=4, sort_keys=True)\n",
"<import token>\n\n\nclass GNS3Project:\n <docstring token>\n\n def __init__(self, project_name, gns3server):\n \"\"\"\n\n :param project_name: Project name\n :param gns3server: GNS3Server object\n \"\"\"\n self._project_name = project_name\n self._api_endpoint = gns3server.api_endpoint()\n self._url = '{}projects'.format(self._api_endpoint)\n data = {'name': project_name}\n self._response = requests.post(self._url, data=json.dumps(data))\n status_code = self._response.status_code\n if status_code == 409:\n raise GNS3ProjectExitsError('File {}.gns3 already exists.'.\n format(project_name))\n elif status_code == 404:\n raise GNS3GenericError(\n 'This Error is not expected, please contact developer')\n else:\n params = self.get_project_params()\n self.base_url = '{}projects/{}'.format(params[\n 'server_end_point'], params['project_id'])\n self.nodes_url = self.base_url + '/nodes'\n self.links_url = self.base_url + '/links'\n\n def __repr__(self):\n params = json.dumps(self.get_project_params(), indent=4, sort_keys=True\n )\n return '{}: {}'.format(self.__class__.__name__, params)\n\n def get_project_params(self):\n \"\"\"\n GNS3 Project params\n :return: \n \"\"\"\n r = self._response.json()\n params = {'server_end_point': self._api_endpoint, 'project_id': r.\n get('project_id'), 'filename': r.get('filename'), 'path': r.get\n ('path'), 'status': r.get('status')}\n return params\n\n def get_all_links(self):\n \"\"\"\n List all links in the project\n :return: \n \"\"\"\n links_url = '{}/links'.format(self._project_url)\n print(links_url)\n response = requests.get(links_url).json()\n return json.dumps(response, indent=4, sort_keys=True)\n",
"<import token>\n\n\nclass GNS3Project:\n <docstring token>\n\n def __init__(self, project_name, gns3server):\n \"\"\"\n\n :param project_name: Project name\n :param gns3server: GNS3Server object\n \"\"\"\n self._project_name = project_name\n self._api_endpoint = gns3server.api_endpoint()\n self._url = '{}projects'.format(self._api_endpoint)\n data = {'name': project_name}\n self._response = requests.post(self._url, data=json.dumps(data))\n status_code = self._response.status_code\n if status_code == 409:\n raise GNS3ProjectExitsError('File {}.gns3 already exists.'.\n format(project_name))\n elif status_code == 404:\n raise GNS3GenericError(\n 'This Error is not expected, please contact developer')\n else:\n params = self.get_project_params()\n self.base_url = '{}projects/{}'.format(params[\n 'server_end_point'], params['project_id'])\n self.nodes_url = self.base_url + '/nodes'\n self.links_url = self.base_url + '/links'\n <function token>\n\n def get_project_params(self):\n \"\"\"\n GNS3 Project params\n :return: \n \"\"\"\n r = self._response.json()\n params = {'server_end_point': self._api_endpoint, 'project_id': r.\n get('project_id'), 'filename': r.get('filename'), 'path': r.get\n ('path'), 'status': r.get('status')}\n return params\n\n def get_all_links(self):\n \"\"\"\n List all links in the project\n :return: \n \"\"\"\n links_url = '{}/links'.format(self._project_url)\n print(links_url)\n response = requests.get(links_url).json()\n return json.dumps(response, indent=4, sort_keys=True)\n",
"<import token>\n\n\nclass GNS3Project:\n <docstring token>\n\n def __init__(self, project_name, gns3server):\n \"\"\"\n\n :param project_name: Project name\n :param gns3server: GNS3Server object\n \"\"\"\n self._project_name = project_name\n self._api_endpoint = gns3server.api_endpoint()\n self._url = '{}projects'.format(self._api_endpoint)\n data = {'name': project_name}\n self._response = requests.post(self._url, data=json.dumps(data))\n status_code = self._response.status_code\n if status_code == 409:\n raise GNS3ProjectExitsError('File {}.gns3 already exists.'.\n format(project_name))\n elif status_code == 404:\n raise GNS3GenericError(\n 'This Error is not expected, please contact developer')\n else:\n params = self.get_project_params()\n self.base_url = '{}projects/{}'.format(params[\n 'server_end_point'], params['project_id'])\n self.nodes_url = self.base_url + '/nodes'\n self.links_url = self.base_url + '/links'\n <function token>\n\n def get_project_params(self):\n \"\"\"\n GNS3 Project params\n :return: \n \"\"\"\n r = self._response.json()\n params = {'server_end_point': self._api_endpoint, 'project_id': r.\n get('project_id'), 'filename': r.get('filename'), 'path': r.get\n ('path'), 'status': r.get('status')}\n return params\n <function token>\n",
"<import token>\n\n\nclass GNS3Project:\n <docstring token>\n <function token>\n <function token>\n\n def get_project_params(self):\n \"\"\"\n GNS3 Project params\n :return: \n \"\"\"\n r = self._response.json()\n params = {'server_end_point': self._api_endpoint, 'project_id': r.\n get('project_id'), 'filename': r.get('filename'), 'path': r.get\n ('path'), 'status': r.get('status')}\n return params\n <function token>\n",
"<import token>\n\n\nclass GNS3Project:\n <docstring token>\n <function token>\n <function token>\n <function token>\n <function token>\n",
"<import token>\n<class token>\n"
] | false |
98,348 |
48c12e5bdf4aa5bb744a01e7925363ef623086a2
|
def divisors(n):
l = []
for i in range (1, n):
res = divmod(n, i)
if res[1] == 0:
l.append(res[0])
print l
if __name__ == '__main__':
q = True
while q == True:
n1 = input('Please input positive number: ')
if n1 <= 0:
print 'Please put positive value'
else:
q = False
divisors(n1)
|
[
"def divisors(n):\n l = []\n for i in range (1, n):\n res = divmod(n, i)\n if res[1] == 0:\n l.append(res[0])\n print l\n \n\n\n\n\nif __name__ == '__main__':\n q = True\n while q == True:\n n1 = input('Please input positive number: ')\n if n1 <= 0:\n print 'Please put positive value'\n else:\n q = False\n divisors(n1)\n \n "
] | true |
98,349 |
d3f485adb0bed411cf150500ad6283d13a4ce7a9
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Oct 20 01:57:49 2019
@author: dee
"""
import quandl
import pandas as pd
import pyodbc
import os
import requests
import json
import numpy as np
from pathlib import Path
company_data_pd = pd.read_excel('company_country_list.xls')
metric_table = np.loadtxt('metric_table.csv',delimiter = ',')
country_metric_table = np.load('metric_countries_table.npy')
def score(isin):
company_idx = np.where(company_data_pd['isin'] == isin)[0]
if company_idx.size == 0:
return 0.
ticker, name,country = company_data_pd.iloc[company_idx[0],[2,4,5]]
idx = np.where(np.array(country_metric_table,dtype=object) == country)[0]
if idx.size > 0:
score = metric_table[idx, -1]
return score
else:
return 0.
main_score_list = []
main_score_list_names = []
pathlist = Path('Supplier_Data2').glob('**/*.npy')
for path in pathlist:
parent_dict = np.load(str(path), allow_pickle = True).item()
if type(parent_dict) != str:
print ('----',str(path).split('/')[1].split('.')[0],'----')
score1 = 0.
score2 = 0.
for value, key in parent_dict.items():
score_val = score(value)
number_children1 = len(key.values())
for value1, key1 in key.items():
score_val1 = score(value1)
score1 += (score_val1*1/number_children1)
number_children2 = len(key1.values())
for value2, key2 in key1.items():
score_val2 = score(value2)
score2 += (score_val2*(1/number_children1)*(1/number_children2))
score_list = [score_val, score1, score2]
print (score_list)
print (np.sum(score_list))
main_score_list.append(np.sum(score_list))
main_score_list_names.append(str(path).split('/')[1].split('.')[0])
|
[
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Oct 20 01:57:49 2019\n\n@author: dee\n\"\"\"\n\n\nimport quandl\nimport pandas as pd\nimport pyodbc\nimport os\nimport requests\nimport json\nimport numpy as np\nfrom pathlib import Path\n\n\ncompany_data_pd = pd.read_excel('company_country_list.xls') \n\nmetric_table = np.loadtxt('metric_table.csv',delimiter = ',')\ncountry_metric_table = np.load('metric_countries_table.npy')\ndef score(isin):\n company_idx = np.where(company_data_pd['isin'] == isin)[0]\n if company_idx.size == 0:\n return 0.\n ticker, name,country = company_data_pd.iloc[company_idx[0],[2,4,5]]\n idx = np.where(np.array(country_metric_table,dtype=object) == country)[0]\n if idx.size > 0:\n score = metric_table[idx, -1]\n return score\n else:\n return 0.\n\nmain_score_list = []\nmain_score_list_names = []\npathlist = Path('Supplier_Data2').glob('**/*.npy')\nfor path in pathlist:\n parent_dict = np.load(str(path), allow_pickle = True).item()\n if type(parent_dict) != str:\n print ('----',str(path).split('/')[1].split('.')[0],'----')\n score1 = 0.\n score2 = 0.\n for value, key in parent_dict.items():\n score_val = score(value)\n number_children1 = len(key.values())\n for value1, key1 in key.items():\n score_val1 = score(value1)\n score1 += (score_val1*1/number_children1)\n number_children2 = len(key1.values())\n for value2, key2 in key1.items():\n score_val2 = score(value2)\n score2 += (score_val2*(1/number_children1)*(1/number_children2))\n \n score_list = [score_val, score1, score2]\n print (score_list)\n print (np.sum(score_list))\n main_score_list.append(np.sum(score_list))\n main_score_list_names.append(str(path).split('/')[1].split('.')[0])\n\n\n",
"<docstring token>\nimport quandl\nimport pandas as pd\nimport pyodbc\nimport os\nimport requests\nimport json\nimport numpy as np\nfrom pathlib import Path\ncompany_data_pd = pd.read_excel('company_country_list.xls')\nmetric_table = np.loadtxt('metric_table.csv', delimiter=',')\ncountry_metric_table = np.load('metric_countries_table.npy')\n\n\ndef score(isin):\n company_idx = np.where(company_data_pd['isin'] == isin)[0]\n if company_idx.size == 0:\n return 0.0\n ticker, name, country = company_data_pd.iloc[company_idx[0], [2, 4, 5]]\n idx = np.where(np.array(country_metric_table, dtype=object) == country)[0]\n if idx.size > 0:\n score = metric_table[idx, -1]\n return score\n else:\n return 0.0\n\n\nmain_score_list = []\nmain_score_list_names = []\npathlist = Path('Supplier_Data2').glob('**/*.npy')\nfor path in pathlist:\n parent_dict = np.load(str(path), allow_pickle=True).item()\n if type(parent_dict) != str:\n print('----', str(path).split('/')[1].split('.')[0], '----')\n score1 = 0.0\n score2 = 0.0\n for value, key in parent_dict.items():\n score_val = score(value)\n number_children1 = len(key.values())\n for value1, key1 in key.items():\n score_val1 = score(value1)\n score1 += score_val1 * 1 / number_children1\n number_children2 = len(key1.values())\n for value2, key2 in key1.items():\n score_val2 = score(value2)\n score2 += score_val2 * (1 / number_children1) * (1 /\n number_children2)\n score_list = [score_val, score1, score2]\n print(score_list)\n print(np.sum(score_list))\n main_score_list.append(np.sum(score_list))\n main_score_list_names.append(str(path).split('/')[1].split('.')[0])\n",
"<docstring token>\n<import token>\ncompany_data_pd = pd.read_excel('company_country_list.xls')\nmetric_table = np.loadtxt('metric_table.csv', delimiter=',')\ncountry_metric_table = np.load('metric_countries_table.npy')\n\n\ndef score(isin):\n company_idx = np.where(company_data_pd['isin'] == isin)[0]\n if company_idx.size == 0:\n return 0.0\n ticker, name, country = company_data_pd.iloc[company_idx[0], [2, 4, 5]]\n idx = np.where(np.array(country_metric_table, dtype=object) == country)[0]\n if idx.size > 0:\n score = metric_table[idx, -1]\n return score\n else:\n return 0.0\n\n\nmain_score_list = []\nmain_score_list_names = []\npathlist = Path('Supplier_Data2').glob('**/*.npy')\nfor path in pathlist:\n parent_dict = np.load(str(path), allow_pickle=True).item()\n if type(parent_dict) != str:\n print('----', str(path).split('/')[1].split('.')[0], '----')\n score1 = 0.0\n score2 = 0.0\n for value, key in parent_dict.items():\n score_val = score(value)\n number_children1 = len(key.values())\n for value1, key1 in key.items():\n score_val1 = score(value1)\n score1 += score_val1 * 1 / number_children1\n number_children2 = len(key1.values())\n for value2, key2 in key1.items():\n score_val2 = score(value2)\n score2 += score_val2 * (1 / number_children1) * (1 /\n number_children2)\n score_list = [score_val, score1, score2]\n print(score_list)\n print(np.sum(score_list))\n main_score_list.append(np.sum(score_list))\n main_score_list_names.append(str(path).split('/')[1].split('.')[0])\n",
"<docstring token>\n<import token>\n<assignment token>\n\n\ndef score(isin):\n company_idx = np.where(company_data_pd['isin'] == isin)[0]\n if company_idx.size == 0:\n return 0.0\n ticker, name, country = company_data_pd.iloc[company_idx[0], [2, 4, 5]]\n idx = np.where(np.array(country_metric_table, dtype=object) == country)[0]\n if idx.size > 0:\n score = metric_table[idx, -1]\n return score\n else:\n return 0.0\n\n\n<assignment token>\nfor path in pathlist:\n parent_dict = np.load(str(path), allow_pickle=True).item()\n if type(parent_dict) != str:\n print('----', str(path).split('/')[1].split('.')[0], '----')\n score1 = 0.0\n score2 = 0.0\n for value, key in parent_dict.items():\n score_val = score(value)\n number_children1 = len(key.values())\n for value1, key1 in key.items():\n score_val1 = score(value1)\n score1 += score_val1 * 1 / number_children1\n number_children2 = len(key1.values())\n for value2, key2 in key1.items():\n score_val2 = score(value2)\n score2 += score_val2 * (1 / number_children1) * (1 /\n number_children2)\n score_list = [score_val, score1, score2]\n print(score_list)\n print(np.sum(score_list))\n main_score_list.append(np.sum(score_list))\n main_score_list_names.append(str(path).split('/')[1].split('.')[0])\n",
"<docstring token>\n<import token>\n<assignment token>\n\n\ndef score(isin):\n company_idx = np.where(company_data_pd['isin'] == isin)[0]\n if company_idx.size == 0:\n return 0.0\n ticker, name, country = company_data_pd.iloc[company_idx[0], [2, 4, 5]]\n idx = np.where(np.array(country_metric_table, dtype=object) == country)[0]\n if idx.size > 0:\n score = metric_table[idx, -1]\n return score\n else:\n return 0.0\n\n\n<assignment token>\n<code token>\n",
"<docstring token>\n<import token>\n<assignment token>\n<function token>\n<assignment token>\n<code token>\n"
] | false |
98,350 |
1663473a1f73e7981f43e35aa288ec539c75a3cc
|
# main.py -- put your code here!
import PS2
while True:
ps=PS2.PS2KEY('X18','X19','X20','X21')
a=ps.ps2_key()
if(a==13):
pyb.LED(1).on()
elif(a==14):
pyb.LED(2).on()
elif(a==15):
pyb.LED(3).on()
elif(a==16):
pyb.LED(4).on()
elif(a==5):
pyb.LED(1).off()
elif(a==6):
pyb.LED(2).off()
elif(a==7):
pyb.LED(3).off()
elif(a==8):
pyb.LED(4).off()
|
[
"# main.py -- put your code here!\nimport PS2\n\nwhile True:\n\tps=PS2.PS2KEY('X18','X19','X20','X21')\n\ta=ps.ps2_key()\n\tif(a==13):\n\t\tpyb.LED(1).on()\n\telif(a==14):\n\t\tpyb.LED(2).on()\n\telif(a==15):\n\t\tpyb.LED(3).on()\n\telif(a==16):\n\t\tpyb.LED(4).on()\n\telif(a==5):\n\t\tpyb.LED(1).off()\n\telif(a==6):\n\t\tpyb.LED(2).off()\n\telif(a==7):\n\t\tpyb.LED(3).off()\n\telif(a==8):\n\t\tpyb.LED(4).off()\n",
"import PS2\nwhile True:\n ps = PS2.PS2KEY('X18', 'X19', 'X20', 'X21')\n a = ps.ps2_key()\n if a == 13:\n pyb.LED(1).on()\n elif a == 14:\n pyb.LED(2).on()\n elif a == 15:\n pyb.LED(3).on()\n elif a == 16:\n pyb.LED(4).on()\n elif a == 5:\n pyb.LED(1).off()\n elif a == 6:\n pyb.LED(2).off()\n elif a == 7:\n pyb.LED(3).off()\n elif a == 8:\n pyb.LED(4).off()\n",
"<import token>\nwhile True:\n ps = PS2.PS2KEY('X18', 'X19', 'X20', 'X21')\n a = ps.ps2_key()\n if a == 13:\n pyb.LED(1).on()\n elif a == 14:\n pyb.LED(2).on()\n elif a == 15:\n pyb.LED(3).on()\n elif a == 16:\n pyb.LED(4).on()\n elif a == 5:\n pyb.LED(1).off()\n elif a == 6:\n pyb.LED(2).off()\n elif a == 7:\n pyb.LED(3).off()\n elif a == 8:\n pyb.LED(4).off()\n",
"<import token>\n<code token>\n"
] | false |
98,351 |
fb0e9a55771e26aeb618c0c894a3d7e4fdc681a6
|
# -*- coding: utf-8 -*-
"""
@Time : 2020/8/8
@Author : jim
@File : [64]最小路径和
@Description :
"""
# 思路1:BFS 暴力搜索
# 思路2:DP
# 状态数组:f[i][j] = min(f[i][j-1],f[i-1][j]) + g[i][j]
# 状态转移方程 dp[]
# 自顶向下
def minPathSum_DP1(self, grid: List[List[int]]) -> int:
x = len(grid)
if x == 0:
return 0
else:
y = len(grid[0])
DP = grid[:]
for i in range(1, y):
DP[0][i] = DP[0][i - 1] + DP[0][i]
for j in range(1, x):
DP[j][0] = DP[j - 1][0] + DP[j][0]
for i in range(1, x):
for j in range(1, y):
DP[i][j] = min(DP[i - 1][j], DP[i][j - 1]) + DP[i][j]
return DP[x - 1][y - 1]
# 执行耗时: 60ms, 击败了65.16 % 的Python3用户
# 内存消耗: 14.9MB, 击败了75.39 % 的Python3用户
# 自底向上
def minPathSum_DP2(self, grid: List[List[int]]) -> int:
x = len(grid)
if x == 0:
return 0
else:
y = len(grid[0])
DP = grid[:]
for i in range(y - 2, -1, -1):
DP[x - 1][i] = DP[x - 1][i + 1] + DP[x - 1][i]
for j in range(x - 2, -1, -1):
DP[j][y - 1] = DP[j + 1][y - 1] + DP[j][y - 1]
for i in range(x - 2, -1, -1):
for j in range(y - 2, -1, -1):
DP[i][j] = min(DP[i + 1][j], DP[i][j + 1]) + DP[i][j]
return DP[0][0]
# 执行耗时: 60ms, 击败了65.16 % 的Python3用户
# 内存消耗: 15MB, 击败了59.41 % 的Python3用户
# 优化存储空间,只有一维保存DP状态方程,自顶向下
def minPathSum(self, grid: List[List[int]]) -> int:
x = len(grid)
if x == 0:
return 0
else:
y = len(grid[0])
DP = grid[0][:]
for j in range(1, y):
DP[j] = DP[j - 1] + grid[0][j]
for i in range(1, x):
DP[0] = DP[0] + grid[i][0]
for j in range(1, y):
DP[j] = min(DP[j - 1], DP[j]) + grid[i][j]
return DP[-1]
# 解答成功:执行耗时: 80ms, 击败了11.22 % 的Python3用户
# 内存消耗: 13.8MB, 击败了96.71 % 的Python3用户
|
[
"# -*- coding: utf-8 -*-\n\n\"\"\"\n@Time : 2020/8/8\n@Author : jim\n@File : [64]最小路径和\n@Description : \n\"\"\"\n\n# 思路1:BFS 暴力搜索\n\n# 思路2:DP\n# 状态数组:f[i][j] = min(f[i][j-1],f[i-1][j]) + g[i][j]\n# 状态转移方程 dp[]\n\n# 自顶向下\ndef minPathSum_DP1(self, grid: List[List[int]]) -> int:\n x = len(grid)\n if x == 0:\n return 0\n else:\n y = len(grid[0])\n DP = grid[:]\n for i in range(1, y):\n DP[0][i] = DP[0][i - 1] + DP[0][i]\n for j in range(1, x):\n DP[j][0] = DP[j - 1][0] + DP[j][0]\n for i in range(1, x):\n for j in range(1, y):\n DP[i][j] = min(DP[i - 1][j], DP[i][j - 1]) + DP[i][j]\n\n return DP[x - 1][y - 1]\n\n\n# 执行耗时: 60ms, 击败了65.16 % 的Python3用户\n# 内存消耗: 14.9MB, 击败了75.39 % 的Python3用户\n\n# 自底向上\ndef minPathSum_DP2(self, grid: List[List[int]]) -> int:\n x = len(grid)\n if x == 0:\n return 0\n else:\n y = len(grid[0])\n DP = grid[:]\n for i in range(y - 2, -1, -1):\n DP[x - 1][i] = DP[x - 1][i + 1] + DP[x - 1][i]\n for j in range(x - 2, -1, -1):\n DP[j][y - 1] = DP[j + 1][y - 1] + DP[j][y - 1]\n for i in range(x - 2, -1, -1):\n for j in range(y - 2, -1, -1):\n DP[i][j] = min(DP[i + 1][j], DP[i][j + 1]) + DP[i][j]\n\n return DP[0][0]\n\n# 执行耗时: 60ms, 击败了65.16 % 的Python3用户\n# 内存消耗: 15MB, 击败了59.41 % 的Python3用户\n\n# 优化存储空间,只有一维保存DP状态方程,自顶向下\ndef minPathSum(self, grid: List[List[int]]) -> int:\n x = len(grid)\n if x == 0:\n return 0\n else:\n y = len(grid[0])\n DP = grid[0][:]\n for j in range(1, y):\n DP[j] = DP[j - 1] + grid[0][j]\n for i in range(1, x):\n DP[0] = DP[0] + grid[i][0]\n for j in range(1, y):\n DP[j] = min(DP[j - 1], DP[j]) + grid[i][j]\n\n return DP[-1]\n\n# 解答成功:执行耗时: 80ms, 击败了11.22 % 的Python3用户\n# 内存消耗: 13.8MB, 击败了96.71 % 的Python3用户\n",
"<docstring token>\n\n\ndef minPathSum_DP1(self, grid: List[List[int]]) ->int:\n x = len(grid)\n if x == 0:\n return 0\n else:\n y = len(grid[0])\n DP = grid[:]\n for i in range(1, y):\n DP[0][i] = DP[0][i - 1] + DP[0][i]\n for j in range(1, x):\n DP[j][0] = DP[j - 1][0] + DP[j][0]\n for i in range(1, x):\n for j in range(1, y):\n DP[i][j] = min(DP[i - 1][j], DP[i][j - 1]) + DP[i][j]\n return DP[x - 1][y - 1]\n\n\ndef minPathSum_DP2(self, grid: List[List[int]]) ->int:\n x = len(grid)\n if x == 0:\n return 0\n else:\n y = len(grid[0])\n DP = grid[:]\n for i in range(y - 2, -1, -1):\n DP[x - 1][i] = DP[x - 1][i + 1] + DP[x - 1][i]\n for j in range(x - 2, -1, -1):\n DP[j][y - 1] = DP[j + 1][y - 1] + DP[j][y - 1]\n for i in range(x - 2, -1, -1):\n for j in range(y - 2, -1, -1):\n DP[i][j] = min(DP[i + 1][j], DP[i][j + 1]) + DP[i][j]\n return DP[0][0]\n\n\ndef minPathSum(self, grid: List[List[int]]) ->int:\n x = len(grid)\n if x == 0:\n return 0\n else:\n y = len(grid[0])\n DP = grid[0][:]\n for j in range(1, y):\n DP[j] = DP[j - 1] + grid[0][j]\n for i in range(1, x):\n DP[0] = DP[0] + grid[i][0]\n for j in range(1, y):\n DP[j] = min(DP[j - 1], DP[j]) + grid[i][j]\n return DP[-1]\n",
"<docstring token>\n\n\ndef minPathSum_DP1(self, grid: List[List[int]]) ->int:\n x = len(grid)\n if x == 0:\n return 0\n else:\n y = len(grid[0])\n DP = grid[:]\n for i in range(1, y):\n DP[0][i] = DP[0][i - 1] + DP[0][i]\n for j in range(1, x):\n DP[j][0] = DP[j - 1][0] + DP[j][0]\n for i in range(1, x):\n for j in range(1, y):\n DP[i][j] = min(DP[i - 1][j], DP[i][j - 1]) + DP[i][j]\n return DP[x - 1][y - 1]\n\n\ndef minPathSum_DP2(self, grid: List[List[int]]) ->int:\n x = len(grid)\n if x == 0:\n return 0\n else:\n y = len(grid[0])\n DP = grid[:]\n for i in range(y - 2, -1, -1):\n DP[x - 1][i] = DP[x - 1][i + 1] + DP[x - 1][i]\n for j in range(x - 2, -1, -1):\n DP[j][y - 1] = DP[j + 1][y - 1] + DP[j][y - 1]\n for i in range(x - 2, -1, -1):\n for j in range(y - 2, -1, -1):\n DP[i][j] = min(DP[i + 1][j], DP[i][j + 1]) + DP[i][j]\n return DP[0][0]\n\n\n<function token>\n",
"<docstring token>\n<function token>\n\n\ndef minPathSum_DP2(self, grid: List[List[int]]) ->int:\n x = len(grid)\n if x == 0:\n return 0\n else:\n y = len(grid[0])\n DP = grid[:]\n for i in range(y - 2, -1, -1):\n DP[x - 1][i] = DP[x - 1][i + 1] + DP[x - 1][i]\n for j in range(x - 2, -1, -1):\n DP[j][y - 1] = DP[j + 1][y - 1] + DP[j][y - 1]\n for i in range(x - 2, -1, -1):\n for j in range(y - 2, -1, -1):\n DP[i][j] = min(DP[i + 1][j], DP[i][j + 1]) + DP[i][j]\n return DP[0][0]\n\n\n<function token>\n",
"<docstring token>\n<function token>\n<function token>\n<function token>\n"
] | false |
98,352 |
da4b92350af5d6d8d8864aa36475269889487096
|
# Let's take this program and make it into a command line app.
# Let's also allow the user to declare their own files for:
# - User Dataset
# - Whitelisted values
# - Blacklisted values
import json
import re
import datetime
class DataAudit():
def open_dataset(dataset_path, dataset_create_bit=0):
dataset_load_flag = "r"
if dataset_create_bit:
dataset_load_flag = "r+"
dataset_file = open(dataset_path, "x")
emptylist = json.loads("[]")
json.dump(emptylist, dataset_file)
dataset_file.close()
# TODO: Learn best error handling practices for opening files
dataset_file = open(dataset_path, dataset_load_flag)
# TODO: Handle errors on JSON load
return json.load(dataset_file), dataset_file
def close_dataset(dataset_file_object, data=None):
json.dump(dataset_file_object, data)
dataset_file_object.close()
def open_list(list_path):
list_file = open(list_path)
list_set = json.load(list_file)
list_name = list(list_set.keys())[0]
list_object = list_set[list_name]
return list_object, list_file
def close_list(list_file_object):
list_file_object.close()
# In the following functions, n is the JSON entry in an array of entries.
# The field parameter is the name of a field in that entry.
def empty_check(n, field):
if not n[field]:
return False
return True
# TODO: Consider allowing direct comparisons as well as substring checks
def blacklist_check(n, field, blacklist):
if n[field]:
for i in blacklist:
if i == n[field]:
return False
return True
def whitelist_check(n, field, whitelist):
if not n[field]:
return False
if n[field] in whitelist:
return True
return False
def minimum_length_check(n, field, min):
if n[field]:
if min <= len(n[field]):
return True
return False
def maximum_length_check(n, field, max):
if n[field]:
if len(n[field]) <= max:
return True
return False
def type_check(n, field, typename):
if not n[field]:
return False
# This does work even if the given typename is invalid. Ugly.
valid_types = {'alnum': n[field].isalnum, 'digit': n[field].isdigit,
'alpha': n[field].isalpha}
if typename not in valid_types.keys():
raise Exception(f"The {typename} type is not supported.")
if not valid_types[typename]():
return False
return True
def regex_check(n, field, reg):
if not n[field]:
return False
pattern = re.compile(reg)
if pattern.fullmatch(n[field]):
return True
return False
# Timestamps must be in UTC without millisecond precision
def precedence_check(n, bef_field, aft_field):
if not n[bef_field]:
return False, "Before field is null."
if not n[aft_field]:
return False, "After field is null."
try:
before = datetime.datetime.strptime(
str(n[bef_field]), "%Y-%m-%dT%H:%M:%SZ")
except:
return False, f"Invalid {bef_field}."
try:
after = datetime.datetime.strptime(
str(n[aft_field]), "%Y-%m-%dT%H:%M:%SZ")
except:
return False, f"Invalid {aft_field}."
if before < after:
return True, f"{bef_field} is before {aft_field}"
return False, f"{bef_field} is not before {aft_field}."
def uniqueness_check(n, field, dataset):
fields = [i[field] for i in dataset]
if fields.count(n[field]) > 1:
return False
return True
|
[
"# Let's take this program and make it into a command line app.\n# Let's also allow the user to declare their own files for:\n# - User Dataset\n# - Whitelisted values\n# - Blacklisted values\n\nimport json\nimport re\nimport datetime\n\n\nclass DataAudit():\n def open_dataset(dataset_path, dataset_create_bit=0):\n dataset_load_flag = \"r\"\n if dataset_create_bit:\n dataset_load_flag = \"r+\"\n dataset_file = open(dataset_path, \"x\")\n emptylist = json.loads(\"[]\")\n json.dump(emptylist, dataset_file)\n dataset_file.close()\n # TODO: Learn best error handling practices for opening files\n dataset_file = open(dataset_path, dataset_load_flag)\n # TODO: Handle errors on JSON load\n return json.load(dataset_file), dataset_file\n\n def close_dataset(dataset_file_object, data=None):\n json.dump(dataset_file_object, data)\n dataset_file_object.close()\n\n def open_list(list_path):\n list_file = open(list_path)\n list_set = json.load(list_file)\n list_name = list(list_set.keys())[0]\n list_object = list_set[list_name]\n return list_object, list_file\n\n def close_list(list_file_object):\n list_file_object.close()\n\n\n # In the following functions, n is the JSON entry in an array of entries.\n # The field parameter is the name of a field in that entry.\n def empty_check(n, field):\n if not n[field]:\n return False\n return True\n\n # TODO: Consider allowing direct comparisons as well as substring checks\n\n def blacklist_check(n, field, blacklist):\n if n[field]:\n for i in blacklist:\n if i == n[field]:\n return False\n return True\n\n def whitelist_check(n, field, whitelist):\n if not n[field]:\n return False\n if n[field] in whitelist:\n return True\n return False\n\n def minimum_length_check(n, field, min):\n if n[field]:\n if min <= len(n[field]):\n return True\n return False\n\n def maximum_length_check(n, field, max):\n if n[field]:\n if len(n[field]) <= max:\n return True\n return False\n\n def type_check(n, field, typename):\n if not n[field]:\n return False\n # This does work even if the given typename is invalid. Ugly.\n valid_types = {'alnum': n[field].isalnum, 'digit': n[field].isdigit,\n 'alpha': n[field].isalpha}\n if typename not in valid_types.keys():\n raise Exception(f\"The {typename} type is not supported.\")\n if not valid_types[typename]():\n return False\n return True\n\n def regex_check(n, field, reg):\n if not n[field]:\n return False\n pattern = re.compile(reg)\n if pattern.fullmatch(n[field]):\n return True\n return False\n\n # Timestamps must be in UTC without millisecond precision\n def precedence_check(n, bef_field, aft_field):\n if not n[bef_field]:\n return False, \"Before field is null.\"\n if not n[aft_field]:\n return False, \"After field is null.\"\n try:\n before = datetime.datetime.strptime(\n str(n[bef_field]), \"%Y-%m-%dT%H:%M:%SZ\")\n except:\n return False, f\"Invalid {bef_field}.\"\n try:\n after = datetime.datetime.strptime(\n str(n[aft_field]), \"%Y-%m-%dT%H:%M:%SZ\")\n except:\n return False, f\"Invalid {aft_field}.\"\n if before < after:\n return True, f\"{bef_field} is before {aft_field}\"\n return False, f\"{bef_field} is not before {aft_field}.\"\n\n def uniqueness_check(n, field, dataset):\n fields = [i[field] for i in dataset]\n if fields.count(n[field]) > 1:\n return False\n return True\n",
"import json\nimport re\nimport datetime\n\n\nclass DataAudit:\n\n def open_dataset(dataset_path, dataset_create_bit=0):\n dataset_load_flag = 'r'\n if dataset_create_bit:\n dataset_load_flag = 'r+'\n dataset_file = open(dataset_path, 'x')\n emptylist = json.loads('[]')\n json.dump(emptylist, dataset_file)\n dataset_file.close()\n dataset_file = open(dataset_path, dataset_load_flag)\n return json.load(dataset_file), dataset_file\n\n def close_dataset(dataset_file_object, data=None):\n json.dump(dataset_file_object, data)\n dataset_file_object.close()\n\n def open_list(list_path):\n list_file = open(list_path)\n list_set = json.load(list_file)\n list_name = list(list_set.keys())[0]\n list_object = list_set[list_name]\n return list_object, list_file\n\n def close_list(list_file_object):\n list_file_object.close()\n\n def empty_check(n, field):\n if not n[field]:\n return False\n return True\n\n def blacklist_check(n, field, blacklist):\n if n[field]:\n for i in blacklist:\n if i == n[field]:\n return False\n return True\n\n def whitelist_check(n, field, whitelist):\n if not n[field]:\n return False\n if n[field] in whitelist:\n return True\n return False\n\n def minimum_length_check(n, field, min):\n if n[field]:\n if min <= len(n[field]):\n return True\n return False\n\n def maximum_length_check(n, field, max):\n if n[field]:\n if len(n[field]) <= max:\n return True\n return False\n\n def type_check(n, field, typename):\n if not n[field]:\n return False\n valid_types = {'alnum': n[field].isalnum, 'digit': n[field].isdigit,\n 'alpha': n[field].isalpha}\n if typename not in valid_types.keys():\n raise Exception(f'The {typename} type is not supported.')\n if not valid_types[typename]():\n return False\n return True\n\n def regex_check(n, field, reg):\n if not n[field]:\n return False\n pattern = re.compile(reg)\n if pattern.fullmatch(n[field]):\n return True\n return False\n\n def precedence_check(n, bef_field, aft_field):\n if not n[bef_field]:\n return False, 'Before field is null.'\n if not n[aft_field]:\n return False, 'After field is null.'\n try:\n before = datetime.datetime.strptime(str(n[bef_field]),\n '%Y-%m-%dT%H:%M:%SZ')\n except:\n return False, f'Invalid {bef_field}.'\n try:\n after = datetime.datetime.strptime(str(n[aft_field]),\n '%Y-%m-%dT%H:%M:%SZ')\n except:\n return False, f'Invalid {aft_field}.'\n if before < after:\n return True, f'{bef_field} is before {aft_field}'\n return False, f'{bef_field} is not before {aft_field}.'\n\n def uniqueness_check(n, field, dataset):\n fields = [i[field] for i in dataset]\n if fields.count(n[field]) > 1:\n return False\n return True\n",
"<import token>\n\n\nclass DataAudit:\n\n def open_dataset(dataset_path, dataset_create_bit=0):\n dataset_load_flag = 'r'\n if dataset_create_bit:\n dataset_load_flag = 'r+'\n dataset_file = open(dataset_path, 'x')\n emptylist = json.loads('[]')\n json.dump(emptylist, dataset_file)\n dataset_file.close()\n dataset_file = open(dataset_path, dataset_load_flag)\n return json.load(dataset_file), dataset_file\n\n def close_dataset(dataset_file_object, data=None):\n json.dump(dataset_file_object, data)\n dataset_file_object.close()\n\n def open_list(list_path):\n list_file = open(list_path)\n list_set = json.load(list_file)\n list_name = list(list_set.keys())[0]\n list_object = list_set[list_name]\n return list_object, list_file\n\n def close_list(list_file_object):\n list_file_object.close()\n\n def empty_check(n, field):\n if not n[field]:\n return False\n return True\n\n def blacklist_check(n, field, blacklist):\n if n[field]:\n for i in blacklist:\n if i == n[field]:\n return False\n return True\n\n def whitelist_check(n, field, whitelist):\n if not n[field]:\n return False\n if n[field] in whitelist:\n return True\n return False\n\n def minimum_length_check(n, field, min):\n if n[field]:\n if min <= len(n[field]):\n return True\n return False\n\n def maximum_length_check(n, field, max):\n if n[field]:\n if len(n[field]) <= max:\n return True\n return False\n\n def type_check(n, field, typename):\n if not n[field]:\n return False\n valid_types = {'alnum': n[field].isalnum, 'digit': n[field].isdigit,\n 'alpha': n[field].isalpha}\n if typename not in valid_types.keys():\n raise Exception(f'The {typename} type is not supported.')\n if not valid_types[typename]():\n return False\n return True\n\n def regex_check(n, field, reg):\n if not n[field]:\n return False\n pattern = re.compile(reg)\n if pattern.fullmatch(n[field]):\n return True\n return False\n\n def precedence_check(n, bef_field, aft_field):\n if not n[bef_field]:\n return False, 'Before field is null.'\n if not n[aft_field]:\n return False, 'After field is null.'\n try:\n before = datetime.datetime.strptime(str(n[bef_field]),\n '%Y-%m-%dT%H:%M:%SZ')\n except:\n return False, f'Invalid {bef_field}.'\n try:\n after = datetime.datetime.strptime(str(n[aft_field]),\n '%Y-%m-%dT%H:%M:%SZ')\n except:\n return False, f'Invalid {aft_field}.'\n if before < after:\n return True, f'{bef_field} is before {aft_field}'\n return False, f'{bef_field} is not before {aft_field}.'\n\n def uniqueness_check(n, field, dataset):\n fields = [i[field] for i in dataset]\n if fields.count(n[field]) > 1:\n return False\n return True\n",
"<import token>\n\n\nclass DataAudit:\n\n def open_dataset(dataset_path, dataset_create_bit=0):\n dataset_load_flag = 'r'\n if dataset_create_bit:\n dataset_load_flag = 'r+'\n dataset_file = open(dataset_path, 'x')\n emptylist = json.loads('[]')\n json.dump(emptylist, dataset_file)\n dataset_file.close()\n dataset_file = open(dataset_path, dataset_load_flag)\n return json.load(dataset_file), dataset_file\n\n def close_dataset(dataset_file_object, data=None):\n json.dump(dataset_file_object, data)\n dataset_file_object.close()\n\n def open_list(list_path):\n list_file = open(list_path)\n list_set = json.load(list_file)\n list_name = list(list_set.keys())[0]\n list_object = list_set[list_name]\n return list_object, list_file\n\n def close_list(list_file_object):\n list_file_object.close()\n\n def empty_check(n, field):\n if not n[field]:\n return False\n return True\n <function token>\n\n def whitelist_check(n, field, whitelist):\n if not n[field]:\n return False\n if n[field] in whitelist:\n return True\n return False\n\n def minimum_length_check(n, field, min):\n if n[field]:\n if min <= len(n[field]):\n return True\n return False\n\n def maximum_length_check(n, field, max):\n if n[field]:\n if len(n[field]) <= max:\n return True\n return False\n\n def type_check(n, field, typename):\n if not n[field]:\n return False\n valid_types = {'alnum': n[field].isalnum, 'digit': n[field].isdigit,\n 'alpha': n[field].isalpha}\n if typename not in valid_types.keys():\n raise Exception(f'The {typename} type is not supported.')\n if not valid_types[typename]():\n return False\n return True\n\n def regex_check(n, field, reg):\n if not n[field]:\n return False\n pattern = re.compile(reg)\n if pattern.fullmatch(n[field]):\n return True\n return False\n\n def precedence_check(n, bef_field, aft_field):\n if not n[bef_field]:\n return False, 'Before field is null.'\n if not n[aft_field]:\n return False, 'After field is null.'\n try:\n before = datetime.datetime.strptime(str(n[bef_field]),\n '%Y-%m-%dT%H:%M:%SZ')\n except:\n return False, f'Invalid {bef_field}.'\n try:\n after = datetime.datetime.strptime(str(n[aft_field]),\n '%Y-%m-%dT%H:%M:%SZ')\n except:\n return False, f'Invalid {aft_field}.'\n if before < after:\n return True, f'{bef_field} is before {aft_field}'\n return False, f'{bef_field} is not before {aft_field}.'\n\n def uniqueness_check(n, field, dataset):\n fields = [i[field] for i in dataset]\n if fields.count(n[field]) > 1:\n return False\n return True\n",
"<import token>\n\n\nclass DataAudit:\n\n def open_dataset(dataset_path, dataset_create_bit=0):\n dataset_load_flag = 'r'\n if dataset_create_bit:\n dataset_load_flag = 'r+'\n dataset_file = open(dataset_path, 'x')\n emptylist = json.loads('[]')\n json.dump(emptylist, dataset_file)\n dataset_file.close()\n dataset_file = open(dataset_path, dataset_load_flag)\n return json.load(dataset_file), dataset_file\n\n def close_dataset(dataset_file_object, data=None):\n json.dump(dataset_file_object, data)\n dataset_file_object.close()\n\n def open_list(list_path):\n list_file = open(list_path)\n list_set = json.load(list_file)\n list_name = list(list_set.keys())[0]\n list_object = list_set[list_name]\n return list_object, list_file\n\n def close_list(list_file_object):\n list_file_object.close()\n\n def empty_check(n, field):\n if not n[field]:\n return False\n return True\n <function token>\n\n def whitelist_check(n, field, whitelist):\n if not n[field]:\n return False\n if n[field] in whitelist:\n return True\n return False\n\n def minimum_length_check(n, field, min):\n if n[field]:\n if min <= len(n[field]):\n return True\n return False\n\n def maximum_length_check(n, field, max):\n if n[field]:\n if len(n[field]) <= max:\n return True\n return False\n\n def type_check(n, field, typename):\n if not n[field]:\n return False\n valid_types = {'alnum': n[field].isalnum, 'digit': n[field].isdigit,\n 'alpha': n[field].isalpha}\n if typename not in valid_types.keys():\n raise Exception(f'The {typename} type is not supported.')\n if not valid_types[typename]():\n return False\n return True\n\n def regex_check(n, field, reg):\n if not n[field]:\n return False\n pattern = re.compile(reg)\n if pattern.fullmatch(n[field]):\n return True\n return False\n\n def precedence_check(n, bef_field, aft_field):\n if not n[bef_field]:\n return False, 'Before field is null.'\n if not n[aft_field]:\n return False, 'After field is null.'\n try:\n before = datetime.datetime.strptime(str(n[bef_field]),\n '%Y-%m-%dT%H:%M:%SZ')\n except:\n return False, f'Invalid {bef_field}.'\n try:\n after = datetime.datetime.strptime(str(n[aft_field]),\n '%Y-%m-%dT%H:%M:%SZ')\n except:\n return False, f'Invalid {aft_field}.'\n if before < after:\n return True, f'{bef_field} is before {aft_field}'\n return False, f'{bef_field} is not before {aft_field}.'\n <function token>\n",
"<import token>\n\n\nclass DataAudit:\n\n def open_dataset(dataset_path, dataset_create_bit=0):\n dataset_load_flag = 'r'\n if dataset_create_bit:\n dataset_load_flag = 'r+'\n dataset_file = open(dataset_path, 'x')\n emptylist = json.loads('[]')\n json.dump(emptylist, dataset_file)\n dataset_file.close()\n dataset_file = open(dataset_path, dataset_load_flag)\n return json.load(dataset_file), dataset_file\n\n def close_dataset(dataset_file_object, data=None):\n json.dump(dataset_file_object, data)\n dataset_file_object.close()\n\n def open_list(list_path):\n list_file = open(list_path)\n list_set = json.load(list_file)\n list_name = list(list_set.keys())[0]\n list_object = list_set[list_name]\n return list_object, list_file\n\n def close_list(list_file_object):\n list_file_object.close()\n\n def empty_check(n, field):\n if not n[field]:\n return False\n return True\n <function token>\n\n def whitelist_check(n, field, whitelist):\n if not n[field]:\n return False\n if n[field] in whitelist:\n return True\n return False\n <function token>\n\n def maximum_length_check(n, field, max):\n if n[field]:\n if len(n[field]) <= max:\n return True\n return False\n\n def type_check(n, field, typename):\n if not n[field]:\n return False\n valid_types = {'alnum': n[field].isalnum, 'digit': n[field].isdigit,\n 'alpha': n[field].isalpha}\n if typename not in valid_types.keys():\n raise Exception(f'The {typename} type is not supported.')\n if not valid_types[typename]():\n return False\n return True\n\n def regex_check(n, field, reg):\n if not n[field]:\n return False\n pattern = re.compile(reg)\n if pattern.fullmatch(n[field]):\n return True\n return False\n\n def precedence_check(n, bef_field, aft_field):\n if not n[bef_field]:\n return False, 'Before field is null.'\n if not n[aft_field]:\n return False, 'After field is null.'\n try:\n before = datetime.datetime.strptime(str(n[bef_field]),\n '%Y-%m-%dT%H:%M:%SZ')\n except:\n return False, f'Invalid {bef_field}.'\n try:\n after = datetime.datetime.strptime(str(n[aft_field]),\n '%Y-%m-%dT%H:%M:%SZ')\n except:\n return False, f'Invalid {aft_field}.'\n if before < after:\n return True, f'{bef_field} is before {aft_field}'\n return False, f'{bef_field} is not before {aft_field}.'\n <function token>\n",
"<import token>\n\n\nclass DataAudit:\n\n def open_dataset(dataset_path, dataset_create_bit=0):\n dataset_load_flag = 'r'\n if dataset_create_bit:\n dataset_load_flag = 'r+'\n dataset_file = open(dataset_path, 'x')\n emptylist = json.loads('[]')\n json.dump(emptylist, dataset_file)\n dataset_file.close()\n dataset_file = open(dataset_path, dataset_load_flag)\n return json.load(dataset_file), dataset_file\n\n def close_dataset(dataset_file_object, data=None):\n json.dump(dataset_file_object, data)\n dataset_file_object.close()\n\n def open_list(list_path):\n list_file = open(list_path)\n list_set = json.load(list_file)\n list_name = list(list_set.keys())[0]\n list_object = list_set[list_name]\n return list_object, list_file\n\n def close_list(list_file_object):\n list_file_object.close()\n\n def empty_check(n, field):\n if not n[field]:\n return False\n return True\n <function token>\n\n def whitelist_check(n, field, whitelist):\n if not n[field]:\n return False\n if n[field] in whitelist:\n return True\n return False\n <function token>\n\n def maximum_length_check(n, field, max):\n if n[field]:\n if len(n[field]) <= max:\n return True\n return False\n <function token>\n\n def regex_check(n, field, reg):\n if not n[field]:\n return False\n pattern = re.compile(reg)\n if pattern.fullmatch(n[field]):\n return True\n return False\n\n def precedence_check(n, bef_field, aft_field):\n if not n[bef_field]:\n return False, 'Before field is null.'\n if not n[aft_field]:\n return False, 'After field is null.'\n try:\n before = datetime.datetime.strptime(str(n[bef_field]),\n '%Y-%m-%dT%H:%M:%SZ')\n except:\n return False, f'Invalid {bef_field}.'\n try:\n after = datetime.datetime.strptime(str(n[aft_field]),\n '%Y-%m-%dT%H:%M:%SZ')\n except:\n return False, f'Invalid {aft_field}.'\n if before < after:\n return True, f'{bef_field} is before {aft_field}'\n return False, f'{bef_field} is not before {aft_field}.'\n <function token>\n",
"<import token>\n\n\nclass DataAudit:\n\n def open_dataset(dataset_path, dataset_create_bit=0):\n dataset_load_flag = 'r'\n if dataset_create_bit:\n dataset_load_flag = 'r+'\n dataset_file = open(dataset_path, 'x')\n emptylist = json.loads('[]')\n json.dump(emptylist, dataset_file)\n dataset_file.close()\n dataset_file = open(dataset_path, dataset_load_flag)\n return json.load(dataset_file), dataset_file\n\n def close_dataset(dataset_file_object, data=None):\n json.dump(dataset_file_object, data)\n dataset_file_object.close()\n\n def open_list(list_path):\n list_file = open(list_path)\n list_set = json.load(list_file)\n list_name = list(list_set.keys())[0]\n list_object = list_set[list_name]\n return list_object, list_file\n\n def close_list(list_file_object):\n list_file_object.close()\n\n def empty_check(n, field):\n if not n[field]:\n return False\n return True\n <function token>\n\n def whitelist_check(n, field, whitelist):\n if not n[field]:\n return False\n if n[field] in whitelist:\n return True\n return False\n <function token>\n <function token>\n <function token>\n\n def regex_check(n, field, reg):\n if not n[field]:\n return False\n pattern = re.compile(reg)\n if pattern.fullmatch(n[field]):\n return True\n return False\n\n def precedence_check(n, bef_field, aft_field):\n if not n[bef_field]:\n return False, 'Before field is null.'\n if not n[aft_field]:\n return False, 'After field is null.'\n try:\n before = datetime.datetime.strptime(str(n[bef_field]),\n '%Y-%m-%dT%H:%M:%SZ')\n except:\n return False, f'Invalid {bef_field}.'\n try:\n after = datetime.datetime.strptime(str(n[aft_field]),\n '%Y-%m-%dT%H:%M:%SZ')\n except:\n return False, f'Invalid {aft_field}.'\n if before < after:\n return True, f'{bef_field} is before {aft_field}'\n return False, f'{bef_field} is not before {aft_field}.'\n <function token>\n",
"<import token>\n\n\nclass DataAudit:\n\n def open_dataset(dataset_path, dataset_create_bit=0):\n dataset_load_flag = 'r'\n if dataset_create_bit:\n dataset_load_flag = 'r+'\n dataset_file = open(dataset_path, 'x')\n emptylist = json.loads('[]')\n json.dump(emptylist, dataset_file)\n dataset_file.close()\n dataset_file = open(dataset_path, dataset_load_flag)\n return json.load(dataset_file), dataset_file\n\n def close_dataset(dataset_file_object, data=None):\n json.dump(dataset_file_object, data)\n dataset_file_object.close()\n\n def open_list(list_path):\n list_file = open(list_path)\n list_set = json.load(list_file)\n list_name = list(list_set.keys())[0]\n list_object = list_set[list_name]\n return list_object, list_file\n\n def close_list(list_file_object):\n list_file_object.close()\n\n def empty_check(n, field):\n if not n[field]:\n return False\n return True\n <function token>\n\n def whitelist_check(n, field, whitelist):\n if not n[field]:\n return False\n if n[field] in whitelist:\n return True\n return False\n <function token>\n <function token>\n <function token>\n <function token>\n\n def precedence_check(n, bef_field, aft_field):\n if not n[bef_field]:\n return False, 'Before field is null.'\n if not n[aft_field]:\n return False, 'After field is null.'\n try:\n before = datetime.datetime.strptime(str(n[bef_field]),\n '%Y-%m-%dT%H:%M:%SZ')\n except:\n return False, f'Invalid {bef_field}.'\n try:\n after = datetime.datetime.strptime(str(n[aft_field]),\n '%Y-%m-%dT%H:%M:%SZ')\n except:\n return False, f'Invalid {aft_field}.'\n if before < after:\n return True, f'{bef_field} is before {aft_field}'\n return False, f'{bef_field} is not before {aft_field}.'\n <function token>\n",
"<import token>\n\n\nclass DataAudit:\n\n def open_dataset(dataset_path, dataset_create_bit=0):\n dataset_load_flag = 'r'\n if dataset_create_bit:\n dataset_load_flag = 'r+'\n dataset_file = open(dataset_path, 'x')\n emptylist = json.loads('[]')\n json.dump(emptylist, dataset_file)\n dataset_file.close()\n dataset_file = open(dataset_path, dataset_load_flag)\n return json.load(dataset_file), dataset_file\n\n def close_dataset(dataset_file_object, data=None):\n json.dump(dataset_file_object, data)\n dataset_file_object.close()\n <function token>\n\n def close_list(list_file_object):\n list_file_object.close()\n\n def empty_check(n, field):\n if not n[field]:\n return False\n return True\n <function token>\n\n def whitelist_check(n, field, whitelist):\n if not n[field]:\n return False\n if n[field] in whitelist:\n return True\n return False\n <function token>\n <function token>\n <function token>\n <function token>\n\n def precedence_check(n, bef_field, aft_field):\n if not n[bef_field]:\n return False, 'Before field is null.'\n if not n[aft_field]:\n return False, 'After field is null.'\n try:\n before = datetime.datetime.strptime(str(n[bef_field]),\n '%Y-%m-%dT%H:%M:%SZ')\n except:\n return False, f'Invalid {bef_field}.'\n try:\n after = datetime.datetime.strptime(str(n[aft_field]),\n '%Y-%m-%dT%H:%M:%SZ')\n except:\n return False, f'Invalid {aft_field}.'\n if before < after:\n return True, f'{bef_field} is before {aft_field}'\n return False, f'{bef_field} is not before {aft_field}.'\n <function token>\n",
"<import token>\n\n\nclass DataAudit:\n\n def open_dataset(dataset_path, dataset_create_bit=0):\n dataset_load_flag = 'r'\n if dataset_create_bit:\n dataset_load_flag = 'r+'\n dataset_file = open(dataset_path, 'x')\n emptylist = json.loads('[]')\n json.dump(emptylist, dataset_file)\n dataset_file.close()\n dataset_file = open(dataset_path, dataset_load_flag)\n return json.load(dataset_file), dataset_file\n <function token>\n <function token>\n\n def close_list(list_file_object):\n list_file_object.close()\n\n def empty_check(n, field):\n if not n[field]:\n return False\n return True\n <function token>\n\n def whitelist_check(n, field, whitelist):\n if not n[field]:\n return False\n if n[field] in whitelist:\n return True\n return False\n <function token>\n <function token>\n <function token>\n <function token>\n\n def precedence_check(n, bef_field, aft_field):\n if not n[bef_field]:\n return False, 'Before field is null.'\n if not n[aft_field]:\n return False, 'After field is null.'\n try:\n before = datetime.datetime.strptime(str(n[bef_field]),\n '%Y-%m-%dT%H:%M:%SZ')\n except:\n return False, f'Invalid {bef_field}.'\n try:\n after = datetime.datetime.strptime(str(n[aft_field]),\n '%Y-%m-%dT%H:%M:%SZ')\n except:\n return False, f'Invalid {aft_field}.'\n if before < after:\n return True, f'{bef_field} is before {aft_field}'\n return False, f'{bef_field} is not before {aft_field}.'\n <function token>\n",
"<import token>\n\n\nclass DataAudit:\n\n def open_dataset(dataset_path, dataset_create_bit=0):\n dataset_load_flag = 'r'\n if dataset_create_bit:\n dataset_load_flag = 'r+'\n dataset_file = open(dataset_path, 'x')\n emptylist = json.loads('[]')\n json.dump(emptylist, dataset_file)\n dataset_file.close()\n dataset_file = open(dataset_path, dataset_load_flag)\n return json.load(dataset_file), dataset_file\n <function token>\n <function token>\n <function token>\n\n def empty_check(n, field):\n if not n[field]:\n return False\n return True\n <function token>\n\n def whitelist_check(n, field, whitelist):\n if not n[field]:\n return False\n if n[field] in whitelist:\n return True\n return False\n <function token>\n <function token>\n <function token>\n <function token>\n\n def precedence_check(n, bef_field, aft_field):\n if not n[bef_field]:\n return False, 'Before field is null.'\n if not n[aft_field]:\n return False, 'After field is null.'\n try:\n before = datetime.datetime.strptime(str(n[bef_field]),\n '%Y-%m-%dT%H:%M:%SZ')\n except:\n return False, f'Invalid {bef_field}.'\n try:\n after = datetime.datetime.strptime(str(n[aft_field]),\n '%Y-%m-%dT%H:%M:%SZ')\n except:\n return False, f'Invalid {aft_field}.'\n if before < after:\n return True, f'{bef_field} is before {aft_field}'\n return False, f'{bef_field} is not before {aft_field}.'\n <function token>\n",
"<import token>\n\n\nclass DataAudit:\n\n def open_dataset(dataset_path, dataset_create_bit=0):\n dataset_load_flag = 'r'\n if dataset_create_bit:\n dataset_load_flag = 'r+'\n dataset_file = open(dataset_path, 'x')\n emptylist = json.loads('[]')\n json.dump(emptylist, dataset_file)\n dataset_file.close()\n dataset_file = open(dataset_path, dataset_load_flag)\n return json.load(dataset_file), dataset_file\n <function token>\n <function token>\n <function token>\n\n def empty_check(n, field):\n if not n[field]:\n return False\n return True\n <function token>\n\n def whitelist_check(n, field, whitelist):\n if not n[field]:\n return False\n if n[field] in whitelist:\n return True\n return False\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n",
"<import token>\n\n\nclass DataAudit:\n\n def open_dataset(dataset_path, dataset_create_bit=0):\n dataset_load_flag = 'r'\n if dataset_create_bit:\n dataset_load_flag = 'r+'\n dataset_file = open(dataset_path, 'x')\n emptylist = json.loads('[]')\n json.dump(emptylist, dataset_file)\n dataset_file.close()\n dataset_file = open(dataset_path, dataset_load_flag)\n return json.load(dataset_file), dataset_file\n <function token>\n <function token>\n <function token>\n\n def empty_check(n, field):\n if not n[field]:\n return False\n return True\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n",
"<import token>\n\n\nclass DataAudit:\n <function token>\n <function token>\n <function token>\n <function token>\n\n def empty_check(n, field):\n if not n[field]:\n return False\n return True\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n",
"<import token>\n\n\nclass DataAudit:\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n",
"<import token>\n<class token>\n"
] | false |
98,353 |
e7ab39a430c8e64a272f62bea6c7d867e415cde5
|
import pygame
import random
pygame.mixer.init()
pygame.mixer.music.load('Chubs.mp3')
pygame.mixer.music.play()
pygame.init()
# Colors
brown = (153 ,76 ,0)
white = (255, 255, 255)
red = (255, 0, 0)
black = (0, 0, 0)
dblue=(0,102,102)
grey =(128,128,128)
orange=(255,128,0)
venom=(0,153,76)
# Creating window
screen_width = 600
screen_height = 500
gameWindow = pygame.display.set_mode((screen_width, screen_height))
bgimg = pygame.image.load("snake1.jpg")
bgimg = pygame.transform.scale(bgimg, (screen_width, screen_height)).convert_alpha()
bgimg1 = pygame.image.load("gover.jpg")
bgimg1 = pygame.transform.scale(bgimg1, (screen_width, screen_height)).convert_alpha()
bgimg2 = pygame.image.load("ground2.jpg")
bgimg2 = pygame.transform.scale(bgimg2, (screen_width, screen_height)).convert_alpha()
# Game Title
pygame.display.set_caption("Snakes Game")
pygame.display.update()
clock = pygame.time.Clock()
font = pygame.font.SysFont(None,30 )
def text_screen(text, color, x, y):
screen_text = font.render(text, True, color)
gameWindow.blit(screen_text, [x,y])
def plot_snake(gameWindow, color, snk_list, snake_size):
for x,y in snk_list:
pygame.draw.rect(gameWindow, color, [x, y, snake_size, snake_size])
def welcome():
exit_game = False
while not exit_game:
gameWindow.fill((233,210,229))
gameWindow.blit(bgimg, (0, 0))
text_screen("*****Welcome to Snakes World*****", white, 135, 250)
text_screen("------------Press Space Bar To Play-------------", red, 100, 450)
for event in pygame.event.get():
if event.type == pygame.QUIT:
exit_game = True
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_SPACE:
gameloop()
pygame.display.update()
clock.tick(30)
# Game Loop
def gameloop():
# Game specific variables
exit_game = False
game_over = False
snake_x = 45
snake_y = 55
velocity_x = 0
velocity_y = 0
snk_list = []
snk_length = 1
with open("hiscore.txt", "r") as f:
hiscore = f.read()
food_x = random.randint(10, screen_width/2)
food_y = random.randint(10, screen_height/2)
score = 0
init_velocity = 5
snake_size = 20
fps = 20
while not exit_game:
if game_over:
with open("hiscore.txt", "w") as f:
f.write(str(hiscore))
gameWindow.fill(white)
gameWindow.blit(bgimg1, (0, 0))
text_screen("Game Over! Press Enter To Continue",dblue , 110, 450)
for event in pygame.event.get():
if event.type == pygame.QUIT:
exit_game = True
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_RETURN:
pygame.mixer.music.load('Chubs.mp3')
pygame.mixer.music.play()
gameloop()
else:
for event in pygame.event.get():
if event.type == pygame.QUIT:
exit_game = True
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_RIGHT:
velocity_x = init_velocity
velocity_y = 0
if event.key == pygame.K_LEFT:
velocity_x = - init_velocity
velocity_y = 0
if event.key == pygame.K_UP:
velocity_y = - init_velocity
velocity_x = 0
if event.key == pygame.K_DOWN:
velocity_y = init_velocity
velocity_x = 0
snake_x = snake_x + velocity_x
snake_y = snake_y + velocity_y
if abs(snake_x - food_x)<9 and abs(snake_y - food_y)<9:
score +=10
food_x = random.randint(10, screen_width/2)
food_y = random.randint(10, screen_height/2)
snk_length +=5
if score>int(hiscore):
hiscore = score
gameWindow.fill(brown)
gameWindow.blit(bgimg2, (0, 0))
text_screen("Score: " + str(score) + " High score: "+str(hiscore), white,20, 20)
pygame.draw.rect(gameWindow, orange, [food_x, food_y,snake_size/1.4,snake_size/1.4])
head = []
head.append(snake_x)
head.append(snake_y)
snk_list.append(head)
if len(snk_list)>snk_length:
del snk_list[0]
if head in snk_list[:-1]:
game_over = True
pygame.mixer.music.load('govver.mp3')
pygame.mixer.music.play()
if snake_x<0 or snake_x>screen_width or snake_y<0 or snake_y>screen_height:
game_over = True
pygame.mixer.music.load('govver.mp3')
pygame.mixer.music.play()
plot_snake(gameWindow, black, snk_list, snake_size)
pygame.display.update()
clock.tick(fps)
pygame.quit()
quit()
welcome()
|
[
"\r\n\r\nimport pygame\r\nimport random\r\n\r\npygame.mixer.init()\r\npygame.mixer.music.load('Chubs.mp3')\r\npygame.mixer.music.play()\r\npygame.init()\r\n\r\n\r\n# Colors\r\nbrown = (153 ,76 ,0)\r\nwhite = (255, 255, 255)\r\nred = (255, 0, 0)\r\nblack = (0, 0, 0)\r\ndblue=(0,102,102)\r\ngrey =(128,128,128)\r\norange=(255,128,0)\r\nvenom=(0,153,76)\r\n# Creating window\r\nscreen_width = 600\r\nscreen_height = 500\r\ngameWindow = pygame.display.set_mode((screen_width, screen_height))\r\n\r\n\r\nbgimg = pygame.image.load(\"snake1.jpg\")\r\nbgimg = pygame.transform.scale(bgimg, (screen_width, screen_height)).convert_alpha() \r\n\r\nbgimg1 = pygame.image.load(\"gover.jpg\")\r\nbgimg1 = pygame.transform.scale(bgimg1, (screen_width, screen_height)).convert_alpha()\r\n\r\nbgimg2 = pygame.image.load(\"ground2.jpg\")\r\nbgimg2 = pygame.transform.scale(bgimg2, (screen_width, screen_height)).convert_alpha()\r\n\r\n# Game Title\r\npygame.display.set_caption(\"Snakes Game\")\r\npygame.display.update()\r\nclock = pygame.time.Clock()\r\nfont = pygame.font.SysFont(None,30 )\r\n\r\n\r\ndef text_screen(text, color, x, y):\r\n screen_text = font.render(text, True, color)\r\n gameWindow.blit(screen_text, [x,y])\r\n\r\n\r\ndef plot_snake(gameWindow, color, snk_list, snake_size):\r\n for x,y in snk_list:\r\n pygame.draw.rect(gameWindow, color, [x, y, snake_size, snake_size])\r\n\r\ndef welcome():\r\n exit_game = False\r\n while not exit_game:\r\n gameWindow.fill((233,210,229))\r\n gameWindow.blit(bgimg, (0, 0))\r\n text_screen(\"*****Welcome to Snakes World*****\", white, 135, 250)\r\n text_screen(\"------------Press Space Bar To Play-------------\", red, 100, 450)\r\n for event in pygame.event.get():\r\n \r\n if event.type == pygame.QUIT:\r\n exit_game = True\r\n if event.type == pygame.KEYDOWN:\r\n \r\n\r\n if event.key == pygame.K_SPACE:\r\n \r\n gameloop()\r\n pygame.display.update()\r\n clock.tick(30) \r\n\r\n# Game Loop\r\ndef gameloop():\r\n # Game specific variables\r\n exit_game = False\r\n game_over = False\r\n snake_x = 45\r\n snake_y = 55\r\n velocity_x = 0\r\n velocity_y = 0\r\n snk_list = []\r\n snk_length = 1\r\n with open(\"hiscore.txt\", \"r\") as f:\r\n hiscore = f.read()\r\n\r\n food_x = random.randint(10, screen_width/2)\r\n food_y = random.randint(10, screen_height/2)\r\n score = 0\r\n init_velocity = 5\r\n snake_size = 20\r\n fps = 20\r\n while not exit_game:\r\n if game_over:\r\n with open(\"hiscore.txt\", \"w\") as f:\r\n f.write(str(hiscore))\r\n gameWindow.fill(white)\r\n gameWindow.blit(bgimg1, (0, 0))\r\n \r\n text_screen(\"Game Over! Press Enter To Continue\",dblue , 110, 450)\r\n\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n exit_game = True\r\n\r\n if event.type == pygame.KEYDOWN:\r\n if event.key == pygame.K_RETURN:\r\n pygame.mixer.music.load('Chubs.mp3')\r\n pygame.mixer.music.play()\r\n gameloop()\r\n\r\n else:\r\n\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n exit_game = True\r\n\r\n if event.type == pygame.KEYDOWN:\r\n if event.key == pygame.K_RIGHT:\r\n velocity_x = init_velocity\r\n velocity_y = 0\r\n\r\n if event.key == pygame.K_LEFT:\r\n velocity_x = - init_velocity\r\n velocity_y = 0\r\n\r\n if event.key == pygame.K_UP:\r\n velocity_y = - init_velocity\r\n velocity_x = 0\r\n\r\n if event.key == pygame.K_DOWN:\r\n velocity_y = init_velocity\r\n velocity_x = 0\r\n\r\n snake_x = snake_x + velocity_x\r\n snake_y = snake_y + velocity_y\r\n\r\n if abs(snake_x - food_x)<9 and abs(snake_y - food_y)<9:\r\n \r\n score +=10\r\n food_x = random.randint(10, screen_width/2)\r\n food_y = random.randint(10, screen_height/2)\r\n snk_length +=5\r\n if score>int(hiscore):\r\n hiscore = score\r\n\r\n gameWindow.fill(brown)\r\n gameWindow.blit(bgimg2, (0, 0))\r\n \r\n text_screen(\"Score: \" + str(score) + \" High score: \"+str(hiscore), white,20, 20)\r\n pygame.draw.rect(gameWindow, orange, [food_x, food_y,snake_size/1.4,snake_size/1.4])\r\n\r\n\r\n head = []\r\n head.append(snake_x)\r\n head.append(snake_y)\r\n snk_list.append(head)\r\n\r\n if len(snk_list)>snk_length:\r\n del snk_list[0]\r\n\r\n if head in snk_list[:-1]:\r\n game_over = True\r\n pygame.mixer.music.load('govver.mp3')\r\n pygame.mixer.music.play()\r\n\r\n if snake_x<0 or snake_x>screen_width or snake_y<0 or snake_y>screen_height:\r\n game_over = True\r\n pygame.mixer.music.load('govver.mp3')\r\n pygame.mixer.music.play()\r\n plot_snake(gameWindow, black, snk_list, snake_size)\r\n pygame.display.update()\r\n clock.tick(fps)\r\n\r\n pygame.quit()\r\n quit()\r\n\r\n\r\nwelcome()",
"import pygame\nimport random\npygame.mixer.init()\npygame.mixer.music.load('Chubs.mp3')\npygame.mixer.music.play()\npygame.init()\nbrown = 153, 76, 0\nwhite = 255, 255, 255\nred = 255, 0, 0\nblack = 0, 0, 0\ndblue = 0, 102, 102\ngrey = 128, 128, 128\norange = 255, 128, 0\nvenom = 0, 153, 76\nscreen_width = 600\nscreen_height = 500\ngameWindow = pygame.display.set_mode((screen_width, screen_height))\nbgimg = pygame.image.load('snake1.jpg')\nbgimg = pygame.transform.scale(bgimg, (screen_width, screen_height)\n ).convert_alpha()\nbgimg1 = pygame.image.load('gover.jpg')\nbgimg1 = pygame.transform.scale(bgimg1, (screen_width, screen_height)\n ).convert_alpha()\nbgimg2 = pygame.image.load('ground2.jpg')\nbgimg2 = pygame.transform.scale(bgimg2, (screen_width, screen_height)\n ).convert_alpha()\npygame.display.set_caption('Snakes Game')\npygame.display.update()\nclock = pygame.time.Clock()\nfont = pygame.font.SysFont(None, 30)\n\n\ndef text_screen(text, color, x, y):\n screen_text = font.render(text, True, color)\n gameWindow.blit(screen_text, [x, y])\n\n\ndef plot_snake(gameWindow, color, snk_list, snake_size):\n for x, y in snk_list:\n pygame.draw.rect(gameWindow, color, [x, y, snake_size, snake_size])\n\n\ndef welcome():\n exit_game = False\n while not exit_game:\n gameWindow.fill((233, 210, 229))\n gameWindow.blit(bgimg, (0, 0))\n text_screen('*****Welcome to Snakes World*****', white, 135, 250)\n text_screen('------------Press Space Bar To Play-------------', red,\n 100, 450)\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n exit_game = True\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_SPACE:\n gameloop()\n pygame.display.update()\n clock.tick(30)\n\n\ndef gameloop():\n exit_game = False\n game_over = False\n snake_x = 45\n snake_y = 55\n velocity_x = 0\n velocity_y = 0\n snk_list = []\n snk_length = 1\n with open('hiscore.txt', 'r') as f:\n hiscore = f.read()\n food_x = random.randint(10, screen_width / 2)\n food_y = random.randint(10, screen_height / 2)\n score = 0\n init_velocity = 5\n snake_size = 20\n fps = 20\n while not exit_game:\n if game_over:\n with open('hiscore.txt', 'w') as f:\n f.write(str(hiscore))\n gameWindow.fill(white)\n gameWindow.blit(bgimg1, (0, 0))\n text_screen('Game Over! Press Enter To Continue', dblue, 110, 450)\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n exit_game = True\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_RETURN:\n pygame.mixer.music.load('Chubs.mp3')\n pygame.mixer.music.play()\n gameloop()\n else:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n exit_game = True\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_RIGHT:\n velocity_x = init_velocity\n velocity_y = 0\n if event.key == pygame.K_LEFT:\n velocity_x = -init_velocity\n velocity_y = 0\n if event.key == pygame.K_UP:\n velocity_y = -init_velocity\n velocity_x = 0\n if event.key == pygame.K_DOWN:\n velocity_y = init_velocity\n velocity_x = 0\n snake_x = snake_x + velocity_x\n snake_y = snake_y + velocity_y\n if abs(snake_x - food_x) < 9 and abs(snake_y - food_y) < 9:\n score += 10\n food_x = random.randint(10, screen_width / 2)\n food_y = random.randint(10, screen_height / 2)\n snk_length += 5\n if score > int(hiscore):\n hiscore = score\n gameWindow.fill(brown)\n gameWindow.blit(bgimg2, (0, 0))\n text_screen('Score: ' + str(score) +\n ' High score: '\n + str(hiscore), white, 20, 20)\n pygame.draw.rect(gameWindow, orange, [food_x, food_y, \n snake_size / 1.4, snake_size / 1.4])\n head = []\n head.append(snake_x)\n head.append(snake_y)\n snk_list.append(head)\n if len(snk_list) > snk_length:\n del snk_list[0]\n if head in snk_list[:-1]:\n game_over = True\n pygame.mixer.music.load('govver.mp3')\n pygame.mixer.music.play()\n if (snake_x < 0 or snake_x > screen_width or snake_y < 0 or \n snake_y > screen_height):\n game_over = True\n pygame.mixer.music.load('govver.mp3')\n pygame.mixer.music.play()\n plot_snake(gameWindow, black, snk_list, snake_size)\n pygame.display.update()\n clock.tick(fps)\n pygame.quit()\n quit()\n\n\nwelcome()\n",
"<import token>\npygame.mixer.init()\npygame.mixer.music.load('Chubs.mp3')\npygame.mixer.music.play()\npygame.init()\nbrown = 153, 76, 0\nwhite = 255, 255, 255\nred = 255, 0, 0\nblack = 0, 0, 0\ndblue = 0, 102, 102\ngrey = 128, 128, 128\norange = 255, 128, 0\nvenom = 0, 153, 76\nscreen_width = 600\nscreen_height = 500\ngameWindow = pygame.display.set_mode((screen_width, screen_height))\nbgimg = pygame.image.load('snake1.jpg')\nbgimg = pygame.transform.scale(bgimg, (screen_width, screen_height)\n ).convert_alpha()\nbgimg1 = pygame.image.load('gover.jpg')\nbgimg1 = pygame.transform.scale(bgimg1, (screen_width, screen_height)\n ).convert_alpha()\nbgimg2 = pygame.image.load('ground2.jpg')\nbgimg2 = pygame.transform.scale(bgimg2, (screen_width, screen_height)\n ).convert_alpha()\npygame.display.set_caption('Snakes Game')\npygame.display.update()\nclock = pygame.time.Clock()\nfont = pygame.font.SysFont(None, 30)\n\n\ndef text_screen(text, color, x, y):\n screen_text = font.render(text, True, color)\n gameWindow.blit(screen_text, [x, y])\n\n\ndef plot_snake(gameWindow, color, snk_list, snake_size):\n for x, y in snk_list:\n pygame.draw.rect(gameWindow, color, [x, y, snake_size, snake_size])\n\n\ndef welcome():\n exit_game = False\n while not exit_game:\n gameWindow.fill((233, 210, 229))\n gameWindow.blit(bgimg, (0, 0))\n text_screen('*****Welcome to Snakes World*****', white, 135, 250)\n text_screen('------------Press Space Bar To Play-------------', red,\n 100, 450)\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n exit_game = True\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_SPACE:\n gameloop()\n pygame.display.update()\n clock.tick(30)\n\n\ndef gameloop():\n exit_game = False\n game_over = False\n snake_x = 45\n snake_y = 55\n velocity_x = 0\n velocity_y = 0\n snk_list = []\n snk_length = 1\n with open('hiscore.txt', 'r') as f:\n hiscore = f.read()\n food_x = random.randint(10, screen_width / 2)\n food_y = random.randint(10, screen_height / 2)\n score = 0\n init_velocity = 5\n snake_size = 20\n fps = 20\n while not exit_game:\n if game_over:\n with open('hiscore.txt', 'w') as f:\n f.write(str(hiscore))\n gameWindow.fill(white)\n gameWindow.blit(bgimg1, (0, 0))\n text_screen('Game Over! Press Enter To Continue', dblue, 110, 450)\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n exit_game = True\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_RETURN:\n pygame.mixer.music.load('Chubs.mp3')\n pygame.mixer.music.play()\n gameloop()\n else:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n exit_game = True\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_RIGHT:\n velocity_x = init_velocity\n velocity_y = 0\n if event.key == pygame.K_LEFT:\n velocity_x = -init_velocity\n velocity_y = 0\n if event.key == pygame.K_UP:\n velocity_y = -init_velocity\n velocity_x = 0\n if event.key == pygame.K_DOWN:\n velocity_y = init_velocity\n velocity_x = 0\n snake_x = snake_x + velocity_x\n snake_y = snake_y + velocity_y\n if abs(snake_x - food_x) < 9 and abs(snake_y - food_y) < 9:\n score += 10\n food_x = random.randint(10, screen_width / 2)\n food_y = random.randint(10, screen_height / 2)\n snk_length += 5\n if score > int(hiscore):\n hiscore = score\n gameWindow.fill(brown)\n gameWindow.blit(bgimg2, (0, 0))\n text_screen('Score: ' + str(score) +\n ' High score: '\n + str(hiscore), white, 20, 20)\n pygame.draw.rect(gameWindow, orange, [food_x, food_y, \n snake_size / 1.4, snake_size / 1.4])\n head = []\n head.append(snake_x)\n head.append(snake_y)\n snk_list.append(head)\n if len(snk_list) > snk_length:\n del snk_list[0]\n if head in snk_list[:-1]:\n game_over = True\n pygame.mixer.music.load('govver.mp3')\n pygame.mixer.music.play()\n if (snake_x < 0 or snake_x > screen_width or snake_y < 0 or \n snake_y > screen_height):\n game_over = True\n pygame.mixer.music.load('govver.mp3')\n pygame.mixer.music.play()\n plot_snake(gameWindow, black, snk_list, snake_size)\n pygame.display.update()\n clock.tick(fps)\n pygame.quit()\n quit()\n\n\nwelcome()\n",
"<import token>\npygame.mixer.init()\npygame.mixer.music.load('Chubs.mp3')\npygame.mixer.music.play()\npygame.init()\n<assignment token>\npygame.display.set_caption('Snakes Game')\npygame.display.update()\n<assignment token>\n\n\ndef text_screen(text, color, x, y):\n screen_text = font.render(text, True, color)\n gameWindow.blit(screen_text, [x, y])\n\n\ndef plot_snake(gameWindow, color, snk_list, snake_size):\n for x, y in snk_list:\n pygame.draw.rect(gameWindow, color, [x, y, snake_size, snake_size])\n\n\ndef welcome():\n exit_game = False\n while not exit_game:\n gameWindow.fill((233, 210, 229))\n gameWindow.blit(bgimg, (0, 0))\n text_screen('*****Welcome to Snakes World*****', white, 135, 250)\n text_screen('------------Press Space Bar To Play-------------', red,\n 100, 450)\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n exit_game = True\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_SPACE:\n gameloop()\n pygame.display.update()\n clock.tick(30)\n\n\ndef gameloop():\n exit_game = False\n game_over = False\n snake_x = 45\n snake_y = 55\n velocity_x = 0\n velocity_y = 0\n snk_list = []\n snk_length = 1\n with open('hiscore.txt', 'r') as f:\n hiscore = f.read()\n food_x = random.randint(10, screen_width / 2)\n food_y = random.randint(10, screen_height / 2)\n score = 0\n init_velocity = 5\n snake_size = 20\n fps = 20\n while not exit_game:\n if game_over:\n with open('hiscore.txt', 'w') as f:\n f.write(str(hiscore))\n gameWindow.fill(white)\n gameWindow.blit(bgimg1, (0, 0))\n text_screen('Game Over! Press Enter To Continue', dblue, 110, 450)\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n exit_game = True\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_RETURN:\n pygame.mixer.music.load('Chubs.mp3')\n pygame.mixer.music.play()\n gameloop()\n else:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n exit_game = True\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_RIGHT:\n velocity_x = init_velocity\n velocity_y = 0\n if event.key == pygame.K_LEFT:\n velocity_x = -init_velocity\n velocity_y = 0\n if event.key == pygame.K_UP:\n velocity_y = -init_velocity\n velocity_x = 0\n if event.key == pygame.K_DOWN:\n velocity_y = init_velocity\n velocity_x = 0\n snake_x = snake_x + velocity_x\n snake_y = snake_y + velocity_y\n if abs(snake_x - food_x) < 9 and abs(snake_y - food_y) < 9:\n score += 10\n food_x = random.randint(10, screen_width / 2)\n food_y = random.randint(10, screen_height / 2)\n snk_length += 5\n if score > int(hiscore):\n hiscore = score\n gameWindow.fill(brown)\n gameWindow.blit(bgimg2, (0, 0))\n text_screen('Score: ' + str(score) +\n ' High score: '\n + str(hiscore), white, 20, 20)\n pygame.draw.rect(gameWindow, orange, [food_x, food_y, \n snake_size / 1.4, snake_size / 1.4])\n head = []\n head.append(snake_x)\n head.append(snake_y)\n snk_list.append(head)\n if len(snk_list) > snk_length:\n del snk_list[0]\n if head in snk_list[:-1]:\n game_over = True\n pygame.mixer.music.load('govver.mp3')\n pygame.mixer.music.play()\n if (snake_x < 0 or snake_x > screen_width or snake_y < 0 or \n snake_y > screen_height):\n game_over = True\n pygame.mixer.music.load('govver.mp3')\n pygame.mixer.music.play()\n plot_snake(gameWindow, black, snk_list, snake_size)\n pygame.display.update()\n clock.tick(fps)\n pygame.quit()\n quit()\n\n\nwelcome()\n",
"<import token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n\n\ndef text_screen(text, color, x, y):\n screen_text = font.render(text, True, color)\n gameWindow.blit(screen_text, [x, y])\n\n\ndef plot_snake(gameWindow, color, snk_list, snake_size):\n for x, y in snk_list:\n pygame.draw.rect(gameWindow, color, [x, y, snake_size, snake_size])\n\n\ndef welcome():\n exit_game = False\n while not exit_game:\n gameWindow.fill((233, 210, 229))\n gameWindow.blit(bgimg, (0, 0))\n text_screen('*****Welcome to Snakes World*****', white, 135, 250)\n text_screen('------------Press Space Bar To Play-------------', red,\n 100, 450)\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n exit_game = True\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_SPACE:\n gameloop()\n pygame.display.update()\n clock.tick(30)\n\n\ndef gameloop():\n exit_game = False\n game_over = False\n snake_x = 45\n snake_y = 55\n velocity_x = 0\n velocity_y = 0\n snk_list = []\n snk_length = 1\n with open('hiscore.txt', 'r') as f:\n hiscore = f.read()\n food_x = random.randint(10, screen_width / 2)\n food_y = random.randint(10, screen_height / 2)\n score = 0\n init_velocity = 5\n snake_size = 20\n fps = 20\n while not exit_game:\n if game_over:\n with open('hiscore.txt', 'w') as f:\n f.write(str(hiscore))\n gameWindow.fill(white)\n gameWindow.blit(bgimg1, (0, 0))\n text_screen('Game Over! Press Enter To Continue', dblue, 110, 450)\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n exit_game = True\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_RETURN:\n pygame.mixer.music.load('Chubs.mp3')\n pygame.mixer.music.play()\n gameloop()\n else:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n exit_game = True\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_RIGHT:\n velocity_x = init_velocity\n velocity_y = 0\n if event.key == pygame.K_LEFT:\n velocity_x = -init_velocity\n velocity_y = 0\n if event.key == pygame.K_UP:\n velocity_y = -init_velocity\n velocity_x = 0\n if event.key == pygame.K_DOWN:\n velocity_y = init_velocity\n velocity_x = 0\n snake_x = snake_x + velocity_x\n snake_y = snake_y + velocity_y\n if abs(snake_x - food_x) < 9 and abs(snake_y - food_y) < 9:\n score += 10\n food_x = random.randint(10, screen_width / 2)\n food_y = random.randint(10, screen_height / 2)\n snk_length += 5\n if score > int(hiscore):\n hiscore = score\n gameWindow.fill(brown)\n gameWindow.blit(bgimg2, (0, 0))\n text_screen('Score: ' + str(score) +\n ' High score: '\n + str(hiscore), white, 20, 20)\n pygame.draw.rect(gameWindow, orange, [food_x, food_y, \n snake_size / 1.4, snake_size / 1.4])\n head = []\n head.append(snake_x)\n head.append(snake_y)\n snk_list.append(head)\n if len(snk_list) > snk_length:\n del snk_list[0]\n if head in snk_list[:-1]:\n game_over = True\n pygame.mixer.music.load('govver.mp3')\n pygame.mixer.music.play()\n if (snake_x < 0 or snake_x > screen_width or snake_y < 0 or \n snake_y > screen_height):\n game_over = True\n pygame.mixer.music.load('govver.mp3')\n pygame.mixer.music.play()\n plot_snake(gameWindow, black, snk_list, snake_size)\n pygame.display.update()\n clock.tick(fps)\n pygame.quit()\n quit()\n\n\n<code token>\n",
"<import token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n\n\ndef plot_snake(gameWindow, color, snk_list, snake_size):\n for x, y in snk_list:\n pygame.draw.rect(gameWindow, color, [x, y, snake_size, snake_size])\n\n\ndef welcome():\n exit_game = False\n while not exit_game:\n gameWindow.fill((233, 210, 229))\n gameWindow.blit(bgimg, (0, 0))\n text_screen('*****Welcome to Snakes World*****', white, 135, 250)\n text_screen('------------Press Space Bar To Play-------------', red,\n 100, 450)\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n exit_game = True\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_SPACE:\n gameloop()\n pygame.display.update()\n clock.tick(30)\n\n\ndef gameloop():\n exit_game = False\n game_over = False\n snake_x = 45\n snake_y = 55\n velocity_x = 0\n velocity_y = 0\n snk_list = []\n snk_length = 1\n with open('hiscore.txt', 'r') as f:\n hiscore = f.read()\n food_x = random.randint(10, screen_width / 2)\n food_y = random.randint(10, screen_height / 2)\n score = 0\n init_velocity = 5\n snake_size = 20\n fps = 20\n while not exit_game:\n if game_over:\n with open('hiscore.txt', 'w') as f:\n f.write(str(hiscore))\n gameWindow.fill(white)\n gameWindow.blit(bgimg1, (0, 0))\n text_screen('Game Over! Press Enter To Continue', dblue, 110, 450)\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n exit_game = True\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_RETURN:\n pygame.mixer.music.load('Chubs.mp3')\n pygame.mixer.music.play()\n gameloop()\n else:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n exit_game = True\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_RIGHT:\n velocity_x = init_velocity\n velocity_y = 0\n if event.key == pygame.K_LEFT:\n velocity_x = -init_velocity\n velocity_y = 0\n if event.key == pygame.K_UP:\n velocity_y = -init_velocity\n velocity_x = 0\n if event.key == pygame.K_DOWN:\n velocity_y = init_velocity\n velocity_x = 0\n snake_x = snake_x + velocity_x\n snake_y = snake_y + velocity_y\n if abs(snake_x - food_x) < 9 and abs(snake_y - food_y) < 9:\n score += 10\n food_x = random.randint(10, screen_width / 2)\n food_y = random.randint(10, screen_height / 2)\n snk_length += 5\n if score > int(hiscore):\n hiscore = score\n gameWindow.fill(brown)\n gameWindow.blit(bgimg2, (0, 0))\n text_screen('Score: ' + str(score) +\n ' High score: '\n + str(hiscore), white, 20, 20)\n pygame.draw.rect(gameWindow, orange, [food_x, food_y, \n snake_size / 1.4, snake_size / 1.4])\n head = []\n head.append(snake_x)\n head.append(snake_y)\n snk_list.append(head)\n if len(snk_list) > snk_length:\n del snk_list[0]\n if head in snk_list[:-1]:\n game_over = True\n pygame.mixer.music.load('govver.mp3')\n pygame.mixer.music.play()\n if (snake_x < 0 or snake_x > screen_width or snake_y < 0 or \n snake_y > screen_height):\n game_over = True\n pygame.mixer.music.load('govver.mp3')\n pygame.mixer.music.play()\n plot_snake(gameWindow, black, snk_list, snake_size)\n pygame.display.update()\n clock.tick(fps)\n pygame.quit()\n quit()\n\n\n<code token>\n",
"<import token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n\n\ndef plot_snake(gameWindow, color, snk_list, snake_size):\n for x, y in snk_list:\n pygame.draw.rect(gameWindow, color, [x, y, snake_size, snake_size])\n\n\n<function token>\n\n\ndef gameloop():\n exit_game = False\n game_over = False\n snake_x = 45\n snake_y = 55\n velocity_x = 0\n velocity_y = 0\n snk_list = []\n snk_length = 1\n with open('hiscore.txt', 'r') as f:\n hiscore = f.read()\n food_x = random.randint(10, screen_width / 2)\n food_y = random.randint(10, screen_height / 2)\n score = 0\n init_velocity = 5\n snake_size = 20\n fps = 20\n while not exit_game:\n if game_over:\n with open('hiscore.txt', 'w') as f:\n f.write(str(hiscore))\n gameWindow.fill(white)\n gameWindow.blit(bgimg1, (0, 0))\n text_screen('Game Over! Press Enter To Continue', dblue, 110, 450)\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n exit_game = True\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_RETURN:\n pygame.mixer.music.load('Chubs.mp3')\n pygame.mixer.music.play()\n gameloop()\n else:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n exit_game = True\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_RIGHT:\n velocity_x = init_velocity\n velocity_y = 0\n if event.key == pygame.K_LEFT:\n velocity_x = -init_velocity\n velocity_y = 0\n if event.key == pygame.K_UP:\n velocity_y = -init_velocity\n velocity_x = 0\n if event.key == pygame.K_DOWN:\n velocity_y = init_velocity\n velocity_x = 0\n snake_x = snake_x + velocity_x\n snake_y = snake_y + velocity_y\n if abs(snake_x - food_x) < 9 and abs(snake_y - food_y) < 9:\n score += 10\n food_x = random.randint(10, screen_width / 2)\n food_y = random.randint(10, screen_height / 2)\n snk_length += 5\n if score > int(hiscore):\n hiscore = score\n gameWindow.fill(brown)\n gameWindow.blit(bgimg2, (0, 0))\n text_screen('Score: ' + str(score) +\n ' High score: '\n + str(hiscore), white, 20, 20)\n pygame.draw.rect(gameWindow, orange, [food_x, food_y, \n snake_size / 1.4, snake_size / 1.4])\n head = []\n head.append(snake_x)\n head.append(snake_y)\n snk_list.append(head)\n if len(snk_list) > snk_length:\n del snk_list[0]\n if head in snk_list[:-1]:\n game_over = True\n pygame.mixer.music.load('govver.mp3')\n pygame.mixer.music.play()\n if (snake_x < 0 or snake_x > screen_width or snake_y < 0 or \n snake_y > screen_height):\n game_over = True\n pygame.mixer.music.load('govver.mp3')\n pygame.mixer.music.play()\n plot_snake(gameWindow, black, snk_list, snake_size)\n pygame.display.update()\n clock.tick(fps)\n pygame.quit()\n quit()\n\n\n<code token>\n",
"<import token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n\n\ndef gameloop():\n exit_game = False\n game_over = False\n snake_x = 45\n snake_y = 55\n velocity_x = 0\n velocity_y = 0\n snk_list = []\n snk_length = 1\n with open('hiscore.txt', 'r') as f:\n hiscore = f.read()\n food_x = random.randint(10, screen_width / 2)\n food_y = random.randint(10, screen_height / 2)\n score = 0\n init_velocity = 5\n snake_size = 20\n fps = 20\n while not exit_game:\n if game_over:\n with open('hiscore.txt', 'w') as f:\n f.write(str(hiscore))\n gameWindow.fill(white)\n gameWindow.blit(bgimg1, (0, 0))\n text_screen('Game Over! Press Enter To Continue', dblue, 110, 450)\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n exit_game = True\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_RETURN:\n pygame.mixer.music.load('Chubs.mp3')\n pygame.mixer.music.play()\n gameloop()\n else:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n exit_game = True\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_RIGHT:\n velocity_x = init_velocity\n velocity_y = 0\n if event.key == pygame.K_LEFT:\n velocity_x = -init_velocity\n velocity_y = 0\n if event.key == pygame.K_UP:\n velocity_y = -init_velocity\n velocity_x = 0\n if event.key == pygame.K_DOWN:\n velocity_y = init_velocity\n velocity_x = 0\n snake_x = snake_x + velocity_x\n snake_y = snake_y + velocity_y\n if abs(snake_x - food_x) < 9 and abs(snake_y - food_y) < 9:\n score += 10\n food_x = random.randint(10, screen_width / 2)\n food_y = random.randint(10, screen_height / 2)\n snk_length += 5\n if score > int(hiscore):\n hiscore = score\n gameWindow.fill(brown)\n gameWindow.blit(bgimg2, (0, 0))\n text_screen('Score: ' + str(score) +\n ' High score: '\n + str(hiscore), white, 20, 20)\n pygame.draw.rect(gameWindow, orange, [food_x, food_y, \n snake_size / 1.4, snake_size / 1.4])\n head = []\n head.append(snake_x)\n head.append(snake_y)\n snk_list.append(head)\n if len(snk_list) > snk_length:\n del snk_list[0]\n if head in snk_list[:-1]:\n game_over = True\n pygame.mixer.music.load('govver.mp3')\n pygame.mixer.music.play()\n if (snake_x < 0 or snake_x > screen_width or snake_y < 0 or \n snake_y > screen_height):\n game_over = True\n pygame.mixer.music.load('govver.mp3')\n pygame.mixer.music.play()\n plot_snake(gameWindow, black, snk_list, snake_size)\n pygame.display.update()\n clock.tick(fps)\n pygame.quit()\n quit()\n\n\n<code token>\n",
"<import token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<code token>\n"
] | false |
98,354 |
9e0c214672feb29f09957acd5cbfd1ae3866cf1a
|
import FWCore.ParameterSet.Config as cms
siStripDetVOffPrinter = cms.EDAnalyzer('SiStripDetVOffPrinter',
conditionDatabase = cms.string('frontier://FrontierProd/CMS_CONDITIONS'),
tagName = cms.string('SiStripDetVOff_1hourDelay_v1_Validation'),
startTime = cms.string('2002-01-20 23:59:59.000'),
endTime = cms.string('2002-01-20 23:59:59.000'),
output = cms.string('PerModuleSummary.txt'),
connect = cms.string(''),
DBParameters = cms.PSet(
authenticationPath = cms.untracked.string(''),
authenticationSystem = cms.untracked.int32(0),
security = cms.untracked.string(''),
messageLevel = cms.untracked.int32(0)
),
mightGet = cms.optional.untracked.vstring
)
|
[
"import FWCore.ParameterSet.Config as cms\n\nsiStripDetVOffPrinter = cms.EDAnalyzer('SiStripDetVOffPrinter',\n conditionDatabase = cms.string('frontier://FrontierProd/CMS_CONDITIONS'),\n tagName = cms.string('SiStripDetVOff_1hourDelay_v1_Validation'),\n startTime = cms.string('2002-01-20 23:59:59.000'),\n endTime = cms.string('2002-01-20 23:59:59.000'),\n output = cms.string('PerModuleSummary.txt'),\n connect = cms.string(''),\n DBParameters = cms.PSet(\n authenticationPath = cms.untracked.string(''),\n authenticationSystem = cms.untracked.int32(0),\n security = cms.untracked.string(''),\n messageLevel = cms.untracked.int32(0)\n ),\n mightGet = cms.optional.untracked.vstring\n)\n",
"import FWCore.ParameterSet.Config as cms\nsiStripDetVOffPrinter = cms.EDAnalyzer('SiStripDetVOffPrinter',\n conditionDatabase=cms.string('frontier://FrontierProd/CMS_CONDITIONS'),\n tagName=cms.string('SiStripDetVOff_1hourDelay_v1_Validation'),\n startTime=cms.string('2002-01-20 23:59:59.000'), endTime=cms.string(\n '2002-01-20 23:59:59.000'), output=cms.string('PerModuleSummary.txt'),\n connect=cms.string(''), DBParameters=cms.PSet(authenticationPath=cms.\n untracked.string(''), authenticationSystem=cms.untracked.int32(0),\n security=cms.untracked.string(''), messageLevel=cms.untracked.int32(0)),\n mightGet=cms.optional.untracked.vstring)\n",
"<import token>\nsiStripDetVOffPrinter = cms.EDAnalyzer('SiStripDetVOffPrinter',\n conditionDatabase=cms.string('frontier://FrontierProd/CMS_CONDITIONS'),\n tagName=cms.string('SiStripDetVOff_1hourDelay_v1_Validation'),\n startTime=cms.string('2002-01-20 23:59:59.000'), endTime=cms.string(\n '2002-01-20 23:59:59.000'), output=cms.string('PerModuleSummary.txt'),\n connect=cms.string(''), DBParameters=cms.PSet(authenticationPath=cms.\n untracked.string(''), authenticationSystem=cms.untracked.int32(0),\n security=cms.untracked.string(''), messageLevel=cms.untracked.int32(0)),\n mightGet=cms.optional.untracked.vstring)\n",
"<import token>\n<assignment token>\n"
] | false |
98,355 |
86f5251f9aa7da9f79348a3e78e9f85e1d8bbae5
|
from urllib import request, error, parse
from http import cookiejar
# 指定存储cookie的文件
filename = 'cookies.txt'
# 实例化MozillaCookieJar
my_cookie = cookiejar.MozillaCookieJar(filename)
# 创建cookie管理器
my_cookie_handler = request.HTTPCookieProcessor(my_cookie)
# 创建http请求管理器
http_handler = request.HTTPHandler()
# 创建https请求管理器
https_handler = request.HTTPSHandler()
# 创建请求管理器
opener = request.build_opener(http_handler, https_handler, my_cookie_handler)
def login(url):
data = {
'name': 'zgc',
'pwd': '123456'
}
try:
data = parse.urlencode(data).encode()
req = request.Request(url, data=data)
rsp = opener.open(req)
# 将cookie保存到文件中
# ignore_discard 表示即使cookie将要丢弃,也要保存
# ignore_expires 表示即使cookie已经过期,也要保存
my_cookie.save(ignore_discard=True, ignore_expires=True)
cnt = rsp.read()
print(cnt.decode())
except error.URLError as e:
print('登录失败', e)
def get_home(url):
rsp = opener.open(url)
cnt = rsp.read()
print(cnt.decode())
if __name__ == '__main__':
url = 'http://wx.ngrok.znbest.com/test.php'
url2 = 'http://wx.ngrok.znbest.com/mine.php'
get_home(url2)
login(url)
get_home(url2)
print('*'*50)
print(my_cookie)
for item in my_cookie:
print(item)
|
[
"from urllib import request, error, parse\nfrom http import cookiejar\n\n# 指定存储cookie的文件\nfilename = 'cookies.txt'\n# 实例化MozillaCookieJar\nmy_cookie = cookiejar.MozillaCookieJar(filename)\n# 创建cookie管理器\nmy_cookie_handler = request.HTTPCookieProcessor(my_cookie)\n# 创建http请求管理器\nhttp_handler = request.HTTPHandler()\n# 创建https请求管理器\nhttps_handler = request.HTTPSHandler()\n# 创建请求管理器\nopener = request.build_opener(http_handler, https_handler, my_cookie_handler)\n\n\ndef login(url):\n\n data = {\n 'name': 'zgc',\n 'pwd': '123456'\n }\n try:\n data = parse.urlencode(data).encode()\n\n req = request.Request(url, data=data)\n\n rsp = opener.open(req)\n # 将cookie保存到文件中\n # ignore_discard 表示即使cookie将要丢弃,也要保存\n # ignore_expires 表示即使cookie已经过期,也要保存\n my_cookie.save(ignore_discard=True, ignore_expires=True)\n cnt = rsp.read()\n print(cnt.decode())\n except error.URLError as e:\n print('登录失败', e)\n\n\ndef get_home(url):\n rsp = opener.open(url)\n cnt = rsp.read()\n print(cnt.decode())\n\n\nif __name__ == '__main__':\n url = 'http://wx.ngrok.znbest.com/test.php'\n url2 = 'http://wx.ngrok.znbest.com/mine.php'\n get_home(url2)\n login(url)\n get_home(url2)\n print('*'*50)\n print(my_cookie)\n for item in my_cookie:\n print(item)\n",
"from urllib import request, error, parse\nfrom http import cookiejar\nfilename = 'cookies.txt'\nmy_cookie = cookiejar.MozillaCookieJar(filename)\nmy_cookie_handler = request.HTTPCookieProcessor(my_cookie)\nhttp_handler = request.HTTPHandler()\nhttps_handler = request.HTTPSHandler()\nopener = request.build_opener(http_handler, https_handler, my_cookie_handler)\n\n\ndef login(url):\n data = {'name': 'zgc', 'pwd': '123456'}\n try:\n data = parse.urlencode(data).encode()\n req = request.Request(url, data=data)\n rsp = opener.open(req)\n my_cookie.save(ignore_discard=True, ignore_expires=True)\n cnt = rsp.read()\n print(cnt.decode())\n except error.URLError as e:\n print('登录失败', e)\n\n\ndef get_home(url):\n rsp = opener.open(url)\n cnt = rsp.read()\n print(cnt.decode())\n\n\nif __name__ == '__main__':\n url = 'http://wx.ngrok.znbest.com/test.php'\n url2 = 'http://wx.ngrok.znbest.com/mine.php'\n get_home(url2)\n login(url)\n get_home(url2)\n print('*' * 50)\n print(my_cookie)\n for item in my_cookie:\n print(item)\n",
"<import token>\nfilename = 'cookies.txt'\nmy_cookie = cookiejar.MozillaCookieJar(filename)\nmy_cookie_handler = request.HTTPCookieProcessor(my_cookie)\nhttp_handler = request.HTTPHandler()\nhttps_handler = request.HTTPSHandler()\nopener = request.build_opener(http_handler, https_handler, my_cookie_handler)\n\n\ndef login(url):\n data = {'name': 'zgc', 'pwd': '123456'}\n try:\n data = parse.urlencode(data).encode()\n req = request.Request(url, data=data)\n rsp = opener.open(req)\n my_cookie.save(ignore_discard=True, ignore_expires=True)\n cnt = rsp.read()\n print(cnt.decode())\n except error.URLError as e:\n print('登录失败', e)\n\n\ndef get_home(url):\n rsp = opener.open(url)\n cnt = rsp.read()\n print(cnt.decode())\n\n\nif __name__ == '__main__':\n url = 'http://wx.ngrok.znbest.com/test.php'\n url2 = 'http://wx.ngrok.znbest.com/mine.php'\n get_home(url2)\n login(url)\n get_home(url2)\n print('*' * 50)\n print(my_cookie)\n for item in my_cookie:\n print(item)\n",
"<import token>\n<assignment token>\n\n\ndef login(url):\n data = {'name': 'zgc', 'pwd': '123456'}\n try:\n data = parse.urlencode(data).encode()\n req = request.Request(url, data=data)\n rsp = opener.open(req)\n my_cookie.save(ignore_discard=True, ignore_expires=True)\n cnt = rsp.read()\n print(cnt.decode())\n except error.URLError as e:\n print('登录失败', e)\n\n\ndef get_home(url):\n rsp = opener.open(url)\n cnt = rsp.read()\n print(cnt.decode())\n\n\nif __name__ == '__main__':\n url = 'http://wx.ngrok.znbest.com/test.php'\n url2 = 'http://wx.ngrok.znbest.com/mine.php'\n get_home(url2)\n login(url)\n get_home(url2)\n print('*' * 50)\n print(my_cookie)\n for item in my_cookie:\n print(item)\n",
"<import token>\n<assignment token>\n\n\ndef login(url):\n data = {'name': 'zgc', 'pwd': '123456'}\n try:\n data = parse.urlencode(data).encode()\n req = request.Request(url, data=data)\n rsp = opener.open(req)\n my_cookie.save(ignore_discard=True, ignore_expires=True)\n cnt = rsp.read()\n print(cnt.decode())\n except error.URLError as e:\n print('登录失败', e)\n\n\ndef get_home(url):\n rsp = opener.open(url)\n cnt = rsp.read()\n print(cnt.decode())\n\n\n<code token>\n",
"<import token>\n<assignment token>\n\n\ndef login(url):\n data = {'name': 'zgc', 'pwd': '123456'}\n try:\n data = parse.urlencode(data).encode()\n req = request.Request(url, data=data)\n rsp = opener.open(req)\n my_cookie.save(ignore_discard=True, ignore_expires=True)\n cnt = rsp.read()\n print(cnt.decode())\n except error.URLError as e:\n print('登录失败', e)\n\n\n<function token>\n<code token>\n",
"<import token>\n<assignment token>\n<function token>\n<function token>\n<code token>\n"
] | false |
98,356 |
68ccb73b3d44b6bbde73609c8379a2d16b685397
|
def pick():
l = db.Collector(of_class='Level').get_first().unwrap()
t = db.Collector( \
of_category='Structural Framing', is_type=True, \
where=lambda x: x.name=="P2-C20024").get_first().unwrap()
__window__.Hide()
picked_face = uidoc.Selection.PickObject(ObjectType.Face)
picked_lines = uidoc.Selection.PickObjects(ObjectType.Element)
face = doc.GetElement(picked_face). \
GetGeometryObjectFromReference(picked_face)
lines = []
for el in picked_lines:
lines.append(doc.GetElement(el).GeometryCurve)
for el in lines:
p = Line.CreateBound(
face.Project(el.GetEndPoint(0)).XYZPoint, \
face.Project(el.GetEndPoint(1)).XYZPoint)
with db.Transaction('Create beam on Face'):
doc.Create.NewFamilyInstance(p, t, l, StructuralType.Beam)
pick()
|
[
"def pick():\r\n l = db.Collector(of_class='Level').get_first().unwrap()\r\n t = db.Collector( \\\r\n of_category='Structural Framing', is_type=True, \\\r\n where=lambda x: x.name==\"P2-C20024\").get_first().unwrap()\r\n\r\n __window__.Hide()\r\n picked_face = uidoc.Selection.PickObject(ObjectType.Face)\r\n picked_lines = uidoc.Selection.PickObjects(ObjectType.Element)\r\n face = doc.GetElement(picked_face). \\\r\n GetGeometryObjectFromReference(picked_face)\r\n lines = []\r\n for el in picked_lines:\r\n lines.append(doc.GetElement(el).GeometryCurve)\r\n\r\n for el in lines:\r\n p = Line.CreateBound(\r\n face.Project(el.GetEndPoint(0)).XYZPoint, \\\r\n face.Project(el.GetEndPoint(1)).XYZPoint)\r\n with db.Transaction('Create beam on Face'):\r\n doc.Create.NewFamilyInstance(p, t, l, StructuralType.Beam)\r\n\r\npick()",
"def pick():\n l = db.Collector(of_class='Level').get_first().unwrap()\n t = db.Collector(of_category='Structural Framing', is_type=True, where=\n lambda x: x.name == 'P2-C20024').get_first().unwrap()\n __window__.Hide()\n picked_face = uidoc.Selection.PickObject(ObjectType.Face)\n picked_lines = uidoc.Selection.PickObjects(ObjectType.Element)\n face = doc.GetElement(picked_face).GetGeometryObjectFromReference(\n picked_face)\n lines = []\n for el in picked_lines:\n lines.append(doc.GetElement(el).GeometryCurve)\n for el in lines:\n p = Line.CreateBound(face.Project(el.GetEndPoint(0)).XYZPoint, face\n .Project(el.GetEndPoint(1)).XYZPoint)\n with db.Transaction('Create beam on Face'):\n doc.Create.NewFamilyInstance(p, t, l, StructuralType.Beam)\n\n\npick()\n",
"def pick():\n l = db.Collector(of_class='Level').get_first().unwrap()\n t = db.Collector(of_category='Structural Framing', is_type=True, where=\n lambda x: x.name == 'P2-C20024').get_first().unwrap()\n __window__.Hide()\n picked_face = uidoc.Selection.PickObject(ObjectType.Face)\n picked_lines = uidoc.Selection.PickObjects(ObjectType.Element)\n face = doc.GetElement(picked_face).GetGeometryObjectFromReference(\n picked_face)\n lines = []\n for el in picked_lines:\n lines.append(doc.GetElement(el).GeometryCurve)\n for el in lines:\n p = Line.CreateBound(face.Project(el.GetEndPoint(0)).XYZPoint, face\n .Project(el.GetEndPoint(1)).XYZPoint)\n with db.Transaction('Create beam on Face'):\n doc.Create.NewFamilyInstance(p, t, l, StructuralType.Beam)\n\n\n<code token>\n",
"<function token>\n<code token>\n"
] | false |
98,357 |
885fcd46295272d344faa43904c410f606a6a468
|
# -*- coding: utf-8 -*-
from odoo import api, fields, models, _
import odoo
import logging
from odoo.exceptions import UserError
from odoo.tools import DEFAULT_SERVER_DATETIME_FORMAT
import json
import io
import os
import timeit
try:
to_unicode = unicode
except NameError:
to_unicode = str
_logger = logging.getLogger(__name__)
class pos_config_image(models.Model):
_name = "pos.config.image"
_description = "Image show to customer screen"
name = fields.Char('Title', required=1)
image = fields.Binary('Image', required=1)
config_id = fields.Many2one('pos.config', 'POS config', required=1)
description = fields.Text('Description')
class pos_config(models.Model):
_inherit = "pos.config"
user_id = fields.Many2one('res.users', 'Assigned to')
config_access_right = fields.Boolean('Config access right', default=1)
allow_discount = fields.Boolean('Change discount', default=1)
allow_qty = fields.Boolean('Change quantity', default=1)
allow_price = fields.Boolean('Change price', default=1)
allow_remove_line = fields.Boolean('Remove line', default=1)
allow_numpad = fields.Boolean('Display numpad', default=1)
allow_payment = fields.Boolean('Display payment', default=1)
allow_customer = fields.Boolean('Choice customer', default=1)
allow_add_order = fields.Boolean('New order', default=1)
allow_remove_order = fields.Boolean('Remove order', default=1)
allow_add_product = fields.Boolean('Add line', default=1)
allow_lock_screen = fields.Boolean('Lock screen',
default=0,
help='When pos sessions start, cashiers required open POS viva pos pass pin (Setting/Users)')
display_point_receipt = fields.Boolean('Display point / receipt')
loyalty_id = fields.Many2one('pos.loyalty', 'Loyalty',
domain=[('state', '=', 'running')])
promotion_ids = fields.Many2many('pos.promotion',
'pos_config_promotion_rel',
'config_id',
'promotion_id',
string='Promotion programs')
promotion_manual_select = fields.Boolean('Promotion manual choice', default=0)
create_purchase_order = fields.Boolean('Create PO', default=0)
create_purchase_order_required_signature = fields.Boolean('Required signature', default=0)
purchase_order_state = fields.Selection([
('confirm_order', 'Auto confirm'),
('confirm_picking', 'Auto delivery'),
('confirm_invoice', 'Auto invoice'),
], 'PO state',
help='This is state of purchase order will process to',
default='confirm_invoice')
sync_sale_order = fields.Boolean('Sync sale orders', default=0)
sale_order = fields.Boolean('Create Sale order', default=0)
sale_order_auto_confirm = fields.Boolean('Auto confirm', default=0)
sale_order_auto_invoice = fields.Boolean('Auto paid', default=0)
sale_order_auto_delivery = fields.Boolean('Auto delivery', default=0)
pos_orders_management = fields.Boolean('POS order management', default=0)
pos_order_period_return_days = fields.Float('Return period days',
help='this is period time for customer can return order',
default=30)
display_return_days_receipt = fields.Boolean('Display return days receipt', default=0)
sync_pricelist = fields.Boolean('Sync prices list', default=0)
display_onhand = fields.Boolean('Show qty available product', default=1,
help='Display quantity on hand all products on pos screen')
large_stocks = fields.Boolean('Large stock', help='If count products bigger than 100,000 rows, please check it')
allow_order_out_of_stock = fields.Boolean('Allow out-of-stock', default=1,
help='If checked, allow cashier can add product have out of stock')
allow_of_stock_approve_by_admin = fields.Boolean('Approve allow of stock',
help='Allow manager approve allow of stock')
print_voucher = fields.Boolean('Print vouchers', help='Reprint last vouchers', default=1)
scan_voucher = fields.Boolean('Scan voucher', default=0)
expired_days_voucher = fields.Integer('Expired days of voucher', default=30,
help='Total days keep voucher can use, if out of period days from create date, voucher will expired')
sync_multi_session = fields.Boolean('Sync multi session', default=0)
bus_id = fields.Many2one('pos.bus', string='Branch/store')
display_person_add_line = fields.Boolean('Display information line', default=0,
help="When you checked, on pos order lines screen, will display information person created order (lines) Eg: create date, updated date ..")
quickly_payment = fields.Boolean('Quickly payment', default=0)
internal_transfer = fields.Boolean('Internal transfer', default=0,
help='Go Inventory and active multi warehouse and location')
internal_transfer_auto_validate = fields.Boolean('Internal transfer auto validate', default=0)
discount = fields.Boolean('Global discount', default=0)
discount_ids = fields.Many2many('pos.global.discount',
'pos_config_pos_global_discount_rel',
'config_id',
'discount_id',
'Global discounts')
is_customer_screen = fields.Boolean('Is customer screen')
delay = fields.Integer('Delay time', default=3000)
slogan = fields.Char('Slogan', help='This is message will display on screen of customer')
image_ids = fields.One2many('pos.config.image', 'config_id', 'Images')
tooltip = fields.Boolean('Show information of product', default=0)
tooltip_show_last_price = fields.Boolean('Show last price of product',
help='Show last price of items of customer have bought before',
default=0)
tooltip_show_minimum_sale_price = fields.Boolean('Show min of product sale price',
help='Show minimum sale price of product',
default=0)
discount_limit = fields.Boolean('Discount limit', default=0)
discount_limit_amount = fields.Float('Discount limit amount', default=10)
discount_each_line = fields.Boolean('Discount each line')
discount_unlock_limit = fields.Boolean('Manager can unlock limit')
discount_unlock_limit_user_id = fields.Many2one('res.users', 'User unlock limit amount')
multi_currency = fields.Boolean('Multi currency', default=0)
multi_currency_update_rate = fields.Boolean('Update rate', default=0)
notify_alert = fields.Boolean('Notify alert',
help='Turn on/off notification alert on POS sessions.',
default=0)
return_products = fields.Boolean('Return orders',
help='Allow cashier return orders, return products',
default=0)
receipt_without_payment_template = fields.Selection([
('none', 'None'),
('display_price', 'Display price'),
('not_display_price', 'Not display price')
], default='not_display_price', string='Receipt without payment template')
lock_order_printed_receipt = fields.Boolean('Lock order printed receipt', default=0)
staff_level = fields.Selection([
('manual', 'Manual config'),
('marketing', 'Marketing'),
('waiter', 'Waiter'),
('cashier', 'Cashier'),
('manager', 'Manager')
], string='Staff level', default='manual')
validate_payment = fields.Boolean('Validate payment')
validate_remove_order = fields.Boolean('Validate remove order')
validate_change_minus = fields.Boolean('Validate pressed +/-')
validate_quantity_change = fields.Boolean('Validate quantity change')
validate_price_change = fields.Boolean('Validate price change')
validate_discount_change = fields.Boolean('Validate discount change')
validate_close_session = fields.Boolean('Validate close session')
validate_by_user_id = fields.Many2one('res.users', 'Validate by admin')
apply_validate_return_mode = fields.Boolean('Validate return mode',
help='If checked, only applied validate when return order', default=1)
print_user_card = fields.Boolean('Print user card')
product_operation = fields.Boolean('Product Operation', default=0,
help='Allow cashiers add pos categories and products on pos screen')
quickly_payment_full = fields.Boolean('Quickly payment full')
quickly_payment_full_journal_id = fields.Many2one('account.journal', 'Payment mode',
domain=[('journal_user', '=', True)])
daily_report = fields.Boolean('Daily report', default=0)
note_order = fields.Boolean('Note order', default=0)
note_orderline = fields.Boolean('Note order line', default=0)
signature_order = fields.Boolean('Signature order', default=0)
quickly_buttons = fields.Boolean('Quickly Actions', default=0)
display_amount_discount = fields.Boolean('Display amount discount', default=0)
booking_orders = fields.Boolean('Booking orders', default=0)
booking_orders_required_cashier_signature = fields.Boolean('Book order required sessions signature',
help='Checked if need required pos seller signature',
default=0)
booking_orders_alert = fields.Boolean('Alert when new order coming', default=0)
delivery_orders = fields.Boolean('Delivery orders',
help='Pos clients can get booking orders and delivery orders',
default=0)
booking_orders_display_shipping_receipt = fields.Boolean('Display shipping on receipt', default=0)
display_tax_orderline = fields.Boolean('Display tax orderline', default=0)
display_tax_receipt = fields.Boolean('Display tax receipt', default=0)
display_fiscal_position_receipt = fields.Boolean('Display fiscal position on receipt', default=0)
display_image_orderline = fields.Boolean('Display image order line', default=0)
display_image_receipt = fields.Boolean('Display image receipt', default=0)
duplicate_receipt = fields.Boolean('Duplicate Receipt')
print_number = fields.Integer('Print number', help='How many number receipt need to print at printer ?', default=0)
lock_session = fields.Boolean('Lock session', default=0)
category_wise_receipt = fields.Boolean('Category wise receipt', default=0)
management_invoice = fields.Boolean('Management Invoice', default=0)
invoice_journal_ids = fields.Many2many(
'account.journal',
'pos_config_invoice_journal_rel',
'config_id',
'journal_id',
'Accounting Invoice Journal',
domain=[('type', '=', 'sale')],
help="Accounting journal use for create invoices.")
send_invoice_email = fields.Boolean('Send email invoice', help='Help cashier send invoice to email of customer',
default=0)
lock_print_invoice_on_pos = fields.Boolean('Lock print invoice',
help='Lock print pdf invoice when clicked button invoice', default=0)
pos_auto_invoice = fields.Boolean('Auto create invoice',
help='Automatic create invoice if order have client',
default=0)
receipt_invoice_number = fields.Boolean('Add invoice on receipt', help='Show invoice number on receipt header',
default=0)
receipt_customer_vat = fields.Boolean('Add vat customer on receipt',
help='Show customer VAT(TIN) on receipt header', default=0)
auto_register_payment = fields.Boolean('Auto invocie register payment', default=0)
fiscal_position_auto_detect = fields.Boolean('Fiscal position auto detect', default=0)
display_sale_price_within_tax = fields.Boolean('Display sale price within tax', default=0)
display_cost_price = fields.Boolean('Display product cost price', default=0)
display_product_ref = fields.Boolean('Display product ref', default=0)
multi_location = fields.Boolean('Multi location', default=0)
product_view = fields.Selection([
('box', 'Box view'),
('list', 'List view'),
], default='box', string='View of products screen', required=1)
ticket_font_size = fields.Integer('Ticket font size', default=12)
customer_default_id = fields.Many2one('res.partner', 'Customer default')
medical_insurance = fields.Boolean('Medical insurance', default=0)
set_guest = fields.Boolean('Set guest', default=0)
reset_sequence = fields.Boolean('Reset sequence order', default=0)
update_tax = fields.Boolean('Modify tax', default=0, help='Cashier can change tax of order line')
subtotal_tax_included = fields.Boolean('Show Tax-Included Prices',
help='When checked, subtotal of line will display amount have tax-included')
cash_out = fields.Boolean('Take money out', default=0, help='Allow cashiers take money out')
cash_in = fields.Boolean('Push money in', default=0, help='Allow cashiers input money in')
min_length_search = fields.Integer('Min character length search', default=3,
help='Allow auto suggestion items when cashiers input on search box')
review_receipt_before_paid = fields.Boolean('Review receipt before paid', help='Show receipt before paid order',
default=1)
keyboard_event = fields.Boolean('Keyboard event', default=0, help='Allow cashiers use shortcut keyboard')
multi_variant = fields.Boolean('Multi variant', default=0,
help='Allow cashiers change variant of order lines on pos screen')
switch_user = fields.Boolean('Switch user', default=0, help='Allow cashiers switch to another cashier')
change_unit_of_measure = fields.Boolean('Change unit of measure', default=0,
help='Allow cashiers change unit of measure of order lines')
print_last_order = fields.Boolean('Print last receipt', default=0, help='Allow cashiers print last receipt')
close_session = fields.Boolean('Close session', help='When cashiers click close pos, auto log out of system',
default=0)
display_image_product = fields.Boolean('Display image product', default=1,
help='Allow hide/display product images on pos screen')
printer_on_off = fields.Boolean('On/Off printer', help='Help cashier turn on/off printer viva posbox', default=0)
check_duplicate_email = fields.Boolean('Check duplicate email', default=0)
check_duplicate_phone = fields.Boolean('Check duplicate phone', default=0)
hide_country = fields.Boolean('Hide country', default=0)
hide_barcode = fields.Boolean('Hide barcode', default=0)
hide_tax = fields.Boolean('Hide tax', default=0)
hide_pricelist = fields.Boolean('Hide pricelists', default=0)
hide_supplier = fields.Boolean('Hide suppiers', default=1)
auto_remove_line = fields.Boolean('Auto remove line',
default=1,
help='When cashier set quantity of line to 0, line auto remove not keep line with qty is 0')
chat = fields.Boolean('Chat message', default=0, help='Allow chat, discuss between pos sessions')
add_tags = fields.Boolean('Add tags line', default=0, help='Allow cashiers add tags to order lines')
add_notes = fields.Boolean('Add notes line', default=0, help='Allow cashiers add notes to order lines')
add_sale_person = fields.Boolean('Add sale person', default=0)
logo = fields.Binary('Logo of store')
paid_full = fields.Boolean('Allow paid full', default=0,
help='Allow cashiers click one button, do payment full order')
paid_partial = fields.Boolean('Allow partial payment', default=0, help='Allow cashiers do partial payment')
backup = fields.Boolean('Backup/Restore orders', default=0,
help='Allow cashiers backup and restore orders on pos screen')
backup_orders = fields.Text('Backup orders')
change_logo = fields.Boolean('Change logo', default=1, help='Allow cashiers change logo of shop on pos screen')
management_session = fields.Boolean('Management session', default=0)
barcode_receipt = fields.Boolean('Barcode receipt', default=0)
hide_mobile = fields.Boolean('Hide mobile', default=1)
hide_phone = fields.Boolean('Hide phone', default=1)
hide_email = fields.Boolean('Hide email', default=1)
update_client = fields.Boolean('Update client',
help='Uncheck if you dont want cashier change customer information on pos')
add_client = fields.Boolean('Add client', help='Uncheck if you dont want cashier add new customers on pos')
remove_client = fields.Boolean('Remove client', help='Uncheck if you dont want cashier remove customers on pos')
mobile_responsive = fields.Boolean('Mobile responsive', default=0)
hide_amount_total = fields.Boolean('Hide amount total', default=1)
hide_amount_taxes = fields.Boolean('Hide amount taxes', default=1)
report_no_of_report = fields.Integer(string="No.of Copy Receipt", default=1)
report_signature = fields.Boolean(string="Report Signature", default=1)
report_product_summary = fields.Boolean(string="Report Product Summary", default=1)
report_product_current_month_date = fields.Boolean(string="Report This Month", default=1)
report_order_summary = fields.Boolean(string='Report Order Summary', default=1)
report_order_current_month_date = fields.Boolean(string="Report Current Month", default=1)
report_payment_summary = fields.Boolean(string="Report Payment Summary", default=1)
report_payment_current_month_date = fields.Boolean(string="Payment Current Month", default=1)
active_product_sort_by = fields.Boolean('Active product sort by', default=1)
default_product_sort_by = fields.Selection([
('a_z', 'Sort from A to Z'),
('z_a', 'Sort from Z to A'),
('low_price', 'Sort from low to high price'),
('high_price', 'Sort from high to low price'),
('pos_sequence', 'Product pos sequence')
], string='Default sort by', default='a_z')
sale_extra = fields.Boolean('Sale extra', default=1)
required_add_customer_before_put_product_to_cart = fields.Boolean('Required add customer first',
help='If you checked on this checkbox, in POS always required cashier add customer the first')
only_one_time_add_customer = fields.Boolean('Only one time add customer',
help='Each orders, only one time add customer')
use_parameters = fields.Boolean('Use parameters', help='POS need only one time save parameter datas use on POS, and next times no need call backend', default=1)
time_refresh_parameter = fields.Integer('Time refresh datas (seconds)', help='Time for refresh parameters data', default=30)
@api.model
def switch_mobile_mode(self, config_id, vals):
if vals.get('mobile_responsive') == True:
vals['product_view'] = 'box'
return self.browse(config_id).sudo().write(vals)
@api.multi
def remove_database(self):
for config in self:
sessions = self.env['pos.session'].search([('config_id', '=', config.id)])
for session in sessions:
self.env['bus.bus'].sendmany(
[[(self.env.cr.dbname, 'pos.indexed_db', session.user_id.id), json.dumps({
'db': self.env.cr.dbname
})]])
self.env['pos.cache.database'].search([]).unlink()
self.env['pos.call.log'].search([]).unlink()
return {
'type': 'ir.actions.act_url',
'url': '/pos/web/',
'target': 'self',
}
@api.multi
def remove_caches(self):
for config in self:
sessions = self.env['pos.session'].search([('config_id', '=', config.id)])
for session in sessions:
self.env['bus.bus'].sendmany(
[[(self.env.cr.dbname, 'pos.indexed_db', session.user_id.id), json.dumps({
'db': self.env.cr.dbname
})]])
if session.state != 'closed':
session.action_pos_session_closing_control()
return {
'type': 'ir.actions.act_url',
'url': '/pos/web/',
'target': 'self',
}
@api.model
def store_cached_file(self, datas):
start = timeit.default_timer()
_logger.info('==> begin cached_file')
os.chdir(os.path.dirname(__file__))
path = os.getcwd()
file_name = path + '/pos.json'
if os.path.exists(file_name):
os.remove(file_name)
with io.open(file_name, 'w', encoding='utf8') as outfile:
str_ = json.dumps(datas, indent=4, sort_keys=True, separators=(',', ': '), ensure_ascii=False)
outfile.write(to_unicode(str_))
stop = timeit.default_timer()
_logger.info(stop - start)
return True
@api.model
def get_cached_file(self):
start = timeit.default_timer()
_logger.info('==> begin get_cached_file')
os.chdir(os.path.dirname(__file__))
path = os.getcwd()
file_name = path + '/pos.json'
if not os.path.exists(file_name):
return False
else:
with open(file_name) as f:
datas = json.load(f)
stop = timeit.default_timer()
_logger.info(stop - start)
return datas
def get_fields_by_model(self, model):
all_fields = self.env[model].fields_get()
fields_list = []
for field, value in all_fields.items():
if field == 'model' or all_fields[field]['type'] in ['one2many', 'binary']:
continue
else:
fields_list.append(field)
return fields_list
@api.model
def install_data(self, model_name=None, min_id=0, max_id=1999):
cache_obj = self.env['pos.cache.database'].with_context(prefetch_fields=False)
log_obj = self.env['pos.call.log'].with_context(prefetch_fields=False)
domain = [('id', '>=', min_id), ('id', '<=', max_id)]
if model_name == 'product.product':
domain.append(('available_in_pos', '=', True))
field_list = cache_obj.get_fields_by_model(model_name)
self.env.cr.execute("select id from pos_call_log where min_id=%s and max_id=%s and call_model='%s'" % (
min_id, max_id, model_name))
old_logs = self.env.cr.fetchall()
datas = None
if len(old_logs) == 0:
_logger.info('installing %s from %s to %s' % (model_name, min_id, max_id))
datas = self.env[model_name].with_context(prefetch_fields=False).search_read(domain, field_list)
version_info = odoo.release.version_info[0]
if version_info == 12:
all_fields = self.env[model_name].fields_get()
for data in datas:
for field, value in data.items():
if field == 'model':
continue
if all_fields[field] and all_fields[field]['type'] in ['date',
'datetime'] and value:
data[field] = value.strftime(DEFAULT_SERVER_DATETIME_FORMAT)
vals = {
'active': True,
'min_id': min_id,
'max_id': max_id,
'call_fields': json.dumps(field_list),
'call_results': json.dumps(datas),
'call_model': model_name,
'call_domain': json.dumps(domain),
}
log_obj.create(vals)
else:
old_log_id = old_logs[0][0]
old_log = log_obj.browse(old_log_id)
datas = old_log.call_results
self.env.cr.commit()
return datas
@api.onchange('lock_print_invoice_on_pos')
def _onchange_lock_print_invoice_on_pos(self):
if self.lock_print_invoice_on_pos == True:
self.receipt_invoice_number = False
self.send_invoice_email = True
else:
self.receipt_invoice_number = True
self.send_invoice_email = False
@api.onchange('receipt_invoice_number')
def _onchange_receipt_invoice_number(self):
if self.receipt_invoice_number == True:
self.lock_print_invoice_on_pos = False
else:
self.lock_print_invoice_on_pos = True
@api.onchange('pos_auto_invoice')
def _onchange_pos_auto_invoice(self):
if self.pos_auto_invoice == True:
self.iface_invoicing = True
else:
self.iface_invoicing = False
@api.onchange('staff_level')
def on_change_staff_level(self):
if self.staff_level and self.staff_level == 'manager':
self.lock_order_printed_receipt = False
@api.multi
def write(self, vals):
if vals.get('allow_discount', False) or vals.get('allow_qty', False) or vals.get('allow_price', False):
vals['allow_numpad'] = True
if vals.get('expired_days_voucher', None) and vals.get('expired_days_voucher') < 0:
raise UserError('Expired days of voucher could not smaller than 0')
for config in self:
if vals.get('management_session', False) and not vals.get('default_cashbox_lines_ids'):
if not config.default_cashbox_lines_ids and not config.cash_control:
raise UserError('Please go to Cash control and add Default Opening')
res = super(pos_config, self).write(vals)
for config in self:
if config.validate_by_user_id and not config.validate_by_user_id.pos_security_pin:
raise UserError(
'Validate user %s have not set pos security pin, please go to Users menu and input security password' % (
config.validate_by_user_id.name))
if config.discount_unlock_limit_user_id and not config.discount_unlock_limit_user_id.pos_security_pin:
raise UserError(
'User Unlock limit discount: %s ,have not set pos security pin, please go to Users menu and input security password' % (
config.discount_unlock_limit_user_id.name))
return res
@api.model
def create(self, vals):
if vals.get('allow_discount', False) or vals.get('allow_qty', False) or vals.get('allow_price', False):
vals['allow_numpad'] = True
if vals.get('expired_days_voucher', 0) < 0:
raise UserError('Expired days of voucher could not smaller than 0')
config = super(pos_config, self).create(vals)
if config.management_session and not config.default_cashbox_lines_ids and not config.cash_control:
raise UserError('Please go to Cash control and add Default Opening')
if config.validate_by_user_id and not config.validate_by_user_id.pos_security_pin:
raise UserError(
'Validate user %s have not set pos security pin, please go to Users menu and input security password' % (
config.validate_by_user_id.name))
if config.discount_unlock_limit_user_id and not config.discount_unlock_limit_user_id.pos_security_pin:
raise UserError(
'User Unlock limit discount: %s ,have not set pos security pin, please go to Users menu and input security password' % (
config.discount_unlock_limit_user_id.name))
return config
def init_wallet_journal(self):
Journal = self.env['account.journal']
user = self.env.user
wallet_journal = Journal.sudo().search([
('code', '=', 'UWJ'),
('company_id', '=', user.company_id.id),
])
if wallet_journal:
return wallet_journal.sudo().write({
'pos_method_type': 'wallet'
})
Account = self.env['account.account']
wallet_account_old_version = Account.sudo().search([
('code', '=', 'AUW'), ('company_id', '=', user.company_id.id)])
if wallet_account_old_version:
wallet_account = wallet_account_old_version[0]
else:
wallet_account = Account.sudo().create({
'name': 'Account wallet',
'code': 'AUW',
'user_type_id': self.env.ref('account.data_account_type_current_assets').id,
'company_id': user.company_id.id,
'note': 'code "AUW" auto give wallet amount of customers',
})
self.env['ir.model.data'].sudo().create({
'name': 'account_use_wallet' + str(user.company_id.id),
'model': 'account.account',
'module': 'pos_retail',
'res_id': wallet_account.id,
'noupdate': True, # If it's False, target record (res_id) will be removed while module update
})
wallet_journal_inactive = Journal.sudo().search([
('code', '=', 'UWJ'),
('company_id', '=', user.company_id.id),
('pos_method_type', '=', 'wallet')
])
if wallet_journal_inactive:
wallet_journal_inactive.sudo().write({
'default_debit_account_id': wallet_account.id,
'default_credit_account_id': wallet_account.id,
'pos_method_type': 'wallet',
'sequence': 100,
})
wallet_journal = wallet_journal_inactive
else:
new_sequence = self.env['ir.sequence'].sudo().create({
'name': 'Account Default Wallet Journal ' + str(user.company_id.id),
'padding': 3,
'prefix': 'UW ' + str(user.company_id.id),
})
self.env['ir.model.data'].sudo().create({
'name': 'journal_sequence' + str(new_sequence.id),
'model': 'ir.sequence',
'module': 'pos_retail',
'res_id': new_sequence.id,
'noupdate': True,
})
wallet_journal = Journal.sudo().create({
'name': 'Wallet',
'code': 'UWJ',
'type': 'cash',
'pos_method_type': 'wallet',
'journal_user': True,
'sequence_id': new_sequence.id,
'company_id': user.company_id.id,
'default_debit_account_id': wallet_account.id,
'default_credit_account_id': wallet_account.id,
'sequence': 100,
})
self.env['ir.model.data'].sudo().create({
'name': 'use_wallet_journal_' + str(wallet_journal.id),
'model': 'account.journal',
'module': 'pos_retail',
'res_id': int(wallet_journal.id),
'noupdate': True,
})
config = self
config.sudo().write({
'journal_ids': [(4, wallet_journal.id)],
})
statement = [(0, 0, {
'journal_id': wallet_journal.id,
'user_id': user.id,
'company_id': user.company_id.id
})]
current_session = config.current_session_id
current_session.sudo().write({
'statement_ids': statement,
})
return
def init_voucher_journal(self):
Journal = self.env['account.journal']
user = self.env.user
voucher_journal = Journal.sudo().search([
('code', '=', 'VCJ'),
('company_id', '=', user.company_id.id),
])
if voucher_journal:
return voucher_journal.sudo().write({
'pos_method_type': 'voucher'
})
Account = self.env['account.account']
voucher_account_old_version = Account.sudo().search([
('code', '=', 'AVC'), ('company_id', '=', user.company_id.id)])
if voucher_account_old_version:
voucher_account = voucher_account_old_version[0]
else:
voucher_account = Account.sudo().create({
'name': 'Account voucher',
'code': 'AVC',
'user_type_id': self.env.ref('account.data_account_type_current_assets').id,
'company_id': user.company_id.id,
'note': 'code "AVC" auto give voucher histories of customers',
})
self.env['ir.model.data'].sudo().create({
'name': 'account_voucher' + str(user.company_id.id),
'model': 'account.account',
'module': 'pos_retail',
'res_id': voucher_account.id,
'noupdate': True, # If it's False, target record (res_id) will be removed while module update
})
voucher_journal = Journal.sudo().search([
('code', '=', 'VCJ'),
('company_id', '=', user.company_id.id),
('pos_method_type', '=', 'voucher')
])
if voucher_journal:
voucher_journal[0].sudo().write({
'voucher': True,
'default_debit_account_id': voucher_account.id,
'default_credit_account_id': voucher_account.id,
'pos_method_type': 'voucher',
'sequence': 101,
})
voucher_journal = voucher_journal[0]
else:
new_sequence = self.env['ir.sequence'].sudo().create({
'name': 'Account Voucher ' + str(user.company_id.id),
'padding': 3,
'prefix': 'AVC ' + str(user.company_id.id),
})
self.env['ir.model.data'].sudo().create({
'name': 'journal_sequence' + str(new_sequence.id),
'model': 'ir.sequence',
'module': 'pos_retail',
'res_id': new_sequence.id,
'noupdate': True,
})
voucher_journal = Journal.sudo().create({
'name': 'Voucher',
'code': 'VCJ',
'type': 'cash',
'pos_method_type': 'voucher',
'journal_user': True,
'sequence_id': new_sequence.id,
'company_id': user.company_id.id,
'default_debit_account_id': voucher_account.id,
'default_credit_account_id': voucher_account.id,
'sequence': 101,
})
self.env['ir.model.data'].sudo().create({
'name': 'journal_voucher_' + str(voucher_journal.id),
'model': 'account.journal',
'module': 'pos_retail',
'res_id': int(voucher_journal.id),
'noupdate': True,
})
config = self
config.sudo().write({
'journal_ids': [(4, voucher_journal.id)],
})
statement = [(0, 0, {
'journal_id': voucher_journal.id,
'user_id': user.id,
'company_id': user.company_id.id
})]
current_session = config.current_session_id
current_session.sudo().write({
'statement_ids': statement,
})
return
def init_credit_journal(self):
Journal = self.env['account.journal']
user = self.env.user
voucher_journal = Journal.sudo().search([
('code', '=', 'CJ'),
('company_id', '=', user.company_id.id),
])
if voucher_journal:
return voucher_journal.sudo().write({
'pos_method_type': 'credit'
})
Account = self.env['account.account']
credit_account_old_version = Account.sudo().search([
('code', '=', 'ACJ'), ('company_id', '=', user.company_id.id)])
if credit_account_old_version:
credit_account = credit_account_old_version[0]
else:
credit_account = Account.sudo().create({
'name': 'Credit Account',
'code': 'CA',
'user_type_id': self.env.ref('account.data_account_type_current_assets').id,
'company_id': user.company_id.id,
'note': 'code "CA" give credit payment customer',
})
self.env['ir.model.data'].sudo().create({
'name': 'account_credit' + str(user.company_id.id),
'model': 'account.account',
'module': 'pos_retail',
'res_id': credit_account.id,
'noupdate': True, # If it's False, target record (res_id) will be removed while module update
})
credit_journal = Journal.sudo().search([
('code', '=', 'CJ'),
('company_id', '=', user.company_id.id),
('pos_method_type', '=', 'credit')
])
if credit_journal:
credit_journal[0].sudo().write({
'credit': True,
'default_debit_account_id': credit_account.id,
'default_credit_account_id': credit_account.id,
'pos_method_type': 'credit',
'sequence': 102,
})
credit_journal = credit_journal[0]
else:
new_sequence = self.env['ir.sequence'].sudo().create({
'name': 'Credit account ' + str(user.company_id.id),
'padding': 3,
'prefix': 'CA ' + str(user.company_id.id),
})
self.env['ir.model.data'].sudo().create({
'name': 'journal_sequence' + str(new_sequence.id),
'model': 'ir.sequence',
'module': 'pos_retail',
'res_id': new_sequence.id,
'noupdate': True,
})
credit_journal = Journal.sudo().create({
'name': 'Customer Credit',
'code': 'CJ',
'type': 'cash',
'pos_method_type': 'credit',
'journal_user': True,
'sequence_id': new_sequence.id,
'company_id': user.company_id.id,
'default_debit_account_id': credit_account.id,
'default_credit_account_id': credit_account.id,
'sequence': 102,
})
self.env['ir.model.data'].sudo().create({
'name': 'credit_journal_' + str(credit_journal.id),
'model': 'account.journal',
'module': 'pos_retail',
'res_id': int(credit_journal.id),
'noupdate': True,
})
config = self
config.sudo().write({
'journal_ids': [(4, credit_journal.id)],
})
statement = [(0, 0, {
'journal_id': credit_journal.id,
'user_id': user.id,
'company_id': user.company_id.id
})]
current_session = config.current_session_id
current_session.sudo().write({
'statement_ids': statement,
})
return True
def init_return_order_journal(self):
Journal = self.env['account.journal']
user = self.env.user
return_journal = Journal.sudo().search([
('code', '=', 'ROJ'),
('company_id', '=', user.company_id.id),
])
if return_journal:
return return_journal.sudo().write({
'pos_method_type': 'return'
})
Account = self.env['account.account']
return_account_old_version = Account.sudo().search([
('code', '=', 'ARO'), ('company_id', '=', user.company_id.id)])
if return_account_old_version:
return_account = return_account_old_version[0]
else:
return_account = Account.sudo().create({
'name': 'Return Order Account',
'code': 'ARO',
'user_type_id': self.env.ref('account.data_account_type_current_assets').id,
'company_id': user.company_id.id,
'note': 'code "ARO" give return order from customer',
})
self.env['ir.model.data'].sudo().create({
'name': 'return_account' + str(user.company_id.id),
'model': 'account.account',
'module': 'pos_retail',
'res_id': return_account.id,
'noupdate': True, # If it's False, target record (res_id) will be removed while module update
})
return_journal = Journal.sudo().search([
('code', '=', 'ROJ'),
('company_id', '=', user.company_id.id),
])
if return_journal:
return_journal[0].sudo().write({
'default_debit_account_id': return_account.id,
'default_credit_account_id': return_account.id,
'pos_method_type': 'return'
})
return_journal = return_journal[0]
else:
new_sequence = self.env['ir.sequence'].sudo().create({
'name': 'Return account ' + str(user.company_id.id),
'padding': 3,
'prefix': 'RA ' + str(user.company_id.id),
})
self.env['ir.model.data'].sudo().create({
'name': 'journal_sequence' + str(new_sequence.id),
'model': 'ir.sequence',
'module': 'pos_retail',
'res_id': new_sequence.id,
'noupdate': True,
})
return_journal = Journal.sudo().create({
'name': 'Return Order Customer',
'code': 'ROJ',
'type': 'cash',
'pos_method_type': 'return',
'journal_user': True,
'sequence_id': new_sequence.id,
'company_id': user.company_id.id,
'default_debit_account_id': return_account.id,
'default_credit_account_id': return_account.id,
'sequence': 103,
})
self.env['ir.model.data'].sudo().create({
'name': 'return_journal_' + str(return_journal.id),
'model': 'account.journal',
'module': 'pos_retail',
'res_id': int(return_journal.id),
'noupdate': True,
})
config = self
config.sudo().write({
'journal_ids': [(4, return_journal.id)],
})
statement = [(0, 0, {
'journal_id': return_journal.id,
'user_id': user.id,
'company_id': user.company_id.id
})]
current_session = config.current_session_id
current_session.sudo().write({
'statement_ids': statement,
})
return True
def init_rounding_journal(self):
Journal = self.env['account.journal']
Account = self.env['account.account']
user = self.env.user
rounding_journal = Journal.sudo().search([
('code', '=', 'RDJ'),
('company_id', '=', user.company_id.id),
])
if rounding_journal:
return rounding_journal.sudo().write({
'pos_method_type': 'rounding'
})
rounding_account_old_version = Account.sudo().search([
('code', '=', 'AAR'), ('company_id', '=', user.company_id.id)])
if rounding_account_old_version:
rounding_account = rounding_account_old_version[0]
else:
_logger.info('rounding_account have not')
rounding_account = Account.sudo().create({
'name': 'Rounding Account',
'code': 'AAR',
'user_type_id': self.env.ref('account.data_account_type_current_assets').id,
'company_id': user.company_id.id,
'note': 'code "AAR" give rounding pos order',
})
self.env['ir.model.data'].sudo().create({
'name': 'rounding_account' + str(user.company_id.id),
'model': 'account.account',
'module': 'pos_retail',
'res_id': rounding_account.id,
'noupdate': True,
})
rounding_journal = Journal.sudo().search([
('pos_method_type', '=', 'rounding'),
('company_id', '=', user.company_id.id),
])
if rounding_journal:
rounding_journal[0].sudo().write({
'name': 'Rounding',
'default_debit_account_id': rounding_account.id,
'default_credit_account_id': rounding_account.id,
'pos_method_type': 'rounding',
'code': 'RDJ'
})
rounding_journal = rounding_journal[0]
else:
new_sequence = self.env['ir.sequence'].sudo().create({
'name': 'rounding account ' + str(user.company_id.id),
'padding': 3,
'prefix': 'RA ' + str(user.company_id.id),
})
self.env['ir.model.data'].sudo().create({
'name': 'journal_sequence' + str(new_sequence.id),
'model': 'ir.sequence',
'module': 'pos_retail',
'res_id': new_sequence.id,
'noupdate': True,
})
rounding_journal = Journal.sudo().create({
'name': 'Rounding',
'code': 'RDJ',
'type': 'cash',
'pos_method_type': 'rounding',
'journal_user': True,
'sequence_id': new_sequence.id,
'company_id': user.company_id.id,
'default_debit_account_id': rounding_account.id,
'default_credit_account_id': rounding_account.id,
'sequence': 103,
})
self.env['ir.model.data'].sudo().create({
'name': 'rounding_journal_' + str(rounding_journal.id),
'model': 'account.journal',
'module': 'pos_retail',
'res_id': int(rounding_journal.id),
'noupdate': True,
})
config = self
config.sudo().write({
'journal_ids': [(4, rounding_journal.id)],
})
statement = [(0, 0, {
'journal_id': rounding_journal.id,
'user_id': user.id,
'company_id': user.company_id.id
})]
current_session = config.current_session_id
current_session.sudo().write({
'statement_ids': statement,
})
return True
@api.multi
def open_ui(self):
res = super(pos_config, self).open_ui()
self.init_voucher_journal()
self.init_wallet_journal()
self.init_credit_journal()
self.init_return_order_journal()
self.init_rounding_journal()
return res
@api.multi
def open_session_cb(self):
res = super(pos_config, self).open_session_cb()
self.init_voucher_journal()
self.init_wallet_journal()
self.init_credit_journal()
self.init_return_order_journal()
self.init_rounding_journal()
return res
|
[
"# -*- coding: utf-8 -*-\nfrom odoo import api, fields, models, _\nimport odoo\nimport logging\nfrom odoo.exceptions import UserError\nfrom odoo.tools import DEFAULT_SERVER_DATETIME_FORMAT\nimport json\n\nimport io\nimport os\nimport timeit\n\ntry:\n to_unicode = unicode\nexcept NameError:\n to_unicode = str\n\n_logger = logging.getLogger(__name__)\n\n\nclass pos_config_image(models.Model):\n _name = \"pos.config.image\"\n _description = \"Image show to customer screen\"\n\n name = fields.Char('Title', required=1)\n image = fields.Binary('Image', required=1)\n config_id = fields.Many2one('pos.config', 'POS config', required=1)\n description = fields.Text('Description')\n\n\nclass pos_config(models.Model):\n _inherit = \"pos.config\"\n\n user_id = fields.Many2one('res.users', 'Assigned to')\n config_access_right = fields.Boolean('Config access right', default=1)\n allow_discount = fields.Boolean('Change discount', default=1)\n allow_qty = fields.Boolean('Change quantity', default=1)\n allow_price = fields.Boolean('Change price', default=1)\n allow_remove_line = fields.Boolean('Remove line', default=1)\n allow_numpad = fields.Boolean('Display numpad', default=1)\n allow_payment = fields.Boolean('Display payment', default=1)\n allow_customer = fields.Boolean('Choice customer', default=1)\n allow_add_order = fields.Boolean('New order', default=1)\n allow_remove_order = fields.Boolean('Remove order', default=1)\n allow_add_product = fields.Boolean('Add line', default=1)\n\n allow_lock_screen = fields.Boolean('Lock screen',\n default=0,\n help='When pos sessions start, cashiers required open POS viva pos pass pin (Setting/Users)')\n\n display_point_receipt = fields.Boolean('Display point / receipt')\n loyalty_id = fields.Many2one('pos.loyalty', 'Loyalty',\n domain=[('state', '=', 'running')])\n\n promotion_ids = fields.Many2many('pos.promotion',\n 'pos_config_promotion_rel',\n 'config_id',\n 'promotion_id',\n string='Promotion programs')\n promotion_manual_select = fields.Boolean('Promotion manual choice', default=0)\n\n create_purchase_order = fields.Boolean('Create PO', default=0)\n create_purchase_order_required_signature = fields.Boolean('Required signature', default=0)\n purchase_order_state = fields.Selection([\n ('confirm_order', 'Auto confirm'),\n ('confirm_picking', 'Auto delivery'),\n ('confirm_invoice', 'Auto invoice'),\n ], 'PO state',\n help='This is state of purchase order will process to',\n default='confirm_invoice')\n\n sync_sale_order = fields.Boolean('Sync sale orders', default=0)\n sale_order = fields.Boolean('Create Sale order', default=0)\n sale_order_auto_confirm = fields.Boolean('Auto confirm', default=0)\n sale_order_auto_invoice = fields.Boolean('Auto paid', default=0)\n sale_order_auto_delivery = fields.Boolean('Auto delivery', default=0)\n\n pos_orders_management = fields.Boolean('POS order management', default=0)\n pos_order_period_return_days = fields.Float('Return period days',\n help='this is period time for customer can return order',\n default=30)\n display_return_days_receipt = fields.Boolean('Display return days receipt', default=0)\n\n sync_pricelist = fields.Boolean('Sync prices list', default=0)\n\n display_onhand = fields.Boolean('Show qty available product', default=1,\n help='Display quantity on hand all products on pos screen')\n large_stocks = fields.Boolean('Large stock', help='If count products bigger than 100,000 rows, please check it')\n allow_order_out_of_stock = fields.Boolean('Allow out-of-stock', default=1,\n help='If checked, allow cashier can add product have out of stock')\n allow_of_stock_approve_by_admin = fields.Boolean('Approve allow of stock',\n help='Allow manager approve allow of stock')\n\n print_voucher = fields.Boolean('Print vouchers', help='Reprint last vouchers', default=1)\n scan_voucher = fields.Boolean('Scan voucher', default=0)\n expired_days_voucher = fields.Integer('Expired days of voucher', default=30,\n help='Total days keep voucher can use, if out of period days from create date, voucher will expired')\n sync_multi_session = fields.Boolean('Sync multi session', default=0)\n bus_id = fields.Many2one('pos.bus', string='Branch/store')\n display_person_add_line = fields.Boolean('Display information line', default=0,\n help=\"When you checked, on pos order lines screen, will display information person created order (lines) Eg: create date, updated date ..\")\n quickly_payment = fields.Boolean('Quickly payment', default=0)\n internal_transfer = fields.Boolean('Internal transfer', default=0,\n help='Go Inventory and active multi warehouse and location')\n internal_transfer_auto_validate = fields.Boolean('Internal transfer auto validate', default=0)\n\n discount = fields.Boolean('Global discount', default=0)\n discount_ids = fields.Many2many('pos.global.discount',\n 'pos_config_pos_global_discount_rel',\n 'config_id',\n 'discount_id',\n 'Global discounts')\n is_customer_screen = fields.Boolean('Is customer screen')\n delay = fields.Integer('Delay time', default=3000)\n slogan = fields.Char('Slogan', help='This is message will display on screen of customer')\n image_ids = fields.One2many('pos.config.image', 'config_id', 'Images')\n\n tooltip = fields.Boolean('Show information of product', default=0)\n tooltip_show_last_price = fields.Boolean('Show last price of product',\n help='Show last price of items of customer have bought before',\n default=0)\n tooltip_show_minimum_sale_price = fields.Boolean('Show min of product sale price',\n help='Show minimum sale price of product',\n default=0)\n discount_limit = fields.Boolean('Discount limit', default=0)\n discount_limit_amount = fields.Float('Discount limit amount', default=10)\n discount_each_line = fields.Boolean('Discount each line')\n discount_unlock_limit = fields.Boolean('Manager can unlock limit')\n discount_unlock_limit_user_id = fields.Many2one('res.users', 'User unlock limit amount')\n\n multi_currency = fields.Boolean('Multi currency', default=0)\n multi_currency_update_rate = fields.Boolean('Update rate', default=0)\n\n notify_alert = fields.Boolean('Notify alert',\n help='Turn on/off notification alert on POS sessions.',\n default=0)\n return_products = fields.Boolean('Return orders',\n help='Allow cashier return orders, return products',\n default=0)\n receipt_without_payment_template = fields.Selection([\n ('none', 'None'),\n ('display_price', 'Display price'),\n ('not_display_price', 'Not display price')\n ], default='not_display_price', string='Receipt without payment template')\n lock_order_printed_receipt = fields.Boolean('Lock order printed receipt', default=0)\n staff_level = fields.Selection([\n ('manual', 'Manual config'),\n ('marketing', 'Marketing'),\n ('waiter', 'Waiter'),\n ('cashier', 'Cashier'),\n ('manager', 'Manager')\n ], string='Staff level', default='manual')\n\n validate_payment = fields.Boolean('Validate payment')\n validate_remove_order = fields.Boolean('Validate remove order')\n validate_change_minus = fields.Boolean('Validate pressed +/-')\n validate_quantity_change = fields.Boolean('Validate quantity change')\n validate_price_change = fields.Boolean('Validate price change')\n validate_discount_change = fields.Boolean('Validate discount change')\n validate_close_session = fields.Boolean('Validate close session')\n validate_by_user_id = fields.Many2one('res.users', 'Validate by admin')\n apply_validate_return_mode = fields.Boolean('Validate return mode',\n help='If checked, only applied validate when return order', default=1)\n\n print_user_card = fields.Boolean('Print user card')\n\n product_operation = fields.Boolean('Product Operation', default=0,\n help='Allow cashiers add pos categories and products on pos screen')\n quickly_payment_full = fields.Boolean('Quickly payment full')\n quickly_payment_full_journal_id = fields.Many2one('account.journal', 'Payment mode',\n domain=[('journal_user', '=', True)])\n daily_report = fields.Boolean('Daily report', default=0)\n note_order = fields.Boolean('Note order', default=0)\n note_orderline = fields.Boolean('Note order line', default=0)\n signature_order = fields.Boolean('Signature order', default=0)\n quickly_buttons = fields.Boolean('Quickly Actions', default=0)\n display_amount_discount = fields.Boolean('Display amount discount', default=0)\n\n booking_orders = fields.Boolean('Booking orders', default=0)\n booking_orders_required_cashier_signature = fields.Boolean('Book order required sessions signature',\n help='Checked if need required pos seller signature',\n default=0)\n booking_orders_alert = fields.Boolean('Alert when new order coming', default=0)\n delivery_orders = fields.Boolean('Delivery orders',\n help='Pos clients can get booking orders and delivery orders',\n default=0)\n booking_orders_display_shipping_receipt = fields.Boolean('Display shipping on receipt', default=0)\n\n display_tax_orderline = fields.Boolean('Display tax orderline', default=0)\n display_tax_receipt = fields.Boolean('Display tax receipt', default=0)\n display_fiscal_position_receipt = fields.Boolean('Display fiscal position on receipt', default=0)\n\n display_image_orderline = fields.Boolean('Display image order line', default=0)\n display_image_receipt = fields.Boolean('Display image receipt', default=0)\n duplicate_receipt = fields.Boolean('Duplicate Receipt')\n print_number = fields.Integer('Print number', help='How many number receipt need to print at printer ?', default=0)\n\n lock_session = fields.Boolean('Lock session', default=0)\n category_wise_receipt = fields.Boolean('Category wise receipt', default=0)\n\n management_invoice = fields.Boolean('Management Invoice', default=0)\n invoice_journal_ids = fields.Many2many(\n 'account.journal',\n 'pos_config_invoice_journal_rel',\n 'config_id',\n 'journal_id',\n 'Accounting Invoice Journal',\n domain=[('type', '=', 'sale')],\n help=\"Accounting journal use for create invoices.\")\n send_invoice_email = fields.Boolean('Send email invoice', help='Help cashier send invoice to email of customer',\n default=0)\n lock_print_invoice_on_pos = fields.Boolean('Lock print invoice',\n help='Lock print pdf invoice when clicked button invoice', default=0)\n pos_auto_invoice = fields.Boolean('Auto create invoice',\n help='Automatic create invoice if order have client',\n default=0)\n receipt_invoice_number = fields.Boolean('Add invoice on receipt', help='Show invoice number on receipt header',\n default=0)\n receipt_customer_vat = fields.Boolean('Add vat customer on receipt',\n help='Show customer VAT(TIN) on receipt header', default=0)\n auto_register_payment = fields.Boolean('Auto invocie register payment', default=0)\n\n fiscal_position_auto_detect = fields.Boolean('Fiscal position auto detect', default=0)\n\n display_sale_price_within_tax = fields.Boolean('Display sale price within tax', default=0)\n display_cost_price = fields.Boolean('Display product cost price', default=0)\n display_product_ref = fields.Boolean('Display product ref', default=0)\n multi_location = fields.Boolean('Multi location', default=0)\n product_view = fields.Selection([\n ('box', 'Box view'),\n ('list', 'List view'),\n ], default='box', string='View of products screen', required=1)\n\n ticket_font_size = fields.Integer('Ticket font size', default=12)\n customer_default_id = fields.Many2one('res.partner', 'Customer default')\n medical_insurance = fields.Boolean('Medical insurance', default=0)\n set_guest = fields.Boolean('Set guest', default=0)\n reset_sequence = fields.Boolean('Reset sequence order', default=0)\n update_tax = fields.Boolean('Modify tax', default=0, help='Cashier can change tax of order line')\n subtotal_tax_included = fields.Boolean('Show Tax-Included Prices',\n help='When checked, subtotal of line will display amount have tax-included')\n cash_out = fields.Boolean('Take money out', default=0, help='Allow cashiers take money out')\n cash_in = fields.Boolean('Push money in', default=0, help='Allow cashiers input money in')\n min_length_search = fields.Integer('Min character length search', default=3,\n help='Allow auto suggestion items when cashiers input on search box')\n review_receipt_before_paid = fields.Boolean('Review receipt before paid', help='Show receipt before paid order',\n default=1)\n keyboard_event = fields.Boolean('Keyboard event', default=0, help='Allow cashiers use shortcut keyboard')\n multi_variant = fields.Boolean('Multi variant', default=0,\n help='Allow cashiers change variant of order lines on pos screen')\n switch_user = fields.Boolean('Switch user', default=0, help='Allow cashiers switch to another cashier')\n change_unit_of_measure = fields.Boolean('Change unit of measure', default=0,\n help='Allow cashiers change unit of measure of order lines')\n print_last_order = fields.Boolean('Print last receipt', default=0, help='Allow cashiers print last receipt')\n close_session = fields.Boolean('Close session', help='When cashiers click close pos, auto log out of system',\n default=0)\n display_image_product = fields.Boolean('Display image product', default=1,\n help='Allow hide/display product images on pos screen')\n printer_on_off = fields.Boolean('On/Off printer', help='Help cashier turn on/off printer viva posbox', default=0)\n check_duplicate_email = fields.Boolean('Check duplicate email', default=0)\n check_duplicate_phone = fields.Boolean('Check duplicate phone', default=0)\n hide_country = fields.Boolean('Hide country', default=0)\n hide_barcode = fields.Boolean('Hide barcode', default=0)\n hide_tax = fields.Boolean('Hide tax', default=0)\n hide_pricelist = fields.Boolean('Hide pricelists', default=0)\n hide_supplier = fields.Boolean('Hide suppiers', default=1)\n auto_remove_line = fields.Boolean('Auto remove line',\n default=1,\n help='When cashier set quantity of line to 0, line auto remove not keep line with qty is 0')\n chat = fields.Boolean('Chat message', default=0, help='Allow chat, discuss between pos sessions')\n add_tags = fields.Boolean('Add tags line', default=0, help='Allow cashiers add tags to order lines')\n add_notes = fields.Boolean('Add notes line', default=0, help='Allow cashiers add notes to order lines')\n add_sale_person = fields.Boolean('Add sale person', default=0)\n logo = fields.Binary('Logo of store')\n paid_full = fields.Boolean('Allow paid full', default=0,\n help='Allow cashiers click one button, do payment full order')\n paid_partial = fields.Boolean('Allow partial payment', default=0, help='Allow cashiers do partial payment')\n backup = fields.Boolean('Backup/Restore orders', default=0,\n help='Allow cashiers backup and restore orders on pos screen')\n backup_orders = fields.Text('Backup orders')\n change_logo = fields.Boolean('Change logo', default=1, help='Allow cashiers change logo of shop on pos screen')\n management_session = fields.Boolean('Management session', default=0)\n barcode_receipt = fields.Boolean('Barcode receipt', default=0)\n\n hide_mobile = fields.Boolean('Hide mobile', default=1)\n hide_phone = fields.Boolean('Hide phone', default=1)\n hide_email = fields.Boolean('Hide email', default=1)\n update_client = fields.Boolean('Update client',\n help='Uncheck if you dont want cashier change customer information on pos')\n add_client = fields.Boolean('Add client', help='Uncheck if you dont want cashier add new customers on pos')\n remove_client = fields.Boolean('Remove client', help='Uncheck if you dont want cashier remove customers on pos')\n mobile_responsive = fields.Boolean('Mobile responsive', default=0)\n\n hide_amount_total = fields.Boolean('Hide amount total', default=1)\n hide_amount_taxes = fields.Boolean('Hide amount taxes', default=1)\n\n report_no_of_report = fields.Integer(string=\"No.of Copy Receipt\", default=1)\n report_signature = fields.Boolean(string=\"Report Signature\", default=1)\n\n report_product_summary = fields.Boolean(string=\"Report Product Summary\", default=1)\n report_product_current_month_date = fields.Boolean(string=\"Report This Month\", default=1)\n\n report_order_summary = fields.Boolean(string='Report Order Summary', default=1)\n report_order_current_month_date = fields.Boolean(string=\"Report Current Month\", default=1)\n\n report_payment_summary = fields.Boolean(string=\"Report Payment Summary\", default=1)\n report_payment_current_month_date = fields.Boolean(string=\"Payment Current Month\", default=1)\n\n active_product_sort_by = fields.Boolean('Active product sort by', default=1)\n default_product_sort_by = fields.Selection([\n ('a_z', 'Sort from A to Z'),\n ('z_a', 'Sort from Z to A'),\n ('low_price', 'Sort from low to high price'),\n ('high_price', 'Sort from high to low price'),\n ('pos_sequence', 'Product pos sequence')\n ], string='Default sort by', default='a_z')\n sale_extra = fields.Boolean('Sale extra', default=1)\n required_add_customer_before_put_product_to_cart = fields.Boolean('Required add customer first',\n help='If you checked on this checkbox, in POS always required cashier add customer the first')\n only_one_time_add_customer = fields.Boolean('Only one time add customer',\n help='Each orders, only one time add customer')\n use_parameters = fields.Boolean('Use parameters', help='POS need only one time save parameter datas use on POS, and next times no need call backend', default=1)\n time_refresh_parameter = fields.Integer('Time refresh datas (seconds)', help='Time for refresh parameters data', default=30)\n\n @api.model\n def switch_mobile_mode(self, config_id, vals):\n if vals.get('mobile_responsive') == True:\n vals['product_view'] = 'box'\n return self.browse(config_id).sudo().write(vals)\n\n @api.multi\n def remove_database(self):\n for config in self:\n sessions = self.env['pos.session'].search([('config_id', '=', config.id)])\n for session in sessions:\n self.env['bus.bus'].sendmany(\n [[(self.env.cr.dbname, 'pos.indexed_db', session.user_id.id), json.dumps({\n 'db': self.env.cr.dbname\n })]])\n self.env['pos.cache.database'].search([]).unlink()\n self.env['pos.call.log'].search([]).unlink()\n return {\n 'type': 'ir.actions.act_url',\n 'url': '/pos/web/',\n 'target': 'self',\n }\n\n @api.multi\n def remove_caches(self):\n for config in self:\n sessions = self.env['pos.session'].search([('config_id', '=', config.id)])\n for session in sessions:\n self.env['bus.bus'].sendmany(\n [[(self.env.cr.dbname, 'pos.indexed_db', session.user_id.id), json.dumps({\n 'db': self.env.cr.dbname\n })]])\n if session.state != 'closed':\n session.action_pos_session_closing_control()\n return {\n 'type': 'ir.actions.act_url',\n 'url': '/pos/web/',\n 'target': 'self',\n }\n\n @api.model\n def store_cached_file(self, datas):\n start = timeit.default_timer()\n _logger.info('==> begin cached_file')\n os.chdir(os.path.dirname(__file__))\n path = os.getcwd()\n file_name = path + '/pos.json'\n if os.path.exists(file_name):\n os.remove(file_name)\n with io.open(file_name, 'w', encoding='utf8') as outfile:\n str_ = json.dumps(datas, indent=4, sort_keys=True, separators=(',', ': '), ensure_ascii=False)\n outfile.write(to_unicode(str_))\n stop = timeit.default_timer()\n _logger.info(stop - start)\n return True\n\n @api.model\n def get_cached_file(self):\n start = timeit.default_timer()\n _logger.info('==> begin get_cached_file')\n os.chdir(os.path.dirname(__file__))\n path = os.getcwd()\n file_name = path + '/pos.json'\n if not os.path.exists(file_name):\n return False\n else:\n with open(file_name) as f:\n datas = json.load(f)\n stop = timeit.default_timer()\n _logger.info(stop - start)\n return datas\n\n def get_fields_by_model(self, model):\n all_fields = self.env[model].fields_get()\n fields_list = []\n for field, value in all_fields.items():\n if field == 'model' or all_fields[field]['type'] in ['one2many', 'binary']:\n continue\n else:\n fields_list.append(field)\n return fields_list\n\n @api.model\n def install_data(self, model_name=None, min_id=0, max_id=1999):\n cache_obj = self.env['pos.cache.database'].with_context(prefetch_fields=False)\n log_obj = self.env['pos.call.log'].with_context(prefetch_fields=False)\n domain = [('id', '>=', min_id), ('id', '<=', max_id)]\n if model_name == 'product.product':\n domain.append(('available_in_pos', '=', True))\n field_list = cache_obj.get_fields_by_model(model_name)\n self.env.cr.execute(\"select id from pos_call_log where min_id=%s and max_id=%s and call_model='%s'\" % (\n min_id, max_id, model_name))\n old_logs = self.env.cr.fetchall()\n datas = None\n if len(old_logs) == 0:\n _logger.info('installing %s from %s to %s' % (model_name, min_id, max_id))\n datas = self.env[model_name].with_context(prefetch_fields=False).search_read(domain, field_list)\n version_info = odoo.release.version_info[0]\n if version_info == 12:\n all_fields = self.env[model_name].fields_get()\n for data in datas:\n for field, value in data.items():\n if field == 'model':\n continue\n if all_fields[field] and all_fields[field]['type'] in ['date',\n 'datetime'] and value:\n data[field] = value.strftime(DEFAULT_SERVER_DATETIME_FORMAT)\n vals = {\n 'active': True,\n 'min_id': min_id,\n 'max_id': max_id,\n 'call_fields': json.dumps(field_list),\n 'call_results': json.dumps(datas),\n 'call_model': model_name,\n 'call_domain': json.dumps(domain),\n }\n log_obj.create(vals)\n else:\n old_log_id = old_logs[0][0]\n old_log = log_obj.browse(old_log_id)\n datas = old_log.call_results\n self.env.cr.commit()\n return datas\n\n @api.onchange('lock_print_invoice_on_pos')\n def _onchange_lock_print_invoice_on_pos(self):\n if self.lock_print_invoice_on_pos == True:\n self.receipt_invoice_number = False\n self.send_invoice_email = True\n else:\n self.receipt_invoice_number = True\n self.send_invoice_email = False\n\n @api.onchange('receipt_invoice_number')\n def _onchange_receipt_invoice_number(self):\n if self.receipt_invoice_number == True:\n self.lock_print_invoice_on_pos = False\n else:\n self.lock_print_invoice_on_pos = True\n\n @api.onchange('pos_auto_invoice')\n def _onchange_pos_auto_invoice(self):\n if self.pos_auto_invoice == True:\n self.iface_invoicing = True\n else:\n self.iface_invoicing = False\n\n @api.onchange('staff_level')\n def on_change_staff_level(self):\n if self.staff_level and self.staff_level == 'manager':\n self.lock_order_printed_receipt = False\n\n @api.multi\n def write(self, vals):\n if vals.get('allow_discount', False) or vals.get('allow_qty', False) or vals.get('allow_price', False):\n vals['allow_numpad'] = True\n if vals.get('expired_days_voucher', None) and vals.get('expired_days_voucher') < 0:\n raise UserError('Expired days of voucher could not smaller than 0')\n for config in self:\n if vals.get('management_session', False) and not vals.get('default_cashbox_lines_ids'):\n if not config.default_cashbox_lines_ids and not config.cash_control:\n raise UserError('Please go to Cash control and add Default Opening')\n res = super(pos_config, self).write(vals)\n for config in self:\n if config.validate_by_user_id and not config.validate_by_user_id.pos_security_pin:\n raise UserError(\n 'Validate user %s have not set pos security pin, please go to Users menu and input security password' % (\n config.validate_by_user_id.name))\n if config.discount_unlock_limit_user_id and not config.discount_unlock_limit_user_id.pos_security_pin:\n raise UserError(\n 'User Unlock limit discount: %s ,have not set pos security pin, please go to Users menu and input security password' % (\n config.discount_unlock_limit_user_id.name))\n return res\n\n @api.model\n def create(self, vals):\n if vals.get('allow_discount', False) or vals.get('allow_qty', False) or vals.get('allow_price', False):\n vals['allow_numpad'] = True\n if vals.get('expired_days_voucher', 0) < 0:\n raise UserError('Expired days of voucher could not smaller than 0')\n config = super(pos_config, self).create(vals)\n if config.management_session and not config.default_cashbox_lines_ids and not config.cash_control:\n raise UserError('Please go to Cash control and add Default Opening')\n if config.validate_by_user_id and not config.validate_by_user_id.pos_security_pin:\n raise UserError(\n 'Validate user %s have not set pos security pin, please go to Users menu and input security password' % (\n config.validate_by_user_id.name))\n if config.discount_unlock_limit_user_id and not config.discount_unlock_limit_user_id.pos_security_pin:\n raise UserError(\n 'User Unlock limit discount: %s ,have not set pos security pin, please go to Users menu and input security password' % (\n config.discount_unlock_limit_user_id.name))\n return config\n\n def init_wallet_journal(self):\n Journal = self.env['account.journal']\n user = self.env.user\n wallet_journal = Journal.sudo().search([\n ('code', '=', 'UWJ'),\n ('company_id', '=', user.company_id.id),\n ])\n if wallet_journal:\n return wallet_journal.sudo().write({\n 'pos_method_type': 'wallet'\n })\n Account = self.env['account.account']\n wallet_account_old_version = Account.sudo().search([\n ('code', '=', 'AUW'), ('company_id', '=', user.company_id.id)])\n if wallet_account_old_version:\n wallet_account = wallet_account_old_version[0]\n else:\n wallet_account = Account.sudo().create({\n 'name': 'Account wallet',\n 'code': 'AUW',\n 'user_type_id': self.env.ref('account.data_account_type_current_assets').id,\n 'company_id': user.company_id.id,\n 'note': 'code \"AUW\" auto give wallet amount of customers',\n })\n self.env['ir.model.data'].sudo().create({\n 'name': 'account_use_wallet' + str(user.company_id.id),\n 'model': 'account.account',\n 'module': 'pos_retail',\n 'res_id': wallet_account.id,\n 'noupdate': True, # If it's False, target record (res_id) will be removed while module update\n })\n\n wallet_journal_inactive = Journal.sudo().search([\n ('code', '=', 'UWJ'),\n ('company_id', '=', user.company_id.id),\n ('pos_method_type', '=', 'wallet')\n ])\n if wallet_journal_inactive:\n wallet_journal_inactive.sudo().write({\n 'default_debit_account_id': wallet_account.id,\n 'default_credit_account_id': wallet_account.id,\n 'pos_method_type': 'wallet',\n 'sequence': 100,\n })\n wallet_journal = wallet_journal_inactive\n else:\n new_sequence = self.env['ir.sequence'].sudo().create({\n 'name': 'Account Default Wallet Journal ' + str(user.company_id.id),\n 'padding': 3,\n 'prefix': 'UW ' + str(user.company_id.id),\n })\n self.env['ir.model.data'].sudo().create({\n 'name': 'journal_sequence' + str(new_sequence.id),\n 'model': 'ir.sequence',\n 'module': 'pos_retail',\n 'res_id': new_sequence.id,\n 'noupdate': True,\n })\n wallet_journal = Journal.sudo().create({\n 'name': 'Wallet',\n 'code': 'UWJ',\n 'type': 'cash',\n 'pos_method_type': 'wallet',\n 'journal_user': True,\n 'sequence_id': new_sequence.id,\n 'company_id': user.company_id.id,\n 'default_debit_account_id': wallet_account.id,\n 'default_credit_account_id': wallet_account.id,\n 'sequence': 100,\n })\n self.env['ir.model.data'].sudo().create({\n 'name': 'use_wallet_journal_' + str(wallet_journal.id),\n 'model': 'account.journal',\n 'module': 'pos_retail',\n 'res_id': int(wallet_journal.id),\n 'noupdate': True,\n })\n\n config = self\n config.sudo().write({\n 'journal_ids': [(4, wallet_journal.id)],\n })\n\n statement = [(0, 0, {\n 'journal_id': wallet_journal.id,\n 'user_id': user.id,\n 'company_id': user.company_id.id\n })]\n current_session = config.current_session_id\n current_session.sudo().write({\n 'statement_ids': statement,\n })\n return\n\n def init_voucher_journal(self):\n Journal = self.env['account.journal']\n user = self.env.user\n voucher_journal = Journal.sudo().search([\n ('code', '=', 'VCJ'),\n ('company_id', '=', user.company_id.id),\n ])\n if voucher_journal:\n return voucher_journal.sudo().write({\n 'pos_method_type': 'voucher'\n })\n Account = self.env['account.account']\n voucher_account_old_version = Account.sudo().search([\n ('code', '=', 'AVC'), ('company_id', '=', user.company_id.id)])\n if voucher_account_old_version:\n voucher_account = voucher_account_old_version[0]\n else:\n voucher_account = Account.sudo().create({\n 'name': 'Account voucher',\n 'code': 'AVC',\n 'user_type_id': self.env.ref('account.data_account_type_current_assets').id,\n 'company_id': user.company_id.id,\n 'note': 'code \"AVC\" auto give voucher histories of customers',\n })\n self.env['ir.model.data'].sudo().create({\n 'name': 'account_voucher' + str(user.company_id.id),\n 'model': 'account.account',\n 'module': 'pos_retail',\n 'res_id': voucher_account.id,\n 'noupdate': True, # If it's False, target record (res_id) will be removed while module update\n })\n\n voucher_journal = Journal.sudo().search([\n ('code', '=', 'VCJ'),\n ('company_id', '=', user.company_id.id),\n ('pos_method_type', '=', 'voucher')\n ])\n if voucher_journal:\n voucher_journal[0].sudo().write({\n 'voucher': True,\n 'default_debit_account_id': voucher_account.id,\n 'default_credit_account_id': voucher_account.id,\n 'pos_method_type': 'voucher',\n 'sequence': 101,\n })\n voucher_journal = voucher_journal[0]\n else:\n new_sequence = self.env['ir.sequence'].sudo().create({\n 'name': 'Account Voucher ' + str(user.company_id.id),\n 'padding': 3,\n 'prefix': 'AVC ' + str(user.company_id.id),\n })\n self.env['ir.model.data'].sudo().create({\n 'name': 'journal_sequence' + str(new_sequence.id),\n 'model': 'ir.sequence',\n 'module': 'pos_retail',\n 'res_id': new_sequence.id,\n 'noupdate': True,\n })\n voucher_journal = Journal.sudo().create({\n 'name': 'Voucher',\n 'code': 'VCJ',\n 'type': 'cash',\n 'pos_method_type': 'voucher',\n 'journal_user': True,\n 'sequence_id': new_sequence.id,\n 'company_id': user.company_id.id,\n 'default_debit_account_id': voucher_account.id,\n 'default_credit_account_id': voucher_account.id,\n 'sequence': 101,\n })\n self.env['ir.model.data'].sudo().create({\n 'name': 'journal_voucher_' + str(voucher_journal.id),\n 'model': 'account.journal',\n 'module': 'pos_retail',\n 'res_id': int(voucher_journal.id),\n 'noupdate': True,\n })\n\n config = self\n config.sudo().write({\n 'journal_ids': [(4, voucher_journal.id)],\n })\n\n statement = [(0, 0, {\n 'journal_id': voucher_journal.id,\n 'user_id': user.id,\n 'company_id': user.company_id.id\n })]\n current_session = config.current_session_id\n current_session.sudo().write({\n 'statement_ids': statement,\n })\n return\n\n def init_credit_journal(self):\n Journal = self.env['account.journal']\n user = self.env.user\n voucher_journal = Journal.sudo().search([\n ('code', '=', 'CJ'),\n ('company_id', '=', user.company_id.id),\n ])\n if voucher_journal:\n return voucher_journal.sudo().write({\n 'pos_method_type': 'credit'\n })\n Account = self.env['account.account']\n credit_account_old_version = Account.sudo().search([\n ('code', '=', 'ACJ'), ('company_id', '=', user.company_id.id)])\n if credit_account_old_version:\n credit_account = credit_account_old_version[0]\n else:\n credit_account = Account.sudo().create({\n 'name': 'Credit Account',\n 'code': 'CA',\n 'user_type_id': self.env.ref('account.data_account_type_current_assets').id,\n 'company_id': user.company_id.id,\n 'note': 'code \"CA\" give credit payment customer',\n })\n self.env['ir.model.data'].sudo().create({\n 'name': 'account_credit' + str(user.company_id.id),\n 'model': 'account.account',\n 'module': 'pos_retail',\n 'res_id': credit_account.id,\n 'noupdate': True, # If it's False, target record (res_id) will be removed while module update\n })\n\n credit_journal = Journal.sudo().search([\n ('code', '=', 'CJ'),\n ('company_id', '=', user.company_id.id),\n ('pos_method_type', '=', 'credit')\n ])\n if credit_journal:\n credit_journal[0].sudo().write({\n 'credit': True,\n 'default_debit_account_id': credit_account.id,\n 'default_credit_account_id': credit_account.id,\n 'pos_method_type': 'credit',\n 'sequence': 102,\n })\n credit_journal = credit_journal[0]\n else:\n new_sequence = self.env['ir.sequence'].sudo().create({\n 'name': 'Credit account ' + str(user.company_id.id),\n 'padding': 3,\n 'prefix': 'CA ' + str(user.company_id.id),\n })\n self.env['ir.model.data'].sudo().create({\n 'name': 'journal_sequence' + str(new_sequence.id),\n 'model': 'ir.sequence',\n 'module': 'pos_retail',\n 'res_id': new_sequence.id,\n 'noupdate': True,\n })\n credit_journal = Journal.sudo().create({\n 'name': 'Customer Credit',\n 'code': 'CJ',\n 'type': 'cash',\n 'pos_method_type': 'credit',\n 'journal_user': True,\n 'sequence_id': new_sequence.id,\n 'company_id': user.company_id.id,\n 'default_debit_account_id': credit_account.id,\n 'default_credit_account_id': credit_account.id,\n 'sequence': 102,\n })\n self.env['ir.model.data'].sudo().create({\n 'name': 'credit_journal_' + str(credit_journal.id),\n 'model': 'account.journal',\n 'module': 'pos_retail',\n 'res_id': int(credit_journal.id),\n 'noupdate': True,\n })\n\n config = self\n config.sudo().write({\n 'journal_ids': [(4, credit_journal.id)],\n })\n\n statement = [(0, 0, {\n 'journal_id': credit_journal.id,\n 'user_id': user.id,\n 'company_id': user.company_id.id\n })]\n current_session = config.current_session_id\n current_session.sudo().write({\n 'statement_ids': statement,\n })\n return True\n\n def init_return_order_journal(self):\n Journal = self.env['account.journal']\n user = self.env.user\n return_journal = Journal.sudo().search([\n ('code', '=', 'ROJ'),\n ('company_id', '=', user.company_id.id),\n ])\n if return_journal:\n return return_journal.sudo().write({\n 'pos_method_type': 'return'\n })\n Account = self.env['account.account']\n return_account_old_version = Account.sudo().search([\n ('code', '=', 'ARO'), ('company_id', '=', user.company_id.id)])\n if return_account_old_version:\n return_account = return_account_old_version[0]\n else:\n return_account = Account.sudo().create({\n 'name': 'Return Order Account',\n 'code': 'ARO',\n 'user_type_id': self.env.ref('account.data_account_type_current_assets').id,\n 'company_id': user.company_id.id,\n 'note': 'code \"ARO\" give return order from customer',\n })\n self.env['ir.model.data'].sudo().create({\n 'name': 'return_account' + str(user.company_id.id),\n 'model': 'account.account',\n 'module': 'pos_retail',\n 'res_id': return_account.id,\n 'noupdate': True, # If it's False, target record (res_id) will be removed while module update\n })\n\n return_journal = Journal.sudo().search([\n ('code', '=', 'ROJ'),\n ('company_id', '=', user.company_id.id),\n ])\n if return_journal:\n return_journal[0].sudo().write({\n 'default_debit_account_id': return_account.id,\n 'default_credit_account_id': return_account.id,\n 'pos_method_type': 'return'\n })\n return_journal = return_journal[0]\n else:\n new_sequence = self.env['ir.sequence'].sudo().create({\n 'name': 'Return account ' + str(user.company_id.id),\n 'padding': 3,\n 'prefix': 'RA ' + str(user.company_id.id),\n })\n self.env['ir.model.data'].sudo().create({\n 'name': 'journal_sequence' + str(new_sequence.id),\n 'model': 'ir.sequence',\n 'module': 'pos_retail',\n 'res_id': new_sequence.id,\n 'noupdate': True,\n })\n return_journal = Journal.sudo().create({\n 'name': 'Return Order Customer',\n 'code': 'ROJ',\n 'type': 'cash',\n 'pos_method_type': 'return',\n 'journal_user': True,\n 'sequence_id': new_sequence.id,\n 'company_id': user.company_id.id,\n 'default_debit_account_id': return_account.id,\n 'default_credit_account_id': return_account.id,\n 'sequence': 103,\n })\n self.env['ir.model.data'].sudo().create({\n 'name': 'return_journal_' + str(return_journal.id),\n 'model': 'account.journal',\n 'module': 'pos_retail',\n 'res_id': int(return_journal.id),\n 'noupdate': True,\n })\n\n config = self\n config.sudo().write({\n 'journal_ids': [(4, return_journal.id)],\n })\n\n statement = [(0, 0, {\n 'journal_id': return_journal.id,\n 'user_id': user.id,\n 'company_id': user.company_id.id\n })]\n current_session = config.current_session_id\n current_session.sudo().write({\n 'statement_ids': statement,\n })\n return True\n\n def init_rounding_journal(self):\n Journal = self.env['account.journal']\n Account = self.env['account.account']\n user = self.env.user\n rounding_journal = Journal.sudo().search([\n ('code', '=', 'RDJ'),\n ('company_id', '=', user.company_id.id),\n ])\n if rounding_journal:\n return rounding_journal.sudo().write({\n 'pos_method_type': 'rounding'\n })\n rounding_account_old_version = Account.sudo().search([\n ('code', '=', 'AAR'), ('company_id', '=', user.company_id.id)])\n if rounding_account_old_version:\n rounding_account = rounding_account_old_version[0]\n else:\n _logger.info('rounding_account have not')\n rounding_account = Account.sudo().create({\n 'name': 'Rounding Account',\n 'code': 'AAR',\n 'user_type_id': self.env.ref('account.data_account_type_current_assets').id,\n 'company_id': user.company_id.id,\n 'note': 'code \"AAR\" give rounding pos order',\n })\n self.env['ir.model.data'].sudo().create({\n 'name': 'rounding_account' + str(user.company_id.id),\n 'model': 'account.account',\n 'module': 'pos_retail',\n 'res_id': rounding_account.id,\n 'noupdate': True,\n })\n rounding_journal = Journal.sudo().search([\n ('pos_method_type', '=', 'rounding'),\n ('company_id', '=', user.company_id.id),\n ])\n if rounding_journal:\n rounding_journal[0].sudo().write({\n 'name': 'Rounding',\n 'default_debit_account_id': rounding_account.id,\n 'default_credit_account_id': rounding_account.id,\n 'pos_method_type': 'rounding',\n 'code': 'RDJ'\n })\n rounding_journal = rounding_journal[0]\n else:\n new_sequence = self.env['ir.sequence'].sudo().create({\n 'name': 'rounding account ' + str(user.company_id.id),\n 'padding': 3,\n 'prefix': 'RA ' + str(user.company_id.id),\n })\n self.env['ir.model.data'].sudo().create({\n 'name': 'journal_sequence' + str(new_sequence.id),\n 'model': 'ir.sequence',\n 'module': 'pos_retail',\n 'res_id': new_sequence.id,\n 'noupdate': True,\n })\n rounding_journal = Journal.sudo().create({\n 'name': 'Rounding',\n 'code': 'RDJ',\n 'type': 'cash',\n 'pos_method_type': 'rounding',\n 'journal_user': True,\n 'sequence_id': new_sequence.id,\n 'company_id': user.company_id.id,\n 'default_debit_account_id': rounding_account.id,\n 'default_credit_account_id': rounding_account.id,\n 'sequence': 103,\n })\n self.env['ir.model.data'].sudo().create({\n 'name': 'rounding_journal_' + str(rounding_journal.id),\n 'model': 'account.journal',\n 'module': 'pos_retail',\n 'res_id': int(rounding_journal.id),\n 'noupdate': True,\n })\n\n config = self\n config.sudo().write({\n 'journal_ids': [(4, rounding_journal.id)],\n })\n\n statement = [(0, 0, {\n 'journal_id': rounding_journal.id,\n 'user_id': user.id,\n 'company_id': user.company_id.id\n })]\n current_session = config.current_session_id\n current_session.sudo().write({\n 'statement_ids': statement,\n })\n return True\n\n @api.multi\n def open_ui(self):\n res = super(pos_config, self).open_ui()\n self.init_voucher_journal()\n self.init_wallet_journal()\n self.init_credit_journal()\n self.init_return_order_journal()\n self.init_rounding_journal()\n return res\n\n @api.multi\n def open_session_cb(self):\n res = super(pos_config, self).open_session_cb()\n self.init_voucher_journal()\n self.init_wallet_journal()\n self.init_credit_journal()\n self.init_return_order_journal()\n self.init_rounding_journal()\n return res\n",
"from odoo import api, fields, models, _\nimport odoo\nimport logging\nfrom odoo.exceptions import UserError\nfrom odoo.tools import DEFAULT_SERVER_DATETIME_FORMAT\nimport json\nimport io\nimport os\nimport timeit\ntry:\n to_unicode = unicode\nexcept NameError:\n to_unicode = str\n_logger = logging.getLogger(__name__)\n\n\nclass pos_config_image(models.Model):\n _name = 'pos.config.image'\n _description = 'Image show to customer screen'\n name = fields.Char('Title', required=1)\n image = fields.Binary('Image', required=1)\n config_id = fields.Many2one('pos.config', 'POS config', required=1)\n description = fields.Text('Description')\n\n\nclass pos_config(models.Model):\n _inherit = 'pos.config'\n user_id = fields.Many2one('res.users', 'Assigned to')\n config_access_right = fields.Boolean('Config access right', default=1)\n allow_discount = fields.Boolean('Change discount', default=1)\n allow_qty = fields.Boolean('Change quantity', default=1)\n allow_price = fields.Boolean('Change price', default=1)\n allow_remove_line = fields.Boolean('Remove line', default=1)\n allow_numpad = fields.Boolean('Display numpad', default=1)\n allow_payment = fields.Boolean('Display payment', default=1)\n allow_customer = fields.Boolean('Choice customer', default=1)\n allow_add_order = fields.Boolean('New order', default=1)\n allow_remove_order = fields.Boolean('Remove order', default=1)\n allow_add_product = fields.Boolean('Add line', default=1)\n allow_lock_screen = fields.Boolean('Lock screen', default=0, help=\n 'When pos sessions start, cashiers required open POS viva pos pass pin (Setting/Users)'\n )\n display_point_receipt = fields.Boolean('Display point / receipt')\n loyalty_id = fields.Many2one('pos.loyalty', 'Loyalty', domain=[('state',\n '=', 'running')])\n promotion_ids = fields.Many2many('pos.promotion',\n 'pos_config_promotion_rel', 'config_id', 'promotion_id', string=\n 'Promotion programs')\n promotion_manual_select = fields.Boolean('Promotion manual choice',\n default=0)\n create_purchase_order = fields.Boolean('Create PO', default=0)\n create_purchase_order_required_signature = fields.Boolean(\n 'Required signature', default=0)\n purchase_order_state = fields.Selection([('confirm_order',\n 'Auto confirm'), ('confirm_picking', 'Auto delivery'), (\n 'confirm_invoice', 'Auto invoice')], 'PO state', help=\n 'This is state of purchase order will process to', default=\n 'confirm_invoice')\n sync_sale_order = fields.Boolean('Sync sale orders', default=0)\n sale_order = fields.Boolean('Create Sale order', default=0)\n sale_order_auto_confirm = fields.Boolean('Auto confirm', default=0)\n sale_order_auto_invoice = fields.Boolean('Auto paid', default=0)\n sale_order_auto_delivery = fields.Boolean('Auto delivery', default=0)\n pos_orders_management = fields.Boolean('POS order management', default=0)\n pos_order_period_return_days = fields.Float('Return period days', help=\n 'this is period time for customer can return order', default=30)\n display_return_days_receipt = fields.Boolean('Display return days receipt',\n default=0)\n sync_pricelist = fields.Boolean('Sync prices list', default=0)\n display_onhand = fields.Boolean('Show qty available product', default=1,\n help='Display quantity on hand all products on pos screen')\n large_stocks = fields.Boolean('Large stock', help=\n 'If count products bigger than 100,000 rows, please check it')\n allow_order_out_of_stock = fields.Boolean('Allow out-of-stock', default\n =1, help='If checked, allow cashier can add product have out of stock')\n allow_of_stock_approve_by_admin = fields.Boolean('Approve allow of stock',\n help='Allow manager approve allow of stock')\n print_voucher = fields.Boolean('Print vouchers', help=\n 'Reprint last vouchers', default=1)\n scan_voucher = fields.Boolean('Scan voucher', default=0)\n expired_days_voucher = fields.Integer('Expired days of voucher',\n default=30, help=\n 'Total days keep voucher can use, if out of period days from create date, voucher will expired'\n )\n sync_multi_session = fields.Boolean('Sync multi session', default=0)\n bus_id = fields.Many2one('pos.bus', string='Branch/store')\n display_person_add_line = fields.Boolean('Display information line',\n default=0, help=\n 'When you checked, on pos order lines screen, will display information person created order (lines) Eg: create date, updated date ..'\n )\n quickly_payment = fields.Boolean('Quickly payment', default=0)\n internal_transfer = fields.Boolean('Internal transfer', default=0, help\n ='Go Inventory and active multi warehouse and location')\n internal_transfer_auto_validate = fields.Boolean(\n 'Internal transfer auto validate', default=0)\n discount = fields.Boolean('Global discount', default=0)\n discount_ids = fields.Many2many('pos.global.discount',\n 'pos_config_pos_global_discount_rel', 'config_id', 'discount_id',\n 'Global discounts')\n is_customer_screen = fields.Boolean('Is customer screen')\n delay = fields.Integer('Delay time', default=3000)\n slogan = fields.Char('Slogan', help=\n 'This is message will display on screen of customer')\n image_ids = fields.One2many('pos.config.image', 'config_id', 'Images')\n tooltip = fields.Boolean('Show information of product', default=0)\n tooltip_show_last_price = fields.Boolean('Show last price of product',\n help='Show last price of items of customer have bought before',\n default=0)\n tooltip_show_minimum_sale_price = fields.Boolean(\n 'Show min of product sale price', help=\n 'Show minimum sale price of product', default=0)\n discount_limit = fields.Boolean('Discount limit', default=0)\n discount_limit_amount = fields.Float('Discount limit amount', default=10)\n discount_each_line = fields.Boolean('Discount each line')\n discount_unlock_limit = fields.Boolean('Manager can unlock limit')\n discount_unlock_limit_user_id = fields.Many2one('res.users',\n 'User unlock limit amount')\n multi_currency = fields.Boolean('Multi currency', default=0)\n multi_currency_update_rate = fields.Boolean('Update rate', default=0)\n notify_alert = fields.Boolean('Notify alert', help=\n 'Turn on/off notification alert on POS sessions.', default=0)\n return_products = fields.Boolean('Return orders', help=\n 'Allow cashier return orders, return products', default=0)\n receipt_without_payment_template = fields.Selection([('none', 'None'),\n ('display_price', 'Display price'), ('not_display_price',\n 'Not display price')], default='not_display_price', string=\n 'Receipt without payment template')\n lock_order_printed_receipt = fields.Boolean('Lock order printed receipt',\n default=0)\n staff_level = fields.Selection([('manual', 'Manual config'), (\n 'marketing', 'Marketing'), ('waiter', 'Waiter'), ('cashier',\n 'Cashier'), ('manager', 'Manager')], string='Staff level', default=\n 'manual')\n validate_payment = fields.Boolean('Validate payment')\n validate_remove_order = fields.Boolean('Validate remove order')\n validate_change_minus = fields.Boolean('Validate pressed +/-')\n validate_quantity_change = fields.Boolean('Validate quantity change')\n validate_price_change = fields.Boolean('Validate price change')\n validate_discount_change = fields.Boolean('Validate discount change')\n validate_close_session = fields.Boolean('Validate close session')\n validate_by_user_id = fields.Many2one('res.users', 'Validate by admin')\n apply_validate_return_mode = fields.Boolean('Validate return mode',\n help='If checked, only applied validate when return order', default=1)\n print_user_card = fields.Boolean('Print user card')\n product_operation = fields.Boolean('Product Operation', default=0, help\n ='Allow cashiers add pos categories and products on pos screen')\n quickly_payment_full = fields.Boolean('Quickly payment full')\n quickly_payment_full_journal_id = fields.Many2one('account.journal',\n 'Payment mode', domain=[('journal_user', '=', True)])\n daily_report = fields.Boolean('Daily report', default=0)\n note_order = fields.Boolean('Note order', default=0)\n note_orderline = fields.Boolean('Note order line', default=0)\n signature_order = fields.Boolean('Signature order', default=0)\n quickly_buttons = fields.Boolean('Quickly Actions', default=0)\n display_amount_discount = fields.Boolean('Display amount discount',\n default=0)\n booking_orders = fields.Boolean('Booking orders', default=0)\n booking_orders_required_cashier_signature = fields.Boolean(\n 'Book order required sessions signature', help=\n 'Checked if need required pos seller signature', default=0)\n booking_orders_alert = fields.Boolean('Alert when new order coming',\n default=0)\n delivery_orders = fields.Boolean('Delivery orders', help=\n 'Pos clients can get booking orders and delivery orders', default=0)\n booking_orders_display_shipping_receipt = fields.Boolean(\n 'Display shipping on receipt', default=0)\n display_tax_orderline = fields.Boolean('Display tax orderline', default=0)\n display_tax_receipt = fields.Boolean('Display tax receipt', default=0)\n display_fiscal_position_receipt = fields.Boolean(\n 'Display fiscal position on receipt', default=0)\n display_image_orderline = fields.Boolean('Display image order line',\n default=0)\n display_image_receipt = fields.Boolean('Display image receipt', default=0)\n duplicate_receipt = fields.Boolean('Duplicate Receipt')\n print_number = fields.Integer('Print number', help=\n 'How many number receipt need to print at printer ?', default=0)\n lock_session = fields.Boolean('Lock session', default=0)\n category_wise_receipt = fields.Boolean('Category wise receipt', default=0)\n management_invoice = fields.Boolean('Management Invoice', default=0)\n invoice_journal_ids = fields.Many2many('account.journal',\n 'pos_config_invoice_journal_rel', 'config_id', 'journal_id',\n 'Accounting Invoice Journal', domain=[('type', '=', 'sale')], help=\n 'Accounting journal use for create invoices.')\n send_invoice_email = fields.Boolean('Send email invoice', help=\n 'Help cashier send invoice to email of customer', default=0)\n lock_print_invoice_on_pos = fields.Boolean('Lock print invoice', help=\n 'Lock print pdf invoice when clicked button invoice', default=0)\n pos_auto_invoice = fields.Boolean('Auto create invoice', help=\n 'Automatic create invoice if order have client', default=0)\n receipt_invoice_number = fields.Boolean('Add invoice on receipt', help=\n 'Show invoice number on receipt header', default=0)\n receipt_customer_vat = fields.Boolean('Add vat customer on receipt',\n help='Show customer VAT(TIN) on receipt header', default=0)\n auto_register_payment = fields.Boolean('Auto invocie register payment',\n default=0)\n fiscal_position_auto_detect = fields.Boolean('Fiscal position auto detect',\n default=0)\n display_sale_price_within_tax = fields.Boolean(\n 'Display sale price within tax', default=0)\n display_cost_price = fields.Boolean('Display product cost price', default=0\n )\n display_product_ref = fields.Boolean('Display product ref', default=0)\n multi_location = fields.Boolean('Multi location', default=0)\n product_view = fields.Selection([('box', 'Box view'), ('list',\n 'List view')], default='box', string='View of products screen',\n required=1)\n ticket_font_size = fields.Integer('Ticket font size', default=12)\n customer_default_id = fields.Many2one('res.partner', 'Customer default')\n medical_insurance = fields.Boolean('Medical insurance', default=0)\n set_guest = fields.Boolean('Set guest', default=0)\n reset_sequence = fields.Boolean('Reset sequence order', default=0)\n update_tax = fields.Boolean('Modify tax', default=0, help=\n 'Cashier can change tax of order line')\n subtotal_tax_included = fields.Boolean('Show Tax-Included Prices', help\n ='When checked, subtotal of line will display amount have tax-included'\n )\n cash_out = fields.Boolean('Take money out', default=0, help=\n 'Allow cashiers take money out')\n cash_in = fields.Boolean('Push money in', default=0, help=\n 'Allow cashiers input money in')\n min_length_search = fields.Integer('Min character length search',\n default=3, help=\n 'Allow auto suggestion items when cashiers input on search box')\n review_receipt_before_paid = fields.Boolean('Review receipt before paid',\n help='Show receipt before paid order', default=1)\n keyboard_event = fields.Boolean('Keyboard event', default=0, help=\n 'Allow cashiers use shortcut keyboard')\n multi_variant = fields.Boolean('Multi variant', default=0, help=\n 'Allow cashiers change variant of order lines on pos screen')\n switch_user = fields.Boolean('Switch user', default=0, help=\n 'Allow cashiers switch to another cashier')\n change_unit_of_measure = fields.Boolean('Change unit of measure',\n default=0, help='Allow cashiers change unit of measure of order lines')\n print_last_order = fields.Boolean('Print last receipt', default=0, help\n ='Allow cashiers print last receipt')\n close_session = fields.Boolean('Close session', help=\n 'When cashiers click close pos, auto log out of system', default=0)\n display_image_product = fields.Boolean('Display image product', default\n =1, help='Allow hide/display product images on pos screen')\n printer_on_off = fields.Boolean('On/Off printer', help=\n 'Help cashier turn on/off printer viva posbox', default=0)\n check_duplicate_email = fields.Boolean('Check duplicate email', default=0)\n check_duplicate_phone = fields.Boolean('Check duplicate phone', default=0)\n hide_country = fields.Boolean('Hide country', default=0)\n hide_barcode = fields.Boolean('Hide barcode', default=0)\n hide_tax = fields.Boolean('Hide tax', default=0)\n hide_pricelist = fields.Boolean('Hide pricelists', default=0)\n hide_supplier = fields.Boolean('Hide suppiers', default=1)\n auto_remove_line = fields.Boolean('Auto remove line', default=1, help=\n 'When cashier set quantity of line to 0, line auto remove not keep line with qty is 0'\n )\n chat = fields.Boolean('Chat message', default=0, help=\n 'Allow chat, discuss between pos sessions')\n add_tags = fields.Boolean('Add tags line', default=0, help=\n 'Allow cashiers add tags to order lines')\n add_notes = fields.Boolean('Add notes line', default=0, help=\n 'Allow cashiers add notes to order lines')\n add_sale_person = fields.Boolean('Add sale person', default=0)\n logo = fields.Binary('Logo of store')\n paid_full = fields.Boolean('Allow paid full', default=0, help=\n 'Allow cashiers click one button, do payment full order')\n paid_partial = fields.Boolean('Allow partial payment', default=0, help=\n 'Allow cashiers do partial payment')\n backup = fields.Boolean('Backup/Restore orders', default=0, help=\n 'Allow cashiers backup and restore orders on pos screen')\n backup_orders = fields.Text('Backup orders')\n change_logo = fields.Boolean('Change logo', default=1, help=\n 'Allow cashiers change logo of shop on pos screen')\n management_session = fields.Boolean('Management session', default=0)\n barcode_receipt = fields.Boolean('Barcode receipt', default=0)\n hide_mobile = fields.Boolean('Hide mobile', default=1)\n hide_phone = fields.Boolean('Hide phone', default=1)\n hide_email = fields.Boolean('Hide email', default=1)\n update_client = fields.Boolean('Update client', help=\n 'Uncheck if you dont want cashier change customer information on pos')\n add_client = fields.Boolean('Add client', help=\n 'Uncheck if you dont want cashier add new customers on pos')\n remove_client = fields.Boolean('Remove client', help=\n 'Uncheck if you dont want cashier remove customers on pos')\n mobile_responsive = fields.Boolean('Mobile responsive', default=0)\n hide_amount_total = fields.Boolean('Hide amount total', default=1)\n hide_amount_taxes = fields.Boolean('Hide amount taxes', default=1)\n report_no_of_report = fields.Integer(string='No.of Copy Receipt', default=1\n )\n report_signature = fields.Boolean(string='Report Signature', default=1)\n report_product_summary = fields.Boolean(string='Report Product Summary',\n default=1)\n report_product_current_month_date = fields.Boolean(string=\n 'Report This Month', default=1)\n report_order_summary = fields.Boolean(string='Report Order Summary',\n default=1)\n report_order_current_month_date = fields.Boolean(string=\n 'Report Current Month', default=1)\n report_payment_summary = fields.Boolean(string='Report Payment Summary',\n default=1)\n report_payment_current_month_date = fields.Boolean(string=\n 'Payment Current Month', default=1)\n active_product_sort_by = fields.Boolean('Active product sort by', default=1\n )\n default_product_sort_by = fields.Selection([('a_z', 'Sort from A to Z'),\n ('z_a', 'Sort from Z to A'), ('low_price',\n 'Sort from low to high price'), ('high_price',\n 'Sort from high to low price'), ('pos_sequence',\n 'Product pos sequence')], string='Default sort by', default='a_z')\n sale_extra = fields.Boolean('Sale extra', default=1)\n required_add_customer_before_put_product_to_cart = fields.Boolean(\n 'Required add customer first', help=\n 'If you checked on this checkbox, in POS always required cashier add customer the first'\n )\n only_one_time_add_customer = fields.Boolean('Only one time add customer',\n help='Each orders, only one time add customer')\n use_parameters = fields.Boolean('Use parameters', help=\n 'POS need only one time save parameter datas use on POS, and next times no need call backend'\n , default=1)\n time_refresh_parameter = fields.Integer('Time refresh datas (seconds)',\n help='Time for refresh parameters data', default=30)\n\n @api.model\n def switch_mobile_mode(self, config_id, vals):\n if vals.get('mobile_responsive') == True:\n vals['product_view'] = 'box'\n return self.browse(config_id).sudo().write(vals)\n\n @api.multi\n def remove_database(self):\n for config in self:\n sessions = self.env['pos.session'].search([('config_id', '=',\n config.id)])\n for session in sessions:\n self.env['bus.bus'].sendmany([[(self.env.cr.dbname,\n 'pos.indexed_db', session.user_id.id), json.dumps({'db':\n self.env.cr.dbname})]])\n self.env['pos.cache.database'].search([]).unlink()\n self.env['pos.call.log'].search([]).unlink()\n return {'type': 'ir.actions.act_url', 'url': '/pos/web/',\n 'target': 'self'}\n\n @api.multi\n def remove_caches(self):\n for config in self:\n sessions = self.env['pos.session'].search([('config_id', '=',\n config.id)])\n for session in sessions:\n self.env['bus.bus'].sendmany([[(self.env.cr.dbname,\n 'pos.indexed_db', session.user_id.id), json.dumps({'db':\n self.env.cr.dbname})]])\n if session.state != 'closed':\n session.action_pos_session_closing_control()\n return {'type': 'ir.actions.act_url', 'url': '/pos/web/',\n 'target': 'self'}\n\n @api.model\n def store_cached_file(self, datas):\n start = timeit.default_timer()\n _logger.info('==> begin cached_file')\n os.chdir(os.path.dirname(__file__))\n path = os.getcwd()\n file_name = path + '/pos.json'\n if os.path.exists(file_name):\n os.remove(file_name)\n with io.open(file_name, 'w', encoding='utf8') as outfile:\n str_ = json.dumps(datas, indent=4, sort_keys=True, separators=(\n ',', ': '), ensure_ascii=False)\n outfile.write(to_unicode(str_))\n stop = timeit.default_timer()\n _logger.info(stop - start)\n return True\n\n @api.model\n def get_cached_file(self):\n start = timeit.default_timer()\n _logger.info('==> begin get_cached_file')\n os.chdir(os.path.dirname(__file__))\n path = os.getcwd()\n file_name = path + '/pos.json'\n if not os.path.exists(file_name):\n return False\n else:\n with open(file_name) as f:\n datas = json.load(f)\n stop = timeit.default_timer()\n _logger.info(stop - start)\n return datas\n\n def get_fields_by_model(self, model):\n all_fields = self.env[model].fields_get()\n fields_list = []\n for field, value in all_fields.items():\n if field == 'model' or all_fields[field]['type'] in ['one2many',\n 'binary']:\n continue\n else:\n fields_list.append(field)\n return fields_list\n\n @api.model\n def install_data(self, model_name=None, min_id=0, max_id=1999):\n cache_obj = self.env['pos.cache.database'].with_context(prefetch_fields\n =False)\n log_obj = self.env['pos.call.log'].with_context(prefetch_fields=False)\n domain = [('id', '>=', min_id), ('id', '<=', max_id)]\n if model_name == 'product.product':\n domain.append(('available_in_pos', '=', True))\n field_list = cache_obj.get_fields_by_model(model_name)\n self.env.cr.execute(\n \"select id from pos_call_log where min_id=%s and max_id=%s and call_model='%s'\"\n % (min_id, max_id, model_name))\n old_logs = self.env.cr.fetchall()\n datas = None\n if len(old_logs) == 0:\n _logger.info('installing %s from %s to %s' % (model_name,\n min_id, max_id))\n datas = self.env[model_name].with_context(prefetch_fields=False\n ).search_read(domain, field_list)\n version_info = odoo.release.version_info[0]\n if version_info == 12:\n all_fields = self.env[model_name].fields_get()\n for data in datas:\n for field, value in data.items():\n if field == 'model':\n continue\n if all_fields[field] and all_fields[field]['type'] in [\n 'date', 'datetime'] and value:\n data[field] = value.strftime(\n DEFAULT_SERVER_DATETIME_FORMAT)\n vals = {'active': True, 'min_id': min_id, 'max_id': max_id,\n 'call_fields': json.dumps(field_list), 'call_results': json\n .dumps(datas), 'call_model': model_name, 'call_domain':\n json.dumps(domain)}\n log_obj.create(vals)\n else:\n old_log_id = old_logs[0][0]\n old_log = log_obj.browse(old_log_id)\n datas = old_log.call_results\n self.env.cr.commit()\n return datas\n\n @api.onchange('lock_print_invoice_on_pos')\n def _onchange_lock_print_invoice_on_pos(self):\n if self.lock_print_invoice_on_pos == True:\n self.receipt_invoice_number = False\n self.send_invoice_email = True\n else:\n self.receipt_invoice_number = True\n self.send_invoice_email = False\n\n @api.onchange('receipt_invoice_number')\n def _onchange_receipt_invoice_number(self):\n if self.receipt_invoice_number == True:\n self.lock_print_invoice_on_pos = False\n else:\n self.lock_print_invoice_on_pos = True\n\n @api.onchange('pos_auto_invoice')\n def _onchange_pos_auto_invoice(self):\n if self.pos_auto_invoice == True:\n self.iface_invoicing = True\n else:\n self.iface_invoicing = False\n\n @api.onchange('staff_level')\n def on_change_staff_level(self):\n if self.staff_level and self.staff_level == 'manager':\n self.lock_order_printed_receipt = False\n\n @api.multi\n def write(self, vals):\n if vals.get('allow_discount', False) or vals.get('allow_qty', False\n ) or vals.get('allow_price', False):\n vals['allow_numpad'] = True\n if vals.get('expired_days_voucher', None) and vals.get(\n 'expired_days_voucher') < 0:\n raise UserError('Expired days of voucher could not smaller than 0')\n for config in self:\n if vals.get('management_session', False) and not vals.get(\n 'default_cashbox_lines_ids'):\n if (not config.default_cashbox_lines_ids and not config.\n cash_control):\n raise UserError(\n 'Please go to Cash control and add Default Opening')\n res = super(pos_config, self).write(vals)\n for config in self:\n if (config.validate_by_user_id and not config.\n validate_by_user_id.pos_security_pin):\n raise UserError(\n 'Validate user %s have not set pos security pin, please go to Users menu and input security password'\n % config.validate_by_user_id.name)\n if (config.discount_unlock_limit_user_id and not config.\n discount_unlock_limit_user_id.pos_security_pin):\n raise UserError(\n 'User Unlock limit discount: %s ,have not set pos security pin, please go to Users menu and input security password'\n % config.discount_unlock_limit_user_id.name)\n return res\n\n @api.model\n def create(self, vals):\n if vals.get('allow_discount', False) or vals.get('allow_qty', False\n ) or vals.get('allow_price', False):\n vals['allow_numpad'] = True\n if vals.get('expired_days_voucher', 0) < 0:\n raise UserError('Expired days of voucher could not smaller than 0')\n config = super(pos_config, self).create(vals)\n if (config.management_session and not config.\n default_cashbox_lines_ids and not config.cash_control):\n raise UserError('Please go to Cash control and add Default Opening'\n )\n if (config.validate_by_user_id and not config.validate_by_user_id.\n pos_security_pin):\n raise UserError(\n 'Validate user %s have not set pos security pin, please go to Users menu and input security password'\n % config.validate_by_user_id.name)\n if (config.discount_unlock_limit_user_id and not config.\n discount_unlock_limit_user_id.pos_security_pin):\n raise UserError(\n 'User Unlock limit discount: %s ,have not set pos security pin, please go to Users menu and input security password'\n % config.discount_unlock_limit_user_id.name)\n return config\n\n def init_wallet_journal(self):\n Journal = self.env['account.journal']\n user = self.env.user\n wallet_journal = Journal.sudo().search([('code', '=', 'UWJ'), (\n 'company_id', '=', user.company_id.id)])\n if wallet_journal:\n return wallet_journal.sudo().write({'pos_method_type': 'wallet'})\n Account = self.env['account.account']\n wallet_account_old_version = Account.sudo().search([('code', '=',\n 'AUW'), ('company_id', '=', user.company_id.id)])\n if wallet_account_old_version:\n wallet_account = wallet_account_old_version[0]\n else:\n wallet_account = Account.sudo().create({'name':\n 'Account wallet', 'code': 'AUW', 'user_type_id': self.env.\n ref('account.data_account_type_current_assets').id,\n 'company_id': user.company_id.id, 'note':\n 'code \"AUW\" auto give wallet amount of customers'})\n self.env['ir.model.data'].sudo().create({'name': \n 'account_use_wallet' + str(user.company_id.id), 'model':\n 'account.account', 'module': 'pos_retail', 'res_id':\n wallet_account.id, 'noupdate': True})\n wallet_journal_inactive = Journal.sudo().search([('code', '=',\n 'UWJ'), ('company_id', '=', user.company_id.id), (\n 'pos_method_type', '=', 'wallet')])\n if wallet_journal_inactive:\n wallet_journal_inactive.sudo().write({\n 'default_debit_account_id': wallet_account.id,\n 'default_credit_account_id': wallet_account.id,\n 'pos_method_type': 'wallet', 'sequence': 100})\n wallet_journal = wallet_journal_inactive\n else:\n new_sequence = self.env['ir.sequence'].sudo().create({'name': \n 'Account Default Wallet Journal ' + str(user.company_id.id),\n 'padding': 3, 'prefix': 'UW ' + str(user.company_id.id)})\n self.env['ir.model.data'].sudo().create({'name': \n 'journal_sequence' + str(new_sequence.id), 'model':\n 'ir.sequence', 'module': 'pos_retail', 'res_id':\n new_sequence.id, 'noupdate': True})\n wallet_journal = Journal.sudo().create({'name': 'Wallet',\n 'code': 'UWJ', 'type': 'cash', 'pos_method_type': 'wallet',\n 'journal_user': True, 'sequence_id': new_sequence.id,\n 'company_id': user.company_id.id,\n 'default_debit_account_id': wallet_account.id,\n 'default_credit_account_id': wallet_account.id, 'sequence':\n 100})\n self.env['ir.model.data'].sudo().create({'name': \n 'use_wallet_journal_' + str(wallet_journal.id), 'model':\n 'account.journal', 'module': 'pos_retail', 'res_id': int(\n wallet_journal.id), 'noupdate': True})\n config = self\n config.sudo().write({'journal_ids': [(4, wallet_journal.id)]})\n statement = [(0, 0, {'journal_id': wallet_journal.id, 'user_id':\n user.id, 'company_id': user.company_id.id})]\n current_session = config.current_session_id\n current_session.sudo().write({'statement_ids': statement})\n return\n\n def init_voucher_journal(self):\n Journal = self.env['account.journal']\n user = self.env.user\n voucher_journal = Journal.sudo().search([('code', '=', 'VCJ'), (\n 'company_id', '=', user.company_id.id)])\n if voucher_journal:\n return voucher_journal.sudo().write({'pos_method_type': 'voucher'})\n Account = self.env['account.account']\n voucher_account_old_version = Account.sudo().search([('code', '=',\n 'AVC'), ('company_id', '=', user.company_id.id)])\n if voucher_account_old_version:\n voucher_account = voucher_account_old_version[0]\n else:\n voucher_account = Account.sudo().create({'name':\n 'Account voucher', 'code': 'AVC', 'user_type_id': self.env.\n ref('account.data_account_type_current_assets').id,\n 'company_id': user.company_id.id, 'note':\n 'code \"AVC\" auto give voucher histories of customers'})\n self.env['ir.model.data'].sudo().create({'name': \n 'account_voucher' + str(user.company_id.id), 'model':\n 'account.account', 'module': 'pos_retail', 'res_id':\n voucher_account.id, 'noupdate': True})\n voucher_journal = Journal.sudo().search([('code', '=', 'VCJ'), (\n 'company_id', '=', user.company_id.id), ('pos_method_type', '=',\n 'voucher')])\n if voucher_journal:\n voucher_journal[0].sudo().write({'voucher': True,\n 'default_debit_account_id': voucher_account.id,\n 'default_credit_account_id': voucher_account.id,\n 'pos_method_type': 'voucher', 'sequence': 101})\n voucher_journal = voucher_journal[0]\n else:\n new_sequence = self.env['ir.sequence'].sudo().create({'name': \n 'Account Voucher ' + str(user.company_id.id), 'padding': 3,\n 'prefix': 'AVC ' + str(user.company_id.id)})\n self.env['ir.model.data'].sudo().create({'name': \n 'journal_sequence' + str(new_sequence.id), 'model':\n 'ir.sequence', 'module': 'pos_retail', 'res_id':\n new_sequence.id, 'noupdate': True})\n voucher_journal = Journal.sudo().create({'name': 'Voucher',\n 'code': 'VCJ', 'type': 'cash', 'pos_method_type': 'voucher',\n 'journal_user': True, 'sequence_id': new_sequence.id,\n 'company_id': user.company_id.id,\n 'default_debit_account_id': voucher_account.id,\n 'default_credit_account_id': voucher_account.id, 'sequence':\n 101})\n self.env['ir.model.data'].sudo().create({'name': \n 'journal_voucher_' + str(voucher_journal.id), 'model':\n 'account.journal', 'module': 'pos_retail', 'res_id': int(\n voucher_journal.id), 'noupdate': True})\n config = self\n config.sudo().write({'journal_ids': [(4, voucher_journal.id)]})\n statement = [(0, 0, {'journal_id': voucher_journal.id, 'user_id':\n user.id, 'company_id': user.company_id.id})]\n current_session = config.current_session_id\n current_session.sudo().write({'statement_ids': statement})\n return\n\n def init_credit_journal(self):\n Journal = self.env['account.journal']\n user = self.env.user\n voucher_journal = Journal.sudo().search([('code', '=', 'CJ'), (\n 'company_id', '=', user.company_id.id)])\n if voucher_journal:\n return voucher_journal.sudo().write({'pos_method_type': 'credit'})\n Account = self.env['account.account']\n credit_account_old_version = Account.sudo().search([('code', '=',\n 'ACJ'), ('company_id', '=', user.company_id.id)])\n if credit_account_old_version:\n credit_account = credit_account_old_version[0]\n else:\n credit_account = Account.sudo().create({'name':\n 'Credit Account', 'code': 'CA', 'user_type_id': self.env.\n ref('account.data_account_type_current_assets').id,\n 'company_id': user.company_id.id, 'note':\n 'code \"CA\" give credit payment customer'})\n self.env['ir.model.data'].sudo().create({'name': \n 'account_credit' + str(user.company_id.id), 'model':\n 'account.account', 'module': 'pos_retail', 'res_id':\n credit_account.id, 'noupdate': True})\n credit_journal = Journal.sudo().search([('code', '=', 'CJ'), (\n 'company_id', '=', user.company_id.id), ('pos_method_type', '=',\n 'credit')])\n if credit_journal:\n credit_journal[0].sudo().write({'credit': True,\n 'default_debit_account_id': credit_account.id,\n 'default_credit_account_id': credit_account.id,\n 'pos_method_type': 'credit', 'sequence': 102})\n credit_journal = credit_journal[0]\n else:\n new_sequence = self.env['ir.sequence'].sudo().create({'name': \n 'Credit account ' + str(user.company_id.id), 'padding': 3,\n 'prefix': 'CA ' + str(user.company_id.id)})\n self.env['ir.model.data'].sudo().create({'name': \n 'journal_sequence' + str(new_sequence.id), 'model':\n 'ir.sequence', 'module': 'pos_retail', 'res_id':\n new_sequence.id, 'noupdate': True})\n credit_journal = Journal.sudo().create({'name':\n 'Customer Credit', 'code': 'CJ', 'type': 'cash',\n 'pos_method_type': 'credit', 'journal_user': True,\n 'sequence_id': new_sequence.id, 'company_id': user.\n company_id.id, 'default_debit_account_id': credit_account.\n id, 'default_credit_account_id': credit_account.id,\n 'sequence': 102})\n self.env['ir.model.data'].sudo().create({'name': \n 'credit_journal_' + str(credit_journal.id), 'model':\n 'account.journal', 'module': 'pos_retail', 'res_id': int(\n credit_journal.id), 'noupdate': True})\n config = self\n config.sudo().write({'journal_ids': [(4, credit_journal.id)]})\n statement = [(0, 0, {'journal_id': credit_journal.id, 'user_id':\n user.id, 'company_id': user.company_id.id})]\n current_session = config.current_session_id\n current_session.sudo().write({'statement_ids': statement})\n return True\n\n def init_return_order_journal(self):\n Journal = self.env['account.journal']\n user = self.env.user\n return_journal = Journal.sudo().search([('code', '=', 'ROJ'), (\n 'company_id', '=', user.company_id.id)])\n if return_journal:\n return return_journal.sudo().write({'pos_method_type': 'return'})\n Account = self.env['account.account']\n return_account_old_version = Account.sudo().search([('code', '=',\n 'ARO'), ('company_id', '=', user.company_id.id)])\n if return_account_old_version:\n return_account = return_account_old_version[0]\n else:\n return_account = Account.sudo().create({'name':\n 'Return Order Account', 'code': 'ARO', 'user_type_id': self\n .env.ref('account.data_account_type_current_assets').id,\n 'company_id': user.company_id.id, 'note':\n 'code \"ARO\" give return order from customer'})\n self.env['ir.model.data'].sudo().create({'name': \n 'return_account' + str(user.company_id.id), 'model':\n 'account.account', 'module': 'pos_retail', 'res_id':\n return_account.id, 'noupdate': True})\n return_journal = Journal.sudo().search([('code', '=', 'ROJ'), (\n 'company_id', '=', user.company_id.id)])\n if return_journal:\n return_journal[0].sudo().write({'default_debit_account_id':\n return_account.id, 'default_credit_account_id':\n return_account.id, 'pos_method_type': 'return'})\n return_journal = return_journal[0]\n else:\n new_sequence = self.env['ir.sequence'].sudo().create({'name': \n 'Return account ' + str(user.company_id.id), 'padding': 3,\n 'prefix': 'RA ' + str(user.company_id.id)})\n self.env['ir.model.data'].sudo().create({'name': \n 'journal_sequence' + str(new_sequence.id), 'model':\n 'ir.sequence', 'module': 'pos_retail', 'res_id':\n new_sequence.id, 'noupdate': True})\n return_journal = Journal.sudo().create({'name':\n 'Return Order Customer', 'code': 'ROJ', 'type': 'cash',\n 'pos_method_type': 'return', 'journal_user': True,\n 'sequence_id': new_sequence.id, 'company_id': user.\n company_id.id, 'default_debit_account_id': return_account.\n id, 'default_credit_account_id': return_account.id,\n 'sequence': 103})\n self.env['ir.model.data'].sudo().create({'name': \n 'return_journal_' + str(return_journal.id), 'model':\n 'account.journal', 'module': 'pos_retail', 'res_id': int(\n return_journal.id), 'noupdate': True})\n config = self\n config.sudo().write({'journal_ids': [(4, return_journal.id)]})\n statement = [(0, 0, {'journal_id': return_journal.id, 'user_id':\n user.id, 'company_id': user.company_id.id})]\n current_session = config.current_session_id\n current_session.sudo().write({'statement_ids': statement})\n return True\n\n def init_rounding_journal(self):\n Journal = self.env['account.journal']\n Account = self.env['account.account']\n user = self.env.user\n rounding_journal = Journal.sudo().search([('code', '=', 'RDJ'), (\n 'company_id', '=', user.company_id.id)])\n if rounding_journal:\n return rounding_journal.sudo().write({'pos_method_type':\n 'rounding'})\n rounding_account_old_version = Account.sudo().search([('code', '=',\n 'AAR'), ('company_id', '=', user.company_id.id)])\n if rounding_account_old_version:\n rounding_account = rounding_account_old_version[0]\n else:\n _logger.info('rounding_account have not')\n rounding_account = Account.sudo().create({'name':\n 'Rounding Account', 'code': 'AAR', 'user_type_id': self.env\n .ref('account.data_account_type_current_assets').id,\n 'company_id': user.company_id.id, 'note':\n 'code \"AAR\" give rounding pos order'})\n self.env['ir.model.data'].sudo().create({'name': \n 'rounding_account' + str(user.company_id.id), 'model':\n 'account.account', 'module': 'pos_retail', 'res_id':\n rounding_account.id, 'noupdate': True})\n rounding_journal = Journal.sudo().search([('pos_method_type', '=',\n 'rounding'), ('company_id', '=', user.company_id.id)])\n if rounding_journal:\n rounding_journal[0].sudo().write({'name': 'Rounding',\n 'default_debit_account_id': rounding_account.id,\n 'default_credit_account_id': rounding_account.id,\n 'pos_method_type': 'rounding', 'code': 'RDJ'})\n rounding_journal = rounding_journal[0]\n else:\n new_sequence = self.env['ir.sequence'].sudo().create({'name': \n 'rounding account ' + str(user.company_id.id), 'padding': 3,\n 'prefix': 'RA ' + str(user.company_id.id)})\n self.env['ir.model.data'].sudo().create({'name': \n 'journal_sequence' + str(new_sequence.id), 'model':\n 'ir.sequence', 'module': 'pos_retail', 'res_id':\n new_sequence.id, 'noupdate': True})\n rounding_journal = Journal.sudo().create({'name': 'Rounding',\n 'code': 'RDJ', 'type': 'cash', 'pos_method_type':\n 'rounding', 'journal_user': True, 'sequence_id':\n new_sequence.id, 'company_id': user.company_id.id,\n 'default_debit_account_id': rounding_account.id,\n 'default_credit_account_id': rounding_account.id,\n 'sequence': 103})\n self.env['ir.model.data'].sudo().create({'name': \n 'rounding_journal_' + str(rounding_journal.id), 'model':\n 'account.journal', 'module': 'pos_retail', 'res_id': int(\n rounding_journal.id), 'noupdate': True})\n config = self\n config.sudo().write({'journal_ids': [(4, rounding_journal.id)]})\n statement = [(0, 0, {'journal_id': rounding_journal.id, 'user_id':\n user.id, 'company_id': user.company_id.id})]\n current_session = config.current_session_id\n current_session.sudo().write({'statement_ids': statement})\n return True\n\n @api.multi\n def open_ui(self):\n res = super(pos_config, self).open_ui()\n self.init_voucher_journal()\n self.init_wallet_journal()\n self.init_credit_journal()\n self.init_return_order_journal()\n self.init_rounding_journal()\n return res\n\n @api.multi\n def open_session_cb(self):\n res = super(pos_config, self).open_session_cb()\n self.init_voucher_journal()\n self.init_wallet_journal()\n self.init_credit_journal()\n self.init_return_order_journal()\n self.init_rounding_journal()\n return res\n",
"<import token>\ntry:\n to_unicode = unicode\nexcept NameError:\n to_unicode = str\n_logger = logging.getLogger(__name__)\n\n\nclass pos_config_image(models.Model):\n _name = 'pos.config.image'\n _description = 'Image show to customer screen'\n name = fields.Char('Title', required=1)\n image = fields.Binary('Image', required=1)\n config_id = fields.Many2one('pos.config', 'POS config', required=1)\n description = fields.Text('Description')\n\n\nclass pos_config(models.Model):\n _inherit = 'pos.config'\n user_id = fields.Many2one('res.users', 'Assigned to')\n config_access_right = fields.Boolean('Config access right', default=1)\n allow_discount = fields.Boolean('Change discount', default=1)\n allow_qty = fields.Boolean('Change quantity', default=1)\n allow_price = fields.Boolean('Change price', default=1)\n allow_remove_line = fields.Boolean('Remove line', default=1)\n allow_numpad = fields.Boolean('Display numpad', default=1)\n allow_payment = fields.Boolean('Display payment', default=1)\n allow_customer = fields.Boolean('Choice customer', default=1)\n allow_add_order = fields.Boolean('New order', default=1)\n allow_remove_order = fields.Boolean('Remove order', default=1)\n allow_add_product = fields.Boolean('Add line', default=1)\n allow_lock_screen = fields.Boolean('Lock screen', default=0, help=\n 'When pos sessions start, cashiers required open POS viva pos pass pin (Setting/Users)'\n )\n display_point_receipt = fields.Boolean('Display point / receipt')\n loyalty_id = fields.Many2one('pos.loyalty', 'Loyalty', domain=[('state',\n '=', 'running')])\n promotion_ids = fields.Many2many('pos.promotion',\n 'pos_config_promotion_rel', 'config_id', 'promotion_id', string=\n 'Promotion programs')\n promotion_manual_select = fields.Boolean('Promotion manual choice',\n default=0)\n create_purchase_order = fields.Boolean('Create PO', default=0)\n create_purchase_order_required_signature = fields.Boolean(\n 'Required signature', default=0)\n purchase_order_state = fields.Selection([('confirm_order',\n 'Auto confirm'), ('confirm_picking', 'Auto delivery'), (\n 'confirm_invoice', 'Auto invoice')], 'PO state', help=\n 'This is state of purchase order will process to', default=\n 'confirm_invoice')\n sync_sale_order = fields.Boolean('Sync sale orders', default=0)\n sale_order = fields.Boolean('Create Sale order', default=0)\n sale_order_auto_confirm = fields.Boolean('Auto confirm', default=0)\n sale_order_auto_invoice = fields.Boolean('Auto paid', default=0)\n sale_order_auto_delivery = fields.Boolean('Auto delivery', default=0)\n pos_orders_management = fields.Boolean('POS order management', default=0)\n pos_order_period_return_days = fields.Float('Return period days', help=\n 'this is period time for customer can return order', default=30)\n display_return_days_receipt = fields.Boolean('Display return days receipt',\n default=0)\n sync_pricelist = fields.Boolean('Sync prices list', default=0)\n display_onhand = fields.Boolean('Show qty available product', default=1,\n help='Display quantity on hand all products on pos screen')\n large_stocks = fields.Boolean('Large stock', help=\n 'If count products bigger than 100,000 rows, please check it')\n allow_order_out_of_stock = fields.Boolean('Allow out-of-stock', default\n =1, help='If checked, allow cashier can add product have out of stock')\n allow_of_stock_approve_by_admin = fields.Boolean('Approve allow of stock',\n help='Allow manager approve allow of stock')\n print_voucher = fields.Boolean('Print vouchers', help=\n 'Reprint last vouchers', default=1)\n scan_voucher = fields.Boolean('Scan voucher', default=0)\n expired_days_voucher = fields.Integer('Expired days of voucher',\n default=30, help=\n 'Total days keep voucher can use, if out of period days from create date, voucher will expired'\n )\n sync_multi_session = fields.Boolean('Sync multi session', default=0)\n bus_id = fields.Many2one('pos.bus', string='Branch/store')\n display_person_add_line = fields.Boolean('Display information line',\n default=0, help=\n 'When you checked, on pos order lines screen, will display information person created order (lines) Eg: create date, updated date ..'\n )\n quickly_payment = fields.Boolean('Quickly payment', default=0)\n internal_transfer = fields.Boolean('Internal transfer', default=0, help\n ='Go Inventory and active multi warehouse and location')\n internal_transfer_auto_validate = fields.Boolean(\n 'Internal transfer auto validate', default=0)\n discount = fields.Boolean('Global discount', default=0)\n discount_ids = fields.Many2many('pos.global.discount',\n 'pos_config_pos_global_discount_rel', 'config_id', 'discount_id',\n 'Global discounts')\n is_customer_screen = fields.Boolean('Is customer screen')\n delay = fields.Integer('Delay time', default=3000)\n slogan = fields.Char('Slogan', help=\n 'This is message will display on screen of customer')\n image_ids = fields.One2many('pos.config.image', 'config_id', 'Images')\n tooltip = fields.Boolean('Show information of product', default=0)\n tooltip_show_last_price = fields.Boolean('Show last price of product',\n help='Show last price of items of customer have bought before',\n default=0)\n tooltip_show_minimum_sale_price = fields.Boolean(\n 'Show min of product sale price', help=\n 'Show minimum sale price of product', default=0)\n discount_limit = fields.Boolean('Discount limit', default=0)\n discount_limit_amount = fields.Float('Discount limit amount', default=10)\n discount_each_line = fields.Boolean('Discount each line')\n discount_unlock_limit = fields.Boolean('Manager can unlock limit')\n discount_unlock_limit_user_id = fields.Many2one('res.users',\n 'User unlock limit amount')\n multi_currency = fields.Boolean('Multi currency', default=0)\n multi_currency_update_rate = fields.Boolean('Update rate', default=0)\n notify_alert = fields.Boolean('Notify alert', help=\n 'Turn on/off notification alert on POS sessions.', default=0)\n return_products = fields.Boolean('Return orders', help=\n 'Allow cashier return orders, return products', default=0)\n receipt_without_payment_template = fields.Selection([('none', 'None'),\n ('display_price', 'Display price'), ('not_display_price',\n 'Not display price')], default='not_display_price', string=\n 'Receipt without payment template')\n lock_order_printed_receipt = fields.Boolean('Lock order printed receipt',\n default=0)\n staff_level = fields.Selection([('manual', 'Manual config'), (\n 'marketing', 'Marketing'), ('waiter', 'Waiter'), ('cashier',\n 'Cashier'), ('manager', 'Manager')], string='Staff level', default=\n 'manual')\n validate_payment = fields.Boolean('Validate payment')\n validate_remove_order = fields.Boolean('Validate remove order')\n validate_change_minus = fields.Boolean('Validate pressed +/-')\n validate_quantity_change = fields.Boolean('Validate quantity change')\n validate_price_change = fields.Boolean('Validate price change')\n validate_discount_change = fields.Boolean('Validate discount change')\n validate_close_session = fields.Boolean('Validate close session')\n validate_by_user_id = fields.Many2one('res.users', 'Validate by admin')\n apply_validate_return_mode = fields.Boolean('Validate return mode',\n help='If checked, only applied validate when return order', default=1)\n print_user_card = fields.Boolean('Print user card')\n product_operation = fields.Boolean('Product Operation', default=0, help\n ='Allow cashiers add pos categories and products on pos screen')\n quickly_payment_full = fields.Boolean('Quickly payment full')\n quickly_payment_full_journal_id = fields.Many2one('account.journal',\n 'Payment mode', domain=[('journal_user', '=', True)])\n daily_report = fields.Boolean('Daily report', default=0)\n note_order = fields.Boolean('Note order', default=0)\n note_orderline = fields.Boolean('Note order line', default=0)\n signature_order = fields.Boolean('Signature order', default=0)\n quickly_buttons = fields.Boolean('Quickly Actions', default=0)\n display_amount_discount = fields.Boolean('Display amount discount',\n default=0)\n booking_orders = fields.Boolean('Booking orders', default=0)\n booking_orders_required_cashier_signature = fields.Boolean(\n 'Book order required sessions signature', help=\n 'Checked if need required pos seller signature', default=0)\n booking_orders_alert = fields.Boolean('Alert when new order coming',\n default=0)\n delivery_orders = fields.Boolean('Delivery orders', help=\n 'Pos clients can get booking orders and delivery orders', default=0)\n booking_orders_display_shipping_receipt = fields.Boolean(\n 'Display shipping on receipt', default=0)\n display_tax_orderline = fields.Boolean('Display tax orderline', default=0)\n display_tax_receipt = fields.Boolean('Display tax receipt', default=0)\n display_fiscal_position_receipt = fields.Boolean(\n 'Display fiscal position on receipt', default=0)\n display_image_orderline = fields.Boolean('Display image order line',\n default=0)\n display_image_receipt = fields.Boolean('Display image receipt', default=0)\n duplicate_receipt = fields.Boolean('Duplicate Receipt')\n print_number = fields.Integer('Print number', help=\n 'How many number receipt need to print at printer ?', default=0)\n lock_session = fields.Boolean('Lock session', default=0)\n category_wise_receipt = fields.Boolean('Category wise receipt', default=0)\n management_invoice = fields.Boolean('Management Invoice', default=0)\n invoice_journal_ids = fields.Many2many('account.journal',\n 'pos_config_invoice_journal_rel', 'config_id', 'journal_id',\n 'Accounting Invoice Journal', domain=[('type', '=', 'sale')], help=\n 'Accounting journal use for create invoices.')\n send_invoice_email = fields.Boolean('Send email invoice', help=\n 'Help cashier send invoice to email of customer', default=0)\n lock_print_invoice_on_pos = fields.Boolean('Lock print invoice', help=\n 'Lock print pdf invoice when clicked button invoice', default=0)\n pos_auto_invoice = fields.Boolean('Auto create invoice', help=\n 'Automatic create invoice if order have client', default=0)\n receipt_invoice_number = fields.Boolean('Add invoice on receipt', help=\n 'Show invoice number on receipt header', default=0)\n receipt_customer_vat = fields.Boolean('Add vat customer on receipt',\n help='Show customer VAT(TIN) on receipt header', default=0)\n auto_register_payment = fields.Boolean('Auto invocie register payment',\n default=0)\n fiscal_position_auto_detect = fields.Boolean('Fiscal position auto detect',\n default=0)\n display_sale_price_within_tax = fields.Boolean(\n 'Display sale price within tax', default=0)\n display_cost_price = fields.Boolean('Display product cost price', default=0\n )\n display_product_ref = fields.Boolean('Display product ref', default=0)\n multi_location = fields.Boolean('Multi location', default=0)\n product_view = fields.Selection([('box', 'Box view'), ('list',\n 'List view')], default='box', string='View of products screen',\n required=1)\n ticket_font_size = fields.Integer('Ticket font size', default=12)\n customer_default_id = fields.Many2one('res.partner', 'Customer default')\n medical_insurance = fields.Boolean('Medical insurance', default=0)\n set_guest = fields.Boolean('Set guest', default=0)\n reset_sequence = fields.Boolean('Reset sequence order', default=0)\n update_tax = fields.Boolean('Modify tax', default=0, help=\n 'Cashier can change tax of order line')\n subtotal_tax_included = fields.Boolean('Show Tax-Included Prices', help\n ='When checked, subtotal of line will display amount have tax-included'\n )\n cash_out = fields.Boolean('Take money out', default=0, help=\n 'Allow cashiers take money out')\n cash_in = fields.Boolean('Push money in', default=0, help=\n 'Allow cashiers input money in')\n min_length_search = fields.Integer('Min character length search',\n default=3, help=\n 'Allow auto suggestion items when cashiers input on search box')\n review_receipt_before_paid = fields.Boolean('Review receipt before paid',\n help='Show receipt before paid order', default=1)\n keyboard_event = fields.Boolean('Keyboard event', default=0, help=\n 'Allow cashiers use shortcut keyboard')\n multi_variant = fields.Boolean('Multi variant', default=0, help=\n 'Allow cashiers change variant of order lines on pos screen')\n switch_user = fields.Boolean('Switch user', default=0, help=\n 'Allow cashiers switch to another cashier')\n change_unit_of_measure = fields.Boolean('Change unit of measure',\n default=0, help='Allow cashiers change unit of measure of order lines')\n print_last_order = fields.Boolean('Print last receipt', default=0, help\n ='Allow cashiers print last receipt')\n close_session = fields.Boolean('Close session', help=\n 'When cashiers click close pos, auto log out of system', default=0)\n display_image_product = fields.Boolean('Display image product', default\n =1, help='Allow hide/display product images on pos screen')\n printer_on_off = fields.Boolean('On/Off printer', help=\n 'Help cashier turn on/off printer viva posbox', default=0)\n check_duplicate_email = fields.Boolean('Check duplicate email', default=0)\n check_duplicate_phone = fields.Boolean('Check duplicate phone', default=0)\n hide_country = fields.Boolean('Hide country', default=0)\n hide_barcode = fields.Boolean('Hide barcode', default=0)\n hide_tax = fields.Boolean('Hide tax', default=0)\n hide_pricelist = fields.Boolean('Hide pricelists', default=0)\n hide_supplier = fields.Boolean('Hide suppiers', default=1)\n auto_remove_line = fields.Boolean('Auto remove line', default=1, help=\n 'When cashier set quantity of line to 0, line auto remove not keep line with qty is 0'\n )\n chat = fields.Boolean('Chat message', default=0, help=\n 'Allow chat, discuss between pos sessions')\n add_tags = fields.Boolean('Add tags line', default=0, help=\n 'Allow cashiers add tags to order lines')\n add_notes = fields.Boolean('Add notes line', default=0, help=\n 'Allow cashiers add notes to order lines')\n add_sale_person = fields.Boolean('Add sale person', default=0)\n logo = fields.Binary('Logo of store')\n paid_full = fields.Boolean('Allow paid full', default=0, help=\n 'Allow cashiers click one button, do payment full order')\n paid_partial = fields.Boolean('Allow partial payment', default=0, help=\n 'Allow cashiers do partial payment')\n backup = fields.Boolean('Backup/Restore orders', default=0, help=\n 'Allow cashiers backup and restore orders on pos screen')\n backup_orders = fields.Text('Backup orders')\n change_logo = fields.Boolean('Change logo', default=1, help=\n 'Allow cashiers change logo of shop on pos screen')\n management_session = fields.Boolean('Management session', default=0)\n barcode_receipt = fields.Boolean('Barcode receipt', default=0)\n hide_mobile = fields.Boolean('Hide mobile', default=1)\n hide_phone = fields.Boolean('Hide phone', default=1)\n hide_email = fields.Boolean('Hide email', default=1)\n update_client = fields.Boolean('Update client', help=\n 'Uncheck if you dont want cashier change customer information on pos')\n add_client = fields.Boolean('Add client', help=\n 'Uncheck if you dont want cashier add new customers on pos')\n remove_client = fields.Boolean('Remove client', help=\n 'Uncheck if you dont want cashier remove customers on pos')\n mobile_responsive = fields.Boolean('Mobile responsive', default=0)\n hide_amount_total = fields.Boolean('Hide amount total', default=1)\n hide_amount_taxes = fields.Boolean('Hide amount taxes', default=1)\n report_no_of_report = fields.Integer(string='No.of Copy Receipt', default=1\n )\n report_signature = fields.Boolean(string='Report Signature', default=1)\n report_product_summary = fields.Boolean(string='Report Product Summary',\n default=1)\n report_product_current_month_date = fields.Boolean(string=\n 'Report This Month', default=1)\n report_order_summary = fields.Boolean(string='Report Order Summary',\n default=1)\n report_order_current_month_date = fields.Boolean(string=\n 'Report Current Month', default=1)\n report_payment_summary = fields.Boolean(string='Report Payment Summary',\n default=1)\n report_payment_current_month_date = fields.Boolean(string=\n 'Payment Current Month', default=1)\n active_product_sort_by = fields.Boolean('Active product sort by', default=1\n )\n default_product_sort_by = fields.Selection([('a_z', 'Sort from A to Z'),\n ('z_a', 'Sort from Z to A'), ('low_price',\n 'Sort from low to high price'), ('high_price',\n 'Sort from high to low price'), ('pos_sequence',\n 'Product pos sequence')], string='Default sort by', default='a_z')\n sale_extra = fields.Boolean('Sale extra', default=1)\n required_add_customer_before_put_product_to_cart = fields.Boolean(\n 'Required add customer first', help=\n 'If you checked on this checkbox, in POS always required cashier add customer the first'\n )\n only_one_time_add_customer = fields.Boolean('Only one time add customer',\n help='Each orders, only one time add customer')\n use_parameters = fields.Boolean('Use parameters', help=\n 'POS need only one time save parameter datas use on POS, and next times no need call backend'\n , default=1)\n time_refresh_parameter = fields.Integer('Time refresh datas (seconds)',\n help='Time for refresh parameters data', default=30)\n\n @api.model\n def switch_mobile_mode(self, config_id, vals):\n if vals.get('mobile_responsive') == True:\n vals['product_view'] = 'box'\n return self.browse(config_id).sudo().write(vals)\n\n @api.multi\n def remove_database(self):\n for config in self:\n sessions = self.env['pos.session'].search([('config_id', '=',\n config.id)])\n for session in sessions:\n self.env['bus.bus'].sendmany([[(self.env.cr.dbname,\n 'pos.indexed_db', session.user_id.id), json.dumps({'db':\n self.env.cr.dbname})]])\n self.env['pos.cache.database'].search([]).unlink()\n self.env['pos.call.log'].search([]).unlink()\n return {'type': 'ir.actions.act_url', 'url': '/pos/web/',\n 'target': 'self'}\n\n @api.multi\n def remove_caches(self):\n for config in self:\n sessions = self.env['pos.session'].search([('config_id', '=',\n config.id)])\n for session in sessions:\n self.env['bus.bus'].sendmany([[(self.env.cr.dbname,\n 'pos.indexed_db', session.user_id.id), json.dumps({'db':\n self.env.cr.dbname})]])\n if session.state != 'closed':\n session.action_pos_session_closing_control()\n return {'type': 'ir.actions.act_url', 'url': '/pos/web/',\n 'target': 'self'}\n\n @api.model\n def store_cached_file(self, datas):\n start = timeit.default_timer()\n _logger.info('==> begin cached_file')\n os.chdir(os.path.dirname(__file__))\n path = os.getcwd()\n file_name = path + '/pos.json'\n if os.path.exists(file_name):\n os.remove(file_name)\n with io.open(file_name, 'w', encoding='utf8') as outfile:\n str_ = json.dumps(datas, indent=4, sort_keys=True, separators=(\n ',', ': '), ensure_ascii=False)\n outfile.write(to_unicode(str_))\n stop = timeit.default_timer()\n _logger.info(stop - start)\n return True\n\n @api.model\n def get_cached_file(self):\n start = timeit.default_timer()\n _logger.info('==> begin get_cached_file')\n os.chdir(os.path.dirname(__file__))\n path = os.getcwd()\n file_name = path + '/pos.json'\n if not os.path.exists(file_name):\n return False\n else:\n with open(file_name) as f:\n datas = json.load(f)\n stop = timeit.default_timer()\n _logger.info(stop - start)\n return datas\n\n def get_fields_by_model(self, model):\n all_fields = self.env[model].fields_get()\n fields_list = []\n for field, value in all_fields.items():\n if field == 'model' or all_fields[field]['type'] in ['one2many',\n 'binary']:\n continue\n else:\n fields_list.append(field)\n return fields_list\n\n @api.model\n def install_data(self, model_name=None, min_id=0, max_id=1999):\n cache_obj = self.env['pos.cache.database'].with_context(prefetch_fields\n =False)\n log_obj = self.env['pos.call.log'].with_context(prefetch_fields=False)\n domain = [('id', '>=', min_id), ('id', '<=', max_id)]\n if model_name == 'product.product':\n domain.append(('available_in_pos', '=', True))\n field_list = cache_obj.get_fields_by_model(model_name)\n self.env.cr.execute(\n \"select id from pos_call_log where min_id=%s and max_id=%s and call_model='%s'\"\n % (min_id, max_id, model_name))\n old_logs = self.env.cr.fetchall()\n datas = None\n if len(old_logs) == 0:\n _logger.info('installing %s from %s to %s' % (model_name,\n min_id, max_id))\n datas = self.env[model_name].with_context(prefetch_fields=False\n ).search_read(domain, field_list)\n version_info = odoo.release.version_info[0]\n if version_info == 12:\n all_fields = self.env[model_name].fields_get()\n for data in datas:\n for field, value in data.items():\n if field == 'model':\n continue\n if all_fields[field] and all_fields[field]['type'] in [\n 'date', 'datetime'] and value:\n data[field] = value.strftime(\n DEFAULT_SERVER_DATETIME_FORMAT)\n vals = {'active': True, 'min_id': min_id, 'max_id': max_id,\n 'call_fields': json.dumps(field_list), 'call_results': json\n .dumps(datas), 'call_model': model_name, 'call_domain':\n json.dumps(domain)}\n log_obj.create(vals)\n else:\n old_log_id = old_logs[0][0]\n old_log = log_obj.browse(old_log_id)\n datas = old_log.call_results\n self.env.cr.commit()\n return datas\n\n @api.onchange('lock_print_invoice_on_pos')\n def _onchange_lock_print_invoice_on_pos(self):\n if self.lock_print_invoice_on_pos == True:\n self.receipt_invoice_number = False\n self.send_invoice_email = True\n else:\n self.receipt_invoice_number = True\n self.send_invoice_email = False\n\n @api.onchange('receipt_invoice_number')\n def _onchange_receipt_invoice_number(self):\n if self.receipt_invoice_number == True:\n self.lock_print_invoice_on_pos = False\n else:\n self.lock_print_invoice_on_pos = True\n\n @api.onchange('pos_auto_invoice')\n def _onchange_pos_auto_invoice(self):\n if self.pos_auto_invoice == True:\n self.iface_invoicing = True\n else:\n self.iface_invoicing = False\n\n @api.onchange('staff_level')\n def on_change_staff_level(self):\n if self.staff_level and self.staff_level == 'manager':\n self.lock_order_printed_receipt = False\n\n @api.multi\n def write(self, vals):\n if vals.get('allow_discount', False) or vals.get('allow_qty', False\n ) or vals.get('allow_price', False):\n vals['allow_numpad'] = True\n if vals.get('expired_days_voucher', None) and vals.get(\n 'expired_days_voucher') < 0:\n raise UserError('Expired days of voucher could not smaller than 0')\n for config in self:\n if vals.get('management_session', False) and not vals.get(\n 'default_cashbox_lines_ids'):\n if (not config.default_cashbox_lines_ids and not config.\n cash_control):\n raise UserError(\n 'Please go to Cash control and add Default Opening')\n res = super(pos_config, self).write(vals)\n for config in self:\n if (config.validate_by_user_id and not config.\n validate_by_user_id.pos_security_pin):\n raise UserError(\n 'Validate user %s have not set pos security pin, please go to Users menu and input security password'\n % config.validate_by_user_id.name)\n if (config.discount_unlock_limit_user_id and not config.\n discount_unlock_limit_user_id.pos_security_pin):\n raise UserError(\n 'User Unlock limit discount: %s ,have not set pos security pin, please go to Users menu and input security password'\n % config.discount_unlock_limit_user_id.name)\n return res\n\n @api.model\n def create(self, vals):\n if vals.get('allow_discount', False) or vals.get('allow_qty', False\n ) or vals.get('allow_price', False):\n vals['allow_numpad'] = True\n if vals.get('expired_days_voucher', 0) < 0:\n raise UserError('Expired days of voucher could not smaller than 0')\n config = super(pos_config, self).create(vals)\n if (config.management_session and not config.\n default_cashbox_lines_ids and not config.cash_control):\n raise UserError('Please go to Cash control and add Default Opening'\n )\n if (config.validate_by_user_id and not config.validate_by_user_id.\n pos_security_pin):\n raise UserError(\n 'Validate user %s have not set pos security pin, please go to Users menu and input security password'\n % config.validate_by_user_id.name)\n if (config.discount_unlock_limit_user_id and not config.\n discount_unlock_limit_user_id.pos_security_pin):\n raise UserError(\n 'User Unlock limit discount: %s ,have not set pos security pin, please go to Users menu and input security password'\n % config.discount_unlock_limit_user_id.name)\n return config\n\n def init_wallet_journal(self):\n Journal = self.env['account.journal']\n user = self.env.user\n wallet_journal = Journal.sudo().search([('code', '=', 'UWJ'), (\n 'company_id', '=', user.company_id.id)])\n if wallet_journal:\n return wallet_journal.sudo().write({'pos_method_type': 'wallet'})\n Account = self.env['account.account']\n wallet_account_old_version = Account.sudo().search([('code', '=',\n 'AUW'), ('company_id', '=', user.company_id.id)])\n if wallet_account_old_version:\n wallet_account = wallet_account_old_version[0]\n else:\n wallet_account = Account.sudo().create({'name':\n 'Account wallet', 'code': 'AUW', 'user_type_id': self.env.\n ref('account.data_account_type_current_assets').id,\n 'company_id': user.company_id.id, 'note':\n 'code \"AUW\" auto give wallet amount of customers'})\n self.env['ir.model.data'].sudo().create({'name': \n 'account_use_wallet' + str(user.company_id.id), 'model':\n 'account.account', 'module': 'pos_retail', 'res_id':\n wallet_account.id, 'noupdate': True})\n wallet_journal_inactive = Journal.sudo().search([('code', '=',\n 'UWJ'), ('company_id', '=', user.company_id.id), (\n 'pos_method_type', '=', 'wallet')])\n if wallet_journal_inactive:\n wallet_journal_inactive.sudo().write({\n 'default_debit_account_id': wallet_account.id,\n 'default_credit_account_id': wallet_account.id,\n 'pos_method_type': 'wallet', 'sequence': 100})\n wallet_journal = wallet_journal_inactive\n else:\n new_sequence = self.env['ir.sequence'].sudo().create({'name': \n 'Account Default Wallet Journal ' + str(user.company_id.id),\n 'padding': 3, 'prefix': 'UW ' + str(user.company_id.id)})\n self.env['ir.model.data'].sudo().create({'name': \n 'journal_sequence' + str(new_sequence.id), 'model':\n 'ir.sequence', 'module': 'pos_retail', 'res_id':\n new_sequence.id, 'noupdate': True})\n wallet_journal = Journal.sudo().create({'name': 'Wallet',\n 'code': 'UWJ', 'type': 'cash', 'pos_method_type': 'wallet',\n 'journal_user': True, 'sequence_id': new_sequence.id,\n 'company_id': user.company_id.id,\n 'default_debit_account_id': wallet_account.id,\n 'default_credit_account_id': wallet_account.id, 'sequence':\n 100})\n self.env['ir.model.data'].sudo().create({'name': \n 'use_wallet_journal_' + str(wallet_journal.id), 'model':\n 'account.journal', 'module': 'pos_retail', 'res_id': int(\n wallet_journal.id), 'noupdate': True})\n config = self\n config.sudo().write({'journal_ids': [(4, wallet_journal.id)]})\n statement = [(0, 0, {'journal_id': wallet_journal.id, 'user_id':\n user.id, 'company_id': user.company_id.id})]\n current_session = config.current_session_id\n current_session.sudo().write({'statement_ids': statement})\n return\n\n def init_voucher_journal(self):\n Journal = self.env['account.journal']\n user = self.env.user\n voucher_journal = Journal.sudo().search([('code', '=', 'VCJ'), (\n 'company_id', '=', user.company_id.id)])\n if voucher_journal:\n return voucher_journal.sudo().write({'pos_method_type': 'voucher'})\n Account = self.env['account.account']\n voucher_account_old_version = Account.sudo().search([('code', '=',\n 'AVC'), ('company_id', '=', user.company_id.id)])\n if voucher_account_old_version:\n voucher_account = voucher_account_old_version[0]\n else:\n voucher_account = Account.sudo().create({'name':\n 'Account voucher', 'code': 'AVC', 'user_type_id': self.env.\n ref('account.data_account_type_current_assets').id,\n 'company_id': user.company_id.id, 'note':\n 'code \"AVC\" auto give voucher histories of customers'})\n self.env['ir.model.data'].sudo().create({'name': \n 'account_voucher' + str(user.company_id.id), 'model':\n 'account.account', 'module': 'pos_retail', 'res_id':\n voucher_account.id, 'noupdate': True})\n voucher_journal = Journal.sudo().search([('code', '=', 'VCJ'), (\n 'company_id', '=', user.company_id.id), ('pos_method_type', '=',\n 'voucher')])\n if voucher_journal:\n voucher_journal[0].sudo().write({'voucher': True,\n 'default_debit_account_id': voucher_account.id,\n 'default_credit_account_id': voucher_account.id,\n 'pos_method_type': 'voucher', 'sequence': 101})\n voucher_journal = voucher_journal[0]\n else:\n new_sequence = self.env['ir.sequence'].sudo().create({'name': \n 'Account Voucher ' + str(user.company_id.id), 'padding': 3,\n 'prefix': 'AVC ' + str(user.company_id.id)})\n self.env['ir.model.data'].sudo().create({'name': \n 'journal_sequence' + str(new_sequence.id), 'model':\n 'ir.sequence', 'module': 'pos_retail', 'res_id':\n new_sequence.id, 'noupdate': True})\n voucher_journal = Journal.sudo().create({'name': 'Voucher',\n 'code': 'VCJ', 'type': 'cash', 'pos_method_type': 'voucher',\n 'journal_user': True, 'sequence_id': new_sequence.id,\n 'company_id': user.company_id.id,\n 'default_debit_account_id': voucher_account.id,\n 'default_credit_account_id': voucher_account.id, 'sequence':\n 101})\n self.env['ir.model.data'].sudo().create({'name': \n 'journal_voucher_' + str(voucher_journal.id), 'model':\n 'account.journal', 'module': 'pos_retail', 'res_id': int(\n voucher_journal.id), 'noupdate': True})\n config = self\n config.sudo().write({'journal_ids': [(4, voucher_journal.id)]})\n statement = [(0, 0, {'journal_id': voucher_journal.id, 'user_id':\n user.id, 'company_id': user.company_id.id})]\n current_session = config.current_session_id\n current_session.sudo().write({'statement_ids': statement})\n return\n\n def init_credit_journal(self):\n Journal = self.env['account.journal']\n user = self.env.user\n voucher_journal = Journal.sudo().search([('code', '=', 'CJ'), (\n 'company_id', '=', user.company_id.id)])\n if voucher_journal:\n return voucher_journal.sudo().write({'pos_method_type': 'credit'})\n Account = self.env['account.account']\n credit_account_old_version = Account.sudo().search([('code', '=',\n 'ACJ'), ('company_id', '=', user.company_id.id)])\n if credit_account_old_version:\n credit_account = credit_account_old_version[0]\n else:\n credit_account = Account.sudo().create({'name':\n 'Credit Account', 'code': 'CA', 'user_type_id': self.env.\n ref('account.data_account_type_current_assets').id,\n 'company_id': user.company_id.id, 'note':\n 'code \"CA\" give credit payment customer'})\n self.env['ir.model.data'].sudo().create({'name': \n 'account_credit' + str(user.company_id.id), 'model':\n 'account.account', 'module': 'pos_retail', 'res_id':\n credit_account.id, 'noupdate': True})\n credit_journal = Journal.sudo().search([('code', '=', 'CJ'), (\n 'company_id', '=', user.company_id.id), ('pos_method_type', '=',\n 'credit')])\n if credit_journal:\n credit_journal[0].sudo().write({'credit': True,\n 'default_debit_account_id': credit_account.id,\n 'default_credit_account_id': credit_account.id,\n 'pos_method_type': 'credit', 'sequence': 102})\n credit_journal = credit_journal[0]\n else:\n new_sequence = self.env['ir.sequence'].sudo().create({'name': \n 'Credit account ' + str(user.company_id.id), 'padding': 3,\n 'prefix': 'CA ' + str(user.company_id.id)})\n self.env['ir.model.data'].sudo().create({'name': \n 'journal_sequence' + str(new_sequence.id), 'model':\n 'ir.sequence', 'module': 'pos_retail', 'res_id':\n new_sequence.id, 'noupdate': True})\n credit_journal = Journal.sudo().create({'name':\n 'Customer Credit', 'code': 'CJ', 'type': 'cash',\n 'pos_method_type': 'credit', 'journal_user': True,\n 'sequence_id': new_sequence.id, 'company_id': user.\n company_id.id, 'default_debit_account_id': credit_account.\n id, 'default_credit_account_id': credit_account.id,\n 'sequence': 102})\n self.env['ir.model.data'].sudo().create({'name': \n 'credit_journal_' + str(credit_journal.id), 'model':\n 'account.journal', 'module': 'pos_retail', 'res_id': int(\n credit_journal.id), 'noupdate': True})\n config = self\n config.sudo().write({'journal_ids': [(4, credit_journal.id)]})\n statement = [(0, 0, {'journal_id': credit_journal.id, 'user_id':\n user.id, 'company_id': user.company_id.id})]\n current_session = config.current_session_id\n current_session.sudo().write({'statement_ids': statement})\n return True\n\n def init_return_order_journal(self):\n Journal = self.env['account.journal']\n user = self.env.user\n return_journal = Journal.sudo().search([('code', '=', 'ROJ'), (\n 'company_id', '=', user.company_id.id)])\n if return_journal:\n return return_journal.sudo().write({'pos_method_type': 'return'})\n Account = self.env['account.account']\n return_account_old_version = Account.sudo().search([('code', '=',\n 'ARO'), ('company_id', '=', user.company_id.id)])\n if return_account_old_version:\n return_account = return_account_old_version[0]\n else:\n return_account = Account.sudo().create({'name':\n 'Return Order Account', 'code': 'ARO', 'user_type_id': self\n .env.ref('account.data_account_type_current_assets').id,\n 'company_id': user.company_id.id, 'note':\n 'code \"ARO\" give return order from customer'})\n self.env['ir.model.data'].sudo().create({'name': \n 'return_account' + str(user.company_id.id), 'model':\n 'account.account', 'module': 'pos_retail', 'res_id':\n return_account.id, 'noupdate': True})\n return_journal = Journal.sudo().search([('code', '=', 'ROJ'), (\n 'company_id', '=', user.company_id.id)])\n if return_journal:\n return_journal[0].sudo().write({'default_debit_account_id':\n return_account.id, 'default_credit_account_id':\n return_account.id, 'pos_method_type': 'return'})\n return_journal = return_journal[0]\n else:\n new_sequence = self.env['ir.sequence'].sudo().create({'name': \n 'Return account ' + str(user.company_id.id), 'padding': 3,\n 'prefix': 'RA ' + str(user.company_id.id)})\n self.env['ir.model.data'].sudo().create({'name': \n 'journal_sequence' + str(new_sequence.id), 'model':\n 'ir.sequence', 'module': 'pos_retail', 'res_id':\n new_sequence.id, 'noupdate': True})\n return_journal = Journal.sudo().create({'name':\n 'Return Order Customer', 'code': 'ROJ', 'type': 'cash',\n 'pos_method_type': 'return', 'journal_user': True,\n 'sequence_id': new_sequence.id, 'company_id': user.\n company_id.id, 'default_debit_account_id': return_account.\n id, 'default_credit_account_id': return_account.id,\n 'sequence': 103})\n self.env['ir.model.data'].sudo().create({'name': \n 'return_journal_' + str(return_journal.id), 'model':\n 'account.journal', 'module': 'pos_retail', 'res_id': int(\n return_journal.id), 'noupdate': True})\n config = self\n config.sudo().write({'journal_ids': [(4, return_journal.id)]})\n statement = [(0, 0, {'journal_id': return_journal.id, 'user_id':\n user.id, 'company_id': user.company_id.id})]\n current_session = config.current_session_id\n current_session.sudo().write({'statement_ids': statement})\n return True\n\n def init_rounding_journal(self):\n Journal = self.env['account.journal']\n Account = self.env['account.account']\n user = self.env.user\n rounding_journal = Journal.sudo().search([('code', '=', 'RDJ'), (\n 'company_id', '=', user.company_id.id)])\n if rounding_journal:\n return rounding_journal.sudo().write({'pos_method_type':\n 'rounding'})\n rounding_account_old_version = Account.sudo().search([('code', '=',\n 'AAR'), ('company_id', '=', user.company_id.id)])\n if rounding_account_old_version:\n rounding_account = rounding_account_old_version[0]\n else:\n _logger.info('rounding_account have not')\n rounding_account = Account.sudo().create({'name':\n 'Rounding Account', 'code': 'AAR', 'user_type_id': self.env\n .ref('account.data_account_type_current_assets').id,\n 'company_id': user.company_id.id, 'note':\n 'code \"AAR\" give rounding pos order'})\n self.env['ir.model.data'].sudo().create({'name': \n 'rounding_account' + str(user.company_id.id), 'model':\n 'account.account', 'module': 'pos_retail', 'res_id':\n rounding_account.id, 'noupdate': True})\n rounding_journal = Journal.sudo().search([('pos_method_type', '=',\n 'rounding'), ('company_id', '=', user.company_id.id)])\n if rounding_journal:\n rounding_journal[0].sudo().write({'name': 'Rounding',\n 'default_debit_account_id': rounding_account.id,\n 'default_credit_account_id': rounding_account.id,\n 'pos_method_type': 'rounding', 'code': 'RDJ'})\n rounding_journal = rounding_journal[0]\n else:\n new_sequence = self.env['ir.sequence'].sudo().create({'name': \n 'rounding account ' + str(user.company_id.id), 'padding': 3,\n 'prefix': 'RA ' + str(user.company_id.id)})\n self.env['ir.model.data'].sudo().create({'name': \n 'journal_sequence' + str(new_sequence.id), 'model':\n 'ir.sequence', 'module': 'pos_retail', 'res_id':\n new_sequence.id, 'noupdate': True})\n rounding_journal = Journal.sudo().create({'name': 'Rounding',\n 'code': 'RDJ', 'type': 'cash', 'pos_method_type':\n 'rounding', 'journal_user': True, 'sequence_id':\n new_sequence.id, 'company_id': user.company_id.id,\n 'default_debit_account_id': rounding_account.id,\n 'default_credit_account_id': rounding_account.id,\n 'sequence': 103})\n self.env['ir.model.data'].sudo().create({'name': \n 'rounding_journal_' + str(rounding_journal.id), 'model':\n 'account.journal', 'module': 'pos_retail', 'res_id': int(\n rounding_journal.id), 'noupdate': True})\n config = self\n config.sudo().write({'journal_ids': [(4, rounding_journal.id)]})\n statement = [(0, 0, {'journal_id': rounding_journal.id, 'user_id':\n user.id, 'company_id': user.company_id.id})]\n current_session = config.current_session_id\n current_session.sudo().write({'statement_ids': statement})\n return True\n\n @api.multi\n def open_ui(self):\n res = super(pos_config, self).open_ui()\n self.init_voucher_journal()\n self.init_wallet_journal()\n self.init_credit_journal()\n self.init_return_order_journal()\n self.init_rounding_journal()\n return res\n\n @api.multi\n def open_session_cb(self):\n res = super(pos_config, self).open_session_cb()\n self.init_voucher_journal()\n self.init_wallet_journal()\n self.init_credit_journal()\n self.init_return_order_journal()\n self.init_rounding_journal()\n return res\n",
"<import token>\ntry:\n to_unicode = unicode\nexcept NameError:\n to_unicode = str\n<assignment token>\n\n\nclass pos_config_image(models.Model):\n _name = 'pos.config.image'\n _description = 'Image show to customer screen'\n name = fields.Char('Title', required=1)\n image = fields.Binary('Image', required=1)\n config_id = fields.Many2one('pos.config', 'POS config', required=1)\n description = fields.Text('Description')\n\n\nclass pos_config(models.Model):\n _inherit = 'pos.config'\n user_id = fields.Many2one('res.users', 'Assigned to')\n config_access_right = fields.Boolean('Config access right', default=1)\n allow_discount = fields.Boolean('Change discount', default=1)\n allow_qty = fields.Boolean('Change quantity', default=1)\n allow_price = fields.Boolean('Change price', default=1)\n allow_remove_line = fields.Boolean('Remove line', default=1)\n allow_numpad = fields.Boolean('Display numpad', default=1)\n allow_payment = fields.Boolean('Display payment', default=1)\n allow_customer = fields.Boolean('Choice customer', default=1)\n allow_add_order = fields.Boolean('New order', default=1)\n allow_remove_order = fields.Boolean('Remove order', default=1)\n allow_add_product = fields.Boolean('Add line', default=1)\n allow_lock_screen = fields.Boolean('Lock screen', default=0, help=\n 'When pos sessions start, cashiers required open POS viva pos pass pin (Setting/Users)'\n )\n display_point_receipt = fields.Boolean('Display point / receipt')\n loyalty_id = fields.Many2one('pos.loyalty', 'Loyalty', domain=[('state',\n '=', 'running')])\n promotion_ids = fields.Many2many('pos.promotion',\n 'pos_config_promotion_rel', 'config_id', 'promotion_id', string=\n 'Promotion programs')\n promotion_manual_select = fields.Boolean('Promotion manual choice',\n default=0)\n create_purchase_order = fields.Boolean('Create PO', default=0)\n create_purchase_order_required_signature = fields.Boolean(\n 'Required signature', default=0)\n purchase_order_state = fields.Selection([('confirm_order',\n 'Auto confirm'), ('confirm_picking', 'Auto delivery'), (\n 'confirm_invoice', 'Auto invoice')], 'PO state', help=\n 'This is state of purchase order will process to', default=\n 'confirm_invoice')\n sync_sale_order = fields.Boolean('Sync sale orders', default=0)\n sale_order = fields.Boolean('Create Sale order', default=0)\n sale_order_auto_confirm = fields.Boolean('Auto confirm', default=0)\n sale_order_auto_invoice = fields.Boolean('Auto paid', default=0)\n sale_order_auto_delivery = fields.Boolean('Auto delivery', default=0)\n pos_orders_management = fields.Boolean('POS order management', default=0)\n pos_order_period_return_days = fields.Float('Return period days', help=\n 'this is period time for customer can return order', default=30)\n display_return_days_receipt = fields.Boolean('Display return days receipt',\n default=0)\n sync_pricelist = fields.Boolean('Sync prices list', default=0)\n display_onhand = fields.Boolean('Show qty available product', default=1,\n help='Display quantity on hand all products on pos screen')\n large_stocks = fields.Boolean('Large stock', help=\n 'If count products bigger than 100,000 rows, please check it')\n allow_order_out_of_stock = fields.Boolean('Allow out-of-stock', default\n =1, help='If checked, allow cashier can add product have out of stock')\n allow_of_stock_approve_by_admin = fields.Boolean('Approve allow of stock',\n help='Allow manager approve allow of stock')\n print_voucher = fields.Boolean('Print vouchers', help=\n 'Reprint last vouchers', default=1)\n scan_voucher = fields.Boolean('Scan voucher', default=0)\n expired_days_voucher = fields.Integer('Expired days of voucher',\n default=30, help=\n 'Total days keep voucher can use, if out of period days from create date, voucher will expired'\n )\n sync_multi_session = fields.Boolean('Sync multi session', default=0)\n bus_id = fields.Many2one('pos.bus', string='Branch/store')\n display_person_add_line = fields.Boolean('Display information line',\n default=0, help=\n 'When you checked, on pos order lines screen, will display information person created order (lines) Eg: create date, updated date ..'\n )\n quickly_payment = fields.Boolean('Quickly payment', default=0)\n internal_transfer = fields.Boolean('Internal transfer', default=0, help\n ='Go Inventory and active multi warehouse and location')\n internal_transfer_auto_validate = fields.Boolean(\n 'Internal transfer auto validate', default=0)\n discount = fields.Boolean('Global discount', default=0)\n discount_ids = fields.Many2many('pos.global.discount',\n 'pos_config_pos_global_discount_rel', 'config_id', 'discount_id',\n 'Global discounts')\n is_customer_screen = fields.Boolean('Is customer screen')\n delay = fields.Integer('Delay time', default=3000)\n slogan = fields.Char('Slogan', help=\n 'This is message will display on screen of customer')\n image_ids = fields.One2many('pos.config.image', 'config_id', 'Images')\n tooltip = fields.Boolean('Show information of product', default=0)\n tooltip_show_last_price = fields.Boolean('Show last price of product',\n help='Show last price of items of customer have bought before',\n default=0)\n tooltip_show_minimum_sale_price = fields.Boolean(\n 'Show min of product sale price', help=\n 'Show minimum sale price of product', default=0)\n discount_limit = fields.Boolean('Discount limit', default=0)\n discount_limit_amount = fields.Float('Discount limit amount', default=10)\n discount_each_line = fields.Boolean('Discount each line')\n discount_unlock_limit = fields.Boolean('Manager can unlock limit')\n discount_unlock_limit_user_id = fields.Many2one('res.users',\n 'User unlock limit amount')\n multi_currency = fields.Boolean('Multi currency', default=0)\n multi_currency_update_rate = fields.Boolean('Update rate', default=0)\n notify_alert = fields.Boolean('Notify alert', help=\n 'Turn on/off notification alert on POS sessions.', default=0)\n return_products = fields.Boolean('Return orders', help=\n 'Allow cashier return orders, return products', default=0)\n receipt_without_payment_template = fields.Selection([('none', 'None'),\n ('display_price', 'Display price'), ('not_display_price',\n 'Not display price')], default='not_display_price', string=\n 'Receipt without payment template')\n lock_order_printed_receipt = fields.Boolean('Lock order printed receipt',\n default=0)\n staff_level = fields.Selection([('manual', 'Manual config'), (\n 'marketing', 'Marketing'), ('waiter', 'Waiter'), ('cashier',\n 'Cashier'), ('manager', 'Manager')], string='Staff level', default=\n 'manual')\n validate_payment = fields.Boolean('Validate payment')\n validate_remove_order = fields.Boolean('Validate remove order')\n validate_change_minus = fields.Boolean('Validate pressed +/-')\n validate_quantity_change = fields.Boolean('Validate quantity change')\n validate_price_change = fields.Boolean('Validate price change')\n validate_discount_change = fields.Boolean('Validate discount change')\n validate_close_session = fields.Boolean('Validate close session')\n validate_by_user_id = fields.Many2one('res.users', 'Validate by admin')\n apply_validate_return_mode = fields.Boolean('Validate return mode',\n help='If checked, only applied validate when return order', default=1)\n print_user_card = fields.Boolean('Print user card')\n product_operation = fields.Boolean('Product Operation', default=0, help\n ='Allow cashiers add pos categories and products on pos screen')\n quickly_payment_full = fields.Boolean('Quickly payment full')\n quickly_payment_full_journal_id = fields.Many2one('account.journal',\n 'Payment mode', domain=[('journal_user', '=', True)])\n daily_report = fields.Boolean('Daily report', default=0)\n note_order = fields.Boolean('Note order', default=0)\n note_orderline = fields.Boolean('Note order line', default=0)\n signature_order = fields.Boolean('Signature order', default=0)\n quickly_buttons = fields.Boolean('Quickly Actions', default=0)\n display_amount_discount = fields.Boolean('Display amount discount',\n default=0)\n booking_orders = fields.Boolean('Booking orders', default=0)\n booking_orders_required_cashier_signature = fields.Boolean(\n 'Book order required sessions signature', help=\n 'Checked if need required pos seller signature', default=0)\n booking_orders_alert = fields.Boolean('Alert when new order coming',\n default=0)\n delivery_orders = fields.Boolean('Delivery orders', help=\n 'Pos clients can get booking orders and delivery orders', default=0)\n booking_orders_display_shipping_receipt = fields.Boolean(\n 'Display shipping on receipt', default=0)\n display_tax_orderline = fields.Boolean('Display tax orderline', default=0)\n display_tax_receipt = fields.Boolean('Display tax receipt', default=0)\n display_fiscal_position_receipt = fields.Boolean(\n 'Display fiscal position on receipt', default=0)\n display_image_orderline = fields.Boolean('Display image order line',\n default=0)\n display_image_receipt = fields.Boolean('Display image receipt', default=0)\n duplicate_receipt = fields.Boolean('Duplicate Receipt')\n print_number = fields.Integer('Print number', help=\n 'How many number receipt need to print at printer ?', default=0)\n lock_session = fields.Boolean('Lock session', default=0)\n category_wise_receipt = fields.Boolean('Category wise receipt', default=0)\n management_invoice = fields.Boolean('Management Invoice', default=0)\n invoice_journal_ids = fields.Many2many('account.journal',\n 'pos_config_invoice_journal_rel', 'config_id', 'journal_id',\n 'Accounting Invoice Journal', domain=[('type', '=', 'sale')], help=\n 'Accounting journal use for create invoices.')\n send_invoice_email = fields.Boolean('Send email invoice', help=\n 'Help cashier send invoice to email of customer', default=0)\n lock_print_invoice_on_pos = fields.Boolean('Lock print invoice', help=\n 'Lock print pdf invoice when clicked button invoice', default=0)\n pos_auto_invoice = fields.Boolean('Auto create invoice', help=\n 'Automatic create invoice if order have client', default=0)\n receipt_invoice_number = fields.Boolean('Add invoice on receipt', help=\n 'Show invoice number on receipt header', default=0)\n receipt_customer_vat = fields.Boolean('Add vat customer on receipt',\n help='Show customer VAT(TIN) on receipt header', default=0)\n auto_register_payment = fields.Boolean('Auto invocie register payment',\n default=0)\n fiscal_position_auto_detect = fields.Boolean('Fiscal position auto detect',\n default=0)\n display_sale_price_within_tax = fields.Boolean(\n 'Display sale price within tax', default=0)\n display_cost_price = fields.Boolean('Display product cost price', default=0\n )\n display_product_ref = fields.Boolean('Display product ref', default=0)\n multi_location = fields.Boolean('Multi location', default=0)\n product_view = fields.Selection([('box', 'Box view'), ('list',\n 'List view')], default='box', string='View of products screen',\n required=1)\n ticket_font_size = fields.Integer('Ticket font size', default=12)\n customer_default_id = fields.Many2one('res.partner', 'Customer default')\n medical_insurance = fields.Boolean('Medical insurance', default=0)\n set_guest = fields.Boolean('Set guest', default=0)\n reset_sequence = fields.Boolean('Reset sequence order', default=0)\n update_tax = fields.Boolean('Modify tax', default=0, help=\n 'Cashier can change tax of order line')\n subtotal_tax_included = fields.Boolean('Show Tax-Included Prices', help\n ='When checked, subtotal of line will display amount have tax-included'\n )\n cash_out = fields.Boolean('Take money out', default=0, help=\n 'Allow cashiers take money out')\n cash_in = fields.Boolean('Push money in', default=0, help=\n 'Allow cashiers input money in')\n min_length_search = fields.Integer('Min character length search',\n default=3, help=\n 'Allow auto suggestion items when cashiers input on search box')\n review_receipt_before_paid = fields.Boolean('Review receipt before paid',\n help='Show receipt before paid order', default=1)\n keyboard_event = fields.Boolean('Keyboard event', default=0, help=\n 'Allow cashiers use shortcut keyboard')\n multi_variant = fields.Boolean('Multi variant', default=0, help=\n 'Allow cashiers change variant of order lines on pos screen')\n switch_user = fields.Boolean('Switch user', default=0, help=\n 'Allow cashiers switch to another cashier')\n change_unit_of_measure = fields.Boolean('Change unit of measure',\n default=0, help='Allow cashiers change unit of measure of order lines')\n print_last_order = fields.Boolean('Print last receipt', default=0, help\n ='Allow cashiers print last receipt')\n close_session = fields.Boolean('Close session', help=\n 'When cashiers click close pos, auto log out of system', default=0)\n display_image_product = fields.Boolean('Display image product', default\n =1, help='Allow hide/display product images on pos screen')\n printer_on_off = fields.Boolean('On/Off printer', help=\n 'Help cashier turn on/off printer viva posbox', default=0)\n check_duplicate_email = fields.Boolean('Check duplicate email', default=0)\n check_duplicate_phone = fields.Boolean('Check duplicate phone', default=0)\n hide_country = fields.Boolean('Hide country', default=0)\n hide_barcode = fields.Boolean('Hide barcode', default=0)\n hide_tax = fields.Boolean('Hide tax', default=0)\n hide_pricelist = fields.Boolean('Hide pricelists', default=0)\n hide_supplier = fields.Boolean('Hide suppiers', default=1)\n auto_remove_line = fields.Boolean('Auto remove line', default=1, help=\n 'When cashier set quantity of line to 0, line auto remove not keep line with qty is 0'\n )\n chat = fields.Boolean('Chat message', default=0, help=\n 'Allow chat, discuss between pos sessions')\n add_tags = fields.Boolean('Add tags line', default=0, help=\n 'Allow cashiers add tags to order lines')\n add_notes = fields.Boolean('Add notes line', default=0, help=\n 'Allow cashiers add notes to order lines')\n add_sale_person = fields.Boolean('Add sale person', default=0)\n logo = fields.Binary('Logo of store')\n paid_full = fields.Boolean('Allow paid full', default=0, help=\n 'Allow cashiers click one button, do payment full order')\n paid_partial = fields.Boolean('Allow partial payment', default=0, help=\n 'Allow cashiers do partial payment')\n backup = fields.Boolean('Backup/Restore orders', default=0, help=\n 'Allow cashiers backup and restore orders on pos screen')\n backup_orders = fields.Text('Backup orders')\n change_logo = fields.Boolean('Change logo', default=1, help=\n 'Allow cashiers change logo of shop on pos screen')\n management_session = fields.Boolean('Management session', default=0)\n barcode_receipt = fields.Boolean('Barcode receipt', default=0)\n hide_mobile = fields.Boolean('Hide mobile', default=1)\n hide_phone = fields.Boolean('Hide phone', default=1)\n hide_email = fields.Boolean('Hide email', default=1)\n update_client = fields.Boolean('Update client', help=\n 'Uncheck if you dont want cashier change customer information on pos')\n add_client = fields.Boolean('Add client', help=\n 'Uncheck if you dont want cashier add new customers on pos')\n remove_client = fields.Boolean('Remove client', help=\n 'Uncheck if you dont want cashier remove customers on pos')\n mobile_responsive = fields.Boolean('Mobile responsive', default=0)\n hide_amount_total = fields.Boolean('Hide amount total', default=1)\n hide_amount_taxes = fields.Boolean('Hide amount taxes', default=1)\n report_no_of_report = fields.Integer(string='No.of Copy Receipt', default=1\n )\n report_signature = fields.Boolean(string='Report Signature', default=1)\n report_product_summary = fields.Boolean(string='Report Product Summary',\n default=1)\n report_product_current_month_date = fields.Boolean(string=\n 'Report This Month', default=1)\n report_order_summary = fields.Boolean(string='Report Order Summary',\n default=1)\n report_order_current_month_date = fields.Boolean(string=\n 'Report Current Month', default=1)\n report_payment_summary = fields.Boolean(string='Report Payment Summary',\n default=1)\n report_payment_current_month_date = fields.Boolean(string=\n 'Payment Current Month', default=1)\n active_product_sort_by = fields.Boolean('Active product sort by', default=1\n )\n default_product_sort_by = fields.Selection([('a_z', 'Sort from A to Z'),\n ('z_a', 'Sort from Z to A'), ('low_price',\n 'Sort from low to high price'), ('high_price',\n 'Sort from high to low price'), ('pos_sequence',\n 'Product pos sequence')], string='Default sort by', default='a_z')\n sale_extra = fields.Boolean('Sale extra', default=1)\n required_add_customer_before_put_product_to_cart = fields.Boolean(\n 'Required add customer first', help=\n 'If you checked on this checkbox, in POS always required cashier add customer the first'\n )\n only_one_time_add_customer = fields.Boolean('Only one time add customer',\n help='Each orders, only one time add customer')\n use_parameters = fields.Boolean('Use parameters', help=\n 'POS need only one time save parameter datas use on POS, and next times no need call backend'\n , default=1)\n time_refresh_parameter = fields.Integer('Time refresh datas (seconds)',\n help='Time for refresh parameters data', default=30)\n\n @api.model\n def switch_mobile_mode(self, config_id, vals):\n if vals.get('mobile_responsive') == True:\n vals['product_view'] = 'box'\n return self.browse(config_id).sudo().write(vals)\n\n @api.multi\n def remove_database(self):\n for config in self:\n sessions = self.env['pos.session'].search([('config_id', '=',\n config.id)])\n for session in sessions:\n self.env['bus.bus'].sendmany([[(self.env.cr.dbname,\n 'pos.indexed_db', session.user_id.id), json.dumps({'db':\n self.env.cr.dbname})]])\n self.env['pos.cache.database'].search([]).unlink()\n self.env['pos.call.log'].search([]).unlink()\n return {'type': 'ir.actions.act_url', 'url': '/pos/web/',\n 'target': 'self'}\n\n @api.multi\n def remove_caches(self):\n for config in self:\n sessions = self.env['pos.session'].search([('config_id', '=',\n config.id)])\n for session in sessions:\n self.env['bus.bus'].sendmany([[(self.env.cr.dbname,\n 'pos.indexed_db', session.user_id.id), json.dumps({'db':\n self.env.cr.dbname})]])\n if session.state != 'closed':\n session.action_pos_session_closing_control()\n return {'type': 'ir.actions.act_url', 'url': '/pos/web/',\n 'target': 'self'}\n\n @api.model\n def store_cached_file(self, datas):\n start = timeit.default_timer()\n _logger.info('==> begin cached_file')\n os.chdir(os.path.dirname(__file__))\n path = os.getcwd()\n file_name = path + '/pos.json'\n if os.path.exists(file_name):\n os.remove(file_name)\n with io.open(file_name, 'w', encoding='utf8') as outfile:\n str_ = json.dumps(datas, indent=4, sort_keys=True, separators=(\n ',', ': '), ensure_ascii=False)\n outfile.write(to_unicode(str_))\n stop = timeit.default_timer()\n _logger.info(stop - start)\n return True\n\n @api.model\n def get_cached_file(self):\n start = timeit.default_timer()\n _logger.info('==> begin get_cached_file')\n os.chdir(os.path.dirname(__file__))\n path = os.getcwd()\n file_name = path + '/pos.json'\n if not os.path.exists(file_name):\n return False\n else:\n with open(file_name) as f:\n datas = json.load(f)\n stop = timeit.default_timer()\n _logger.info(stop - start)\n return datas\n\n def get_fields_by_model(self, model):\n all_fields = self.env[model].fields_get()\n fields_list = []\n for field, value in all_fields.items():\n if field == 'model' or all_fields[field]['type'] in ['one2many',\n 'binary']:\n continue\n else:\n fields_list.append(field)\n return fields_list\n\n @api.model\n def install_data(self, model_name=None, min_id=0, max_id=1999):\n cache_obj = self.env['pos.cache.database'].with_context(prefetch_fields\n =False)\n log_obj = self.env['pos.call.log'].with_context(prefetch_fields=False)\n domain = [('id', '>=', min_id), ('id', '<=', max_id)]\n if model_name == 'product.product':\n domain.append(('available_in_pos', '=', True))\n field_list = cache_obj.get_fields_by_model(model_name)\n self.env.cr.execute(\n \"select id from pos_call_log where min_id=%s and max_id=%s and call_model='%s'\"\n % (min_id, max_id, model_name))\n old_logs = self.env.cr.fetchall()\n datas = None\n if len(old_logs) == 0:\n _logger.info('installing %s from %s to %s' % (model_name,\n min_id, max_id))\n datas = self.env[model_name].with_context(prefetch_fields=False\n ).search_read(domain, field_list)\n version_info = odoo.release.version_info[0]\n if version_info == 12:\n all_fields = self.env[model_name].fields_get()\n for data in datas:\n for field, value in data.items():\n if field == 'model':\n continue\n if all_fields[field] and all_fields[field]['type'] in [\n 'date', 'datetime'] and value:\n data[field] = value.strftime(\n DEFAULT_SERVER_DATETIME_FORMAT)\n vals = {'active': True, 'min_id': min_id, 'max_id': max_id,\n 'call_fields': json.dumps(field_list), 'call_results': json\n .dumps(datas), 'call_model': model_name, 'call_domain':\n json.dumps(domain)}\n log_obj.create(vals)\n else:\n old_log_id = old_logs[0][0]\n old_log = log_obj.browse(old_log_id)\n datas = old_log.call_results\n self.env.cr.commit()\n return datas\n\n @api.onchange('lock_print_invoice_on_pos')\n def _onchange_lock_print_invoice_on_pos(self):\n if self.lock_print_invoice_on_pos == True:\n self.receipt_invoice_number = False\n self.send_invoice_email = True\n else:\n self.receipt_invoice_number = True\n self.send_invoice_email = False\n\n @api.onchange('receipt_invoice_number')\n def _onchange_receipt_invoice_number(self):\n if self.receipt_invoice_number == True:\n self.lock_print_invoice_on_pos = False\n else:\n self.lock_print_invoice_on_pos = True\n\n @api.onchange('pos_auto_invoice')\n def _onchange_pos_auto_invoice(self):\n if self.pos_auto_invoice == True:\n self.iface_invoicing = True\n else:\n self.iface_invoicing = False\n\n @api.onchange('staff_level')\n def on_change_staff_level(self):\n if self.staff_level and self.staff_level == 'manager':\n self.lock_order_printed_receipt = False\n\n @api.multi\n def write(self, vals):\n if vals.get('allow_discount', False) or vals.get('allow_qty', False\n ) or vals.get('allow_price', False):\n vals['allow_numpad'] = True\n if vals.get('expired_days_voucher', None) and vals.get(\n 'expired_days_voucher') < 0:\n raise UserError('Expired days of voucher could not smaller than 0')\n for config in self:\n if vals.get('management_session', False) and not vals.get(\n 'default_cashbox_lines_ids'):\n if (not config.default_cashbox_lines_ids and not config.\n cash_control):\n raise UserError(\n 'Please go to Cash control and add Default Opening')\n res = super(pos_config, self).write(vals)\n for config in self:\n if (config.validate_by_user_id and not config.\n validate_by_user_id.pos_security_pin):\n raise UserError(\n 'Validate user %s have not set pos security pin, please go to Users menu and input security password'\n % config.validate_by_user_id.name)\n if (config.discount_unlock_limit_user_id and not config.\n discount_unlock_limit_user_id.pos_security_pin):\n raise UserError(\n 'User Unlock limit discount: %s ,have not set pos security pin, please go to Users menu and input security password'\n % config.discount_unlock_limit_user_id.name)\n return res\n\n @api.model\n def create(self, vals):\n if vals.get('allow_discount', False) or vals.get('allow_qty', False\n ) or vals.get('allow_price', False):\n vals['allow_numpad'] = True\n if vals.get('expired_days_voucher', 0) < 0:\n raise UserError('Expired days of voucher could not smaller than 0')\n config = super(pos_config, self).create(vals)\n if (config.management_session and not config.\n default_cashbox_lines_ids and not config.cash_control):\n raise UserError('Please go to Cash control and add Default Opening'\n )\n if (config.validate_by_user_id and not config.validate_by_user_id.\n pos_security_pin):\n raise UserError(\n 'Validate user %s have not set pos security pin, please go to Users menu and input security password'\n % config.validate_by_user_id.name)\n if (config.discount_unlock_limit_user_id and not config.\n discount_unlock_limit_user_id.pos_security_pin):\n raise UserError(\n 'User Unlock limit discount: %s ,have not set pos security pin, please go to Users menu and input security password'\n % config.discount_unlock_limit_user_id.name)\n return config\n\n def init_wallet_journal(self):\n Journal = self.env['account.journal']\n user = self.env.user\n wallet_journal = Journal.sudo().search([('code', '=', 'UWJ'), (\n 'company_id', '=', user.company_id.id)])\n if wallet_journal:\n return wallet_journal.sudo().write({'pos_method_type': 'wallet'})\n Account = self.env['account.account']\n wallet_account_old_version = Account.sudo().search([('code', '=',\n 'AUW'), ('company_id', '=', user.company_id.id)])\n if wallet_account_old_version:\n wallet_account = wallet_account_old_version[0]\n else:\n wallet_account = Account.sudo().create({'name':\n 'Account wallet', 'code': 'AUW', 'user_type_id': self.env.\n ref('account.data_account_type_current_assets').id,\n 'company_id': user.company_id.id, 'note':\n 'code \"AUW\" auto give wallet amount of customers'})\n self.env['ir.model.data'].sudo().create({'name': \n 'account_use_wallet' + str(user.company_id.id), 'model':\n 'account.account', 'module': 'pos_retail', 'res_id':\n wallet_account.id, 'noupdate': True})\n wallet_journal_inactive = Journal.sudo().search([('code', '=',\n 'UWJ'), ('company_id', '=', user.company_id.id), (\n 'pos_method_type', '=', 'wallet')])\n if wallet_journal_inactive:\n wallet_journal_inactive.sudo().write({\n 'default_debit_account_id': wallet_account.id,\n 'default_credit_account_id': wallet_account.id,\n 'pos_method_type': 'wallet', 'sequence': 100})\n wallet_journal = wallet_journal_inactive\n else:\n new_sequence = self.env['ir.sequence'].sudo().create({'name': \n 'Account Default Wallet Journal ' + str(user.company_id.id),\n 'padding': 3, 'prefix': 'UW ' + str(user.company_id.id)})\n self.env['ir.model.data'].sudo().create({'name': \n 'journal_sequence' + str(new_sequence.id), 'model':\n 'ir.sequence', 'module': 'pos_retail', 'res_id':\n new_sequence.id, 'noupdate': True})\n wallet_journal = Journal.sudo().create({'name': 'Wallet',\n 'code': 'UWJ', 'type': 'cash', 'pos_method_type': 'wallet',\n 'journal_user': True, 'sequence_id': new_sequence.id,\n 'company_id': user.company_id.id,\n 'default_debit_account_id': wallet_account.id,\n 'default_credit_account_id': wallet_account.id, 'sequence':\n 100})\n self.env['ir.model.data'].sudo().create({'name': \n 'use_wallet_journal_' + str(wallet_journal.id), 'model':\n 'account.journal', 'module': 'pos_retail', 'res_id': int(\n wallet_journal.id), 'noupdate': True})\n config = self\n config.sudo().write({'journal_ids': [(4, wallet_journal.id)]})\n statement = [(0, 0, {'journal_id': wallet_journal.id, 'user_id':\n user.id, 'company_id': user.company_id.id})]\n current_session = config.current_session_id\n current_session.sudo().write({'statement_ids': statement})\n return\n\n def init_voucher_journal(self):\n Journal = self.env['account.journal']\n user = self.env.user\n voucher_journal = Journal.sudo().search([('code', '=', 'VCJ'), (\n 'company_id', '=', user.company_id.id)])\n if voucher_journal:\n return voucher_journal.sudo().write({'pos_method_type': 'voucher'})\n Account = self.env['account.account']\n voucher_account_old_version = Account.sudo().search([('code', '=',\n 'AVC'), ('company_id', '=', user.company_id.id)])\n if voucher_account_old_version:\n voucher_account = voucher_account_old_version[0]\n else:\n voucher_account = Account.sudo().create({'name':\n 'Account voucher', 'code': 'AVC', 'user_type_id': self.env.\n ref('account.data_account_type_current_assets').id,\n 'company_id': user.company_id.id, 'note':\n 'code \"AVC\" auto give voucher histories of customers'})\n self.env['ir.model.data'].sudo().create({'name': \n 'account_voucher' + str(user.company_id.id), 'model':\n 'account.account', 'module': 'pos_retail', 'res_id':\n voucher_account.id, 'noupdate': True})\n voucher_journal = Journal.sudo().search([('code', '=', 'VCJ'), (\n 'company_id', '=', user.company_id.id), ('pos_method_type', '=',\n 'voucher')])\n if voucher_journal:\n voucher_journal[0].sudo().write({'voucher': True,\n 'default_debit_account_id': voucher_account.id,\n 'default_credit_account_id': voucher_account.id,\n 'pos_method_type': 'voucher', 'sequence': 101})\n voucher_journal = voucher_journal[0]\n else:\n new_sequence = self.env['ir.sequence'].sudo().create({'name': \n 'Account Voucher ' + str(user.company_id.id), 'padding': 3,\n 'prefix': 'AVC ' + str(user.company_id.id)})\n self.env['ir.model.data'].sudo().create({'name': \n 'journal_sequence' + str(new_sequence.id), 'model':\n 'ir.sequence', 'module': 'pos_retail', 'res_id':\n new_sequence.id, 'noupdate': True})\n voucher_journal = Journal.sudo().create({'name': 'Voucher',\n 'code': 'VCJ', 'type': 'cash', 'pos_method_type': 'voucher',\n 'journal_user': True, 'sequence_id': new_sequence.id,\n 'company_id': user.company_id.id,\n 'default_debit_account_id': voucher_account.id,\n 'default_credit_account_id': voucher_account.id, 'sequence':\n 101})\n self.env['ir.model.data'].sudo().create({'name': \n 'journal_voucher_' + str(voucher_journal.id), 'model':\n 'account.journal', 'module': 'pos_retail', 'res_id': int(\n voucher_journal.id), 'noupdate': True})\n config = self\n config.sudo().write({'journal_ids': [(4, voucher_journal.id)]})\n statement = [(0, 0, {'journal_id': voucher_journal.id, 'user_id':\n user.id, 'company_id': user.company_id.id})]\n current_session = config.current_session_id\n current_session.sudo().write({'statement_ids': statement})\n return\n\n def init_credit_journal(self):\n Journal = self.env['account.journal']\n user = self.env.user\n voucher_journal = Journal.sudo().search([('code', '=', 'CJ'), (\n 'company_id', '=', user.company_id.id)])\n if voucher_journal:\n return voucher_journal.sudo().write({'pos_method_type': 'credit'})\n Account = self.env['account.account']\n credit_account_old_version = Account.sudo().search([('code', '=',\n 'ACJ'), ('company_id', '=', user.company_id.id)])\n if credit_account_old_version:\n credit_account = credit_account_old_version[0]\n else:\n credit_account = Account.sudo().create({'name':\n 'Credit Account', 'code': 'CA', 'user_type_id': self.env.\n ref('account.data_account_type_current_assets').id,\n 'company_id': user.company_id.id, 'note':\n 'code \"CA\" give credit payment customer'})\n self.env['ir.model.data'].sudo().create({'name': \n 'account_credit' + str(user.company_id.id), 'model':\n 'account.account', 'module': 'pos_retail', 'res_id':\n credit_account.id, 'noupdate': True})\n credit_journal = Journal.sudo().search([('code', '=', 'CJ'), (\n 'company_id', '=', user.company_id.id), ('pos_method_type', '=',\n 'credit')])\n if credit_journal:\n credit_journal[0].sudo().write({'credit': True,\n 'default_debit_account_id': credit_account.id,\n 'default_credit_account_id': credit_account.id,\n 'pos_method_type': 'credit', 'sequence': 102})\n credit_journal = credit_journal[0]\n else:\n new_sequence = self.env['ir.sequence'].sudo().create({'name': \n 'Credit account ' + str(user.company_id.id), 'padding': 3,\n 'prefix': 'CA ' + str(user.company_id.id)})\n self.env['ir.model.data'].sudo().create({'name': \n 'journal_sequence' + str(new_sequence.id), 'model':\n 'ir.sequence', 'module': 'pos_retail', 'res_id':\n new_sequence.id, 'noupdate': True})\n credit_journal = Journal.sudo().create({'name':\n 'Customer Credit', 'code': 'CJ', 'type': 'cash',\n 'pos_method_type': 'credit', 'journal_user': True,\n 'sequence_id': new_sequence.id, 'company_id': user.\n company_id.id, 'default_debit_account_id': credit_account.\n id, 'default_credit_account_id': credit_account.id,\n 'sequence': 102})\n self.env['ir.model.data'].sudo().create({'name': \n 'credit_journal_' + str(credit_journal.id), 'model':\n 'account.journal', 'module': 'pos_retail', 'res_id': int(\n credit_journal.id), 'noupdate': True})\n config = self\n config.sudo().write({'journal_ids': [(4, credit_journal.id)]})\n statement = [(0, 0, {'journal_id': credit_journal.id, 'user_id':\n user.id, 'company_id': user.company_id.id})]\n current_session = config.current_session_id\n current_session.sudo().write({'statement_ids': statement})\n return True\n\n def init_return_order_journal(self):\n Journal = self.env['account.journal']\n user = self.env.user\n return_journal = Journal.sudo().search([('code', '=', 'ROJ'), (\n 'company_id', '=', user.company_id.id)])\n if return_journal:\n return return_journal.sudo().write({'pos_method_type': 'return'})\n Account = self.env['account.account']\n return_account_old_version = Account.sudo().search([('code', '=',\n 'ARO'), ('company_id', '=', user.company_id.id)])\n if return_account_old_version:\n return_account = return_account_old_version[0]\n else:\n return_account = Account.sudo().create({'name':\n 'Return Order Account', 'code': 'ARO', 'user_type_id': self\n .env.ref('account.data_account_type_current_assets').id,\n 'company_id': user.company_id.id, 'note':\n 'code \"ARO\" give return order from customer'})\n self.env['ir.model.data'].sudo().create({'name': \n 'return_account' + str(user.company_id.id), 'model':\n 'account.account', 'module': 'pos_retail', 'res_id':\n return_account.id, 'noupdate': True})\n return_journal = Journal.sudo().search([('code', '=', 'ROJ'), (\n 'company_id', '=', user.company_id.id)])\n if return_journal:\n return_journal[0].sudo().write({'default_debit_account_id':\n return_account.id, 'default_credit_account_id':\n return_account.id, 'pos_method_type': 'return'})\n return_journal = return_journal[0]\n else:\n new_sequence = self.env['ir.sequence'].sudo().create({'name': \n 'Return account ' + str(user.company_id.id), 'padding': 3,\n 'prefix': 'RA ' + str(user.company_id.id)})\n self.env['ir.model.data'].sudo().create({'name': \n 'journal_sequence' + str(new_sequence.id), 'model':\n 'ir.sequence', 'module': 'pos_retail', 'res_id':\n new_sequence.id, 'noupdate': True})\n return_journal = Journal.sudo().create({'name':\n 'Return Order Customer', 'code': 'ROJ', 'type': 'cash',\n 'pos_method_type': 'return', 'journal_user': True,\n 'sequence_id': new_sequence.id, 'company_id': user.\n company_id.id, 'default_debit_account_id': return_account.\n id, 'default_credit_account_id': return_account.id,\n 'sequence': 103})\n self.env['ir.model.data'].sudo().create({'name': \n 'return_journal_' + str(return_journal.id), 'model':\n 'account.journal', 'module': 'pos_retail', 'res_id': int(\n return_journal.id), 'noupdate': True})\n config = self\n config.sudo().write({'journal_ids': [(4, return_journal.id)]})\n statement = [(0, 0, {'journal_id': return_journal.id, 'user_id':\n user.id, 'company_id': user.company_id.id})]\n current_session = config.current_session_id\n current_session.sudo().write({'statement_ids': statement})\n return True\n\n def init_rounding_journal(self):\n Journal = self.env['account.journal']\n Account = self.env['account.account']\n user = self.env.user\n rounding_journal = Journal.sudo().search([('code', '=', 'RDJ'), (\n 'company_id', '=', user.company_id.id)])\n if rounding_journal:\n return rounding_journal.sudo().write({'pos_method_type':\n 'rounding'})\n rounding_account_old_version = Account.sudo().search([('code', '=',\n 'AAR'), ('company_id', '=', user.company_id.id)])\n if rounding_account_old_version:\n rounding_account = rounding_account_old_version[0]\n else:\n _logger.info('rounding_account have not')\n rounding_account = Account.sudo().create({'name':\n 'Rounding Account', 'code': 'AAR', 'user_type_id': self.env\n .ref('account.data_account_type_current_assets').id,\n 'company_id': user.company_id.id, 'note':\n 'code \"AAR\" give rounding pos order'})\n self.env['ir.model.data'].sudo().create({'name': \n 'rounding_account' + str(user.company_id.id), 'model':\n 'account.account', 'module': 'pos_retail', 'res_id':\n rounding_account.id, 'noupdate': True})\n rounding_journal = Journal.sudo().search([('pos_method_type', '=',\n 'rounding'), ('company_id', '=', user.company_id.id)])\n if rounding_journal:\n rounding_journal[0].sudo().write({'name': 'Rounding',\n 'default_debit_account_id': rounding_account.id,\n 'default_credit_account_id': rounding_account.id,\n 'pos_method_type': 'rounding', 'code': 'RDJ'})\n rounding_journal = rounding_journal[0]\n else:\n new_sequence = self.env['ir.sequence'].sudo().create({'name': \n 'rounding account ' + str(user.company_id.id), 'padding': 3,\n 'prefix': 'RA ' + str(user.company_id.id)})\n self.env['ir.model.data'].sudo().create({'name': \n 'journal_sequence' + str(new_sequence.id), 'model':\n 'ir.sequence', 'module': 'pos_retail', 'res_id':\n new_sequence.id, 'noupdate': True})\n rounding_journal = Journal.sudo().create({'name': 'Rounding',\n 'code': 'RDJ', 'type': 'cash', 'pos_method_type':\n 'rounding', 'journal_user': True, 'sequence_id':\n new_sequence.id, 'company_id': user.company_id.id,\n 'default_debit_account_id': rounding_account.id,\n 'default_credit_account_id': rounding_account.id,\n 'sequence': 103})\n self.env['ir.model.data'].sudo().create({'name': \n 'rounding_journal_' + str(rounding_journal.id), 'model':\n 'account.journal', 'module': 'pos_retail', 'res_id': int(\n rounding_journal.id), 'noupdate': True})\n config = self\n config.sudo().write({'journal_ids': [(4, rounding_journal.id)]})\n statement = [(0, 0, {'journal_id': rounding_journal.id, 'user_id':\n user.id, 'company_id': user.company_id.id})]\n current_session = config.current_session_id\n current_session.sudo().write({'statement_ids': statement})\n return True\n\n @api.multi\n def open_ui(self):\n res = super(pos_config, self).open_ui()\n self.init_voucher_journal()\n self.init_wallet_journal()\n self.init_credit_journal()\n self.init_return_order_journal()\n self.init_rounding_journal()\n return res\n\n @api.multi\n def open_session_cb(self):\n res = super(pos_config, self).open_session_cb()\n self.init_voucher_journal()\n self.init_wallet_journal()\n self.init_credit_journal()\n self.init_return_order_journal()\n self.init_rounding_journal()\n return res\n",
"<import token>\n<code token>\n<assignment token>\n\n\nclass pos_config_image(models.Model):\n _name = 'pos.config.image'\n _description = 'Image show to customer screen'\n name = fields.Char('Title', required=1)\n image = fields.Binary('Image', required=1)\n config_id = fields.Many2one('pos.config', 'POS config', required=1)\n description = fields.Text('Description')\n\n\nclass pos_config(models.Model):\n _inherit = 'pos.config'\n user_id = fields.Many2one('res.users', 'Assigned to')\n config_access_right = fields.Boolean('Config access right', default=1)\n allow_discount = fields.Boolean('Change discount', default=1)\n allow_qty = fields.Boolean('Change quantity', default=1)\n allow_price = fields.Boolean('Change price', default=1)\n allow_remove_line = fields.Boolean('Remove line', default=1)\n allow_numpad = fields.Boolean('Display numpad', default=1)\n allow_payment = fields.Boolean('Display payment', default=1)\n allow_customer = fields.Boolean('Choice customer', default=1)\n allow_add_order = fields.Boolean('New order', default=1)\n allow_remove_order = fields.Boolean('Remove order', default=1)\n allow_add_product = fields.Boolean('Add line', default=1)\n allow_lock_screen = fields.Boolean('Lock screen', default=0, help=\n 'When pos sessions start, cashiers required open POS viva pos pass pin (Setting/Users)'\n )\n display_point_receipt = fields.Boolean('Display point / receipt')\n loyalty_id = fields.Many2one('pos.loyalty', 'Loyalty', domain=[('state',\n '=', 'running')])\n promotion_ids = fields.Many2many('pos.promotion',\n 'pos_config_promotion_rel', 'config_id', 'promotion_id', string=\n 'Promotion programs')\n promotion_manual_select = fields.Boolean('Promotion manual choice',\n default=0)\n create_purchase_order = fields.Boolean('Create PO', default=0)\n create_purchase_order_required_signature = fields.Boolean(\n 'Required signature', default=0)\n purchase_order_state = fields.Selection([('confirm_order',\n 'Auto confirm'), ('confirm_picking', 'Auto delivery'), (\n 'confirm_invoice', 'Auto invoice')], 'PO state', help=\n 'This is state of purchase order will process to', default=\n 'confirm_invoice')\n sync_sale_order = fields.Boolean('Sync sale orders', default=0)\n sale_order = fields.Boolean('Create Sale order', default=0)\n sale_order_auto_confirm = fields.Boolean('Auto confirm', default=0)\n sale_order_auto_invoice = fields.Boolean('Auto paid', default=0)\n sale_order_auto_delivery = fields.Boolean('Auto delivery', default=0)\n pos_orders_management = fields.Boolean('POS order management', default=0)\n pos_order_period_return_days = fields.Float('Return period days', help=\n 'this is period time for customer can return order', default=30)\n display_return_days_receipt = fields.Boolean('Display return days receipt',\n default=0)\n sync_pricelist = fields.Boolean('Sync prices list', default=0)\n display_onhand = fields.Boolean('Show qty available product', default=1,\n help='Display quantity on hand all products on pos screen')\n large_stocks = fields.Boolean('Large stock', help=\n 'If count products bigger than 100,000 rows, please check it')\n allow_order_out_of_stock = fields.Boolean('Allow out-of-stock', default\n =1, help='If checked, allow cashier can add product have out of stock')\n allow_of_stock_approve_by_admin = fields.Boolean('Approve allow of stock',\n help='Allow manager approve allow of stock')\n print_voucher = fields.Boolean('Print vouchers', help=\n 'Reprint last vouchers', default=1)\n scan_voucher = fields.Boolean('Scan voucher', default=0)\n expired_days_voucher = fields.Integer('Expired days of voucher',\n default=30, help=\n 'Total days keep voucher can use, if out of period days from create date, voucher will expired'\n )\n sync_multi_session = fields.Boolean('Sync multi session', default=0)\n bus_id = fields.Many2one('pos.bus', string='Branch/store')\n display_person_add_line = fields.Boolean('Display information line',\n default=0, help=\n 'When you checked, on pos order lines screen, will display information person created order (lines) Eg: create date, updated date ..'\n )\n quickly_payment = fields.Boolean('Quickly payment', default=0)\n internal_transfer = fields.Boolean('Internal transfer', default=0, help\n ='Go Inventory and active multi warehouse and location')\n internal_transfer_auto_validate = fields.Boolean(\n 'Internal transfer auto validate', default=0)\n discount = fields.Boolean('Global discount', default=0)\n discount_ids = fields.Many2many('pos.global.discount',\n 'pos_config_pos_global_discount_rel', 'config_id', 'discount_id',\n 'Global discounts')\n is_customer_screen = fields.Boolean('Is customer screen')\n delay = fields.Integer('Delay time', default=3000)\n slogan = fields.Char('Slogan', help=\n 'This is message will display on screen of customer')\n image_ids = fields.One2many('pos.config.image', 'config_id', 'Images')\n tooltip = fields.Boolean('Show information of product', default=0)\n tooltip_show_last_price = fields.Boolean('Show last price of product',\n help='Show last price of items of customer have bought before',\n default=0)\n tooltip_show_minimum_sale_price = fields.Boolean(\n 'Show min of product sale price', help=\n 'Show minimum sale price of product', default=0)\n discount_limit = fields.Boolean('Discount limit', default=0)\n discount_limit_amount = fields.Float('Discount limit amount', default=10)\n discount_each_line = fields.Boolean('Discount each line')\n discount_unlock_limit = fields.Boolean('Manager can unlock limit')\n discount_unlock_limit_user_id = fields.Many2one('res.users',\n 'User unlock limit amount')\n multi_currency = fields.Boolean('Multi currency', default=0)\n multi_currency_update_rate = fields.Boolean('Update rate', default=0)\n notify_alert = fields.Boolean('Notify alert', help=\n 'Turn on/off notification alert on POS sessions.', default=0)\n return_products = fields.Boolean('Return orders', help=\n 'Allow cashier return orders, return products', default=0)\n receipt_without_payment_template = fields.Selection([('none', 'None'),\n ('display_price', 'Display price'), ('not_display_price',\n 'Not display price')], default='not_display_price', string=\n 'Receipt without payment template')\n lock_order_printed_receipt = fields.Boolean('Lock order printed receipt',\n default=0)\n staff_level = fields.Selection([('manual', 'Manual config'), (\n 'marketing', 'Marketing'), ('waiter', 'Waiter'), ('cashier',\n 'Cashier'), ('manager', 'Manager')], string='Staff level', default=\n 'manual')\n validate_payment = fields.Boolean('Validate payment')\n validate_remove_order = fields.Boolean('Validate remove order')\n validate_change_minus = fields.Boolean('Validate pressed +/-')\n validate_quantity_change = fields.Boolean('Validate quantity change')\n validate_price_change = fields.Boolean('Validate price change')\n validate_discount_change = fields.Boolean('Validate discount change')\n validate_close_session = fields.Boolean('Validate close session')\n validate_by_user_id = fields.Many2one('res.users', 'Validate by admin')\n apply_validate_return_mode = fields.Boolean('Validate return mode',\n help='If checked, only applied validate when return order', default=1)\n print_user_card = fields.Boolean('Print user card')\n product_operation = fields.Boolean('Product Operation', default=0, help\n ='Allow cashiers add pos categories and products on pos screen')\n quickly_payment_full = fields.Boolean('Quickly payment full')\n quickly_payment_full_journal_id = fields.Many2one('account.journal',\n 'Payment mode', domain=[('journal_user', '=', True)])\n daily_report = fields.Boolean('Daily report', default=0)\n note_order = fields.Boolean('Note order', default=0)\n note_orderline = fields.Boolean('Note order line', default=0)\n signature_order = fields.Boolean('Signature order', default=0)\n quickly_buttons = fields.Boolean('Quickly Actions', default=0)\n display_amount_discount = fields.Boolean('Display amount discount',\n default=0)\n booking_orders = fields.Boolean('Booking orders', default=0)\n booking_orders_required_cashier_signature = fields.Boolean(\n 'Book order required sessions signature', help=\n 'Checked if need required pos seller signature', default=0)\n booking_orders_alert = fields.Boolean('Alert when new order coming',\n default=0)\n delivery_orders = fields.Boolean('Delivery orders', help=\n 'Pos clients can get booking orders and delivery orders', default=0)\n booking_orders_display_shipping_receipt = fields.Boolean(\n 'Display shipping on receipt', default=0)\n display_tax_orderline = fields.Boolean('Display tax orderline', default=0)\n display_tax_receipt = fields.Boolean('Display tax receipt', default=0)\n display_fiscal_position_receipt = fields.Boolean(\n 'Display fiscal position on receipt', default=0)\n display_image_orderline = fields.Boolean('Display image order line',\n default=0)\n display_image_receipt = fields.Boolean('Display image receipt', default=0)\n duplicate_receipt = fields.Boolean('Duplicate Receipt')\n print_number = fields.Integer('Print number', help=\n 'How many number receipt need to print at printer ?', default=0)\n lock_session = fields.Boolean('Lock session', default=0)\n category_wise_receipt = fields.Boolean('Category wise receipt', default=0)\n management_invoice = fields.Boolean('Management Invoice', default=0)\n invoice_journal_ids = fields.Many2many('account.journal',\n 'pos_config_invoice_journal_rel', 'config_id', 'journal_id',\n 'Accounting Invoice Journal', domain=[('type', '=', 'sale')], help=\n 'Accounting journal use for create invoices.')\n send_invoice_email = fields.Boolean('Send email invoice', help=\n 'Help cashier send invoice to email of customer', default=0)\n lock_print_invoice_on_pos = fields.Boolean('Lock print invoice', help=\n 'Lock print pdf invoice when clicked button invoice', default=0)\n pos_auto_invoice = fields.Boolean('Auto create invoice', help=\n 'Automatic create invoice if order have client', default=0)\n receipt_invoice_number = fields.Boolean('Add invoice on receipt', help=\n 'Show invoice number on receipt header', default=0)\n receipt_customer_vat = fields.Boolean('Add vat customer on receipt',\n help='Show customer VAT(TIN) on receipt header', default=0)\n auto_register_payment = fields.Boolean('Auto invocie register payment',\n default=0)\n fiscal_position_auto_detect = fields.Boolean('Fiscal position auto detect',\n default=0)\n display_sale_price_within_tax = fields.Boolean(\n 'Display sale price within tax', default=0)\n display_cost_price = fields.Boolean('Display product cost price', default=0\n )\n display_product_ref = fields.Boolean('Display product ref', default=0)\n multi_location = fields.Boolean('Multi location', default=0)\n product_view = fields.Selection([('box', 'Box view'), ('list',\n 'List view')], default='box', string='View of products screen',\n required=1)\n ticket_font_size = fields.Integer('Ticket font size', default=12)\n customer_default_id = fields.Many2one('res.partner', 'Customer default')\n medical_insurance = fields.Boolean('Medical insurance', default=0)\n set_guest = fields.Boolean('Set guest', default=0)\n reset_sequence = fields.Boolean('Reset sequence order', default=0)\n update_tax = fields.Boolean('Modify tax', default=0, help=\n 'Cashier can change tax of order line')\n subtotal_tax_included = fields.Boolean('Show Tax-Included Prices', help\n ='When checked, subtotal of line will display amount have tax-included'\n )\n cash_out = fields.Boolean('Take money out', default=0, help=\n 'Allow cashiers take money out')\n cash_in = fields.Boolean('Push money in', default=0, help=\n 'Allow cashiers input money in')\n min_length_search = fields.Integer('Min character length search',\n default=3, help=\n 'Allow auto suggestion items when cashiers input on search box')\n review_receipt_before_paid = fields.Boolean('Review receipt before paid',\n help='Show receipt before paid order', default=1)\n keyboard_event = fields.Boolean('Keyboard event', default=0, help=\n 'Allow cashiers use shortcut keyboard')\n multi_variant = fields.Boolean('Multi variant', default=0, help=\n 'Allow cashiers change variant of order lines on pos screen')\n switch_user = fields.Boolean('Switch user', default=0, help=\n 'Allow cashiers switch to another cashier')\n change_unit_of_measure = fields.Boolean('Change unit of measure',\n default=0, help='Allow cashiers change unit of measure of order lines')\n print_last_order = fields.Boolean('Print last receipt', default=0, help\n ='Allow cashiers print last receipt')\n close_session = fields.Boolean('Close session', help=\n 'When cashiers click close pos, auto log out of system', default=0)\n display_image_product = fields.Boolean('Display image product', default\n =1, help='Allow hide/display product images on pos screen')\n printer_on_off = fields.Boolean('On/Off printer', help=\n 'Help cashier turn on/off printer viva posbox', default=0)\n check_duplicate_email = fields.Boolean('Check duplicate email', default=0)\n check_duplicate_phone = fields.Boolean('Check duplicate phone', default=0)\n hide_country = fields.Boolean('Hide country', default=0)\n hide_barcode = fields.Boolean('Hide barcode', default=0)\n hide_tax = fields.Boolean('Hide tax', default=0)\n hide_pricelist = fields.Boolean('Hide pricelists', default=0)\n hide_supplier = fields.Boolean('Hide suppiers', default=1)\n auto_remove_line = fields.Boolean('Auto remove line', default=1, help=\n 'When cashier set quantity of line to 0, line auto remove not keep line with qty is 0'\n )\n chat = fields.Boolean('Chat message', default=0, help=\n 'Allow chat, discuss between pos sessions')\n add_tags = fields.Boolean('Add tags line', default=0, help=\n 'Allow cashiers add tags to order lines')\n add_notes = fields.Boolean('Add notes line', default=0, help=\n 'Allow cashiers add notes to order lines')\n add_sale_person = fields.Boolean('Add sale person', default=0)\n logo = fields.Binary('Logo of store')\n paid_full = fields.Boolean('Allow paid full', default=0, help=\n 'Allow cashiers click one button, do payment full order')\n paid_partial = fields.Boolean('Allow partial payment', default=0, help=\n 'Allow cashiers do partial payment')\n backup = fields.Boolean('Backup/Restore orders', default=0, help=\n 'Allow cashiers backup and restore orders on pos screen')\n backup_orders = fields.Text('Backup orders')\n change_logo = fields.Boolean('Change logo', default=1, help=\n 'Allow cashiers change logo of shop on pos screen')\n management_session = fields.Boolean('Management session', default=0)\n barcode_receipt = fields.Boolean('Barcode receipt', default=0)\n hide_mobile = fields.Boolean('Hide mobile', default=1)\n hide_phone = fields.Boolean('Hide phone', default=1)\n hide_email = fields.Boolean('Hide email', default=1)\n update_client = fields.Boolean('Update client', help=\n 'Uncheck if you dont want cashier change customer information on pos')\n add_client = fields.Boolean('Add client', help=\n 'Uncheck if you dont want cashier add new customers on pos')\n remove_client = fields.Boolean('Remove client', help=\n 'Uncheck if you dont want cashier remove customers on pos')\n mobile_responsive = fields.Boolean('Mobile responsive', default=0)\n hide_amount_total = fields.Boolean('Hide amount total', default=1)\n hide_amount_taxes = fields.Boolean('Hide amount taxes', default=1)\n report_no_of_report = fields.Integer(string='No.of Copy Receipt', default=1\n )\n report_signature = fields.Boolean(string='Report Signature', default=1)\n report_product_summary = fields.Boolean(string='Report Product Summary',\n default=1)\n report_product_current_month_date = fields.Boolean(string=\n 'Report This Month', default=1)\n report_order_summary = fields.Boolean(string='Report Order Summary',\n default=1)\n report_order_current_month_date = fields.Boolean(string=\n 'Report Current Month', default=1)\n report_payment_summary = fields.Boolean(string='Report Payment Summary',\n default=1)\n report_payment_current_month_date = fields.Boolean(string=\n 'Payment Current Month', default=1)\n active_product_sort_by = fields.Boolean('Active product sort by', default=1\n )\n default_product_sort_by = fields.Selection([('a_z', 'Sort from A to Z'),\n ('z_a', 'Sort from Z to A'), ('low_price',\n 'Sort from low to high price'), ('high_price',\n 'Sort from high to low price'), ('pos_sequence',\n 'Product pos sequence')], string='Default sort by', default='a_z')\n sale_extra = fields.Boolean('Sale extra', default=1)\n required_add_customer_before_put_product_to_cart = fields.Boolean(\n 'Required add customer first', help=\n 'If you checked on this checkbox, in POS always required cashier add customer the first'\n )\n only_one_time_add_customer = fields.Boolean('Only one time add customer',\n help='Each orders, only one time add customer')\n use_parameters = fields.Boolean('Use parameters', help=\n 'POS need only one time save parameter datas use on POS, and next times no need call backend'\n , default=1)\n time_refresh_parameter = fields.Integer('Time refresh datas (seconds)',\n help='Time for refresh parameters data', default=30)\n\n @api.model\n def switch_mobile_mode(self, config_id, vals):\n if vals.get('mobile_responsive') == True:\n vals['product_view'] = 'box'\n return self.browse(config_id).sudo().write(vals)\n\n @api.multi\n def remove_database(self):\n for config in self:\n sessions = self.env['pos.session'].search([('config_id', '=',\n config.id)])\n for session in sessions:\n self.env['bus.bus'].sendmany([[(self.env.cr.dbname,\n 'pos.indexed_db', session.user_id.id), json.dumps({'db':\n self.env.cr.dbname})]])\n self.env['pos.cache.database'].search([]).unlink()\n self.env['pos.call.log'].search([]).unlink()\n return {'type': 'ir.actions.act_url', 'url': '/pos/web/',\n 'target': 'self'}\n\n @api.multi\n def remove_caches(self):\n for config in self:\n sessions = self.env['pos.session'].search([('config_id', '=',\n config.id)])\n for session in sessions:\n self.env['bus.bus'].sendmany([[(self.env.cr.dbname,\n 'pos.indexed_db', session.user_id.id), json.dumps({'db':\n self.env.cr.dbname})]])\n if session.state != 'closed':\n session.action_pos_session_closing_control()\n return {'type': 'ir.actions.act_url', 'url': '/pos/web/',\n 'target': 'self'}\n\n @api.model\n def store_cached_file(self, datas):\n start = timeit.default_timer()\n _logger.info('==> begin cached_file')\n os.chdir(os.path.dirname(__file__))\n path = os.getcwd()\n file_name = path + '/pos.json'\n if os.path.exists(file_name):\n os.remove(file_name)\n with io.open(file_name, 'w', encoding='utf8') as outfile:\n str_ = json.dumps(datas, indent=4, sort_keys=True, separators=(\n ',', ': '), ensure_ascii=False)\n outfile.write(to_unicode(str_))\n stop = timeit.default_timer()\n _logger.info(stop - start)\n return True\n\n @api.model\n def get_cached_file(self):\n start = timeit.default_timer()\n _logger.info('==> begin get_cached_file')\n os.chdir(os.path.dirname(__file__))\n path = os.getcwd()\n file_name = path + '/pos.json'\n if not os.path.exists(file_name):\n return False\n else:\n with open(file_name) as f:\n datas = json.load(f)\n stop = timeit.default_timer()\n _logger.info(stop - start)\n return datas\n\n def get_fields_by_model(self, model):\n all_fields = self.env[model].fields_get()\n fields_list = []\n for field, value in all_fields.items():\n if field == 'model' or all_fields[field]['type'] in ['one2many',\n 'binary']:\n continue\n else:\n fields_list.append(field)\n return fields_list\n\n @api.model\n def install_data(self, model_name=None, min_id=0, max_id=1999):\n cache_obj = self.env['pos.cache.database'].with_context(prefetch_fields\n =False)\n log_obj = self.env['pos.call.log'].with_context(prefetch_fields=False)\n domain = [('id', '>=', min_id), ('id', '<=', max_id)]\n if model_name == 'product.product':\n domain.append(('available_in_pos', '=', True))\n field_list = cache_obj.get_fields_by_model(model_name)\n self.env.cr.execute(\n \"select id from pos_call_log where min_id=%s and max_id=%s and call_model='%s'\"\n % (min_id, max_id, model_name))\n old_logs = self.env.cr.fetchall()\n datas = None\n if len(old_logs) == 0:\n _logger.info('installing %s from %s to %s' % (model_name,\n min_id, max_id))\n datas = self.env[model_name].with_context(prefetch_fields=False\n ).search_read(domain, field_list)\n version_info = odoo.release.version_info[0]\n if version_info == 12:\n all_fields = self.env[model_name].fields_get()\n for data in datas:\n for field, value in data.items():\n if field == 'model':\n continue\n if all_fields[field] and all_fields[field]['type'] in [\n 'date', 'datetime'] and value:\n data[field] = value.strftime(\n DEFAULT_SERVER_DATETIME_FORMAT)\n vals = {'active': True, 'min_id': min_id, 'max_id': max_id,\n 'call_fields': json.dumps(field_list), 'call_results': json\n .dumps(datas), 'call_model': model_name, 'call_domain':\n json.dumps(domain)}\n log_obj.create(vals)\n else:\n old_log_id = old_logs[0][0]\n old_log = log_obj.browse(old_log_id)\n datas = old_log.call_results\n self.env.cr.commit()\n return datas\n\n @api.onchange('lock_print_invoice_on_pos')\n def _onchange_lock_print_invoice_on_pos(self):\n if self.lock_print_invoice_on_pos == True:\n self.receipt_invoice_number = False\n self.send_invoice_email = True\n else:\n self.receipt_invoice_number = True\n self.send_invoice_email = False\n\n @api.onchange('receipt_invoice_number')\n def _onchange_receipt_invoice_number(self):\n if self.receipt_invoice_number == True:\n self.lock_print_invoice_on_pos = False\n else:\n self.lock_print_invoice_on_pos = True\n\n @api.onchange('pos_auto_invoice')\n def _onchange_pos_auto_invoice(self):\n if self.pos_auto_invoice == True:\n self.iface_invoicing = True\n else:\n self.iface_invoicing = False\n\n @api.onchange('staff_level')\n def on_change_staff_level(self):\n if self.staff_level and self.staff_level == 'manager':\n self.lock_order_printed_receipt = False\n\n @api.multi\n def write(self, vals):\n if vals.get('allow_discount', False) or vals.get('allow_qty', False\n ) or vals.get('allow_price', False):\n vals['allow_numpad'] = True\n if vals.get('expired_days_voucher', None) and vals.get(\n 'expired_days_voucher') < 0:\n raise UserError('Expired days of voucher could not smaller than 0')\n for config in self:\n if vals.get('management_session', False) and not vals.get(\n 'default_cashbox_lines_ids'):\n if (not config.default_cashbox_lines_ids and not config.\n cash_control):\n raise UserError(\n 'Please go to Cash control and add Default Opening')\n res = super(pos_config, self).write(vals)\n for config in self:\n if (config.validate_by_user_id and not config.\n validate_by_user_id.pos_security_pin):\n raise UserError(\n 'Validate user %s have not set pos security pin, please go to Users menu and input security password'\n % config.validate_by_user_id.name)\n if (config.discount_unlock_limit_user_id and not config.\n discount_unlock_limit_user_id.pos_security_pin):\n raise UserError(\n 'User Unlock limit discount: %s ,have not set pos security pin, please go to Users menu and input security password'\n % config.discount_unlock_limit_user_id.name)\n return res\n\n @api.model\n def create(self, vals):\n if vals.get('allow_discount', False) or vals.get('allow_qty', False\n ) or vals.get('allow_price', False):\n vals['allow_numpad'] = True\n if vals.get('expired_days_voucher', 0) < 0:\n raise UserError('Expired days of voucher could not smaller than 0')\n config = super(pos_config, self).create(vals)\n if (config.management_session and not config.\n default_cashbox_lines_ids and not config.cash_control):\n raise UserError('Please go to Cash control and add Default Opening'\n )\n if (config.validate_by_user_id and not config.validate_by_user_id.\n pos_security_pin):\n raise UserError(\n 'Validate user %s have not set pos security pin, please go to Users menu and input security password'\n % config.validate_by_user_id.name)\n if (config.discount_unlock_limit_user_id and not config.\n discount_unlock_limit_user_id.pos_security_pin):\n raise UserError(\n 'User Unlock limit discount: %s ,have not set pos security pin, please go to Users menu and input security password'\n % config.discount_unlock_limit_user_id.name)\n return config\n\n def init_wallet_journal(self):\n Journal = self.env['account.journal']\n user = self.env.user\n wallet_journal = Journal.sudo().search([('code', '=', 'UWJ'), (\n 'company_id', '=', user.company_id.id)])\n if wallet_journal:\n return wallet_journal.sudo().write({'pos_method_type': 'wallet'})\n Account = self.env['account.account']\n wallet_account_old_version = Account.sudo().search([('code', '=',\n 'AUW'), ('company_id', '=', user.company_id.id)])\n if wallet_account_old_version:\n wallet_account = wallet_account_old_version[0]\n else:\n wallet_account = Account.sudo().create({'name':\n 'Account wallet', 'code': 'AUW', 'user_type_id': self.env.\n ref('account.data_account_type_current_assets').id,\n 'company_id': user.company_id.id, 'note':\n 'code \"AUW\" auto give wallet amount of customers'})\n self.env['ir.model.data'].sudo().create({'name': \n 'account_use_wallet' + str(user.company_id.id), 'model':\n 'account.account', 'module': 'pos_retail', 'res_id':\n wallet_account.id, 'noupdate': True})\n wallet_journal_inactive = Journal.sudo().search([('code', '=',\n 'UWJ'), ('company_id', '=', user.company_id.id), (\n 'pos_method_type', '=', 'wallet')])\n if wallet_journal_inactive:\n wallet_journal_inactive.sudo().write({\n 'default_debit_account_id': wallet_account.id,\n 'default_credit_account_id': wallet_account.id,\n 'pos_method_type': 'wallet', 'sequence': 100})\n wallet_journal = wallet_journal_inactive\n else:\n new_sequence = self.env['ir.sequence'].sudo().create({'name': \n 'Account Default Wallet Journal ' + str(user.company_id.id),\n 'padding': 3, 'prefix': 'UW ' + str(user.company_id.id)})\n self.env['ir.model.data'].sudo().create({'name': \n 'journal_sequence' + str(new_sequence.id), 'model':\n 'ir.sequence', 'module': 'pos_retail', 'res_id':\n new_sequence.id, 'noupdate': True})\n wallet_journal = Journal.sudo().create({'name': 'Wallet',\n 'code': 'UWJ', 'type': 'cash', 'pos_method_type': 'wallet',\n 'journal_user': True, 'sequence_id': new_sequence.id,\n 'company_id': user.company_id.id,\n 'default_debit_account_id': wallet_account.id,\n 'default_credit_account_id': wallet_account.id, 'sequence':\n 100})\n self.env['ir.model.data'].sudo().create({'name': \n 'use_wallet_journal_' + str(wallet_journal.id), 'model':\n 'account.journal', 'module': 'pos_retail', 'res_id': int(\n wallet_journal.id), 'noupdate': True})\n config = self\n config.sudo().write({'journal_ids': [(4, wallet_journal.id)]})\n statement = [(0, 0, {'journal_id': wallet_journal.id, 'user_id':\n user.id, 'company_id': user.company_id.id})]\n current_session = config.current_session_id\n current_session.sudo().write({'statement_ids': statement})\n return\n\n def init_voucher_journal(self):\n Journal = self.env['account.journal']\n user = self.env.user\n voucher_journal = Journal.sudo().search([('code', '=', 'VCJ'), (\n 'company_id', '=', user.company_id.id)])\n if voucher_journal:\n return voucher_journal.sudo().write({'pos_method_type': 'voucher'})\n Account = self.env['account.account']\n voucher_account_old_version = Account.sudo().search([('code', '=',\n 'AVC'), ('company_id', '=', user.company_id.id)])\n if voucher_account_old_version:\n voucher_account = voucher_account_old_version[0]\n else:\n voucher_account = Account.sudo().create({'name':\n 'Account voucher', 'code': 'AVC', 'user_type_id': self.env.\n ref('account.data_account_type_current_assets').id,\n 'company_id': user.company_id.id, 'note':\n 'code \"AVC\" auto give voucher histories of customers'})\n self.env['ir.model.data'].sudo().create({'name': \n 'account_voucher' + str(user.company_id.id), 'model':\n 'account.account', 'module': 'pos_retail', 'res_id':\n voucher_account.id, 'noupdate': True})\n voucher_journal = Journal.sudo().search([('code', '=', 'VCJ'), (\n 'company_id', '=', user.company_id.id), ('pos_method_type', '=',\n 'voucher')])\n if voucher_journal:\n voucher_journal[0].sudo().write({'voucher': True,\n 'default_debit_account_id': voucher_account.id,\n 'default_credit_account_id': voucher_account.id,\n 'pos_method_type': 'voucher', 'sequence': 101})\n voucher_journal = voucher_journal[0]\n else:\n new_sequence = self.env['ir.sequence'].sudo().create({'name': \n 'Account Voucher ' + str(user.company_id.id), 'padding': 3,\n 'prefix': 'AVC ' + str(user.company_id.id)})\n self.env['ir.model.data'].sudo().create({'name': \n 'journal_sequence' + str(new_sequence.id), 'model':\n 'ir.sequence', 'module': 'pos_retail', 'res_id':\n new_sequence.id, 'noupdate': True})\n voucher_journal = Journal.sudo().create({'name': 'Voucher',\n 'code': 'VCJ', 'type': 'cash', 'pos_method_type': 'voucher',\n 'journal_user': True, 'sequence_id': new_sequence.id,\n 'company_id': user.company_id.id,\n 'default_debit_account_id': voucher_account.id,\n 'default_credit_account_id': voucher_account.id, 'sequence':\n 101})\n self.env['ir.model.data'].sudo().create({'name': \n 'journal_voucher_' + str(voucher_journal.id), 'model':\n 'account.journal', 'module': 'pos_retail', 'res_id': int(\n voucher_journal.id), 'noupdate': True})\n config = self\n config.sudo().write({'journal_ids': [(4, voucher_journal.id)]})\n statement = [(0, 0, {'journal_id': voucher_journal.id, 'user_id':\n user.id, 'company_id': user.company_id.id})]\n current_session = config.current_session_id\n current_session.sudo().write({'statement_ids': statement})\n return\n\n def init_credit_journal(self):\n Journal = self.env['account.journal']\n user = self.env.user\n voucher_journal = Journal.sudo().search([('code', '=', 'CJ'), (\n 'company_id', '=', user.company_id.id)])\n if voucher_journal:\n return voucher_journal.sudo().write({'pos_method_type': 'credit'})\n Account = self.env['account.account']\n credit_account_old_version = Account.sudo().search([('code', '=',\n 'ACJ'), ('company_id', '=', user.company_id.id)])\n if credit_account_old_version:\n credit_account = credit_account_old_version[0]\n else:\n credit_account = Account.sudo().create({'name':\n 'Credit Account', 'code': 'CA', 'user_type_id': self.env.\n ref('account.data_account_type_current_assets').id,\n 'company_id': user.company_id.id, 'note':\n 'code \"CA\" give credit payment customer'})\n self.env['ir.model.data'].sudo().create({'name': \n 'account_credit' + str(user.company_id.id), 'model':\n 'account.account', 'module': 'pos_retail', 'res_id':\n credit_account.id, 'noupdate': True})\n credit_journal = Journal.sudo().search([('code', '=', 'CJ'), (\n 'company_id', '=', user.company_id.id), ('pos_method_type', '=',\n 'credit')])\n if credit_journal:\n credit_journal[0].sudo().write({'credit': True,\n 'default_debit_account_id': credit_account.id,\n 'default_credit_account_id': credit_account.id,\n 'pos_method_type': 'credit', 'sequence': 102})\n credit_journal = credit_journal[0]\n else:\n new_sequence = self.env['ir.sequence'].sudo().create({'name': \n 'Credit account ' + str(user.company_id.id), 'padding': 3,\n 'prefix': 'CA ' + str(user.company_id.id)})\n self.env['ir.model.data'].sudo().create({'name': \n 'journal_sequence' + str(new_sequence.id), 'model':\n 'ir.sequence', 'module': 'pos_retail', 'res_id':\n new_sequence.id, 'noupdate': True})\n credit_journal = Journal.sudo().create({'name':\n 'Customer Credit', 'code': 'CJ', 'type': 'cash',\n 'pos_method_type': 'credit', 'journal_user': True,\n 'sequence_id': new_sequence.id, 'company_id': user.\n company_id.id, 'default_debit_account_id': credit_account.\n id, 'default_credit_account_id': credit_account.id,\n 'sequence': 102})\n self.env['ir.model.data'].sudo().create({'name': \n 'credit_journal_' + str(credit_journal.id), 'model':\n 'account.journal', 'module': 'pos_retail', 'res_id': int(\n credit_journal.id), 'noupdate': True})\n config = self\n config.sudo().write({'journal_ids': [(4, credit_journal.id)]})\n statement = [(0, 0, {'journal_id': credit_journal.id, 'user_id':\n user.id, 'company_id': user.company_id.id})]\n current_session = config.current_session_id\n current_session.sudo().write({'statement_ids': statement})\n return True\n\n def init_return_order_journal(self):\n Journal = self.env['account.journal']\n user = self.env.user\n return_journal = Journal.sudo().search([('code', '=', 'ROJ'), (\n 'company_id', '=', user.company_id.id)])\n if return_journal:\n return return_journal.sudo().write({'pos_method_type': 'return'})\n Account = self.env['account.account']\n return_account_old_version = Account.sudo().search([('code', '=',\n 'ARO'), ('company_id', '=', user.company_id.id)])\n if return_account_old_version:\n return_account = return_account_old_version[0]\n else:\n return_account = Account.sudo().create({'name':\n 'Return Order Account', 'code': 'ARO', 'user_type_id': self\n .env.ref('account.data_account_type_current_assets').id,\n 'company_id': user.company_id.id, 'note':\n 'code \"ARO\" give return order from customer'})\n self.env['ir.model.data'].sudo().create({'name': \n 'return_account' + str(user.company_id.id), 'model':\n 'account.account', 'module': 'pos_retail', 'res_id':\n return_account.id, 'noupdate': True})\n return_journal = Journal.sudo().search([('code', '=', 'ROJ'), (\n 'company_id', '=', user.company_id.id)])\n if return_journal:\n return_journal[0].sudo().write({'default_debit_account_id':\n return_account.id, 'default_credit_account_id':\n return_account.id, 'pos_method_type': 'return'})\n return_journal = return_journal[0]\n else:\n new_sequence = self.env['ir.sequence'].sudo().create({'name': \n 'Return account ' + str(user.company_id.id), 'padding': 3,\n 'prefix': 'RA ' + str(user.company_id.id)})\n self.env['ir.model.data'].sudo().create({'name': \n 'journal_sequence' + str(new_sequence.id), 'model':\n 'ir.sequence', 'module': 'pos_retail', 'res_id':\n new_sequence.id, 'noupdate': True})\n return_journal = Journal.sudo().create({'name':\n 'Return Order Customer', 'code': 'ROJ', 'type': 'cash',\n 'pos_method_type': 'return', 'journal_user': True,\n 'sequence_id': new_sequence.id, 'company_id': user.\n company_id.id, 'default_debit_account_id': return_account.\n id, 'default_credit_account_id': return_account.id,\n 'sequence': 103})\n self.env['ir.model.data'].sudo().create({'name': \n 'return_journal_' + str(return_journal.id), 'model':\n 'account.journal', 'module': 'pos_retail', 'res_id': int(\n return_journal.id), 'noupdate': True})\n config = self\n config.sudo().write({'journal_ids': [(4, return_journal.id)]})\n statement = [(0, 0, {'journal_id': return_journal.id, 'user_id':\n user.id, 'company_id': user.company_id.id})]\n current_session = config.current_session_id\n current_session.sudo().write({'statement_ids': statement})\n return True\n\n def init_rounding_journal(self):\n Journal = self.env['account.journal']\n Account = self.env['account.account']\n user = self.env.user\n rounding_journal = Journal.sudo().search([('code', '=', 'RDJ'), (\n 'company_id', '=', user.company_id.id)])\n if rounding_journal:\n return rounding_journal.sudo().write({'pos_method_type':\n 'rounding'})\n rounding_account_old_version = Account.sudo().search([('code', '=',\n 'AAR'), ('company_id', '=', user.company_id.id)])\n if rounding_account_old_version:\n rounding_account = rounding_account_old_version[0]\n else:\n _logger.info('rounding_account have not')\n rounding_account = Account.sudo().create({'name':\n 'Rounding Account', 'code': 'AAR', 'user_type_id': self.env\n .ref('account.data_account_type_current_assets').id,\n 'company_id': user.company_id.id, 'note':\n 'code \"AAR\" give rounding pos order'})\n self.env['ir.model.data'].sudo().create({'name': \n 'rounding_account' + str(user.company_id.id), 'model':\n 'account.account', 'module': 'pos_retail', 'res_id':\n rounding_account.id, 'noupdate': True})\n rounding_journal = Journal.sudo().search([('pos_method_type', '=',\n 'rounding'), ('company_id', '=', user.company_id.id)])\n if rounding_journal:\n rounding_journal[0].sudo().write({'name': 'Rounding',\n 'default_debit_account_id': rounding_account.id,\n 'default_credit_account_id': rounding_account.id,\n 'pos_method_type': 'rounding', 'code': 'RDJ'})\n rounding_journal = rounding_journal[0]\n else:\n new_sequence = self.env['ir.sequence'].sudo().create({'name': \n 'rounding account ' + str(user.company_id.id), 'padding': 3,\n 'prefix': 'RA ' + str(user.company_id.id)})\n self.env['ir.model.data'].sudo().create({'name': \n 'journal_sequence' + str(new_sequence.id), 'model':\n 'ir.sequence', 'module': 'pos_retail', 'res_id':\n new_sequence.id, 'noupdate': True})\n rounding_journal = Journal.sudo().create({'name': 'Rounding',\n 'code': 'RDJ', 'type': 'cash', 'pos_method_type':\n 'rounding', 'journal_user': True, 'sequence_id':\n new_sequence.id, 'company_id': user.company_id.id,\n 'default_debit_account_id': rounding_account.id,\n 'default_credit_account_id': rounding_account.id,\n 'sequence': 103})\n self.env['ir.model.data'].sudo().create({'name': \n 'rounding_journal_' + str(rounding_journal.id), 'model':\n 'account.journal', 'module': 'pos_retail', 'res_id': int(\n rounding_journal.id), 'noupdate': True})\n config = self\n config.sudo().write({'journal_ids': [(4, rounding_journal.id)]})\n statement = [(0, 0, {'journal_id': rounding_journal.id, 'user_id':\n user.id, 'company_id': user.company_id.id})]\n current_session = config.current_session_id\n current_session.sudo().write({'statement_ids': statement})\n return True\n\n @api.multi\n def open_ui(self):\n res = super(pos_config, self).open_ui()\n self.init_voucher_journal()\n self.init_wallet_journal()\n self.init_credit_journal()\n self.init_return_order_journal()\n self.init_rounding_journal()\n return res\n\n @api.multi\n def open_session_cb(self):\n res = super(pos_config, self).open_session_cb()\n self.init_voucher_journal()\n self.init_wallet_journal()\n self.init_credit_journal()\n self.init_return_order_journal()\n self.init_rounding_journal()\n return res\n",
"<import token>\n<code token>\n<assignment token>\n\n\nclass pos_config_image(models.Model):\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n\n\nclass pos_config(models.Model):\n _inherit = 'pos.config'\n user_id = fields.Many2one('res.users', 'Assigned to')\n config_access_right = fields.Boolean('Config access right', default=1)\n allow_discount = fields.Boolean('Change discount', default=1)\n allow_qty = fields.Boolean('Change quantity', default=1)\n allow_price = fields.Boolean('Change price', default=1)\n allow_remove_line = fields.Boolean('Remove line', default=1)\n allow_numpad = fields.Boolean('Display numpad', default=1)\n allow_payment = fields.Boolean('Display payment', default=1)\n allow_customer = fields.Boolean('Choice customer', default=1)\n allow_add_order = fields.Boolean('New order', default=1)\n allow_remove_order = fields.Boolean('Remove order', default=1)\n allow_add_product = fields.Boolean('Add line', default=1)\n allow_lock_screen = fields.Boolean('Lock screen', default=0, help=\n 'When pos sessions start, cashiers required open POS viva pos pass pin (Setting/Users)'\n )\n display_point_receipt = fields.Boolean('Display point / receipt')\n loyalty_id = fields.Many2one('pos.loyalty', 'Loyalty', domain=[('state',\n '=', 'running')])\n promotion_ids = fields.Many2many('pos.promotion',\n 'pos_config_promotion_rel', 'config_id', 'promotion_id', string=\n 'Promotion programs')\n promotion_manual_select = fields.Boolean('Promotion manual choice',\n default=0)\n create_purchase_order = fields.Boolean('Create PO', default=0)\n create_purchase_order_required_signature = fields.Boolean(\n 'Required signature', default=0)\n purchase_order_state = fields.Selection([('confirm_order',\n 'Auto confirm'), ('confirm_picking', 'Auto delivery'), (\n 'confirm_invoice', 'Auto invoice')], 'PO state', help=\n 'This is state of purchase order will process to', default=\n 'confirm_invoice')\n sync_sale_order = fields.Boolean('Sync sale orders', default=0)\n sale_order = fields.Boolean('Create Sale order', default=0)\n sale_order_auto_confirm = fields.Boolean('Auto confirm', default=0)\n sale_order_auto_invoice = fields.Boolean('Auto paid', default=0)\n sale_order_auto_delivery = fields.Boolean('Auto delivery', default=0)\n pos_orders_management = fields.Boolean('POS order management', default=0)\n pos_order_period_return_days = fields.Float('Return period days', help=\n 'this is period time for customer can return order', default=30)\n display_return_days_receipt = fields.Boolean('Display return days receipt',\n default=0)\n sync_pricelist = fields.Boolean('Sync prices list', default=0)\n display_onhand = fields.Boolean('Show qty available product', default=1,\n help='Display quantity on hand all products on pos screen')\n large_stocks = fields.Boolean('Large stock', help=\n 'If count products bigger than 100,000 rows, please check it')\n allow_order_out_of_stock = fields.Boolean('Allow out-of-stock', default\n =1, help='If checked, allow cashier can add product have out of stock')\n allow_of_stock_approve_by_admin = fields.Boolean('Approve allow of stock',\n help='Allow manager approve allow of stock')\n print_voucher = fields.Boolean('Print vouchers', help=\n 'Reprint last vouchers', default=1)\n scan_voucher = fields.Boolean('Scan voucher', default=0)\n expired_days_voucher = fields.Integer('Expired days of voucher',\n default=30, help=\n 'Total days keep voucher can use, if out of period days from create date, voucher will expired'\n )\n sync_multi_session = fields.Boolean('Sync multi session', default=0)\n bus_id = fields.Many2one('pos.bus', string='Branch/store')\n display_person_add_line = fields.Boolean('Display information line',\n default=0, help=\n 'When you checked, on pos order lines screen, will display information person created order (lines) Eg: create date, updated date ..'\n )\n quickly_payment = fields.Boolean('Quickly payment', default=0)\n internal_transfer = fields.Boolean('Internal transfer', default=0, help\n ='Go Inventory and active multi warehouse and location')\n internal_transfer_auto_validate = fields.Boolean(\n 'Internal transfer auto validate', default=0)\n discount = fields.Boolean('Global discount', default=0)\n discount_ids = fields.Many2many('pos.global.discount',\n 'pos_config_pos_global_discount_rel', 'config_id', 'discount_id',\n 'Global discounts')\n is_customer_screen = fields.Boolean('Is customer screen')\n delay = fields.Integer('Delay time', default=3000)\n slogan = fields.Char('Slogan', help=\n 'This is message will display on screen of customer')\n image_ids = fields.One2many('pos.config.image', 'config_id', 'Images')\n tooltip = fields.Boolean('Show information of product', default=0)\n tooltip_show_last_price = fields.Boolean('Show last price of product',\n help='Show last price of items of customer have bought before',\n default=0)\n tooltip_show_minimum_sale_price = fields.Boolean(\n 'Show min of product sale price', help=\n 'Show minimum sale price of product', default=0)\n discount_limit = fields.Boolean('Discount limit', default=0)\n discount_limit_amount = fields.Float('Discount limit amount', default=10)\n discount_each_line = fields.Boolean('Discount each line')\n discount_unlock_limit = fields.Boolean('Manager can unlock limit')\n discount_unlock_limit_user_id = fields.Many2one('res.users',\n 'User unlock limit amount')\n multi_currency = fields.Boolean('Multi currency', default=0)\n multi_currency_update_rate = fields.Boolean('Update rate', default=0)\n notify_alert = fields.Boolean('Notify alert', help=\n 'Turn on/off notification alert on POS sessions.', default=0)\n return_products = fields.Boolean('Return orders', help=\n 'Allow cashier return orders, return products', default=0)\n receipt_without_payment_template = fields.Selection([('none', 'None'),\n ('display_price', 'Display price'), ('not_display_price',\n 'Not display price')], default='not_display_price', string=\n 'Receipt without payment template')\n lock_order_printed_receipt = fields.Boolean('Lock order printed receipt',\n default=0)\n staff_level = fields.Selection([('manual', 'Manual config'), (\n 'marketing', 'Marketing'), ('waiter', 'Waiter'), ('cashier',\n 'Cashier'), ('manager', 'Manager')], string='Staff level', default=\n 'manual')\n validate_payment = fields.Boolean('Validate payment')\n validate_remove_order = fields.Boolean('Validate remove order')\n validate_change_minus = fields.Boolean('Validate pressed +/-')\n validate_quantity_change = fields.Boolean('Validate quantity change')\n validate_price_change = fields.Boolean('Validate price change')\n validate_discount_change = fields.Boolean('Validate discount change')\n validate_close_session = fields.Boolean('Validate close session')\n validate_by_user_id = fields.Many2one('res.users', 'Validate by admin')\n apply_validate_return_mode = fields.Boolean('Validate return mode',\n help='If checked, only applied validate when return order', default=1)\n print_user_card = fields.Boolean('Print user card')\n product_operation = fields.Boolean('Product Operation', default=0, help\n ='Allow cashiers add pos categories and products on pos screen')\n quickly_payment_full = fields.Boolean('Quickly payment full')\n quickly_payment_full_journal_id = fields.Many2one('account.journal',\n 'Payment mode', domain=[('journal_user', '=', True)])\n daily_report = fields.Boolean('Daily report', default=0)\n note_order = fields.Boolean('Note order', default=0)\n note_orderline = fields.Boolean('Note order line', default=0)\n signature_order = fields.Boolean('Signature order', default=0)\n quickly_buttons = fields.Boolean('Quickly Actions', default=0)\n display_amount_discount = fields.Boolean('Display amount discount',\n default=0)\n booking_orders = fields.Boolean('Booking orders', default=0)\n booking_orders_required_cashier_signature = fields.Boolean(\n 'Book order required sessions signature', help=\n 'Checked if need required pos seller signature', default=0)\n booking_orders_alert = fields.Boolean('Alert when new order coming',\n default=0)\n delivery_orders = fields.Boolean('Delivery orders', help=\n 'Pos clients can get booking orders and delivery orders', default=0)\n booking_orders_display_shipping_receipt = fields.Boolean(\n 'Display shipping on receipt', default=0)\n display_tax_orderline = fields.Boolean('Display tax orderline', default=0)\n display_tax_receipt = fields.Boolean('Display tax receipt', default=0)\n display_fiscal_position_receipt = fields.Boolean(\n 'Display fiscal position on receipt', default=0)\n display_image_orderline = fields.Boolean('Display image order line',\n default=0)\n display_image_receipt = fields.Boolean('Display image receipt', default=0)\n duplicate_receipt = fields.Boolean('Duplicate Receipt')\n print_number = fields.Integer('Print number', help=\n 'How many number receipt need to print at printer ?', default=0)\n lock_session = fields.Boolean('Lock session', default=0)\n category_wise_receipt = fields.Boolean('Category wise receipt', default=0)\n management_invoice = fields.Boolean('Management Invoice', default=0)\n invoice_journal_ids = fields.Many2many('account.journal',\n 'pos_config_invoice_journal_rel', 'config_id', 'journal_id',\n 'Accounting Invoice Journal', domain=[('type', '=', 'sale')], help=\n 'Accounting journal use for create invoices.')\n send_invoice_email = fields.Boolean('Send email invoice', help=\n 'Help cashier send invoice to email of customer', default=0)\n lock_print_invoice_on_pos = fields.Boolean('Lock print invoice', help=\n 'Lock print pdf invoice when clicked button invoice', default=0)\n pos_auto_invoice = fields.Boolean('Auto create invoice', help=\n 'Automatic create invoice if order have client', default=0)\n receipt_invoice_number = fields.Boolean('Add invoice on receipt', help=\n 'Show invoice number on receipt header', default=0)\n receipt_customer_vat = fields.Boolean('Add vat customer on receipt',\n help='Show customer VAT(TIN) on receipt header', default=0)\n auto_register_payment = fields.Boolean('Auto invocie register payment',\n default=0)\n fiscal_position_auto_detect = fields.Boolean('Fiscal position auto detect',\n default=0)\n display_sale_price_within_tax = fields.Boolean(\n 'Display sale price within tax', default=0)\n display_cost_price = fields.Boolean('Display product cost price', default=0\n )\n display_product_ref = fields.Boolean('Display product ref', default=0)\n multi_location = fields.Boolean('Multi location', default=0)\n product_view = fields.Selection([('box', 'Box view'), ('list',\n 'List view')], default='box', string='View of products screen',\n required=1)\n ticket_font_size = fields.Integer('Ticket font size', default=12)\n customer_default_id = fields.Many2one('res.partner', 'Customer default')\n medical_insurance = fields.Boolean('Medical insurance', default=0)\n set_guest = fields.Boolean('Set guest', default=0)\n reset_sequence = fields.Boolean('Reset sequence order', default=0)\n update_tax = fields.Boolean('Modify tax', default=0, help=\n 'Cashier can change tax of order line')\n subtotal_tax_included = fields.Boolean('Show Tax-Included Prices', help\n ='When checked, subtotal of line will display amount have tax-included'\n )\n cash_out = fields.Boolean('Take money out', default=0, help=\n 'Allow cashiers take money out')\n cash_in = fields.Boolean('Push money in', default=0, help=\n 'Allow cashiers input money in')\n min_length_search = fields.Integer('Min character length search',\n default=3, help=\n 'Allow auto suggestion items when cashiers input on search box')\n review_receipt_before_paid = fields.Boolean('Review receipt before paid',\n help='Show receipt before paid order', default=1)\n keyboard_event = fields.Boolean('Keyboard event', default=0, help=\n 'Allow cashiers use shortcut keyboard')\n multi_variant = fields.Boolean('Multi variant', default=0, help=\n 'Allow cashiers change variant of order lines on pos screen')\n switch_user = fields.Boolean('Switch user', default=0, help=\n 'Allow cashiers switch to another cashier')\n change_unit_of_measure = fields.Boolean('Change unit of measure',\n default=0, help='Allow cashiers change unit of measure of order lines')\n print_last_order = fields.Boolean('Print last receipt', default=0, help\n ='Allow cashiers print last receipt')\n close_session = fields.Boolean('Close session', help=\n 'When cashiers click close pos, auto log out of system', default=0)\n display_image_product = fields.Boolean('Display image product', default\n =1, help='Allow hide/display product images on pos screen')\n printer_on_off = fields.Boolean('On/Off printer', help=\n 'Help cashier turn on/off printer viva posbox', default=0)\n check_duplicate_email = fields.Boolean('Check duplicate email', default=0)\n check_duplicate_phone = fields.Boolean('Check duplicate phone', default=0)\n hide_country = fields.Boolean('Hide country', default=0)\n hide_barcode = fields.Boolean('Hide barcode', default=0)\n hide_tax = fields.Boolean('Hide tax', default=0)\n hide_pricelist = fields.Boolean('Hide pricelists', default=0)\n hide_supplier = fields.Boolean('Hide suppiers', default=1)\n auto_remove_line = fields.Boolean('Auto remove line', default=1, help=\n 'When cashier set quantity of line to 0, line auto remove not keep line with qty is 0'\n )\n chat = fields.Boolean('Chat message', default=0, help=\n 'Allow chat, discuss between pos sessions')\n add_tags = fields.Boolean('Add tags line', default=0, help=\n 'Allow cashiers add tags to order lines')\n add_notes = fields.Boolean('Add notes line', default=0, help=\n 'Allow cashiers add notes to order lines')\n add_sale_person = fields.Boolean('Add sale person', default=0)\n logo = fields.Binary('Logo of store')\n paid_full = fields.Boolean('Allow paid full', default=0, help=\n 'Allow cashiers click one button, do payment full order')\n paid_partial = fields.Boolean('Allow partial payment', default=0, help=\n 'Allow cashiers do partial payment')\n backup = fields.Boolean('Backup/Restore orders', default=0, help=\n 'Allow cashiers backup and restore orders on pos screen')\n backup_orders = fields.Text('Backup orders')\n change_logo = fields.Boolean('Change logo', default=1, help=\n 'Allow cashiers change logo of shop on pos screen')\n management_session = fields.Boolean('Management session', default=0)\n barcode_receipt = fields.Boolean('Barcode receipt', default=0)\n hide_mobile = fields.Boolean('Hide mobile', default=1)\n hide_phone = fields.Boolean('Hide phone', default=1)\n hide_email = fields.Boolean('Hide email', default=1)\n update_client = fields.Boolean('Update client', help=\n 'Uncheck if you dont want cashier change customer information on pos')\n add_client = fields.Boolean('Add client', help=\n 'Uncheck if you dont want cashier add new customers on pos')\n remove_client = fields.Boolean('Remove client', help=\n 'Uncheck if you dont want cashier remove customers on pos')\n mobile_responsive = fields.Boolean('Mobile responsive', default=0)\n hide_amount_total = fields.Boolean('Hide amount total', default=1)\n hide_amount_taxes = fields.Boolean('Hide amount taxes', default=1)\n report_no_of_report = fields.Integer(string='No.of Copy Receipt', default=1\n )\n report_signature = fields.Boolean(string='Report Signature', default=1)\n report_product_summary = fields.Boolean(string='Report Product Summary',\n default=1)\n report_product_current_month_date = fields.Boolean(string=\n 'Report This Month', default=1)\n report_order_summary = fields.Boolean(string='Report Order Summary',\n default=1)\n report_order_current_month_date = fields.Boolean(string=\n 'Report Current Month', default=1)\n report_payment_summary = fields.Boolean(string='Report Payment Summary',\n default=1)\n report_payment_current_month_date = fields.Boolean(string=\n 'Payment Current Month', default=1)\n active_product_sort_by = fields.Boolean('Active product sort by', default=1\n )\n default_product_sort_by = fields.Selection([('a_z', 'Sort from A to Z'),\n ('z_a', 'Sort from Z to A'), ('low_price',\n 'Sort from low to high price'), ('high_price',\n 'Sort from high to low price'), ('pos_sequence',\n 'Product pos sequence')], string='Default sort by', default='a_z')\n sale_extra = fields.Boolean('Sale extra', default=1)\n required_add_customer_before_put_product_to_cart = fields.Boolean(\n 'Required add customer first', help=\n 'If you checked on this checkbox, in POS always required cashier add customer the first'\n )\n only_one_time_add_customer = fields.Boolean('Only one time add customer',\n help='Each orders, only one time add customer')\n use_parameters = fields.Boolean('Use parameters', help=\n 'POS need only one time save parameter datas use on POS, and next times no need call backend'\n , default=1)\n time_refresh_parameter = fields.Integer('Time refresh datas (seconds)',\n help='Time for refresh parameters data', default=30)\n\n @api.model\n def switch_mobile_mode(self, config_id, vals):\n if vals.get('mobile_responsive') == True:\n vals['product_view'] = 'box'\n return self.browse(config_id).sudo().write(vals)\n\n @api.multi\n def remove_database(self):\n for config in self:\n sessions = self.env['pos.session'].search([('config_id', '=',\n config.id)])\n for session in sessions:\n self.env['bus.bus'].sendmany([[(self.env.cr.dbname,\n 'pos.indexed_db', session.user_id.id), json.dumps({'db':\n self.env.cr.dbname})]])\n self.env['pos.cache.database'].search([]).unlink()\n self.env['pos.call.log'].search([]).unlink()\n return {'type': 'ir.actions.act_url', 'url': '/pos/web/',\n 'target': 'self'}\n\n @api.multi\n def remove_caches(self):\n for config in self:\n sessions = self.env['pos.session'].search([('config_id', '=',\n config.id)])\n for session in sessions:\n self.env['bus.bus'].sendmany([[(self.env.cr.dbname,\n 'pos.indexed_db', session.user_id.id), json.dumps({'db':\n self.env.cr.dbname})]])\n if session.state != 'closed':\n session.action_pos_session_closing_control()\n return {'type': 'ir.actions.act_url', 'url': '/pos/web/',\n 'target': 'self'}\n\n @api.model\n def store_cached_file(self, datas):\n start = timeit.default_timer()\n _logger.info('==> begin cached_file')\n os.chdir(os.path.dirname(__file__))\n path = os.getcwd()\n file_name = path + '/pos.json'\n if os.path.exists(file_name):\n os.remove(file_name)\n with io.open(file_name, 'w', encoding='utf8') as outfile:\n str_ = json.dumps(datas, indent=4, sort_keys=True, separators=(\n ',', ': '), ensure_ascii=False)\n outfile.write(to_unicode(str_))\n stop = timeit.default_timer()\n _logger.info(stop - start)\n return True\n\n @api.model\n def get_cached_file(self):\n start = timeit.default_timer()\n _logger.info('==> begin get_cached_file')\n os.chdir(os.path.dirname(__file__))\n path = os.getcwd()\n file_name = path + '/pos.json'\n if not os.path.exists(file_name):\n return False\n else:\n with open(file_name) as f:\n datas = json.load(f)\n stop = timeit.default_timer()\n _logger.info(stop - start)\n return datas\n\n def get_fields_by_model(self, model):\n all_fields = self.env[model].fields_get()\n fields_list = []\n for field, value in all_fields.items():\n if field == 'model' or all_fields[field]['type'] in ['one2many',\n 'binary']:\n continue\n else:\n fields_list.append(field)\n return fields_list\n\n @api.model\n def install_data(self, model_name=None, min_id=0, max_id=1999):\n cache_obj = self.env['pos.cache.database'].with_context(prefetch_fields\n =False)\n log_obj = self.env['pos.call.log'].with_context(prefetch_fields=False)\n domain = [('id', '>=', min_id), ('id', '<=', max_id)]\n if model_name == 'product.product':\n domain.append(('available_in_pos', '=', True))\n field_list = cache_obj.get_fields_by_model(model_name)\n self.env.cr.execute(\n \"select id from pos_call_log where min_id=%s and max_id=%s and call_model='%s'\"\n % (min_id, max_id, model_name))\n old_logs = self.env.cr.fetchall()\n datas = None\n if len(old_logs) == 0:\n _logger.info('installing %s from %s to %s' % (model_name,\n min_id, max_id))\n datas = self.env[model_name].with_context(prefetch_fields=False\n ).search_read(domain, field_list)\n version_info = odoo.release.version_info[0]\n if version_info == 12:\n all_fields = self.env[model_name].fields_get()\n for data in datas:\n for field, value in data.items():\n if field == 'model':\n continue\n if all_fields[field] and all_fields[field]['type'] in [\n 'date', 'datetime'] and value:\n data[field] = value.strftime(\n DEFAULT_SERVER_DATETIME_FORMAT)\n vals = {'active': True, 'min_id': min_id, 'max_id': max_id,\n 'call_fields': json.dumps(field_list), 'call_results': json\n .dumps(datas), 'call_model': model_name, 'call_domain':\n json.dumps(domain)}\n log_obj.create(vals)\n else:\n old_log_id = old_logs[0][0]\n old_log = log_obj.browse(old_log_id)\n datas = old_log.call_results\n self.env.cr.commit()\n return datas\n\n @api.onchange('lock_print_invoice_on_pos')\n def _onchange_lock_print_invoice_on_pos(self):\n if self.lock_print_invoice_on_pos == True:\n self.receipt_invoice_number = False\n self.send_invoice_email = True\n else:\n self.receipt_invoice_number = True\n self.send_invoice_email = False\n\n @api.onchange('receipt_invoice_number')\n def _onchange_receipt_invoice_number(self):\n if self.receipt_invoice_number == True:\n self.lock_print_invoice_on_pos = False\n else:\n self.lock_print_invoice_on_pos = True\n\n @api.onchange('pos_auto_invoice')\n def _onchange_pos_auto_invoice(self):\n if self.pos_auto_invoice == True:\n self.iface_invoicing = True\n else:\n self.iface_invoicing = False\n\n @api.onchange('staff_level')\n def on_change_staff_level(self):\n if self.staff_level and self.staff_level == 'manager':\n self.lock_order_printed_receipt = False\n\n @api.multi\n def write(self, vals):\n if vals.get('allow_discount', False) or vals.get('allow_qty', False\n ) or vals.get('allow_price', False):\n vals['allow_numpad'] = True\n if vals.get('expired_days_voucher', None) and vals.get(\n 'expired_days_voucher') < 0:\n raise UserError('Expired days of voucher could not smaller than 0')\n for config in self:\n if vals.get('management_session', False) and not vals.get(\n 'default_cashbox_lines_ids'):\n if (not config.default_cashbox_lines_ids and not config.\n cash_control):\n raise UserError(\n 'Please go to Cash control and add Default Opening')\n res = super(pos_config, self).write(vals)\n for config in self:\n if (config.validate_by_user_id and not config.\n validate_by_user_id.pos_security_pin):\n raise UserError(\n 'Validate user %s have not set pos security pin, please go to Users menu and input security password'\n % config.validate_by_user_id.name)\n if (config.discount_unlock_limit_user_id and not config.\n discount_unlock_limit_user_id.pos_security_pin):\n raise UserError(\n 'User Unlock limit discount: %s ,have not set pos security pin, please go to Users menu and input security password'\n % config.discount_unlock_limit_user_id.name)\n return res\n\n @api.model\n def create(self, vals):\n if vals.get('allow_discount', False) or vals.get('allow_qty', False\n ) or vals.get('allow_price', False):\n vals['allow_numpad'] = True\n if vals.get('expired_days_voucher', 0) < 0:\n raise UserError('Expired days of voucher could not smaller than 0')\n config = super(pos_config, self).create(vals)\n if (config.management_session and not config.\n default_cashbox_lines_ids and not config.cash_control):\n raise UserError('Please go to Cash control and add Default Opening'\n )\n if (config.validate_by_user_id and not config.validate_by_user_id.\n pos_security_pin):\n raise UserError(\n 'Validate user %s have not set pos security pin, please go to Users menu and input security password'\n % config.validate_by_user_id.name)\n if (config.discount_unlock_limit_user_id and not config.\n discount_unlock_limit_user_id.pos_security_pin):\n raise UserError(\n 'User Unlock limit discount: %s ,have not set pos security pin, please go to Users menu and input security password'\n % config.discount_unlock_limit_user_id.name)\n return config\n\n def init_wallet_journal(self):\n Journal = self.env['account.journal']\n user = self.env.user\n wallet_journal = Journal.sudo().search([('code', '=', 'UWJ'), (\n 'company_id', '=', user.company_id.id)])\n if wallet_journal:\n return wallet_journal.sudo().write({'pos_method_type': 'wallet'})\n Account = self.env['account.account']\n wallet_account_old_version = Account.sudo().search([('code', '=',\n 'AUW'), ('company_id', '=', user.company_id.id)])\n if wallet_account_old_version:\n wallet_account = wallet_account_old_version[0]\n else:\n wallet_account = Account.sudo().create({'name':\n 'Account wallet', 'code': 'AUW', 'user_type_id': self.env.\n ref('account.data_account_type_current_assets').id,\n 'company_id': user.company_id.id, 'note':\n 'code \"AUW\" auto give wallet amount of customers'})\n self.env['ir.model.data'].sudo().create({'name': \n 'account_use_wallet' + str(user.company_id.id), 'model':\n 'account.account', 'module': 'pos_retail', 'res_id':\n wallet_account.id, 'noupdate': True})\n wallet_journal_inactive = Journal.sudo().search([('code', '=',\n 'UWJ'), ('company_id', '=', user.company_id.id), (\n 'pos_method_type', '=', 'wallet')])\n if wallet_journal_inactive:\n wallet_journal_inactive.sudo().write({\n 'default_debit_account_id': wallet_account.id,\n 'default_credit_account_id': wallet_account.id,\n 'pos_method_type': 'wallet', 'sequence': 100})\n wallet_journal = wallet_journal_inactive\n else:\n new_sequence = self.env['ir.sequence'].sudo().create({'name': \n 'Account Default Wallet Journal ' + str(user.company_id.id),\n 'padding': 3, 'prefix': 'UW ' + str(user.company_id.id)})\n self.env['ir.model.data'].sudo().create({'name': \n 'journal_sequence' + str(new_sequence.id), 'model':\n 'ir.sequence', 'module': 'pos_retail', 'res_id':\n new_sequence.id, 'noupdate': True})\n wallet_journal = Journal.sudo().create({'name': 'Wallet',\n 'code': 'UWJ', 'type': 'cash', 'pos_method_type': 'wallet',\n 'journal_user': True, 'sequence_id': new_sequence.id,\n 'company_id': user.company_id.id,\n 'default_debit_account_id': wallet_account.id,\n 'default_credit_account_id': wallet_account.id, 'sequence':\n 100})\n self.env['ir.model.data'].sudo().create({'name': \n 'use_wallet_journal_' + str(wallet_journal.id), 'model':\n 'account.journal', 'module': 'pos_retail', 'res_id': int(\n wallet_journal.id), 'noupdate': True})\n config = self\n config.sudo().write({'journal_ids': [(4, wallet_journal.id)]})\n statement = [(0, 0, {'journal_id': wallet_journal.id, 'user_id':\n user.id, 'company_id': user.company_id.id})]\n current_session = config.current_session_id\n current_session.sudo().write({'statement_ids': statement})\n return\n\n def init_voucher_journal(self):\n Journal = self.env['account.journal']\n user = self.env.user\n voucher_journal = Journal.sudo().search([('code', '=', 'VCJ'), (\n 'company_id', '=', user.company_id.id)])\n if voucher_journal:\n return voucher_journal.sudo().write({'pos_method_type': 'voucher'})\n Account = self.env['account.account']\n voucher_account_old_version = Account.sudo().search([('code', '=',\n 'AVC'), ('company_id', '=', user.company_id.id)])\n if voucher_account_old_version:\n voucher_account = voucher_account_old_version[0]\n else:\n voucher_account = Account.sudo().create({'name':\n 'Account voucher', 'code': 'AVC', 'user_type_id': self.env.\n ref('account.data_account_type_current_assets').id,\n 'company_id': user.company_id.id, 'note':\n 'code \"AVC\" auto give voucher histories of customers'})\n self.env['ir.model.data'].sudo().create({'name': \n 'account_voucher' + str(user.company_id.id), 'model':\n 'account.account', 'module': 'pos_retail', 'res_id':\n voucher_account.id, 'noupdate': True})\n voucher_journal = Journal.sudo().search([('code', '=', 'VCJ'), (\n 'company_id', '=', user.company_id.id), ('pos_method_type', '=',\n 'voucher')])\n if voucher_journal:\n voucher_journal[0].sudo().write({'voucher': True,\n 'default_debit_account_id': voucher_account.id,\n 'default_credit_account_id': voucher_account.id,\n 'pos_method_type': 'voucher', 'sequence': 101})\n voucher_journal = voucher_journal[0]\n else:\n new_sequence = self.env['ir.sequence'].sudo().create({'name': \n 'Account Voucher ' + str(user.company_id.id), 'padding': 3,\n 'prefix': 'AVC ' + str(user.company_id.id)})\n self.env['ir.model.data'].sudo().create({'name': \n 'journal_sequence' + str(new_sequence.id), 'model':\n 'ir.sequence', 'module': 'pos_retail', 'res_id':\n new_sequence.id, 'noupdate': True})\n voucher_journal = Journal.sudo().create({'name': 'Voucher',\n 'code': 'VCJ', 'type': 'cash', 'pos_method_type': 'voucher',\n 'journal_user': True, 'sequence_id': new_sequence.id,\n 'company_id': user.company_id.id,\n 'default_debit_account_id': voucher_account.id,\n 'default_credit_account_id': voucher_account.id, 'sequence':\n 101})\n self.env['ir.model.data'].sudo().create({'name': \n 'journal_voucher_' + str(voucher_journal.id), 'model':\n 'account.journal', 'module': 'pos_retail', 'res_id': int(\n voucher_journal.id), 'noupdate': True})\n config = self\n config.sudo().write({'journal_ids': [(4, voucher_journal.id)]})\n statement = [(0, 0, {'journal_id': voucher_journal.id, 'user_id':\n user.id, 'company_id': user.company_id.id})]\n current_session = config.current_session_id\n current_session.sudo().write({'statement_ids': statement})\n return\n\n def init_credit_journal(self):\n Journal = self.env['account.journal']\n user = self.env.user\n voucher_journal = Journal.sudo().search([('code', '=', 'CJ'), (\n 'company_id', '=', user.company_id.id)])\n if voucher_journal:\n return voucher_journal.sudo().write({'pos_method_type': 'credit'})\n Account = self.env['account.account']\n credit_account_old_version = Account.sudo().search([('code', '=',\n 'ACJ'), ('company_id', '=', user.company_id.id)])\n if credit_account_old_version:\n credit_account = credit_account_old_version[0]\n else:\n credit_account = Account.sudo().create({'name':\n 'Credit Account', 'code': 'CA', 'user_type_id': self.env.\n ref('account.data_account_type_current_assets').id,\n 'company_id': user.company_id.id, 'note':\n 'code \"CA\" give credit payment customer'})\n self.env['ir.model.data'].sudo().create({'name': \n 'account_credit' + str(user.company_id.id), 'model':\n 'account.account', 'module': 'pos_retail', 'res_id':\n credit_account.id, 'noupdate': True})\n credit_journal = Journal.sudo().search([('code', '=', 'CJ'), (\n 'company_id', '=', user.company_id.id), ('pos_method_type', '=',\n 'credit')])\n if credit_journal:\n credit_journal[0].sudo().write({'credit': True,\n 'default_debit_account_id': credit_account.id,\n 'default_credit_account_id': credit_account.id,\n 'pos_method_type': 'credit', 'sequence': 102})\n credit_journal = credit_journal[0]\n else:\n new_sequence = self.env['ir.sequence'].sudo().create({'name': \n 'Credit account ' + str(user.company_id.id), 'padding': 3,\n 'prefix': 'CA ' + str(user.company_id.id)})\n self.env['ir.model.data'].sudo().create({'name': \n 'journal_sequence' + str(new_sequence.id), 'model':\n 'ir.sequence', 'module': 'pos_retail', 'res_id':\n new_sequence.id, 'noupdate': True})\n credit_journal = Journal.sudo().create({'name':\n 'Customer Credit', 'code': 'CJ', 'type': 'cash',\n 'pos_method_type': 'credit', 'journal_user': True,\n 'sequence_id': new_sequence.id, 'company_id': user.\n company_id.id, 'default_debit_account_id': credit_account.\n id, 'default_credit_account_id': credit_account.id,\n 'sequence': 102})\n self.env['ir.model.data'].sudo().create({'name': \n 'credit_journal_' + str(credit_journal.id), 'model':\n 'account.journal', 'module': 'pos_retail', 'res_id': int(\n credit_journal.id), 'noupdate': True})\n config = self\n config.sudo().write({'journal_ids': [(4, credit_journal.id)]})\n statement = [(0, 0, {'journal_id': credit_journal.id, 'user_id':\n user.id, 'company_id': user.company_id.id})]\n current_session = config.current_session_id\n current_session.sudo().write({'statement_ids': statement})\n return True\n\n def init_return_order_journal(self):\n Journal = self.env['account.journal']\n user = self.env.user\n return_journal = Journal.sudo().search([('code', '=', 'ROJ'), (\n 'company_id', '=', user.company_id.id)])\n if return_journal:\n return return_journal.sudo().write({'pos_method_type': 'return'})\n Account = self.env['account.account']\n return_account_old_version = Account.sudo().search([('code', '=',\n 'ARO'), ('company_id', '=', user.company_id.id)])\n if return_account_old_version:\n return_account = return_account_old_version[0]\n else:\n return_account = Account.sudo().create({'name':\n 'Return Order Account', 'code': 'ARO', 'user_type_id': self\n .env.ref('account.data_account_type_current_assets').id,\n 'company_id': user.company_id.id, 'note':\n 'code \"ARO\" give return order from customer'})\n self.env['ir.model.data'].sudo().create({'name': \n 'return_account' + str(user.company_id.id), 'model':\n 'account.account', 'module': 'pos_retail', 'res_id':\n return_account.id, 'noupdate': True})\n return_journal = Journal.sudo().search([('code', '=', 'ROJ'), (\n 'company_id', '=', user.company_id.id)])\n if return_journal:\n return_journal[0].sudo().write({'default_debit_account_id':\n return_account.id, 'default_credit_account_id':\n return_account.id, 'pos_method_type': 'return'})\n return_journal = return_journal[0]\n else:\n new_sequence = self.env['ir.sequence'].sudo().create({'name': \n 'Return account ' + str(user.company_id.id), 'padding': 3,\n 'prefix': 'RA ' + str(user.company_id.id)})\n self.env['ir.model.data'].sudo().create({'name': \n 'journal_sequence' + str(new_sequence.id), 'model':\n 'ir.sequence', 'module': 'pos_retail', 'res_id':\n new_sequence.id, 'noupdate': True})\n return_journal = Journal.sudo().create({'name':\n 'Return Order Customer', 'code': 'ROJ', 'type': 'cash',\n 'pos_method_type': 'return', 'journal_user': True,\n 'sequence_id': new_sequence.id, 'company_id': user.\n company_id.id, 'default_debit_account_id': return_account.\n id, 'default_credit_account_id': return_account.id,\n 'sequence': 103})\n self.env['ir.model.data'].sudo().create({'name': \n 'return_journal_' + str(return_journal.id), 'model':\n 'account.journal', 'module': 'pos_retail', 'res_id': int(\n return_journal.id), 'noupdate': True})\n config = self\n config.sudo().write({'journal_ids': [(4, return_journal.id)]})\n statement = [(0, 0, {'journal_id': return_journal.id, 'user_id':\n user.id, 'company_id': user.company_id.id})]\n current_session = config.current_session_id\n current_session.sudo().write({'statement_ids': statement})\n return True\n\n def init_rounding_journal(self):\n Journal = self.env['account.journal']\n Account = self.env['account.account']\n user = self.env.user\n rounding_journal = Journal.sudo().search([('code', '=', 'RDJ'), (\n 'company_id', '=', user.company_id.id)])\n if rounding_journal:\n return rounding_journal.sudo().write({'pos_method_type':\n 'rounding'})\n rounding_account_old_version = Account.sudo().search([('code', '=',\n 'AAR'), ('company_id', '=', user.company_id.id)])\n if rounding_account_old_version:\n rounding_account = rounding_account_old_version[0]\n else:\n _logger.info('rounding_account have not')\n rounding_account = Account.sudo().create({'name':\n 'Rounding Account', 'code': 'AAR', 'user_type_id': self.env\n .ref('account.data_account_type_current_assets').id,\n 'company_id': user.company_id.id, 'note':\n 'code \"AAR\" give rounding pos order'})\n self.env['ir.model.data'].sudo().create({'name': \n 'rounding_account' + str(user.company_id.id), 'model':\n 'account.account', 'module': 'pos_retail', 'res_id':\n rounding_account.id, 'noupdate': True})\n rounding_journal = Journal.sudo().search([('pos_method_type', '=',\n 'rounding'), ('company_id', '=', user.company_id.id)])\n if rounding_journal:\n rounding_journal[0].sudo().write({'name': 'Rounding',\n 'default_debit_account_id': rounding_account.id,\n 'default_credit_account_id': rounding_account.id,\n 'pos_method_type': 'rounding', 'code': 'RDJ'})\n rounding_journal = rounding_journal[0]\n else:\n new_sequence = self.env['ir.sequence'].sudo().create({'name': \n 'rounding account ' + str(user.company_id.id), 'padding': 3,\n 'prefix': 'RA ' + str(user.company_id.id)})\n self.env['ir.model.data'].sudo().create({'name': \n 'journal_sequence' + str(new_sequence.id), 'model':\n 'ir.sequence', 'module': 'pos_retail', 'res_id':\n new_sequence.id, 'noupdate': True})\n rounding_journal = Journal.sudo().create({'name': 'Rounding',\n 'code': 'RDJ', 'type': 'cash', 'pos_method_type':\n 'rounding', 'journal_user': True, 'sequence_id':\n new_sequence.id, 'company_id': user.company_id.id,\n 'default_debit_account_id': rounding_account.id,\n 'default_credit_account_id': rounding_account.id,\n 'sequence': 103})\n self.env['ir.model.data'].sudo().create({'name': \n 'rounding_journal_' + str(rounding_journal.id), 'model':\n 'account.journal', 'module': 'pos_retail', 'res_id': int(\n rounding_journal.id), 'noupdate': True})\n config = self\n config.sudo().write({'journal_ids': [(4, rounding_journal.id)]})\n statement = [(0, 0, {'journal_id': rounding_journal.id, 'user_id':\n user.id, 'company_id': user.company_id.id})]\n current_session = config.current_session_id\n current_session.sudo().write({'statement_ids': statement})\n return True\n\n @api.multi\n def open_ui(self):\n res = super(pos_config, self).open_ui()\n self.init_voucher_journal()\n self.init_wallet_journal()\n self.init_credit_journal()\n self.init_return_order_journal()\n self.init_rounding_journal()\n return res\n\n @api.multi\n def open_session_cb(self):\n res = super(pos_config, self).open_session_cb()\n self.init_voucher_journal()\n self.init_wallet_journal()\n self.init_credit_journal()\n self.init_return_order_journal()\n self.init_rounding_journal()\n return res\n",
"<import token>\n<code token>\n<assignment token>\n<class token>\n\n\nclass pos_config(models.Model):\n _inherit = 'pos.config'\n user_id = fields.Many2one('res.users', 'Assigned to')\n config_access_right = fields.Boolean('Config access right', default=1)\n allow_discount = fields.Boolean('Change discount', default=1)\n allow_qty = fields.Boolean('Change quantity', default=1)\n allow_price = fields.Boolean('Change price', default=1)\n allow_remove_line = fields.Boolean('Remove line', default=1)\n allow_numpad = fields.Boolean('Display numpad', default=1)\n allow_payment = fields.Boolean('Display payment', default=1)\n allow_customer = fields.Boolean('Choice customer', default=1)\n allow_add_order = fields.Boolean('New order', default=1)\n allow_remove_order = fields.Boolean('Remove order', default=1)\n allow_add_product = fields.Boolean('Add line', default=1)\n allow_lock_screen = fields.Boolean('Lock screen', default=0, help=\n 'When pos sessions start, cashiers required open POS viva pos pass pin (Setting/Users)'\n )\n display_point_receipt = fields.Boolean('Display point / receipt')\n loyalty_id = fields.Many2one('pos.loyalty', 'Loyalty', domain=[('state',\n '=', 'running')])\n promotion_ids = fields.Many2many('pos.promotion',\n 'pos_config_promotion_rel', 'config_id', 'promotion_id', string=\n 'Promotion programs')\n promotion_manual_select = fields.Boolean('Promotion manual choice',\n default=0)\n create_purchase_order = fields.Boolean('Create PO', default=0)\n create_purchase_order_required_signature = fields.Boolean(\n 'Required signature', default=0)\n purchase_order_state = fields.Selection([('confirm_order',\n 'Auto confirm'), ('confirm_picking', 'Auto delivery'), (\n 'confirm_invoice', 'Auto invoice')], 'PO state', help=\n 'This is state of purchase order will process to', default=\n 'confirm_invoice')\n sync_sale_order = fields.Boolean('Sync sale orders', default=0)\n sale_order = fields.Boolean('Create Sale order', default=0)\n sale_order_auto_confirm = fields.Boolean('Auto confirm', default=0)\n sale_order_auto_invoice = fields.Boolean('Auto paid', default=0)\n sale_order_auto_delivery = fields.Boolean('Auto delivery', default=0)\n pos_orders_management = fields.Boolean('POS order management', default=0)\n pos_order_period_return_days = fields.Float('Return period days', help=\n 'this is period time for customer can return order', default=30)\n display_return_days_receipt = fields.Boolean('Display return days receipt',\n default=0)\n sync_pricelist = fields.Boolean('Sync prices list', default=0)\n display_onhand = fields.Boolean('Show qty available product', default=1,\n help='Display quantity on hand all products on pos screen')\n large_stocks = fields.Boolean('Large stock', help=\n 'If count products bigger than 100,000 rows, please check it')\n allow_order_out_of_stock = fields.Boolean('Allow out-of-stock', default\n =1, help='If checked, allow cashier can add product have out of stock')\n allow_of_stock_approve_by_admin = fields.Boolean('Approve allow of stock',\n help='Allow manager approve allow of stock')\n print_voucher = fields.Boolean('Print vouchers', help=\n 'Reprint last vouchers', default=1)\n scan_voucher = fields.Boolean('Scan voucher', default=0)\n expired_days_voucher = fields.Integer('Expired days of voucher',\n default=30, help=\n 'Total days keep voucher can use, if out of period days from create date, voucher will expired'\n )\n sync_multi_session = fields.Boolean('Sync multi session', default=0)\n bus_id = fields.Many2one('pos.bus', string='Branch/store')\n display_person_add_line = fields.Boolean('Display information line',\n default=0, help=\n 'When you checked, on pos order lines screen, will display information person created order (lines) Eg: create date, updated date ..'\n )\n quickly_payment = fields.Boolean('Quickly payment', default=0)\n internal_transfer = fields.Boolean('Internal transfer', default=0, help\n ='Go Inventory and active multi warehouse and location')\n internal_transfer_auto_validate = fields.Boolean(\n 'Internal transfer auto validate', default=0)\n discount = fields.Boolean('Global discount', default=0)\n discount_ids = fields.Many2many('pos.global.discount',\n 'pos_config_pos_global_discount_rel', 'config_id', 'discount_id',\n 'Global discounts')\n is_customer_screen = fields.Boolean('Is customer screen')\n delay = fields.Integer('Delay time', default=3000)\n slogan = fields.Char('Slogan', help=\n 'This is message will display on screen of customer')\n image_ids = fields.One2many('pos.config.image', 'config_id', 'Images')\n tooltip = fields.Boolean('Show information of product', default=0)\n tooltip_show_last_price = fields.Boolean('Show last price of product',\n help='Show last price of items of customer have bought before',\n default=0)\n tooltip_show_minimum_sale_price = fields.Boolean(\n 'Show min of product sale price', help=\n 'Show minimum sale price of product', default=0)\n discount_limit = fields.Boolean('Discount limit', default=0)\n discount_limit_amount = fields.Float('Discount limit amount', default=10)\n discount_each_line = fields.Boolean('Discount each line')\n discount_unlock_limit = fields.Boolean('Manager can unlock limit')\n discount_unlock_limit_user_id = fields.Many2one('res.users',\n 'User unlock limit amount')\n multi_currency = fields.Boolean('Multi currency', default=0)\n multi_currency_update_rate = fields.Boolean('Update rate', default=0)\n notify_alert = fields.Boolean('Notify alert', help=\n 'Turn on/off notification alert on POS sessions.', default=0)\n return_products = fields.Boolean('Return orders', help=\n 'Allow cashier return orders, return products', default=0)\n receipt_without_payment_template = fields.Selection([('none', 'None'),\n ('display_price', 'Display price'), ('not_display_price',\n 'Not display price')], default='not_display_price', string=\n 'Receipt without payment template')\n lock_order_printed_receipt = fields.Boolean('Lock order printed receipt',\n default=0)\n staff_level = fields.Selection([('manual', 'Manual config'), (\n 'marketing', 'Marketing'), ('waiter', 'Waiter'), ('cashier',\n 'Cashier'), ('manager', 'Manager')], string='Staff level', default=\n 'manual')\n validate_payment = fields.Boolean('Validate payment')\n validate_remove_order = fields.Boolean('Validate remove order')\n validate_change_minus = fields.Boolean('Validate pressed +/-')\n validate_quantity_change = fields.Boolean('Validate quantity change')\n validate_price_change = fields.Boolean('Validate price change')\n validate_discount_change = fields.Boolean('Validate discount change')\n validate_close_session = fields.Boolean('Validate close session')\n validate_by_user_id = fields.Many2one('res.users', 'Validate by admin')\n apply_validate_return_mode = fields.Boolean('Validate return mode',\n help='If checked, only applied validate when return order', default=1)\n print_user_card = fields.Boolean('Print user card')\n product_operation = fields.Boolean('Product Operation', default=0, help\n ='Allow cashiers add pos categories and products on pos screen')\n quickly_payment_full = fields.Boolean('Quickly payment full')\n quickly_payment_full_journal_id = fields.Many2one('account.journal',\n 'Payment mode', domain=[('journal_user', '=', True)])\n daily_report = fields.Boolean('Daily report', default=0)\n note_order = fields.Boolean('Note order', default=0)\n note_orderline = fields.Boolean('Note order line', default=0)\n signature_order = fields.Boolean('Signature order', default=0)\n quickly_buttons = fields.Boolean('Quickly Actions', default=0)\n display_amount_discount = fields.Boolean('Display amount discount',\n default=0)\n booking_orders = fields.Boolean('Booking orders', default=0)\n booking_orders_required_cashier_signature = fields.Boolean(\n 'Book order required sessions signature', help=\n 'Checked if need required pos seller signature', default=0)\n booking_orders_alert = fields.Boolean('Alert when new order coming',\n default=0)\n delivery_orders = fields.Boolean('Delivery orders', help=\n 'Pos clients can get booking orders and delivery orders', default=0)\n booking_orders_display_shipping_receipt = fields.Boolean(\n 'Display shipping on receipt', default=0)\n display_tax_orderline = fields.Boolean('Display tax orderline', default=0)\n display_tax_receipt = fields.Boolean('Display tax receipt', default=0)\n display_fiscal_position_receipt = fields.Boolean(\n 'Display fiscal position on receipt', default=0)\n display_image_orderline = fields.Boolean('Display image order line',\n default=0)\n display_image_receipt = fields.Boolean('Display image receipt', default=0)\n duplicate_receipt = fields.Boolean('Duplicate Receipt')\n print_number = fields.Integer('Print number', help=\n 'How many number receipt need to print at printer ?', default=0)\n lock_session = fields.Boolean('Lock session', default=0)\n category_wise_receipt = fields.Boolean('Category wise receipt', default=0)\n management_invoice = fields.Boolean('Management Invoice', default=0)\n invoice_journal_ids = fields.Many2many('account.journal',\n 'pos_config_invoice_journal_rel', 'config_id', 'journal_id',\n 'Accounting Invoice Journal', domain=[('type', '=', 'sale')], help=\n 'Accounting journal use for create invoices.')\n send_invoice_email = fields.Boolean('Send email invoice', help=\n 'Help cashier send invoice to email of customer', default=0)\n lock_print_invoice_on_pos = fields.Boolean('Lock print invoice', help=\n 'Lock print pdf invoice when clicked button invoice', default=0)\n pos_auto_invoice = fields.Boolean('Auto create invoice', help=\n 'Automatic create invoice if order have client', default=0)\n receipt_invoice_number = fields.Boolean('Add invoice on receipt', help=\n 'Show invoice number on receipt header', default=0)\n receipt_customer_vat = fields.Boolean('Add vat customer on receipt',\n help='Show customer VAT(TIN) on receipt header', default=0)\n auto_register_payment = fields.Boolean('Auto invocie register payment',\n default=0)\n fiscal_position_auto_detect = fields.Boolean('Fiscal position auto detect',\n default=0)\n display_sale_price_within_tax = fields.Boolean(\n 'Display sale price within tax', default=0)\n display_cost_price = fields.Boolean('Display product cost price', default=0\n )\n display_product_ref = fields.Boolean('Display product ref', default=0)\n multi_location = fields.Boolean('Multi location', default=0)\n product_view = fields.Selection([('box', 'Box view'), ('list',\n 'List view')], default='box', string='View of products screen',\n required=1)\n ticket_font_size = fields.Integer('Ticket font size', default=12)\n customer_default_id = fields.Many2one('res.partner', 'Customer default')\n medical_insurance = fields.Boolean('Medical insurance', default=0)\n set_guest = fields.Boolean('Set guest', default=0)\n reset_sequence = fields.Boolean('Reset sequence order', default=0)\n update_tax = fields.Boolean('Modify tax', default=0, help=\n 'Cashier can change tax of order line')\n subtotal_tax_included = fields.Boolean('Show Tax-Included Prices', help\n ='When checked, subtotal of line will display amount have tax-included'\n )\n cash_out = fields.Boolean('Take money out', default=0, help=\n 'Allow cashiers take money out')\n cash_in = fields.Boolean('Push money in', default=0, help=\n 'Allow cashiers input money in')\n min_length_search = fields.Integer('Min character length search',\n default=3, help=\n 'Allow auto suggestion items when cashiers input on search box')\n review_receipt_before_paid = fields.Boolean('Review receipt before paid',\n help='Show receipt before paid order', default=1)\n keyboard_event = fields.Boolean('Keyboard event', default=0, help=\n 'Allow cashiers use shortcut keyboard')\n multi_variant = fields.Boolean('Multi variant', default=0, help=\n 'Allow cashiers change variant of order lines on pos screen')\n switch_user = fields.Boolean('Switch user', default=0, help=\n 'Allow cashiers switch to another cashier')\n change_unit_of_measure = fields.Boolean('Change unit of measure',\n default=0, help='Allow cashiers change unit of measure of order lines')\n print_last_order = fields.Boolean('Print last receipt', default=0, help\n ='Allow cashiers print last receipt')\n close_session = fields.Boolean('Close session', help=\n 'When cashiers click close pos, auto log out of system', default=0)\n display_image_product = fields.Boolean('Display image product', default\n =1, help='Allow hide/display product images on pos screen')\n printer_on_off = fields.Boolean('On/Off printer', help=\n 'Help cashier turn on/off printer viva posbox', default=0)\n check_duplicate_email = fields.Boolean('Check duplicate email', default=0)\n check_duplicate_phone = fields.Boolean('Check duplicate phone', default=0)\n hide_country = fields.Boolean('Hide country', default=0)\n hide_barcode = fields.Boolean('Hide barcode', default=0)\n hide_tax = fields.Boolean('Hide tax', default=0)\n hide_pricelist = fields.Boolean('Hide pricelists', default=0)\n hide_supplier = fields.Boolean('Hide suppiers', default=1)\n auto_remove_line = fields.Boolean('Auto remove line', default=1, help=\n 'When cashier set quantity of line to 0, line auto remove not keep line with qty is 0'\n )\n chat = fields.Boolean('Chat message', default=0, help=\n 'Allow chat, discuss between pos sessions')\n add_tags = fields.Boolean('Add tags line', default=0, help=\n 'Allow cashiers add tags to order lines')\n add_notes = fields.Boolean('Add notes line', default=0, help=\n 'Allow cashiers add notes to order lines')\n add_sale_person = fields.Boolean('Add sale person', default=0)\n logo = fields.Binary('Logo of store')\n paid_full = fields.Boolean('Allow paid full', default=0, help=\n 'Allow cashiers click one button, do payment full order')\n paid_partial = fields.Boolean('Allow partial payment', default=0, help=\n 'Allow cashiers do partial payment')\n backup = fields.Boolean('Backup/Restore orders', default=0, help=\n 'Allow cashiers backup and restore orders on pos screen')\n backup_orders = fields.Text('Backup orders')\n change_logo = fields.Boolean('Change logo', default=1, help=\n 'Allow cashiers change logo of shop on pos screen')\n management_session = fields.Boolean('Management session', default=0)\n barcode_receipt = fields.Boolean('Barcode receipt', default=0)\n hide_mobile = fields.Boolean('Hide mobile', default=1)\n hide_phone = fields.Boolean('Hide phone', default=1)\n hide_email = fields.Boolean('Hide email', default=1)\n update_client = fields.Boolean('Update client', help=\n 'Uncheck if you dont want cashier change customer information on pos')\n add_client = fields.Boolean('Add client', help=\n 'Uncheck if you dont want cashier add new customers on pos')\n remove_client = fields.Boolean('Remove client', help=\n 'Uncheck if you dont want cashier remove customers on pos')\n mobile_responsive = fields.Boolean('Mobile responsive', default=0)\n hide_amount_total = fields.Boolean('Hide amount total', default=1)\n hide_amount_taxes = fields.Boolean('Hide amount taxes', default=1)\n report_no_of_report = fields.Integer(string='No.of Copy Receipt', default=1\n )\n report_signature = fields.Boolean(string='Report Signature', default=1)\n report_product_summary = fields.Boolean(string='Report Product Summary',\n default=1)\n report_product_current_month_date = fields.Boolean(string=\n 'Report This Month', default=1)\n report_order_summary = fields.Boolean(string='Report Order Summary',\n default=1)\n report_order_current_month_date = fields.Boolean(string=\n 'Report Current Month', default=1)\n report_payment_summary = fields.Boolean(string='Report Payment Summary',\n default=1)\n report_payment_current_month_date = fields.Boolean(string=\n 'Payment Current Month', default=1)\n active_product_sort_by = fields.Boolean('Active product sort by', default=1\n )\n default_product_sort_by = fields.Selection([('a_z', 'Sort from A to Z'),\n ('z_a', 'Sort from Z to A'), ('low_price',\n 'Sort from low to high price'), ('high_price',\n 'Sort from high to low price'), ('pos_sequence',\n 'Product pos sequence')], string='Default sort by', default='a_z')\n sale_extra = fields.Boolean('Sale extra', default=1)\n required_add_customer_before_put_product_to_cart = fields.Boolean(\n 'Required add customer first', help=\n 'If you checked on this checkbox, in POS always required cashier add customer the first'\n )\n only_one_time_add_customer = fields.Boolean('Only one time add customer',\n help='Each orders, only one time add customer')\n use_parameters = fields.Boolean('Use parameters', help=\n 'POS need only one time save parameter datas use on POS, and next times no need call backend'\n , default=1)\n time_refresh_parameter = fields.Integer('Time refresh datas (seconds)',\n help='Time for refresh parameters data', default=30)\n\n @api.model\n def switch_mobile_mode(self, config_id, vals):\n if vals.get('mobile_responsive') == True:\n vals['product_view'] = 'box'\n return self.browse(config_id).sudo().write(vals)\n\n @api.multi\n def remove_database(self):\n for config in self:\n sessions = self.env['pos.session'].search([('config_id', '=',\n config.id)])\n for session in sessions:\n self.env['bus.bus'].sendmany([[(self.env.cr.dbname,\n 'pos.indexed_db', session.user_id.id), json.dumps({'db':\n self.env.cr.dbname})]])\n self.env['pos.cache.database'].search([]).unlink()\n self.env['pos.call.log'].search([]).unlink()\n return {'type': 'ir.actions.act_url', 'url': '/pos/web/',\n 'target': 'self'}\n\n @api.multi\n def remove_caches(self):\n for config in self:\n sessions = self.env['pos.session'].search([('config_id', '=',\n config.id)])\n for session in sessions:\n self.env['bus.bus'].sendmany([[(self.env.cr.dbname,\n 'pos.indexed_db', session.user_id.id), json.dumps({'db':\n self.env.cr.dbname})]])\n if session.state != 'closed':\n session.action_pos_session_closing_control()\n return {'type': 'ir.actions.act_url', 'url': '/pos/web/',\n 'target': 'self'}\n\n @api.model\n def store_cached_file(self, datas):\n start = timeit.default_timer()\n _logger.info('==> begin cached_file')\n os.chdir(os.path.dirname(__file__))\n path = os.getcwd()\n file_name = path + '/pos.json'\n if os.path.exists(file_name):\n os.remove(file_name)\n with io.open(file_name, 'w', encoding='utf8') as outfile:\n str_ = json.dumps(datas, indent=4, sort_keys=True, separators=(\n ',', ': '), ensure_ascii=False)\n outfile.write(to_unicode(str_))\n stop = timeit.default_timer()\n _logger.info(stop - start)\n return True\n\n @api.model\n def get_cached_file(self):\n start = timeit.default_timer()\n _logger.info('==> begin get_cached_file')\n os.chdir(os.path.dirname(__file__))\n path = os.getcwd()\n file_name = path + '/pos.json'\n if not os.path.exists(file_name):\n return False\n else:\n with open(file_name) as f:\n datas = json.load(f)\n stop = timeit.default_timer()\n _logger.info(stop - start)\n return datas\n\n def get_fields_by_model(self, model):\n all_fields = self.env[model].fields_get()\n fields_list = []\n for field, value in all_fields.items():\n if field == 'model' or all_fields[field]['type'] in ['one2many',\n 'binary']:\n continue\n else:\n fields_list.append(field)\n return fields_list\n\n @api.model\n def install_data(self, model_name=None, min_id=0, max_id=1999):\n cache_obj = self.env['pos.cache.database'].with_context(prefetch_fields\n =False)\n log_obj = self.env['pos.call.log'].with_context(prefetch_fields=False)\n domain = [('id', '>=', min_id), ('id', '<=', max_id)]\n if model_name == 'product.product':\n domain.append(('available_in_pos', '=', True))\n field_list = cache_obj.get_fields_by_model(model_name)\n self.env.cr.execute(\n \"select id from pos_call_log where min_id=%s and max_id=%s and call_model='%s'\"\n % (min_id, max_id, model_name))\n old_logs = self.env.cr.fetchall()\n datas = None\n if len(old_logs) == 0:\n _logger.info('installing %s from %s to %s' % (model_name,\n min_id, max_id))\n datas = self.env[model_name].with_context(prefetch_fields=False\n ).search_read(domain, field_list)\n version_info = odoo.release.version_info[0]\n if version_info == 12:\n all_fields = self.env[model_name].fields_get()\n for data in datas:\n for field, value in data.items():\n if field == 'model':\n continue\n if all_fields[field] and all_fields[field]['type'] in [\n 'date', 'datetime'] and value:\n data[field] = value.strftime(\n DEFAULT_SERVER_DATETIME_FORMAT)\n vals = {'active': True, 'min_id': min_id, 'max_id': max_id,\n 'call_fields': json.dumps(field_list), 'call_results': json\n .dumps(datas), 'call_model': model_name, 'call_domain':\n json.dumps(domain)}\n log_obj.create(vals)\n else:\n old_log_id = old_logs[0][0]\n old_log = log_obj.browse(old_log_id)\n datas = old_log.call_results\n self.env.cr.commit()\n return datas\n\n @api.onchange('lock_print_invoice_on_pos')\n def _onchange_lock_print_invoice_on_pos(self):\n if self.lock_print_invoice_on_pos == True:\n self.receipt_invoice_number = False\n self.send_invoice_email = True\n else:\n self.receipt_invoice_number = True\n self.send_invoice_email = False\n\n @api.onchange('receipt_invoice_number')\n def _onchange_receipt_invoice_number(self):\n if self.receipt_invoice_number == True:\n self.lock_print_invoice_on_pos = False\n else:\n self.lock_print_invoice_on_pos = True\n\n @api.onchange('pos_auto_invoice')\n def _onchange_pos_auto_invoice(self):\n if self.pos_auto_invoice == True:\n self.iface_invoicing = True\n else:\n self.iface_invoicing = False\n\n @api.onchange('staff_level')\n def on_change_staff_level(self):\n if self.staff_level and self.staff_level == 'manager':\n self.lock_order_printed_receipt = False\n\n @api.multi\n def write(self, vals):\n if vals.get('allow_discount', False) or vals.get('allow_qty', False\n ) or vals.get('allow_price', False):\n vals['allow_numpad'] = True\n if vals.get('expired_days_voucher', None) and vals.get(\n 'expired_days_voucher') < 0:\n raise UserError('Expired days of voucher could not smaller than 0')\n for config in self:\n if vals.get('management_session', False) and not vals.get(\n 'default_cashbox_lines_ids'):\n if (not config.default_cashbox_lines_ids and not config.\n cash_control):\n raise UserError(\n 'Please go to Cash control and add Default Opening')\n res = super(pos_config, self).write(vals)\n for config in self:\n if (config.validate_by_user_id and not config.\n validate_by_user_id.pos_security_pin):\n raise UserError(\n 'Validate user %s have not set pos security pin, please go to Users menu and input security password'\n % config.validate_by_user_id.name)\n if (config.discount_unlock_limit_user_id and not config.\n discount_unlock_limit_user_id.pos_security_pin):\n raise UserError(\n 'User Unlock limit discount: %s ,have not set pos security pin, please go to Users menu and input security password'\n % config.discount_unlock_limit_user_id.name)\n return res\n\n @api.model\n def create(self, vals):\n if vals.get('allow_discount', False) or vals.get('allow_qty', False\n ) or vals.get('allow_price', False):\n vals['allow_numpad'] = True\n if vals.get('expired_days_voucher', 0) < 0:\n raise UserError('Expired days of voucher could not smaller than 0')\n config = super(pos_config, self).create(vals)\n if (config.management_session and not config.\n default_cashbox_lines_ids and not config.cash_control):\n raise UserError('Please go to Cash control and add Default Opening'\n )\n if (config.validate_by_user_id and not config.validate_by_user_id.\n pos_security_pin):\n raise UserError(\n 'Validate user %s have not set pos security pin, please go to Users menu and input security password'\n % config.validate_by_user_id.name)\n if (config.discount_unlock_limit_user_id and not config.\n discount_unlock_limit_user_id.pos_security_pin):\n raise UserError(\n 'User Unlock limit discount: %s ,have not set pos security pin, please go to Users menu and input security password'\n % config.discount_unlock_limit_user_id.name)\n return config\n\n def init_wallet_journal(self):\n Journal = self.env['account.journal']\n user = self.env.user\n wallet_journal = Journal.sudo().search([('code', '=', 'UWJ'), (\n 'company_id', '=', user.company_id.id)])\n if wallet_journal:\n return wallet_journal.sudo().write({'pos_method_type': 'wallet'})\n Account = self.env['account.account']\n wallet_account_old_version = Account.sudo().search([('code', '=',\n 'AUW'), ('company_id', '=', user.company_id.id)])\n if wallet_account_old_version:\n wallet_account = wallet_account_old_version[0]\n else:\n wallet_account = Account.sudo().create({'name':\n 'Account wallet', 'code': 'AUW', 'user_type_id': self.env.\n ref('account.data_account_type_current_assets').id,\n 'company_id': user.company_id.id, 'note':\n 'code \"AUW\" auto give wallet amount of customers'})\n self.env['ir.model.data'].sudo().create({'name': \n 'account_use_wallet' + str(user.company_id.id), 'model':\n 'account.account', 'module': 'pos_retail', 'res_id':\n wallet_account.id, 'noupdate': True})\n wallet_journal_inactive = Journal.sudo().search([('code', '=',\n 'UWJ'), ('company_id', '=', user.company_id.id), (\n 'pos_method_type', '=', 'wallet')])\n if wallet_journal_inactive:\n wallet_journal_inactive.sudo().write({\n 'default_debit_account_id': wallet_account.id,\n 'default_credit_account_id': wallet_account.id,\n 'pos_method_type': 'wallet', 'sequence': 100})\n wallet_journal = wallet_journal_inactive\n else:\n new_sequence = self.env['ir.sequence'].sudo().create({'name': \n 'Account Default Wallet Journal ' + str(user.company_id.id),\n 'padding': 3, 'prefix': 'UW ' + str(user.company_id.id)})\n self.env['ir.model.data'].sudo().create({'name': \n 'journal_sequence' + str(new_sequence.id), 'model':\n 'ir.sequence', 'module': 'pos_retail', 'res_id':\n new_sequence.id, 'noupdate': True})\n wallet_journal = Journal.sudo().create({'name': 'Wallet',\n 'code': 'UWJ', 'type': 'cash', 'pos_method_type': 'wallet',\n 'journal_user': True, 'sequence_id': new_sequence.id,\n 'company_id': user.company_id.id,\n 'default_debit_account_id': wallet_account.id,\n 'default_credit_account_id': wallet_account.id, 'sequence':\n 100})\n self.env['ir.model.data'].sudo().create({'name': \n 'use_wallet_journal_' + str(wallet_journal.id), 'model':\n 'account.journal', 'module': 'pos_retail', 'res_id': int(\n wallet_journal.id), 'noupdate': True})\n config = self\n config.sudo().write({'journal_ids': [(4, wallet_journal.id)]})\n statement = [(0, 0, {'journal_id': wallet_journal.id, 'user_id':\n user.id, 'company_id': user.company_id.id})]\n current_session = config.current_session_id\n current_session.sudo().write({'statement_ids': statement})\n return\n\n def init_voucher_journal(self):\n Journal = self.env['account.journal']\n user = self.env.user\n voucher_journal = Journal.sudo().search([('code', '=', 'VCJ'), (\n 'company_id', '=', user.company_id.id)])\n if voucher_journal:\n return voucher_journal.sudo().write({'pos_method_type': 'voucher'})\n Account = self.env['account.account']\n voucher_account_old_version = Account.sudo().search([('code', '=',\n 'AVC'), ('company_id', '=', user.company_id.id)])\n if voucher_account_old_version:\n voucher_account = voucher_account_old_version[0]\n else:\n voucher_account = Account.sudo().create({'name':\n 'Account voucher', 'code': 'AVC', 'user_type_id': self.env.\n ref('account.data_account_type_current_assets').id,\n 'company_id': user.company_id.id, 'note':\n 'code \"AVC\" auto give voucher histories of customers'})\n self.env['ir.model.data'].sudo().create({'name': \n 'account_voucher' + str(user.company_id.id), 'model':\n 'account.account', 'module': 'pos_retail', 'res_id':\n voucher_account.id, 'noupdate': True})\n voucher_journal = Journal.sudo().search([('code', '=', 'VCJ'), (\n 'company_id', '=', user.company_id.id), ('pos_method_type', '=',\n 'voucher')])\n if voucher_journal:\n voucher_journal[0].sudo().write({'voucher': True,\n 'default_debit_account_id': voucher_account.id,\n 'default_credit_account_id': voucher_account.id,\n 'pos_method_type': 'voucher', 'sequence': 101})\n voucher_journal = voucher_journal[0]\n else:\n new_sequence = self.env['ir.sequence'].sudo().create({'name': \n 'Account Voucher ' + str(user.company_id.id), 'padding': 3,\n 'prefix': 'AVC ' + str(user.company_id.id)})\n self.env['ir.model.data'].sudo().create({'name': \n 'journal_sequence' + str(new_sequence.id), 'model':\n 'ir.sequence', 'module': 'pos_retail', 'res_id':\n new_sequence.id, 'noupdate': True})\n voucher_journal = Journal.sudo().create({'name': 'Voucher',\n 'code': 'VCJ', 'type': 'cash', 'pos_method_type': 'voucher',\n 'journal_user': True, 'sequence_id': new_sequence.id,\n 'company_id': user.company_id.id,\n 'default_debit_account_id': voucher_account.id,\n 'default_credit_account_id': voucher_account.id, 'sequence':\n 101})\n self.env['ir.model.data'].sudo().create({'name': \n 'journal_voucher_' + str(voucher_journal.id), 'model':\n 'account.journal', 'module': 'pos_retail', 'res_id': int(\n voucher_journal.id), 'noupdate': True})\n config = self\n config.sudo().write({'journal_ids': [(4, voucher_journal.id)]})\n statement = [(0, 0, {'journal_id': voucher_journal.id, 'user_id':\n user.id, 'company_id': user.company_id.id})]\n current_session = config.current_session_id\n current_session.sudo().write({'statement_ids': statement})\n return\n\n def init_credit_journal(self):\n Journal = self.env['account.journal']\n user = self.env.user\n voucher_journal = Journal.sudo().search([('code', '=', 'CJ'), (\n 'company_id', '=', user.company_id.id)])\n if voucher_journal:\n return voucher_journal.sudo().write({'pos_method_type': 'credit'})\n Account = self.env['account.account']\n credit_account_old_version = Account.sudo().search([('code', '=',\n 'ACJ'), ('company_id', '=', user.company_id.id)])\n if credit_account_old_version:\n credit_account = credit_account_old_version[0]\n else:\n credit_account = Account.sudo().create({'name':\n 'Credit Account', 'code': 'CA', 'user_type_id': self.env.\n ref('account.data_account_type_current_assets').id,\n 'company_id': user.company_id.id, 'note':\n 'code \"CA\" give credit payment customer'})\n self.env['ir.model.data'].sudo().create({'name': \n 'account_credit' + str(user.company_id.id), 'model':\n 'account.account', 'module': 'pos_retail', 'res_id':\n credit_account.id, 'noupdate': True})\n credit_journal = Journal.sudo().search([('code', '=', 'CJ'), (\n 'company_id', '=', user.company_id.id), ('pos_method_type', '=',\n 'credit')])\n if credit_journal:\n credit_journal[0].sudo().write({'credit': True,\n 'default_debit_account_id': credit_account.id,\n 'default_credit_account_id': credit_account.id,\n 'pos_method_type': 'credit', 'sequence': 102})\n credit_journal = credit_journal[0]\n else:\n new_sequence = self.env['ir.sequence'].sudo().create({'name': \n 'Credit account ' + str(user.company_id.id), 'padding': 3,\n 'prefix': 'CA ' + str(user.company_id.id)})\n self.env['ir.model.data'].sudo().create({'name': \n 'journal_sequence' + str(new_sequence.id), 'model':\n 'ir.sequence', 'module': 'pos_retail', 'res_id':\n new_sequence.id, 'noupdate': True})\n credit_journal = Journal.sudo().create({'name':\n 'Customer Credit', 'code': 'CJ', 'type': 'cash',\n 'pos_method_type': 'credit', 'journal_user': True,\n 'sequence_id': new_sequence.id, 'company_id': user.\n company_id.id, 'default_debit_account_id': credit_account.\n id, 'default_credit_account_id': credit_account.id,\n 'sequence': 102})\n self.env['ir.model.data'].sudo().create({'name': \n 'credit_journal_' + str(credit_journal.id), 'model':\n 'account.journal', 'module': 'pos_retail', 'res_id': int(\n credit_journal.id), 'noupdate': True})\n config = self\n config.sudo().write({'journal_ids': [(4, credit_journal.id)]})\n statement = [(0, 0, {'journal_id': credit_journal.id, 'user_id':\n user.id, 'company_id': user.company_id.id})]\n current_session = config.current_session_id\n current_session.sudo().write({'statement_ids': statement})\n return True\n\n def init_return_order_journal(self):\n Journal = self.env['account.journal']\n user = self.env.user\n return_journal = Journal.sudo().search([('code', '=', 'ROJ'), (\n 'company_id', '=', user.company_id.id)])\n if return_journal:\n return return_journal.sudo().write({'pos_method_type': 'return'})\n Account = self.env['account.account']\n return_account_old_version = Account.sudo().search([('code', '=',\n 'ARO'), ('company_id', '=', user.company_id.id)])\n if return_account_old_version:\n return_account = return_account_old_version[0]\n else:\n return_account = Account.sudo().create({'name':\n 'Return Order Account', 'code': 'ARO', 'user_type_id': self\n .env.ref('account.data_account_type_current_assets').id,\n 'company_id': user.company_id.id, 'note':\n 'code \"ARO\" give return order from customer'})\n self.env['ir.model.data'].sudo().create({'name': \n 'return_account' + str(user.company_id.id), 'model':\n 'account.account', 'module': 'pos_retail', 'res_id':\n return_account.id, 'noupdate': True})\n return_journal = Journal.sudo().search([('code', '=', 'ROJ'), (\n 'company_id', '=', user.company_id.id)])\n if return_journal:\n return_journal[0].sudo().write({'default_debit_account_id':\n return_account.id, 'default_credit_account_id':\n return_account.id, 'pos_method_type': 'return'})\n return_journal = return_journal[0]\n else:\n new_sequence = self.env['ir.sequence'].sudo().create({'name': \n 'Return account ' + str(user.company_id.id), 'padding': 3,\n 'prefix': 'RA ' + str(user.company_id.id)})\n self.env['ir.model.data'].sudo().create({'name': \n 'journal_sequence' + str(new_sequence.id), 'model':\n 'ir.sequence', 'module': 'pos_retail', 'res_id':\n new_sequence.id, 'noupdate': True})\n return_journal = Journal.sudo().create({'name':\n 'Return Order Customer', 'code': 'ROJ', 'type': 'cash',\n 'pos_method_type': 'return', 'journal_user': True,\n 'sequence_id': new_sequence.id, 'company_id': user.\n company_id.id, 'default_debit_account_id': return_account.\n id, 'default_credit_account_id': return_account.id,\n 'sequence': 103})\n self.env['ir.model.data'].sudo().create({'name': \n 'return_journal_' + str(return_journal.id), 'model':\n 'account.journal', 'module': 'pos_retail', 'res_id': int(\n return_journal.id), 'noupdate': True})\n config = self\n config.sudo().write({'journal_ids': [(4, return_journal.id)]})\n statement = [(0, 0, {'journal_id': return_journal.id, 'user_id':\n user.id, 'company_id': user.company_id.id})]\n current_session = config.current_session_id\n current_session.sudo().write({'statement_ids': statement})\n return True\n\n def init_rounding_journal(self):\n Journal = self.env['account.journal']\n Account = self.env['account.account']\n user = self.env.user\n rounding_journal = Journal.sudo().search([('code', '=', 'RDJ'), (\n 'company_id', '=', user.company_id.id)])\n if rounding_journal:\n return rounding_journal.sudo().write({'pos_method_type':\n 'rounding'})\n rounding_account_old_version = Account.sudo().search([('code', '=',\n 'AAR'), ('company_id', '=', user.company_id.id)])\n if rounding_account_old_version:\n rounding_account = rounding_account_old_version[0]\n else:\n _logger.info('rounding_account have not')\n rounding_account = Account.sudo().create({'name':\n 'Rounding Account', 'code': 'AAR', 'user_type_id': self.env\n .ref('account.data_account_type_current_assets').id,\n 'company_id': user.company_id.id, 'note':\n 'code \"AAR\" give rounding pos order'})\n self.env['ir.model.data'].sudo().create({'name': \n 'rounding_account' + str(user.company_id.id), 'model':\n 'account.account', 'module': 'pos_retail', 'res_id':\n rounding_account.id, 'noupdate': True})\n rounding_journal = Journal.sudo().search([('pos_method_type', '=',\n 'rounding'), ('company_id', '=', user.company_id.id)])\n if rounding_journal:\n rounding_journal[0].sudo().write({'name': 'Rounding',\n 'default_debit_account_id': rounding_account.id,\n 'default_credit_account_id': rounding_account.id,\n 'pos_method_type': 'rounding', 'code': 'RDJ'})\n rounding_journal = rounding_journal[0]\n else:\n new_sequence = self.env['ir.sequence'].sudo().create({'name': \n 'rounding account ' + str(user.company_id.id), 'padding': 3,\n 'prefix': 'RA ' + str(user.company_id.id)})\n self.env['ir.model.data'].sudo().create({'name': \n 'journal_sequence' + str(new_sequence.id), 'model':\n 'ir.sequence', 'module': 'pos_retail', 'res_id':\n new_sequence.id, 'noupdate': True})\n rounding_journal = Journal.sudo().create({'name': 'Rounding',\n 'code': 'RDJ', 'type': 'cash', 'pos_method_type':\n 'rounding', 'journal_user': True, 'sequence_id':\n new_sequence.id, 'company_id': user.company_id.id,\n 'default_debit_account_id': rounding_account.id,\n 'default_credit_account_id': rounding_account.id,\n 'sequence': 103})\n self.env['ir.model.data'].sudo().create({'name': \n 'rounding_journal_' + str(rounding_journal.id), 'model':\n 'account.journal', 'module': 'pos_retail', 'res_id': int(\n rounding_journal.id), 'noupdate': True})\n config = self\n config.sudo().write({'journal_ids': [(4, rounding_journal.id)]})\n statement = [(0, 0, {'journal_id': rounding_journal.id, 'user_id':\n user.id, 'company_id': user.company_id.id})]\n current_session = config.current_session_id\n current_session.sudo().write({'statement_ids': statement})\n return True\n\n @api.multi\n def open_ui(self):\n res = super(pos_config, self).open_ui()\n self.init_voucher_journal()\n self.init_wallet_journal()\n self.init_credit_journal()\n self.init_return_order_journal()\n self.init_rounding_journal()\n return res\n\n @api.multi\n def open_session_cb(self):\n res = super(pos_config, self).open_session_cb()\n self.init_voucher_journal()\n self.init_wallet_journal()\n self.init_credit_journal()\n self.init_return_order_journal()\n self.init_rounding_journal()\n return res\n",
"<import token>\n<code token>\n<assignment token>\n<class token>\n\n\nclass pos_config(models.Model):\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n\n @api.model\n def switch_mobile_mode(self, config_id, vals):\n if vals.get('mobile_responsive') == True:\n vals['product_view'] = 'box'\n return self.browse(config_id).sudo().write(vals)\n\n @api.multi\n def remove_database(self):\n for config in self:\n sessions = self.env['pos.session'].search([('config_id', '=',\n config.id)])\n for session in sessions:\n self.env['bus.bus'].sendmany([[(self.env.cr.dbname,\n 'pos.indexed_db', session.user_id.id), json.dumps({'db':\n self.env.cr.dbname})]])\n self.env['pos.cache.database'].search([]).unlink()\n self.env['pos.call.log'].search([]).unlink()\n return {'type': 'ir.actions.act_url', 'url': '/pos/web/',\n 'target': 'self'}\n\n @api.multi\n def remove_caches(self):\n for config in self:\n sessions = self.env['pos.session'].search([('config_id', '=',\n config.id)])\n for session in sessions:\n self.env['bus.bus'].sendmany([[(self.env.cr.dbname,\n 'pos.indexed_db', session.user_id.id), json.dumps({'db':\n self.env.cr.dbname})]])\n if session.state != 'closed':\n session.action_pos_session_closing_control()\n return {'type': 'ir.actions.act_url', 'url': '/pos/web/',\n 'target': 'self'}\n\n @api.model\n def store_cached_file(self, datas):\n start = timeit.default_timer()\n _logger.info('==> begin cached_file')\n os.chdir(os.path.dirname(__file__))\n path = os.getcwd()\n file_name = path + '/pos.json'\n if os.path.exists(file_name):\n os.remove(file_name)\n with io.open(file_name, 'w', encoding='utf8') as outfile:\n str_ = json.dumps(datas, indent=4, sort_keys=True, separators=(\n ',', ': '), ensure_ascii=False)\n outfile.write(to_unicode(str_))\n stop = timeit.default_timer()\n _logger.info(stop - start)\n return True\n\n @api.model\n def get_cached_file(self):\n start = timeit.default_timer()\n _logger.info('==> begin get_cached_file')\n os.chdir(os.path.dirname(__file__))\n path = os.getcwd()\n file_name = path + '/pos.json'\n if not os.path.exists(file_name):\n return False\n else:\n with open(file_name) as f:\n datas = json.load(f)\n stop = timeit.default_timer()\n _logger.info(stop - start)\n return datas\n\n def get_fields_by_model(self, model):\n all_fields = self.env[model].fields_get()\n fields_list = []\n for field, value in all_fields.items():\n if field == 'model' or all_fields[field]['type'] in ['one2many',\n 'binary']:\n continue\n else:\n fields_list.append(field)\n return fields_list\n\n @api.model\n def install_data(self, model_name=None, min_id=0, max_id=1999):\n cache_obj = self.env['pos.cache.database'].with_context(prefetch_fields\n =False)\n log_obj = self.env['pos.call.log'].with_context(prefetch_fields=False)\n domain = [('id', '>=', min_id), ('id', '<=', max_id)]\n if model_name == 'product.product':\n domain.append(('available_in_pos', '=', True))\n field_list = cache_obj.get_fields_by_model(model_name)\n self.env.cr.execute(\n \"select id from pos_call_log where min_id=%s and max_id=%s and call_model='%s'\"\n % (min_id, max_id, model_name))\n old_logs = self.env.cr.fetchall()\n datas = None\n if len(old_logs) == 0:\n _logger.info('installing %s from %s to %s' % (model_name,\n min_id, max_id))\n datas = self.env[model_name].with_context(prefetch_fields=False\n ).search_read(domain, field_list)\n version_info = odoo.release.version_info[0]\n if version_info == 12:\n all_fields = self.env[model_name].fields_get()\n for data in datas:\n for field, value in data.items():\n if field == 'model':\n continue\n if all_fields[field] and all_fields[field]['type'] in [\n 'date', 'datetime'] and value:\n data[field] = value.strftime(\n DEFAULT_SERVER_DATETIME_FORMAT)\n vals = {'active': True, 'min_id': min_id, 'max_id': max_id,\n 'call_fields': json.dumps(field_list), 'call_results': json\n .dumps(datas), 'call_model': model_name, 'call_domain':\n json.dumps(domain)}\n log_obj.create(vals)\n else:\n old_log_id = old_logs[0][0]\n old_log = log_obj.browse(old_log_id)\n datas = old_log.call_results\n self.env.cr.commit()\n return datas\n\n @api.onchange('lock_print_invoice_on_pos')\n def _onchange_lock_print_invoice_on_pos(self):\n if self.lock_print_invoice_on_pos == True:\n self.receipt_invoice_number = False\n self.send_invoice_email = True\n else:\n self.receipt_invoice_number = True\n self.send_invoice_email = False\n\n @api.onchange('receipt_invoice_number')\n def _onchange_receipt_invoice_number(self):\n if self.receipt_invoice_number == True:\n self.lock_print_invoice_on_pos = False\n else:\n self.lock_print_invoice_on_pos = True\n\n @api.onchange('pos_auto_invoice')\n def _onchange_pos_auto_invoice(self):\n if self.pos_auto_invoice == True:\n self.iface_invoicing = True\n else:\n self.iface_invoicing = False\n\n @api.onchange('staff_level')\n def on_change_staff_level(self):\n if self.staff_level and self.staff_level == 'manager':\n self.lock_order_printed_receipt = False\n\n @api.multi\n def write(self, vals):\n if vals.get('allow_discount', False) or vals.get('allow_qty', False\n ) or vals.get('allow_price', False):\n vals['allow_numpad'] = True\n if vals.get('expired_days_voucher', None) and vals.get(\n 'expired_days_voucher') < 0:\n raise UserError('Expired days of voucher could not smaller than 0')\n for config in self:\n if vals.get('management_session', False) and not vals.get(\n 'default_cashbox_lines_ids'):\n if (not config.default_cashbox_lines_ids and not config.\n cash_control):\n raise UserError(\n 'Please go to Cash control and add Default Opening')\n res = super(pos_config, self).write(vals)\n for config in self:\n if (config.validate_by_user_id and not config.\n validate_by_user_id.pos_security_pin):\n raise UserError(\n 'Validate user %s have not set pos security pin, please go to Users menu and input security password'\n % config.validate_by_user_id.name)\n if (config.discount_unlock_limit_user_id and not config.\n discount_unlock_limit_user_id.pos_security_pin):\n raise UserError(\n 'User Unlock limit discount: %s ,have not set pos security pin, please go to Users menu and input security password'\n % config.discount_unlock_limit_user_id.name)\n return res\n\n @api.model\n def create(self, vals):\n if vals.get('allow_discount', False) or vals.get('allow_qty', False\n ) or vals.get('allow_price', False):\n vals['allow_numpad'] = True\n if vals.get('expired_days_voucher', 0) < 0:\n raise UserError('Expired days of voucher could not smaller than 0')\n config = super(pos_config, self).create(vals)\n if (config.management_session and not config.\n default_cashbox_lines_ids and not config.cash_control):\n raise UserError('Please go to Cash control and add Default Opening'\n )\n if (config.validate_by_user_id and not config.validate_by_user_id.\n pos_security_pin):\n raise UserError(\n 'Validate user %s have not set pos security pin, please go to Users menu and input security password'\n % config.validate_by_user_id.name)\n if (config.discount_unlock_limit_user_id and not config.\n discount_unlock_limit_user_id.pos_security_pin):\n raise UserError(\n 'User Unlock limit discount: %s ,have not set pos security pin, please go to Users menu and input security password'\n % config.discount_unlock_limit_user_id.name)\n return config\n\n def init_wallet_journal(self):\n Journal = self.env['account.journal']\n user = self.env.user\n wallet_journal = Journal.sudo().search([('code', '=', 'UWJ'), (\n 'company_id', '=', user.company_id.id)])\n if wallet_journal:\n return wallet_journal.sudo().write({'pos_method_type': 'wallet'})\n Account = self.env['account.account']\n wallet_account_old_version = Account.sudo().search([('code', '=',\n 'AUW'), ('company_id', '=', user.company_id.id)])\n if wallet_account_old_version:\n wallet_account = wallet_account_old_version[0]\n else:\n wallet_account = Account.sudo().create({'name':\n 'Account wallet', 'code': 'AUW', 'user_type_id': self.env.\n ref('account.data_account_type_current_assets').id,\n 'company_id': user.company_id.id, 'note':\n 'code \"AUW\" auto give wallet amount of customers'})\n self.env['ir.model.data'].sudo().create({'name': \n 'account_use_wallet' + str(user.company_id.id), 'model':\n 'account.account', 'module': 'pos_retail', 'res_id':\n wallet_account.id, 'noupdate': True})\n wallet_journal_inactive = Journal.sudo().search([('code', '=',\n 'UWJ'), ('company_id', '=', user.company_id.id), (\n 'pos_method_type', '=', 'wallet')])\n if wallet_journal_inactive:\n wallet_journal_inactive.sudo().write({\n 'default_debit_account_id': wallet_account.id,\n 'default_credit_account_id': wallet_account.id,\n 'pos_method_type': 'wallet', 'sequence': 100})\n wallet_journal = wallet_journal_inactive\n else:\n new_sequence = self.env['ir.sequence'].sudo().create({'name': \n 'Account Default Wallet Journal ' + str(user.company_id.id),\n 'padding': 3, 'prefix': 'UW ' + str(user.company_id.id)})\n self.env['ir.model.data'].sudo().create({'name': \n 'journal_sequence' + str(new_sequence.id), 'model':\n 'ir.sequence', 'module': 'pos_retail', 'res_id':\n new_sequence.id, 'noupdate': True})\n wallet_journal = Journal.sudo().create({'name': 'Wallet',\n 'code': 'UWJ', 'type': 'cash', 'pos_method_type': 'wallet',\n 'journal_user': True, 'sequence_id': new_sequence.id,\n 'company_id': user.company_id.id,\n 'default_debit_account_id': wallet_account.id,\n 'default_credit_account_id': wallet_account.id, 'sequence':\n 100})\n self.env['ir.model.data'].sudo().create({'name': \n 'use_wallet_journal_' + str(wallet_journal.id), 'model':\n 'account.journal', 'module': 'pos_retail', 'res_id': int(\n wallet_journal.id), 'noupdate': True})\n config = self\n config.sudo().write({'journal_ids': [(4, wallet_journal.id)]})\n statement = [(0, 0, {'journal_id': wallet_journal.id, 'user_id':\n user.id, 'company_id': user.company_id.id})]\n current_session = config.current_session_id\n current_session.sudo().write({'statement_ids': statement})\n return\n\n def init_voucher_journal(self):\n Journal = self.env['account.journal']\n user = self.env.user\n voucher_journal = Journal.sudo().search([('code', '=', 'VCJ'), (\n 'company_id', '=', user.company_id.id)])\n if voucher_journal:\n return voucher_journal.sudo().write({'pos_method_type': 'voucher'})\n Account = self.env['account.account']\n voucher_account_old_version = Account.sudo().search([('code', '=',\n 'AVC'), ('company_id', '=', user.company_id.id)])\n if voucher_account_old_version:\n voucher_account = voucher_account_old_version[0]\n else:\n voucher_account = Account.sudo().create({'name':\n 'Account voucher', 'code': 'AVC', 'user_type_id': self.env.\n ref('account.data_account_type_current_assets').id,\n 'company_id': user.company_id.id, 'note':\n 'code \"AVC\" auto give voucher histories of customers'})\n self.env['ir.model.data'].sudo().create({'name': \n 'account_voucher' + str(user.company_id.id), 'model':\n 'account.account', 'module': 'pos_retail', 'res_id':\n voucher_account.id, 'noupdate': True})\n voucher_journal = Journal.sudo().search([('code', '=', 'VCJ'), (\n 'company_id', '=', user.company_id.id), ('pos_method_type', '=',\n 'voucher')])\n if voucher_journal:\n voucher_journal[0].sudo().write({'voucher': True,\n 'default_debit_account_id': voucher_account.id,\n 'default_credit_account_id': voucher_account.id,\n 'pos_method_type': 'voucher', 'sequence': 101})\n voucher_journal = voucher_journal[0]\n else:\n new_sequence = self.env['ir.sequence'].sudo().create({'name': \n 'Account Voucher ' + str(user.company_id.id), 'padding': 3,\n 'prefix': 'AVC ' + str(user.company_id.id)})\n self.env['ir.model.data'].sudo().create({'name': \n 'journal_sequence' + str(new_sequence.id), 'model':\n 'ir.sequence', 'module': 'pos_retail', 'res_id':\n new_sequence.id, 'noupdate': True})\n voucher_journal = Journal.sudo().create({'name': 'Voucher',\n 'code': 'VCJ', 'type': 'cash', 'pos_method_type': 'voucher',\n 'journal_user': True, 'sequence_id': new_sequence.id,\n 'company_id': user.company_id.id,\n 'default_debit_account_id': voucher_account.id,\n 'default_credit_account_id': voucher_account.id, 'sequence':\n 101})\n self.env['ir.model.data'].sudo().create({'name': \n 'journal_voucher_' + str(voucher_journal.id), 'model':\n 'account.journal', 'module': 'pos_retail', 'res_id': int(\n voucher_journal.id), 'noupdate': True})\n config = self\n config.sudo().write({'journal_ids': [(4, voucher_journal.id)]})\n statement = [(0, 0, {'journal_id': voucher_journal.id, 'user_id':\n user.id, 'company_id': user.company_id.id})]\n current_session = config.current_session_id\n current_session.sudo().write({'statement_ids': statement})\n return\n\n def init_credit_journal(self):\n Journal = self.env['account.journal']\n user = self.env.user\n voucher_journal = Journal.sudo().search([('code', '=', 'CJ'), (\n 'company_id', '=', user.company_id.id)])\n if voucher_journal:\n return voucher_journal.sudo().write({'pos_method_type': 'credit'})\n Account = self.env['account.account']\n credit_account_old_version = Account.sudo().search([('code', '=',\n 'ACJ'), ('company_id', '=', user.company_id.id)])\n if credit_account_old_version:\n credit_account = credit_account_old_version[0]\n else:\n credit_account = Account.sudo().create({'name':\n 'Credit Account', 'code': 'CA', 'user_type_id': self.env.\n ref('account.data_account_type_current_assets').id,\n 'company_id': user.company_id.id, 'note':\n 'code \"CA\" give credit payment customer'})\n self.env['ir.model.data'].sudo().create({'name': \n 'account_credit' + str(user.company_id.id), 'model':\n 'account.account', 'module': 'pos_retail', 'res_id':\n credit_account.id, 'noupdate': True})\n credit_journal = Journal.sudo().search([('code', '=', 'CJ'), (\n 'company_id', '=', user.company_id.id), ('pos_method_type', '=',\n 'credit')])\n if credit_journal:\n credit_journal[0].sudo().write({'credit': True,\n 'default_debit_account_id': credit_account.id,\n 'default_credit_account_id': credit_account.id,\n 'pos_method_type': 'credit', 'sequence': 102})\n credit_journal = credit_journal[0]\n else:\n new_sequence = self.env['ir.sequence'].sudo().create({'name': \n 'Credit account ' + str(user.company_id.id), 'padding': 3,\n 'prefix': 'CA ' + str(user.company_id.id)})\n self.env['ir.model.data'].sudo().create({'name': \n 'journal_sequence' + str(new_sequence.id), 'model':\n 'ir.sequence', 'module': 'pos_retail', 'res_id':\n new_sequence.id, 'noupdate': True})\n credit_journal = Journal.sudo().create({'name':\n 'Customer Credit', 'code': 'CJ', 'type': 'cash',\n 'pos_method_type': 'credit', 'journal_user': True,\n 'sequence_id': new_sequence.id, 'company_id': user.\n company_id.id, 'default_debit_account_id': credit_account.\n id, 'default_credit_account_id': credit_account.id,\n 'sequence': 102})\n self.env['ir.model.data'].sudo().create({'name': \n 'credit_journal_' + str(credit_journal.id), 'model':\n 'account.journal', 'module': 'pos_retail', 'res_id': int(\n credit_journal.id), 'noupdate': True})\n config = self\n config.sudo().write({'journal_ids': [(4, credit_journal.id)]})\n statement = [(0, 0, {'journal_id': credit_journal.id, 'user_id':\n user.id, 'company_id': user.company_id.id})]\n current_session = config.current_session_id\n current_session.sudo().write({'statement_ids': statement})\n return True\n\n def init_return_order_journal(self):\n Journal = self.env['account.journal']\n user = self.env.user\n return_journal = Journal.sudo().search([('code', '=', 'ROJ'), (\n 'company_id', '=', user.company_id.id)])\n if return_journal:\n return return_journal.sudo().write({'pos_method_type': 'return'})\n Account = self.env['account.account']\n return_account_old_version = Account.sudo().search([('code', '=',\n 'ARO'), ('company_id', '=', user.company_id.id)])\n if return_account_old_version:\n return_account = return_account_old_version[0]\n else:\n return_account = Account.sudo().create({'name':\n 'Return Order Account', 'code': 'ARO', 'user_type_id': self\n .env.ref('account.data_account_type_current_assets').id,\n 'company_id': user.company_id.id, 'note':\n 'code \"ARO\" give return order from customer'})\n self.env['ir.model.data'].sudo().create({'name': \n 'return_account' + str(user.company_id.id), 'model':\n 'account.account', 'module': 'pos_retail', 'res_id':\n return_account.id, 'noupdate': True})\n return_journal = Journal.sudo().search([('code', '=', 'ROJ'), (\n 'company_id', '=', user.company_id.id)])\n if return_journal:\n return_journal[0].sudo().write({'default_debit_account_id':\n return_account.id, 'default_credit_account_id':\n return_account.id, 'pos_method_type': 'return'})\n return_journal = return_journal[0]\n else:\n new_sequence = self.env['ir.sequence'].sudo().create({'name': \n 'Return account ' + str(user.company_id.id), 'padding': 3,\n 'prefix': 'RA ' + str(user.company_id.id)})\n self.env['ir.model.data'].sudo().create({'name': \n 'journal_sequence' + str(new_sequence.id), 'model':\n 'ir.sequence', 'module': 'pos_retail', 'res_id':\n new_sequence.id, 'noupdate': True})\n return_journal = Journal.sudo().create({'name':\n 'Return Order Customer', 'code': 'ROJ', 'type': 'cash',\n 'pos_method_type': 'return', 'journal_user': True,\n 'sequence_id': new_sequence.id, 'company_id': user.\n company_id.id, 'default_debit_account_id': return_account.\n id, 'default_credit_account_id': return_account.id,\n 'sequence': 103})\n self.env['ir.model.data'].sudo().create({'name': \n 'return_journal_' + str(return_journal.id), 'model':\n 'account.journal', 'module': 'pos_retail', 'res_id': int(\n return_journal.id), 'noupdate': True})\n config = self\n config.sudo().write({'journal_ids': [(4, return_journal.id)]})\n statement = [(0, 0, {'journal_id': return_journal.id, 'user_id':\n user.id, 'company_id': user.company_id.id})]\n current_session = config.current_session_id\n current_session.sudo().write({'statement_ids': statement})\n return True\n\n def init_rounding_journal(self):\n Journal = self.env['account.journal']\n Account = self.env['account.account']\n user = self.env.user\n rounding_journal = Journal.sudo().search([('code', '=', 'RDJ'), (\n 'company_id', '=', user.company_id.id)])\n if rounding_journal:\n return rounding_journal.sudo().write({'pos_method_type':\n 'rounding'})\n rounding_account_old_version = Account.sudo().search([('code', '=',\n 'AAR'), ('company_id', '=', user.company_id.id)])\n if rounding_account_old_version:\n rounding_account = rounding_account_old_version[0]\n else:\n _logger.info('rounding_account have not')\n rounding_account = Account.sudo().create({'name':\n 'Rounding Account', 'code': 'AAR', 'user_type_id': self.env\n .ref('account.data_account_type_current_assets').id,\n 'company_id': user.company_id.id, 'note':\n 'code \"AAR\" give rounding pos order'})\n self.env['ir.model.data'].sudo().create({'name': \n 'rounding_account' + str(user.company_id.id), 'model':\n 'account.account', 'module': 'pos_retail', 'res_id':\n rounding_account.id, 'noupdate': True})\n rounding_journal = Journal.sudo().search([('pos_method_type', '=',\n 'rounding'), ('company_id', '=', user.company_id.id)])\n if rounding_journal:\n rounding_journal[0].sudo().write({'name': 'Rounding',\n 'default_debit_account_id': rounding_account.id,\n 'default_credit_account_id': rounding_account.id,\n 'pos_method_type': 'rounding', 'code': 'RDJ'})\n rounding_journal = rounding_journal[0]\n else:\n new_sequence = self.env['ir.sequence'].sudo().create({'name': \n 'rounding account ' + str(user.company_id.id), 'padding': 3,\n 'prefix': 'RA ' + str(user.company_id.id)})\n self.env['ir.model.data'].sudo().create({'name': \n 'journal_sequence' + str(new_sequence.id), 'model':\n 'ir.sequence', 'module': 'pos_retail', 'res_id':\n new_sequence.id, 'noupdate': True})\n rounding_journal = Journal.sudo().create({'name': 'Rounding',\n 'code': 'RDJ', 'type': 'cash', 'pos_method_type':\n 'rounding', 'journal_user': True, 'sequence_id':\n new_sequence.id, 'company_id': user.company_id.id,\n 'default_debit_account_id': rounding_account.id,\n 'default_credit_account_id': rounding_account.id,\n 'sequence': 103})\n self.env['ir.model.data'].sudo().create({'name': \n 'rounding_journal_' + str(rounding_journal.id), 'model':\n 'account.journal', 'module': 'pos_retail', 'res_id': int(\n rounding_journal.id), 'noupdate': True})\n config = self\n config.sudo().write({'journal_ids': [(4, rounding_journal.id)]})\n statement = [(0, 0, {'journal_id': rounding_journal.id, 'user_id':\n user.id, 'company_id': user.company_id.id})]\n current_session = config.current_session_id\n current_session.sudo().write({'statement_ids': statement})\n return True\n\n @api.multi\n def open_ui(self):\n res = super(pos_config, self).open_ui()\n self.init_voucher_journal()\n self.init_wallet_journal()\n self.init_credit_journal()\n self.init_return_order_journal()\n self.init_rounding_journal()\n return res\n\n @api.multi\n def open_session_cb(self):\n res = super(pos_config, self).open_session_cb()\n self.init_voucher_journal()\n self.init_wallet_journal()\n self.init_credit_journal()\n self.init_return_order_journal()\n self.init_rounding_journal()\n return res\n",
"<import token>\n<code token>\n<assignment token>\n<class token>\n\n\nclass pos_config(models.Model):\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n\n @api.multi\n def remove_database(self):\n for config in self:\n sessions = self.env['pos.session'].search([('config_id', '=',\n config.id)])\n for session in sessions:\n self.env['bus.bus'].sendmany([[(self.env.cr.dbname,\n 'pos.indexed_db', session.user_id.id), json.dumps({'db':\n self.env.cr.dbname})]])\n self.env['pos.cache.database'].search([]).unlink()\n self.env['pos.call.log'].search([]).unlink()\n return {'type': 'ir.actions.act_url', 'url': '/pos/web/',\n 'target': 'self'}\n\n @api.multi\n def remove_caches(self):\n for config in self:\n sessions = self.env['pos.session'].search([('config_id', '=',\n config.id)])\n for session in sessions:\n self.env['bus.bus'].sendmany([[(self.env.cr.dbname,\n 'pos.indexed_db', session.user_id.id), json.dumps({'db':\n self.env.cr.dbname})]])\n if session.state != 'closed':\n session.action_pos_session_closing_control()\n return {'type': 'ir.actions.act_url', 'url': '/pos/web/',\n 'target': 'self'}\n\n @api.model\n def store_cached_file(self, datas):\n start = timeit.default_timer()\n _logger.info('==> begin cached_file')\n os.chdir(os.path.dirname(__file__))\n path = os.getcwd()\n file_name = path + '/pos.json'\n if os.path.exists(file_name):\n os.remove(file_name)\n with io.open(file_name, 'w', encoding='utf8') as outfile:\n str_ = json.dumps(datas, indent=4, sort_keys=True, separators=(\n ',', ': '), ensure_ascii=False)\n outfile.write(to_unicode(str_))\n stop = timeit.default_timer()\n _logger.info(stop - start)\n return True\n\n @api.model\n def get_cached_file(self):\n start = timeit.default_timer()\n _logger.info('==> begin get_cached_file')\n os.chdir(os.path.dirname(__file__))\n path = os.getcwd()\n file_name = path + '/pos.json'\n if not os.path.exists(file_name):\n return False\n else:\n with open(file_name) as f:\n datas = json.load(f)\n stop = timeit.default_timer()\n _logger.info(stop - start)\n return datas\n\n def get_fields_by_model(self, model):\n all_fields = self.env[model].fields_get()\n fields_list = []\n for field, value in all_fields.items():\n if field == 'model' or all_fields[field]['type'] in ['one2many',\n 'binary']:\n continue\n else:\n fields_list.append(field)\n return fields_list\n\n @api.model\n def install_data(self, model_name=None, min_id=0, max_id=1999):\n cache_obj = self.env['pos.cache.database'].with_context(prefetch_fields\n =False)\n log_obj = self.env['pos.call.log'].with_context(prefetch_fields=False)\n domain = [('id', '>=', min_id), ('id', '<=', max_id)]\n if model_name == 'product.product':\n domain.append(('available_in_pos', '=', True))\n field_list = cache_obj.get_fields_by_model(model_name)\n self.env.cr.execute(\n \"select id from pos_call_log where min_id=%s and max_id=%s and call_model='%s'\"\n % (min_id, max_id, model_name))\n old_logs = self.env.cr.fetchall()\n datas = None\n if len(old_logs) == 0:\n _logger.info('installing %s from %s to %s' % (model_name,\n min_id, max_id))\n datas = self.env[model_name].with_context(prefetch_fields=False\n ).search_read(domain, field_list)\n version_info = odoo.release.version_info[0]\n if version_info == 12:\n all_fields = self.env[model_name].fields_get()\n for data in datas:\n for field, value in data.items():\n if field == 'model':\n continue\n if all_fields[field] and all_fields[field]['type'] in [\n 'date', 'datetime'] and value:\n data[field] = value.strftime(\n DEFAULT_SERVER_DATETIME_FORMAT)\n vals = {'active': True, 'min_id': min_id, 'max_id': max_id,\n 'call_fields': json.dumps(field_list), 'call_results': json\n .dumps(datas), 'call_model': model_name, 'call_domain':\n json.dumps(domain)}\n log_obj.create(vals)\n else:\n old_log_id = old_logs[0][0]\n old_log = log_obj.browse(old_log_id)\n datas = old_log.call_results\n self.env.cr.commit()\n return datas\n\n @api.onchange('lock_print_invoice_on_pos')\n def _onchange_lock_print_invoice_on_pos(self):\n if self.lock_print_invoice_on_pos == True:\n self.receipt_invoice_number = False\n self.send_invoice_email = True\n else:\n self.receipt_invoice_number = True\n self.send_invoice_email = False\n\n @api.onchange('receipt_invoice_number')\n def _onchange_receipt_invoice_number(self):\n if self.receipt_invoice_number == True:\n self.lock_print_invoice_on_pos = False\n else:\n self.lock_print_invoice_on_pos = True\n\n @api.onchange('pos_auto_invoice')\n def _onchange_pos_auto_invoice(self):\n if self.pos_auto_invoice == True:\n self.iface_invoicing = True\n else:\n self.iface_invoicing = False\n\n @api.onchange('staff_level')\n def on_change_staff_level(self):\n if self.staff_level and self.staff_level == 'manager':\n self.lock_order_printed_receipt = False\n\n @api.multi\n def write(self, vals):\n if vals.get('allow_discount', False) or vals.get('allow_qty', False\n ) or vals.get('allow_price', False):\n vals['allow_numpad'] = True\n if vals.get('expired_days_voucher', None) and vals.get(\n 'expired_days_voucher') < 0:\n raise UserError('Expired days of voucher could not smaller than 0')\n for config in self:\n if vals.get('management_session', False) and not vals.get(\n 'default_cashbox_lines_ids'):\n if (not config.default_cashbox_lines_ids and not config.\n cash_control):\n raise UserError(\n 'Please go to Cash control and add Default Opening')\n res = super(pos_config, self).write(vals)\n for config in self:\n if (config.validate_by_user_id and not config.\n validate_by_user_id.pos_security_pin):\n raise UserError(\n 'Validate user %s have not set pos security pin, please go to Users menu and input security password'\n % config.validate_by_user_id.name)\n if (config.discount_unlock_limit_user_id and not config.\n discount_unlock_limit_user_id.pos_security_pin):\n raise UserError(\n 'User Unlock limit discount: %s ,have not set pos security pin, please go to Users menu and input security password'\n % config.discount_unlock_limit_user_id.name)\n return res\n\n @api.model\n def create(self, vals):\n if vals.get('allow_discount', False) or vals.get('allow_qty', False\n ) or vals.get('allow_price', False):\n vals['allow_numpad'] = True\n if vals.get('expired_days_voucher', 0) < 0:\n raise UserError('Expired days of voucher could not smaller than 0')\n config = super(pos_config, self).create(vals)\n if (config.management_session and not config.\n default_cashbox_lines_ids and not config.cash_control):\n raise UserError('Please go to Cash control and add Default Opening'\n )\n if (config.validate_by_user_id and not config.validate_by_user_id.\n pos_security_pin):\n raise UserError(\n 'Validate user %s have not set pos security pin, please go to Users menu and input security password'\n % config.validate_by_user_id.name)\n if (config.discount_unlock_limit_user_id and not config.\n discount_unlock_limit_user_id.pos_security_pin):\n raise UserError(\n 'User Unlock limit discount: %s ,have not set pos security pin, please go to Users menu and input security password'\n % config.discount_unlock_limit_user_id.name)\n return config\n\n def init_wallet_journal(self):\n Journal = self.env['account.journal']\n user = self.env.user\n wallet_journal = Journal.sudo().search([('code', '=', 'UWJ'), (\n 'company_id', '=', user.company_id.id)])\n if wallet_journal:\n return wallet_journal.sudo().write({'pos_method_type': 'wallet'})\n Account = self.env['account.account']\n wallet_account_old_version = Account.sudo().search([('code', '=',\n 'AUW'), ('company_id', '=', user.company_id.id)])\n if wallet_account_old_version:\n wallet_account = wallet_account_old_version[0]\n else:\n wallet_account = Account.sudo().create({'name':\n 'Account wallet', 'code': 'AUW', 'user_type_id': self.env.\n ref('account.data_account_type_current_assets').id,\n 'company_id': user.company_id.id, 'note':\n 'code \"AUW\" auto give wallet amount of customers'})\n self.env['ir.model.data'].sudo().create({'name': \n 'account_use_wallet' + str(user.company_id.id), 'model':\n 'account.account', 'module': 'pos_retail', 'res_id':\n wallet_account.id, 'noupdate': True})\n wallet_journal_inactive = Journal.sudo().search([('code', '=',\n 'UWJ'), ('company_id', '=', user.company_id.id), (\n 'pos_method_type', '=', 'wallet')])\n if wallet_journal_inactive:\n wallet_journal_inactive.sudo().write({\n 'default_debit_account_id': wallet_account.id,\n 'default_credit_account_id': wallet_account.id,\n 'pos_method_type': 'wallet', 'sequence': 100})\n wallet_journal = wallet_journal_inactive\n else:\n new_sequence = self.env['ir.sequence'].sudo().create({'name': \n 'Account Default Wallet Journal ' + str(user.company_id.id),\n 'padding': 3, 'prefix': 'UW ' + str(user.company_id.id)})\n self.env['ir.model.data'].sudo().create({'name': \n 'journal_sequence' + str(new_sequence.id), 'model':\n 'ir.sequence', 'module': 'pos_retail', 'res_id':\n new_sequence.id, 'noupdate': True})\n wallet_journal = Journal.sudo().create({'name': 'Wallet',\n 'code': 'UWJ', 'type': 'cash', 'pos_method_type': 'wallet',\n 'journal_user': True, 'sequence_id': new_sequence.id,\n 'company_id': user.company_id.id,\n 'default_debit_account_id': wallet_account.id,\n 'default_credit_account_id': wallet_account.id, 'sequence':\n 100})\n self.env['ir.model.data'].sudo().create({'name': \n 'use_wallet_journal_' + str(wallet_journal.id), 'model':\n 'account.journal', 'module': 'pos_retail', 'res_id': int(\n wallet_journal.id), 'noupdate': True})\n config = self\n config.sudo().write({'journal_ids': [(4, wallet_journal.id)]})\n statement = [(0, 0, {'journal_id': wallet_journal.id, 'user_id':\n user.id, 'company_id': user.company_id.id})]\n current_session = config.current_session_id\n current_session.sudo().write({'statement_ids': statement})\n return\n\n def init_voucher_journal(self):\n Journal = self.env['account.journal']\n user = self.env.user\n voucher_journal = Journal.sudo().search([('code', '=', 'VCJ'), (\n 'company_id', '=', user.company_id.id)])\n if voucher_journal:\n return voucher_journal.sudo().write({'pos_method_type': 'voucher'})\n Account = self.env['account.account']\n voucher_account_old_version = Account.sudo().search([('code', '=',\n 'AVC'), ('company_id', '=', user.company_id.id)])\n if voucher_account_old_version:\n voucher_account = voucher_account_old_version[0]\n else:\n voucher_account = Account.sudo().create({'name':\n 'Account voucher', 'code': 'AVC', 'user_type_id': self.env.\n ref('account.data_account_type_current_assets').id,\n 'company_id': user.company_id.id, 'note':\n 'code \"AVC\" auto give voucher histories of customers'})\n self.env['ir.model.data'].sudo().create({'name': \n 'account_voucher' + str(user.company_id.id), 'model':\n 'account.account', 'module': 'pos_retail', 'res_id':\n voucher_account.id, 'noupdate': True})\n voucher_journal = Journal.sudo().search([('code', '=', 'VCJ'), (\n 'company_id', '=', user.company_id.id), ('pos_method_type', '=',\n 'voucher')])\n if voucher_journal:\n voucher_journal[0].sudo().write({'voucher': True,\n 'default_debit_account_id': voucher_account.id,\n 'default_credit_account_id': voucher_account.id,\n 'pos_method_type': 'voucher', 'sequence': 101})\n voucher_journal = voucher_journal[0]\n else:\n new_sequence = self.env['ir.sequence'].sudo().create({'name': \n 'Account Voucher ' + str(user.company_id.id), 'padding': 3,\n 'prefix': 'AVC ' + str(user.company_id.id)})\n self.env['ir.model.data'].sudo().create({'name': \n 'journal_sequence' + str(new_sequence.id), 'model':\n 'ir.sequence', 'module': 'pos_retail', 'res_id':\n new_sequence.id, 'noupdate': True})\n voucher_journal = Journal.sudo().create({'name': 'Voucher',\n 'code': 'VCJ', 'type': 'cash', 'pos_method_type': 'voucher',\n 'journal_user': True, 'sequence_id': new_sequence.id,\n 'company_id': user.company_id.id,\n 'default_debit_account_id': voucher_account.id,\n 'default_credit_account_id': voucher_account.id, 'sequence':\n 101})\n self.env['ir.model.data'].sudo().create({'name': \n 'journal_voucher_' + str(voucher_journal.id), 'model':\n 'account.journal', 'module': 'pos_retail', 'res_id': int(\n voucher_journal.id), 'noupdate': True})\n config = self\n config.sudo().write({'journal_ids': [(4, voucher_journal.id)]})\n statement = [(0, 0, {'journal_id': voucher_journal.id, 'user_id':\n user.id, 'company_id': user.company_id.id})]\n current_session = config.current_session_id\n current_session.sudo().write({'statement_ids': statement})\n return\n\n def init_credit_journal(self):\n Journal = self.env['account.journal']\n user = self.env.user\n voucher_journal = Journal.sudo().search([('code', '=', 'CJ'), (\n 'company_id', '=', user.company_id.id)])\n if voucher_journal:\n return voucher_journal.sudo().write({'pos_method_type': 'credit'})\n Account = self.env['account.account']\n credit_account_old_version = Account.sudo().search([('code', '=',\n 'ACJ'), ('company_id', '=', user.company_id.id)])\n if credit_account_old_version:\n credit_account = credit_account_old_version[0]\n else:\n credit_account = Account.sudo().create({'name':\n 'Credit Account', 'code': 'CA', 'user_type_id': self.env.\n ref('account.data_account_type_current_assets').id,\n 'company_id': user.company_id.id, 'note':\n 'code \"CA\" give credit payment customer'})\n self.env['ir.model.data'].sudo().create({'name': \n 'account_credit' + str(user.company_id.id), 'model':\n 'account.account', 'module': 'pos_retail', 'res_id':\n credit_account.id, 'noupdate': True})\n credit_journal = Journal.sudo().search([('code', '=', 'CJ'), (\n 'company_id', '=', user.company_id.id), ('pos_method_type', '=',\n 'credit')])\n if credit_journal:\n credit_journal[0].sudo().write({'credit': True,\n 'default_debit_account_id': credit_account.id,\n 'default_credit_account_id': credit_account.id,\n 'pos_method_type': 'credit', 'sequence': 102})\n credit_journal = credit_journal[0]\n else:\n new_sequence = self.env['ir.sequence'].sudo().create({'name': \n 'Credit account ' + str(user.company_id.id), 'padding': 3,\n 'prefix': 'CA ' + str(user.company_id.id)})\n self.env['ir.model.data'].sudo().create({'name': \n 'journal_sequence' + str(new_sequence.id), 'model':\n 'ir.sequence', 'module': 'pos_retail', 'res_id':\n new_sequence.id, 'noupdate': True})\n credit_journal = Journal.sudo().create({'name':\n 'Customer Credit', 'code': 'CJ', 'type': 'cash',\n 'pos_method_type': 'credit', 'journal_user': True,\n 'sequence_id': new_sequence.id, 'company_id': user.\n company_id.id, 'default_debit_account_id': credit_account.\n id, 'default_credit_account_id': credit_account.id,\n 'sequence': 102})\n self.env['ir.model.data'].sudo().create({'name': \n 'credit_journal_' + str(credit_journal.id), 'model':\n 'account.journal', 'module': 'pos_retail', 'res_id': int(\n credit_journal.id), 'noupdate': True})\n config = self\n config.sudo().write({'journal_ids': [(4, credit_journal.id)]})\n statement = [(0, 0, {'journal_id': credit_journal.id, 'user_id':\n user.id, 'company_id': user.company_id.id})]\n current_session = config.current_session_id\n current_session.sudo().write({'statement_ids': statement})\n return True\n\n def init_return_order_journal(self):\n Journal = self.env['account.journal']\n user = self.env.user\n return_journal = Journal.sudo().search([('code', '=', 'ROJ'), (\n 'company_id', '=', user.company_id.id)])\n if return_journal:\n return return_journal.sudo().write({'pos_method_type': 'return'})\n Account = self.env['account.account']\n return_account_old_version = Account.sudo().search([('code', '=',\n 'ARO'), ('company_id', '=', user.company_id.id)])\n if return_account_old_version:\n return_account = return_account_old_version[0]\n else:\n return_account = Account.sudo().create({'name':\n 'Return Order Account', 'code': 'ARO', 'user_type_id': self\n .env.ref('account.data_account_type_current_assets').id,\n 'company_id': user.company_id.id, 'note':\n 'code \"ARO\" give return order from customer'})\n self.env['ir.model.data'].sudo().create({'name': \n 'return_account' + str(user.company_id.id), 'model':\n 'account.account', 'module': 'pos_retail', 'res_id':\n return_account.id, 'noupdate': True})\n return_journal = Journal.sudo().search([('code', '=', 'ROJ'), (\n 'company_id', '=', user.company_id.id)])\n if return_journal:\n return_journal[0].sudo().write({'default_debit_account_id':\n return_account.id, 'default_credit_account_id':\n return_account.id, 'pos_method_type': 'return'})\n return_journal = return_journal[0]\n else:\n new_sequence = self.env['ir.sequence'].sudo().create({'name': \n 'Return account ' + str(user.company_id.id), 'padding': 3,\n 'prefix': 'RA ' + str(user.company_id.id)})\n self.env['ir.model.data'].sudo().create({'name': \n 'journal_sequence' + str(new_sequence.id), 'model':\n 'ir.sequence', 'module': 'pos_retail', 'res_id':\n new_sequence.id, 'noupdate': True})\n return_journal = Journal.sudo().create({'name':\n 'Return Order Customer', 'code': 'ROJ', 'type': 'cash',\n 'pos_method_type': 'return', 'journal_user': True,\n 'sequence_id': new_sequence.id, 'company_id': user.\n company_id.id, 'default_debit_account_id': return_account.\n id, 'default_credit_account_id': return_account.id,\n 'sequence': 103})\n self.env['ir.model.data'].sudo().create({'name': \n 'return_journal_' + str(return_journal.id), 'model':\n 'account.journal', 'module': 'pos_retail', 'res_id': int(\n return_journal.id), 'noupdate': True})\n config = self\n config.sudo().write({'journal_ids': [(4, return_journal.id)]})\n statement = [(0, 0, {'journal_id': return_journal.id, 'user_id':\n user.id, 'company_id': user.company_id.id})]\n current_session = config.current_session_id\n current_session.sudo().write({'statement_ids': statement})\n return True\n\n def init_rounding_journal(self):\n Journal = self.env['account.journal']\n Account = self.env['account.account']\n user = self.env.user\n rounding_journal = Journal.sudo().search([('code', '=', 'RDJ'), (\n 'company_id', '=', user.company_id.id)])\n if rounding_journal:\n return rounding_journal.sudo().write({'pos_method_type':\n 'rounding'})\n rounding_account_old_version = Account.sudo().search([('code', '=',\n 'AAR'), ('company_id', '=', user.company_id.id)])\n if rounding_account_old_version:\n rounding_account = rounding_account_old_version[0]\n else:\n _logger.info('rounding_account have not')\n rounding_account = Account.sudo().create({'name':\n 'Rounding Account', 'code': 'AAR', 'user_type_id': self.env\n .ref('account.data_account_type_current_assets').id,\n 'company_id': user.company_id.id, 'note':\n 'code \"AAR\" give rounding pos order'})\n self.env['ir.model.data'].sudo().create({'name': \n 'rounding_account' + str(user.company_id.id), 'model':\n 'account.account', 'module': 'pos_retail', 'res_id':\n rounding_account.id, 'noupdate': True})\n rounding_journal = Journal.sudo().search([('pos_method_type', '=',\n 'rounding'), ('company_id', '=', user.company_id.id)])\n if rounding_journal:\n rounding_journal[0].sudo().write({'name': 'Rounding',\n 'default_debit_account_id': rounding_account.id,\n 'default_credit_account_id': rounding_account.id,\n 'pos_method_type': 'rounding', 'code': 'RDJ'})\n rounding_journal = rounding_journal[0]\n else:\n new_sequence = self.env['ir.sequence'].sudo().create({'name': \n 'rounding account ' + str(user.company_id.id), 'padding': 3,\n 'prefix': 'RA ' + str(user.company_id.id)})\n self.env['ir.model.data'].sudo().create({'name': \n 'journal_sequence' + str(new_sequence.id), 'model':\n 'ir.sequence', 'module': 'pos_retail', 'res_id':\n new_sequence.id, 'noupdate': True})\n rounding_journal = Journal.sudo().create({'name': 'Rounding',\n 'code': 'RDJ', 'type': 'cash', 'pos_method_type':\n 'rounding', 'journal_user': True, 'sequence_id':\n new_sequence.id, 'company_id': user.company_id.id,\n 'default_debit_account_id': rounding_account.id,\n 'default_credit_account_id': rounding_account.id,\n 'sequence': 103})\n self.env['ir.model.data'].sudo().create({'name': \n 'rounding_journal_' + str(rounding_journal.id), 'model':\n 'account.journal', 'module': 'pos_retail', 'res_id': int(\n rounding_journal.id), 'noupdate': True})\n config = self\n config.sudo().write({'journal_ids': [(4, rounding_journal.id)]})\n statement = [(0, 0, {'journal_id': rounding_journal.id, 'user_id':\n user.id, 'company_id': user.company_id.id})]\n current_session = config.current_session_id\n current_session.sudo().write({'statement_ids': statement})\n return True\n\n @api.multi\n def open_ui(self):\n res = super(pos_config, self).open_ui()\n self.init_voucher_journal()\n self.init_wallet_journal()\n self.init_credit_journal()\n self.init_return_order_journal()\n self.init_rounding_journal()\n return res\n\n @api.multi\n def open_session_cb(self):\n res = super(pos_config, self).open_session_cb()\n self.init_voucher_journal()\n self.init_wallet_journal()\n self.init_credit_journal()\n self.init_return_order_journal()\n self.init_rounding_journal()\n return res\n",
"<import token>\n<code token>\n<assignment token>\n<class token>\n\n\nclass pos_config(models.Model):\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n\n @api.multi\n def remove_database(self):\n for config in self:\n sessions = self.env['pos.session'].search([('config_id', '=',\n config.id)])\n for session in sessions:\n self.env['bus.bus'].sendmany([[(self.env.cr.dbname,\n 'pos.indexed_db', session.user_id.id), json.dumps({'db':\n self.env.cr.dbname})]])\n self.env['pos.cache.database'].search([]).unlink()\n self.env['pos.call.log'].search([]).unlink()\n return {'type': 'ir.actions.act_url', 'url': '/pos/web/',\n 'target': 'self'}\n\n @api.multi\n def remove_caches(self):\n for config in self:\n sessions = self.env['pos.session'].search([('config_id', '=',\n config.id)])\n for session in sessions:\n self.env['bus.bus'].sendmany([[(self.env.cr.dbname,\n 'pos.indexed_db', session.user_id.id), json.dumps({'db':\n self.env.cr.dbname})]])\n if session.state != 'closed':\n session.action_pos_session_closing_control()\n return {'type': 'ir.actions.act_url', 'url': '/pos/web/',\n 'target': 'self'}\n\n @api.model\n def store_cached_file(self, datas):\n start = timeit.default_timer()\n _logger.info('==> begin cached_file')\n os.chdir(os.path.dirname(__file__))\n path = os.getcwd()\n file_name = path + '/pos.json'\n if os.path.exists(file_name):\n os.remove(file_name)\n with io.open(file_name, 'w', encoding='utf8') as outfile:\n str_ = json.dumps(datas, indent=4, sort_keys=True, separators=(\n ',', ': '), ensure_ascii=False)\n outfile.write(to_unicode(str_))\n stop = timeit.default_timer()\n _logger.info(stop - start)\n return True\n\n @api.model\n def get_cached_file(self):\n start = timeit.default_timer()\n _logger.info('==> begin get_cached_file')\n os.chdir(os.path.dirname(__file__))\n path = os.getcwd()\n file_name = path + '/pos.json'\n if not os.path.exists(file_name):\n return False\n else:\n with open(file_name) as f:\n datas = json.load(f)\n stop = timeit.default_timer()\n _logger.info(stop - start)\n return datas\n\n def get_fields_by_model(self, model):\n all_fields = self.env[model].fields_get()\n fields_list = []\n for field, value in all_fields.items():\n if field == 'model' or all_fields[field]['type'] in ['one2many',\n 'binary']:\n continue\n else:\n fields_list.append(field)\n return fields_list\n\n @api.model\n def install_data(self, model_name=None, min_id=0, max_id=1999):\n cache_obj = self.env['pos.cache.database'].with_context(prefetch_fields\n =False)\n log_obj = self.env['pos.call.log'].with_context(prefetch_fields=False)\n domain = [('id', '>=', min_id), ('id', '<=', max_id)]\n if model_name == 'product.product':\n domain.append(('available_in_pos', '=', True))\n field_list = cache_obj.get_fields_by_model(model_name)\n self.env.cr.execute(\n \"select id from pos_call_log where min_id=%s and max_id=%s and call_model='%s'\"\n % (min_id, max_id, model_name))\n old_logs = self.env.cr.fetchall()\n datas = None\n if len(old_logs) == 0:\n _logger.info('installing %s from %s to %s' % (model_name,\n min_id, max_id))\n datas = self.env[model_name].with_context(prefetch_fields=False\n ).search_read(domain, field_list)\n version_info = odoo.release.version_info[0]\n if version_info == 12:\n all_fields = self.env[model_name].fields_get()\n for data in datas:\n for field, value in data.items():\n if field == 'model':\n continue\n if all_fields[field] and all_fields[field]['type'] in [\n 'date', 'datetime'] and value:\n data[field] = value.strftime(\n DEFAULT_SERVER_DATETIME_FORMAT)\n vals = {'active': True, 'min_id': min_id, 'max_id': max_id,\n 'call_fields': json.dumps(field_list), 'call_results': json\n .dumps(datas), 'call_model': model_name, 'call_domain':\n json.dumps(domain)}\n log_obj.create(vals)\n else:\n old_log_id = old_logs[0][0]\n old_log = log_obj.browse(old_log_id)\n datas = old_log.call_results\n self.env.cr.commit()\n return datas\n\n @api.onchange('lock_print_invoice_on_pos')\n def _onchange_lock_print_invoice_on_pos(self):\n if self.lock_print_invoice_on_pos == True:\n self.receipt_invoice_number = False\n self.send_invoice_email = True\n else:\n self.receipt_invoice_number = True\n self.send_invoice_email = False\n <function token>\n\n @api.onchange('pos_auto_invoice')\n def _onchange_pos_auto_invoice(self):\n if self.pos_auto_invoice == True:\n self.iface_invoicing = True\n else:\n self.iface_invoicing = False\n\n @api.onchange('staff_level')\n def on_change_staff_level(self):\n if self.staff_level and self.staff_level == 'manager':\n self.lock_order_printed_receipt = False\n\n @api.multi\n def write(self, vals):\n if vals.get('allow_discount', False) or vals.get('allow_qty', False\n ) or vals.get('allow_price', False):\n vals['allow_numpad'] = True\n if vals.get('expired_days_voucher', None) and vals.get(\n 'expired_days_voucher') < 0:\n raise UserError('Expired days of voucher could not smaller than 0')\n for config in self:\n if vals.get('management_session', False) and not vals.get(\n 'default_cashbox_lines_ids'):\n if (not config.default_cashbox_lines_ids and not config.\n cash_control):\n raise UserError(\n 'Please go to Cash control and add Default Opening')\n res = super(pos_config, self).write(vals)\n for config in self:\n if (config.validate_by_user_id and not config.\n validate_by_user_id.pos_security_pin):\n raise UserError(\n 'Validate user %s have not set pos security pin, please go to Users menu and input security password'\n % config.validate_by_user_id.name)\n if (config.discount_unlock_limit_user_id and not config.\n discount_unlock_limit_user_id.pos_security_pin):\n raise UserError(\n 'User Unlock limit discount: %s ,have not set pos security pin, please go to Users menu and input security password'\n % config.discount_unlock_limit_user_id.name)\n return res\n\n @api.model\n def create(self, vals):\n if vals.get('allow_discount', False) or vals.get('allow_qty', False\n ) or vals.get('allow_price', False):\n vals['allow_numpad'] = True\n if vals.get('expired_days_voucher', 0) < 0:\n raise UserError('Expired days of voucher could not smaller than 0')\n config = super(pos_config, self).create(vals)\n if (config.management_session and not config.\n default_cashbox_lines_ids and not config.cash_control):\n raise UserError('Please go to Cash control and add Default Opening'\n )\n if (config.validate_by_user_id and not config.validate_by_user_id.\n pos_security_pin):\n raise UserError(\n 'Validate user %s have not set pos security pin, please go to Users menu and input security password'\n % config.validate_by_user_id.name)\n if (config.discount_unlock_limit_user_id and not config.\n discount_unlock_limit_user_id.pos_security_pin):\n raise UserError(\n 'User Unlock limit discount: %s ,have not set pos security pin, please go to Users menu and input security password'\n % config.discount_unlock_limit_user_id.name)\n return config\n\n def init_wallet_journal(self):\n Journal = self.env['account.journal']\n user = self.env.user\n wallet_journal = Journal.sudo().search([('code', '=', 'UWJ'), (\n 'company_id', '=', user.company_id.id)])\n if wallet_journal:\n return wallet_journal.sudo().write({'pos_method_type': 'wallet'})\n Account = self.env['account.account']\n wallet_account_old_version = Account.sudo().search([('code', '=',\n 'AUW'), ('company_id', '=', user.company_id.id)])\n if wallet_account_old_version:\n wallet_account = wallet_account_old_version[0]\n else:\n wallet_account = Account.sudo().create({'name':\n 'Account wallet', 'code': 'AUW', 'user_type_id': self.env.\n ref('account.data_account_type_current_assets').id,\n 'company_id': user.company_id.id, 'note':\n 'code \"AUW\" auto give wallet amount of customers'})\n self.env['ir.model.data'].sudo().create({'name': \n 'account_use_wallet' + str(user.company_id.id), 'model':\n 'account.account', 'module': 'pos_retail', 'res_id':\n wallet_account.id, 'noupdate': True})\n wallet_journal_inactive = Journal.sudo().search([('code', '=',\n 'UWJ'), ('company_id', '=', user.company_id.id), (\n 'pos_method_type', '=', 'wallet')])\n if wallet_journal_inactive:\n wallet_journal_inactive.sudo().write({\n 'default_debit_account_id': wallet_account.id,\n 'default_credit_account_id': wallet_account.id,\n 'pos_method_type': 'wallet', 'sequence': 100})\n wallet_journal = wallet_journal_inactive\n else:\n new_sequence = self.env['ir.sequence'].sudo().create({'name': \n 'Account Default Wallet Journal ' + str(user.company_id.id),\n 'padding': 3, 'prefix': 'UW ' + str(user.company_id.id)})\n self.env['ir.model.data'].sudo().create({'name': \n 'journal_sequence' + str(new_sequence.id), 'model':\n 'ir.sequence', 'module': 'pos_retail', 'res_id':\n new_sequence.id, 'noupdate': True})\n wallet_journal = Journal.sudo().create({'name': 'Wallet',\n 'code': 'UWJ', 'type': 'cash', 'pos_method_type': 'wallet',\n 'journal_user': True, 'sequence_id': new_sequence.id,\n 'company_id': user.company_id.id,\n 'default_debit_account_id': wallet_account.id,\n 'default_credit_account_id': wallet_account.id, 'sequence':\n 100})\n self.env['ir.model.data'].sudo().create({'name': \n 'use_wallet_journal_' + str(wallet_journal.id), 'model':\n 'account.journal', 'module': 'pos_retail', 'res_id': int(\n wallet_journal.id), 'noupdate': True})\n config = self\n config.sudo().write({'journal_ids': [(4, wallet_journal.id)]})\n statement = [(0, 0, {'journal_id': wallet_journal.id, 'user_id':\n user.id, 'company_id': user.company_id.id})]\n current_session = config.current_session_id\n current_session.sudo().write({'statement_ids': statement})\n return\n\n def init_voucher_journal(self):\n Journal = self.env['account.journal']\n user = self.env.user\n voucher_journal = Journal.sudo().search([('code', '=', 'VCJ'), (\n 'company_id', '=', user.company_id.id)])\n if voucher_journal:\n return voucher_journal.sudo().write({'pos_method_type': 'voucher'})\n Account = self.env['account.account']\n voucher_account_old_version = Account.sudo().search([('code', '=',\n 'AVC'), ('company_id', '=', user.company_id.id)])\n if voucher_account_old_version:\n voucher_account = voucher_account_old_version[0]\n else:\n voucher_account = Account.sudo().create({'name':\n 'Account voucher', 'code': 'AVC', 'user_type_id': self.env.\n ref('account.data_account_type_current_assets').id,\n 'company_id': user.company_id.id, 'note':\n 'code \"AVC\" auto give voucher histories of customers'})\n self.env['ir.model.data'].sudo().create({'name': \n 'account_voucher' + str(user.company_id.id), 'model':\n 'account.account', 'module': 'pos_retail', 'res_id':\n voucher_account.id, 'noupdate': True})\n voucher_journal = Journal.sudo().search([('code', '=', 'VCJ'), (\n 'company_id', '=', user.company_id.id), ('pos_method_type', '=',\n 'voucher')])\n if voucher_journal:\n voucher_journal[0].sudo().write({'voucher': True,\n 'default_debit_account_id': voucher_account.id,\n 'default_credit_account_id': voucher_account.id,\n 'pos_method_type': 'voucher', 'sequence': 101})\n voucher_journal = voucher_journal[0]\n else:\n new_sequence = self.env['ir.sequence'].sudo().create({'name': \n 'Account Voucher ' + str(user.company_id.id), 'padding': 3,\n 'prefix': 'AVC ' + str(user.company_id.id)})\n self.env['ir.model.data'].sudo().create({'name': \n 'journal_sequence' + str(new_sequence.id), 'model':\n 'ir.sequence', 'module': 'pos_retail', 'res_id':\n new_sequence.id, 'noupdate': True})\n voucher_journal = Journal.sudo().create({'name': 'Voucher',\n 'code': 'VCJ', 'type': 'cash', 'pos_method_type': 'voucher',\n 'journal_user': True, 'sequence_id': new_sequence.id,\n 'company_id': user.company_id.id,\n 'default_debit_account_id': voucher_account.id,\n 'default_credit_account_id': voucher_account.id, 'sequence':\n 101})\n self.env['ir.model.data'].sudo().create({'name': \n 'journal_voucher_' + str(voucher_journal.id), 'model':\n 'account.journal', 'module': 'pos_retail', 'res_id': int(\n voucher_journal.id), 'noupdate': True})\n config = self\n config.sudo().write({'journal_ids': [(4, voucher_journal.id)]})\n statement = [(0, 0, {'journal_id': voucher_journal.id, 'user_id':\n user.id, 'company_id': user.company_id.id})]\n current_session = config.current_session_id\n current_session.sudo().write({'statement_ids': statement})\n return\n\n def init_credit_journal(self):\n Journal = self.env['account.journal']\n user = self.env.user\n voucher_journal = Journal.sudo().search([('code', '=', 'CJ'), (\n 'company_id', '=', user.company_id.id)])\n if voucher_journal:\n return voucher_journal.sudo().write({'pos_method_type': 'credit'})\n Account = self.env['account.account']\n credit_account_old_version = Account.sudo().search([('code', '=',\n 'ACJ'), ('company_id', '=', user.company_id.id)])\n if credit_account_old_version:\n credit_account = credit_account_old_version[0]\n else:\n credit_account = Account.sudo().create({'name':\n 'Credit Account', 'code': 'CA', 'user_type_id': self.env.\n ref('account.data_account_type_current_assets').id,\n 'company_id': user.company_id.id, 'note':\n 'code \"CA\" give credit payment customer'})\n self.env['ir.model.data'].sudo().create({'name': \n 'account_credit' + str(user.company_id.id), 'model':\n 'account.account', 'module': 'pos_retail', 'res_id':\n credit_account.id, 'noupdate': True})\n credit_journal = Journal.sudo().search([('code', '=', 'CJ'), (\n 'company_id', '=', user.company_id.id), ('pos_method_type', '=',\n 'credit')])\n if credit_journal:\n credit_journal[0].sudo().write({'credit': True,\n 'default_debit_account_id': credit_account.id,\n 'default_credit_account_id': credit_account.id,\n 'pos_method_type': 'credit', 'sequence': 102})\n credit_journal = credit_journal[0]\n else:\n new_sequence = self.env['ir.sequence'].sudo().create({'name': \n 'Credit account ' + str(user.company_id.id), 'padding': 3,\n 'prefix': 'CA ' + str(user.company_id.id)})\n self.env['ir.model.data'].sudo().create({'name': \n 'journal_sequence' + str(new_sequence.id), 'model':\n 'ir.sequence', 'module': 'pos_retail', 'res_id':\n new_sequence.id, 'noupdate': True})\n credit_journal = Journal.sudo().create({'name':\n 'Customer Credit', 'code': 'CJ', 'type': 'cash',\n 'pos_method_type': 'credit', 'journal_user': True,\n 'sequence_id': new_sequence.id, 'company_id': user.\n company_id.id, 'default_debit_account_id': credit_account.\n id, 'default_credit_account_id': credit_account.id,\n 'sequence': 102})\n self.env['ir.model.data'].sudo().create({'name': \n 'credit_journal_' + str(credit_journal.id), 'model':\n 'account.journal', 'module': 'pos_retail', 'res_id': int(\n credit_journal.id), 'noupdate': True})\n config = self\n config.sudo().write({'journal_ids': [(4, credit_journal.id)]})\n statement = [(0, 0, {'journal_id': credit_journal.id, 'user_id':\n user.id, 'company_id': user.company_id.id})]\n current_session = config.current_session_id\n current_session.sudo().write({'statement_ids': statement})\n return True\n\n def init_return_order_journal(self):\n Journal = self.env['account.journal']\n user = self.env.user\n return_journal = Journal.sudo().search([('code', '=', 'ROJ'), (\n 'company_id', '=', user.company_id.id)])\n if return_journal:\n return return_journal.sudo().write({'pos_method_type': 'return'})\n Account = self.env['account.account']\n return_account_old_version = Account.sudo().search([('code', '=',\n 'ARO'), ('company_id', '=', user.company_id.id)])\n if return_account_old_version:\n return_account = return_account_old_version[0]\n else:\n return_account = Account.sudo().create({'name':\n 'Return Order Account', 'code': 'ARO', 'user_type_id': self\n .env.ref('account.data_account_type_current_assets').id,\n 'company_id': user.company_id.id, 'note':\n 'code \"ARO\" give return order from customer'})\n self.env['ir.model.data'].sudo().create({'name': \n 'return_account' + str(user.company_id.id), 'model':\n 'account.account', 'module': 'pos_retail', 'res_id':\n return_account.id, 'noupdate': True})\n return_journal = Journal.sudo().search([('code', '=', 'ROJ'), (\n 'company_id', '=', user.company_id.id)])\n if return_journal:\n return_journal[0].sudo().write({'default_debit_account_id':\n return_account.id, 'default_credit_account_id':\n return_account.id, 'pos_method_type': 'return'})\n return_journal = return_journal[0]\n else:\n new_sequence = self.env['ir.sequence'].sudo().create({'name': \n 'Return account ' + str(user.company_id.id), 'padding': 3,\n 'prefix': 'RA ' + str(user.company_id.id)})\n self.env['ir.model.data'].sudo().create({'name': \n 'journal_sequence' + str(new_sequence.id), 'model':\n 'ir.sequence', 'module': 'pos_retail', 'res_id':\n new_sequence.id, 'noupdate': True})\n return_journal = Journal.sudo().create({'name':\n 'Return Order Customer', 'code': 'ROJ', 'type': 'cash',\n 'pos_method_type': 'return', 'journal_user': True,\n 'sequence_id': new_sequence.id, 'company_id': user.\n company_id.id, 'default_debit_account_id': return_account.\n id, 'default_credit_account_id': return_account.id,\n 'sequence': 103})\n self.env['ir.model.data'].sudo().create({'name': \n 'return_journal_' + str(return_journal.id), 'model':\n 'account.journal', 'module': 'pos_retail', 'res_id': int(\n return_journal.id), 'noupdate': True})\n config = self\n config.sudo().write({'journal_ids': [(4, return_journal.id)]})\n statement = [(0, 0, {'journal_id': return_journal.id, 'user_id':\n user.id, 'company_id': user.company_id.id})]\n current_session = config.current_session_id\n current_session.sudo().write({'statement_ids': statement})\n return True\n\n def init_rounding_journal(self):\n Journal = self.env['account.journal']\n Account = self.env['account.account']\n user = self.env.user\n rounding_journal = Journal.sudo().search([('code', '=', 'RDJ'), (\n 'company_id', '=', user.company_id.id)])\n if rounding_journal:\n return rounding_journal.sudo().write({'pos_method_type':\n 'rounding'})\n rounding_account_old_version = Account.sudo().search([('code', '=',\n 'AAR'), ('company_id', '=', user.company_id.id)])\n if rounding_account_old_version:\n rounding_account = rounding_account_old_version[0]\n else:\n _logger.info('rounding_account have not')\n rounding_account = Account.sudo().create({'name':\n 'Rounding Account', 'code': 'AAR', 'user_type_id': self.env\n .ref('account.data_account_type_current_assets').id,\n 'company_id': user.company_id.id, 'note':\n 'code \"AAR\" give rounding pos order'})\n self.env['ir.model.data'].sudo().create({'name': \n 'rounding_account' + str(user.company_id.id), 'model':\n 'account.account', 'module': 'pos_retail', 'res_id':\n rounding_account.id, 'noupdate': True})\n rounding_journal = Journal.sudo().search([('pos_method_type', '=',\n 'rounding'), ('company_id', '=', user.company_id.id)])\n if rounding_journal:\n rounding_journal[0].sudo().write({'name': 'Rounding',\n 'default_debit_account_id': rounding_account.id,\n 'default_credit_account_id': rounding_account.id,\n 'pos_method_type': 'rounding', 'code': 'RDJ'})\n rounding_journal = rounding_journal[0]\n else:\n new_sequence = self.env['ir.sequence'].sudo().create({'name': \n 'rounding account ' + str(user.company_id.id), 'padding': 3,\n 'prefix': 'RA ' + str(user.company_id.id)})\n self.env['ir.model.data'].sudo().create({'name': \n 'journal_sequence' + str(new_sequence.id), 'model':\n 'ir.sequence', 'module': 'pos_retail', 'res_id':\n new_sequence.id, 'noupdate': True})\n rounding_journal = Journal.sudo().create({'name': 'Rounding',\n 'code': 'RDJ', 'type': 'cash', 'pos_method_type':\n 'rounding', 'journal_user': True, 'sequence_id':\n new_sequence.id, 'company_id': user.company_id.id,\n 'default_debit_account_id': rounding_account.id,\n 'default_credit_account_id': rounding_account.id,\n 'sequence': 103})\n self.env['ir.model.data'].sudo().create({'name': \n 'rounding_journal_' + str(rounding_journal.id), 'model':\n 'account.journal', 'module': 'pos_retail', 'res_id': int(\n rounding_journal.id), 'noupdate': True})\n config = self\n config.sudo().write({'journal_ids': [(4, rounding_journal.id)]})\n statement = [(0, 0, {'journal_id': rounding_journal.id, 'user_id':\n user.id, 'company_id': user.company_id.id})]\n current_session = config.current_session_id\n current_session.sudo().write({'statement_ids': statement})\n return True\n\n @api.multi\n def open_ui(self):\n res = super(pos_config, self).open_ui()\n self.init_voucher_journal()\n self.init_wallet_journal()\n self.init_credit_journal()\n self.init_return_order_journal()\n self.init_rounding_journal()\n return res\n\n @api.multi\n def open_session_cb(self):\n res = super(pos_config, self).open_session_cb()\n self.init_voucher_journal()\n self.init_wallet_journal()\n self.init_credit_journal()\n self.init_return_order_journal()\n self.init_rounding_journal()\n return res\n",
"<import token>\n<code token>\n<assignment token>\n<class token>\n\n\nclass pos_config(models.Model):\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n\n @api.multi\n def remove_database(self):\n for config in self:\n sessions = self.env['pos.session'].search([('config_id', '=',\n config.id)])\n for session in sessions:\n self.env['bus.bus'].sendmany([[(self.env.cr.dbname,\n 'pos.indexed_db', session.user_id.id), json.dumps({'db':\n self.env.cr.dbname})]])\n self.env['pos.cache.database'].search([]).unlink()\n self.env['pos.call.log'].search([]).unlink()\n return {'type': 'ir.actions.act_url', 'url': '/pos/web/',\n 'target': 'self'}\n\n @api.multi\n def remove_caches(self):\n for config in self:\n sessions = self.env['pos.session'].search([('config_id', '=',\n config.id)])\n for session in sessions:\n self.env['bus.bus'].sendmany([[(self.env.cr.dbname,\n 'pos.indexed_db', session.user_id.id), json.dumps({'db':\n self.env.cr.dbname})]])\n if session.state != 'closed':\n session.action_pos_session_closing_control()\n return {'type': 'ir.actions.act_url', 'url': '/pos/web/',\n 'target': 'self'}\n\n @api.model\n def store_cached_file(self, datas):\n start = timeit.default_timer()\n _logger.info('==> begin cached_file')\n os.chdir(os.path.dirname(__file__))\n path = os.getcwd()\n file_name = path + '/pos.json'\n if os.path.exists(file_name):\n os.remove(file_name)\n with io.open(file_name, 'w', encoding='utf8') as outfile:\n str_ = json.dumps(datas, indent=4, sort_keys=True, separators=(\n ',', ': '), ensure_ascii=False)\n outfile.write(to_unicode(str_))\n stop = timeit.default_timer()\n _logger.info(stop - start)\n return True\n\n @api.model\n def get_cached_file(self):\n start = timeit.default_timer()\n _logger.info('==> begin get_cached_file')\n os.chdir(os.path.dirname(__file__))\n path = os.getcwd()\n file_name = path + '/pos.json'\n if not os.path.exists(file_name):\n return False\n else:\n with open(file_name) as f:\n datas = json.load(f)\n stop = timeit.default_timer()\n _logger.info(stop - start)\n return datas\n\n def get_fields_by_model(self, model):\n all_fields = self.env[model].fields_get()\n fields_list = []\n for field, value in all_fields.items():\n if field == 'model' or all_fields[field]['type'] in ['one2many',\n 'binary']:\n continue\n else:\n fields_list.append(field)\n return fields_list\n\n @api.model\n def install_data(self, model_name=None, min_id=0, max_id=1999):\n cache_obj = self.env['pos.cache.database'].with_context(prefetch_fields\n =False)\n log_obj = self.env['pos.call.log'].with_context(prefetch_fields=False)\n domain = [('id', '>=', min_id), ('id', '<=', max_id)]\n if model_name == 'product.product':\n domain.append(('available_in_pos', '=', True))\n field_list = cache_obj.get_fields_by_model(model_name)\n self.env.cr.execute(\n \"select id from pos_call_log where min_id=%s and max_id=%s and call_model='%s'\"\n % (min_id, max_id, model_name))\n old_logs = self.env.cr.fetchall()\n datas = None\n if len(old_logs) == 0:\n _logger.info('installing %s from %s to %s' % (model_name,\n min_id, max_id))\n datas = self.env[model_name].with_context(prefetch_fields=False\n ).search_read(domain, field_list)\n version_info = odoo.release.version_info[0]\n if version_info == 12:\n all_fields = self.env[model_name].fields_get()\n for data in datas:\n for field, value in data.items():\n if field == 'model':\n continue\n if all_fields[field] and all_fields[field]['type'] in [\n 'date', 'datetime'] and value:\n data[field] = value.strftime(\n DEFAULT_SERVER_DATETIME_FORMAT)\n vals = {'active': True, 'min_id': min_id, 'max_id': max_id,\n 'call_fields': json.dumps(field_list), 'call_results': json\n .dumps(datas), 'call_model': model_name, 'call_domain':\n json.dumps(domain)}\n log_obj.create(vals)\n else:\n old_log_id = old_logs[0][0]\n old_log = log_obj.browse(old_log_id)\n datas = old_log.call_results\n self.env.cr.commit()\n return datas\n\n @api.onchange('lock_print_invoice_on_pos')\n def _onchange_lock_print_invoice_on_pos(self):\n if self.lock_print_invoice_on_pos == True:\n self.receipt_invoice_number = False\n self.send_invoice_email = True\n else:\n self.receipt_invoice_number = True\n self.send_invoice_email = False\n <function token>\n\n @api.onchange('pos_auto_invoice')\n def _onchange_pos_auto_invoice(self):\n if self.pos_auto_invoice == True:\n self.iface_invoicing = True\n else:\n self.iface_invoicing = False\n\n @api.onchange('staff_level')\n def on_change_staff_level(self):\n if self.staff_level and self.staff_level == 'manager':\n self.lock_order_printed_receipt = False\n\n @api.multi\n def write(self, vals):\n if vals.get('allow_discount', False) or vals.get('allow_qty', False\n ) or vals.get('allow_price', False):\n vals['allow_numpad'] = True\n if vals.get('expired_days_voucher', None) and vals.get(\n 'expired_days_voucher') < 0:\n raise UserError('Expired days of voucher could not smaller than 0')\n for config in self:\n if vals.get('management_session', False) and not vals.get(\n 'default_cashbox_lines_ids'):\n if (not config.default_cashbox_lines_ids and not config.\n cash_control):\n raise UserError(\n 'Please go to Cash control and add Default Opening')\n res = super(pos_config, self).write(vals)\n for config in self:\n if (config.validate_by_user_id and not config.\n validate_by_user_id.pos_security_pin):\n raise UserError(\n 'Validate user %s have not set pos security pin, please go to Users menu and input security password'\n % config.validate_by_user_id.name)\n if (config.discount_unlock_limit_user_id and not config.\n discount_unlock_limit_user_id.pos_security_pin):\n raise UserError(\n 'User Unlock limit discount: %s ,have not set pos security pin, please go to Users menu and input security password'\n % config.discount_unlock_limit_user_id.name)\n return res\n\n @api.model\n def create(self, vals):\n if vals.get('allow_discount', False) or vals.get('allow_qty', False\n ) or vals.get('allow_price', False):\n vals['allow_numpad'] = True\n if vals.get('expired_days_voucher', 0) < 0:\n raise UserError('Expired days of voucher could not smaller than 0')\n config = super(pos_config, self).create(vals)\n if (config.management_session and not config.\n default_cashbox_lines_ids and not config.cash_control):\n raise UserError('Please go to Cash control and add Default Opening'\n )\n if (config.validate_by_user_id and not config.validate_by_user_id.\n pos_security_pin):\n raise UserError(\n 'Validate user %s have not set pos security pin, please go to Users menu and input security password'\n % config.validate_by_user_id.name)\n if (config.discount_unlock_limit_user_id and not config.\n discount_unlock_limit_user_id.pos_security_pin):\n raise UserError(\n 'User Unlock limit discount: %s ,have not set pos security pin, please go to Users menu and input security password'\n % config.discount_unlock_limit_user_id.name)\n return config\n\n def init_wallet_journal(self):\n Journal = self.env['account.journal']\n user = self.env.user\n wallet_journal = Journal.sudo().search([('code', '=', 'UWJ'), (\n 'company_id', '=', user.company_id.id)])\n if wallet_journal:\n return wallet_journal.sudo().write({'pos_method_type': 'wallet'})\n Account = self.env['account.account']\n wallet_account_old_version = Account.sudo().search([('code', '=',\n 'AUW'), ('company_id', '=', user.company_id.id)])\n if wallet_account_old_version:\n wallet_account = wallet_account_old_version[0]\n else:\n wallet_account = Account.sudo().create({'name':\n 'Account wallet', 'code': 'AUW', 'user_type_id': self.env.\n ref('account.data_account_type_current_assets').id,\n 'company_id': user.company_id.id, 'note':\n 'code \"AUW\" auto give wallet amount of customers'})\n self.env['ir.model.data'].sudo().create({'name': \n 'account_use_wallet' + str(user.company_id.id), 'model':\n 'account.account', 'module': 'pos_retail', 'res_id':\n wallet_account.id, 'noupdate': True})\n wallet_journal_inactive = Journal.sudo().search([('code', '=',\n 'UWJ'), ('company_id', '=', user.company_id.id), (\n 'pos_method_type', '=', 'wallet')])\n if wallet_journal_inactive:\n wallet_journal_inactive.sudo().write({\n 'default_debit_account_id': wallet_account.id,\n 'default_credit_account_id': wallet_account.id,\n 'pos_method_type': 'wallet', 'sequence': 100})\n wallet_journal = wallet_journal_inactive\n else:\n new_sequence = self.env['ir.sequence'].sudo().create({'name': \n 'Account Default Wallet Journal ' + str(user.company_id.id),\n 'padding': 3, 'prefix': 'UW ' + str(user.company_id.id)})\n self.env['ir.model.data'].sudo().create({'name': \n 'journal_sequence' + str(new_sequence.id), 'model':\n 'ir.sequence', 'module': 'pos_retail', 'res_id':\n new_sequence.id, 'noupdate': True})\n wallet_journal = Journal.sudo().create({'name': 'Wallet',\n 'code': 'UWJ', 'type': 'cash', 'pos_method_type': 'wallet',\n 'journal_user': True, 'sequence_id': new_sequence.id,\n 'company_id': user.company_id.id,\n 'default_debit_account_id': wallet_account.id,\n 'default_credit_account_id': wallet_account.id, 'sequence':\n 100})\n self.env['ir.model.data'].sudo().create({'name': \n 'use_wallet_journal_' + str(wallet_journal.id), 'model':\n 'account.journal', 'module': 'pos_retail', 'res_id': int(\n wallet_journal.id), 'noupdate': True})\n config = self\n config.sudo().write({'journal_ids': [(4, wallet_journal.id)]})\n statement = [(0, 0, {'journal_id': wallet_journal.id, 'user_id':\n user.id, 'company_id': user.company_id.id})]\n current_session = config.current_session_id\n current_session.sudo().write({'statement_ids': statement})\n return\n <function token>\n\n def init_credit_journal(self):\n Journal = self.env['account.journal']\n user = self.env.user\n voucher_journal = Journal.sudo().search([('code', '=', 'CJ'), (\n 'company_id', '=', user.company_id.id)])\n if voucher_journal:\n return voucher_journal.sudo().write({'pos_method_type': 'credit'})\n Account = self.env['account.account']\n credit_account_old_version = Account.sudo().search([('code', '=',\n 'ACJ'), ('company_id', '=', user.company_id.id)])\n if credit_account_old_version:\n credit_account = credit_account_old_version[0]\n else:\n credit_account = Account.sudo().create({'name':\n 'Credit Account', 'code': 'CA', 'user_type_id': self.env.\n ref('account.data_account_type_current_assets').id,\n 'company_id': user.company_id.id, 'note':\n 'code \"CA\" give credit payment customer'})\n self.env['ir.model.data'].sudo().create({'name': \n 'account_credit' + str(user.company_id.id), 'model':\n 'account.account', 'module': 'pos_retail', 'res_id':\n credit_account.id, 'noupdate': True})\n credit_journal = Journal.sudo().search([('code', '=', 'CJ'), (\n 'company_id', '=', user.company_id.id), ('pos_method_type', '=',\n 'credit')])\n if credit_journal:\n credit_journal[0].sudo().write({'credit': True,\n 'default_debit_account_id': credit_account.id,\n 'default_credit_account_id': credit_account.id,\n 'pos_method_type': 'credit', 'sequence': 102})\n credit_journal = credit_journal[0]\n else:\n new_sequence = self.env['ir.sequence'].sudo().create({'name': \n 'Credit account ' + str(user.company_id.id), 'padding': 3,\n 'prefix': 'CA ' + str(user.company_id.id)})\n self.env['ir.model.data'].sudo().create({'name': \n 'journal_sequence' + str(new_sequence.id), 'model':\n 'ir.sequence', 'module': 'pos_retail', 'res_id':\n new_sequence.id, 'noupdate': True})\n credit_journal = Journal.sudo().create({'name':\n 'Customer Credit', 'code': 'CJ', 'type': 'cash',\n 'pos_method_type': 'credit', 'journal_user': True,\n 'sequence_id': new_sequence.id, 'company_id': user.\n company_id.id, 'default_debit_account_id': credit_account.\n id, 'default_credit_account_id': credit_account.id,\n 'sequence': 102})\n self.env['ir.model.data'].sudo().create({'name': \n 'credit_journal_' + str(credit_journal.id), 'model':\n 'account.journal', 'module': 'pos_retail', 'res_id': int(\n credit_journal.id), 'noupdate': True})\n config = self\n config.sudo().write({'journal_ids': [(4, credit_journal.id)]})\n statement = [(0, 0, {'journal_id': credit_journal.id, 'user_id':\n user.id, 'company_id': user.company_id.id})]\n current_session = config.current_session_id\n current_session.sudo().write({'statement_ids': statement})\n return True\n\n def init_return_order_journal(self):\n Journal = self.env['account.journal']\n user = self.env.user\n return_journal = Journal.sudo().search([('code', '=', 'ROJ'), (\n 'company_id', '=', user.company_id.id)])\n if return_journal:\n return return_journal.sudo().write({'pos_method_type': 'return'})\n Account = self.env['account.account']\n return_account_old_version = Account.sudo().search([('code', '=',\n 'ARO'), ('company_id', '=', user.company_id.id)])\n if return_account_old_version:\n return_account = return_account_old_version[0]\n else:\n return_account = Account.sudo().create({'name':\n 'Return Order Account', 'code': 'ARO', 'user_type_id': self\n .env.ref('account.data_account_type_current_assets').id,\n 'company_id': user.company_id.id, 'note':\n 'code \"ARO\" give return order from customer'})\n self.env['ir.model.data'].sudo().create({'name': \n 'return_account' + str(user.company_id.id), 'model':\n 'account.account', 'module': 'pos_retail', 'res_id':\n return_account.id, 'noupdate': True})\n return_journal = Journal.sudo().search([('code', '=', 'ROJ'), (\n 'company_id', '=', user.company_id.id)])\n if return_journal:\n return_journal[0].sudo().write({'default_debit_account_id':\n return_account.id, 'default_credit_account_id':\n return_account.id, 'pos_method_type': 'return'})\n return_journal = return_journal[0]\n else:\n new_sequence = self.env['ir.sequence'].sudo().create({'name': \n 'Return account ' + str(user.company_id.id), 'padding': 3,\n 'prefix': 'RA ' + str(user.company_id.id)})\n self.env['ir.model.data'].sudo().create({'name': \n 'journal_sequence' + str(new_sequence.id), 'model':\n 'ir.sequence', 'module': 'pos_retail', 'res_id':\n new_sequence.id, 'noupdate': True})\n return_journal = Journal.sudo().create({'name':\n 'Return Order Customer', 'code': 'ROJ', 'type': 'cash',\n 'pos_method_type': 'return', 'journal_user': True,\n 'sequence_id': new_sequence.id, 'company_id': user.\n company_id.id, 'default_debit_account_id': return_account.\n id, 'default_credit_account_id': return_account.id,\n 'sequence': 103})\n self.env['ir.model.data'].sudo().create({'name': \n 'return_journal_' + str(return_journal.id), 'model':\n 'account.journal', 'module': 'pos_retail', 'res_id': int(\n return_journal.id), 'noupdate': True})\n config = self\n config.sudo().write({'journal_ids': [(4, return_journal.id)]})\n statement = [(0, 0, {'journal_id': return_journal.id, 'user_id':\n user.id, 'company_id': user.company_id.id})]\n current_session = config.current_session_id\n current_session.sudo().write({'statement_ids': statement})\n return True\n\n def init_rounding_journal(self):\n Journal = self.env['account.journal']\n Account = self.env['account.account']\n user = self.env.user\n rounding_journal = Journal.sudo().search([('code', '=', 'RDJ'), (\n 'company_id', '=', user.company_id.id)])\n if rounding_journal:\n return rounding_journal.sudo().write({'pos_method_type':\n 'rounding'})\n rounding_account_old_version = Account.sudo().search([('code', '=',\n 'AAR'), ('company_id', '=', user.company_id.id)])\n if rounding_account_old_version:\n rounding_account = rounding_account_old_version[0]\n else:\n _logger.info('rounding_account have not')\n rounding_account = Account.sudo().create({'name':\n 'Rounding Account', 'code': 'AAR', 'user_type_id': self.env\n .ref('account.data_account_type_current_assets').id,\n 'company_id': user.company_id.id, 'note':\n 'code \"AAR\" give rounding pos order'})\n self.env['ir.model.data'].sudo().create({'name': \n 'rounding_account' + str(user.company_id.id), 'model':\n 'account.account', 'module': 'pos_retail', 'res_id':\n rounding_account.id, 'noupdate': True})\n rounding_journal = Journal.sudo().search([('pos_method_type', '=',\n 'rounding'), ('company_id', '=', user.company_id.id)])\n if rounding_journal:\n rounding_journal[0].sudo().write({'name': 'Rounding',\n 'default_debit_account_id': rounding_account.id,\n 'default_credit_account_id': rounding_account.id,\n 'pos_method_type': 'rounding', 'code': 'RDJ'})\n rounding_journal = rounding_journal[0]\n else:\n new_sequence = self.env['ir.sequence'].sudo().create({'name': \n 'rounding account ' + str(user.company_id.id), 'padding': 3,\n 'prefix': 'RA ' + str(user.company_id.id)})\n self.env['ir.model.data'].sudo().create({'name': \n 'journal_sequence' + str(new_sequence.id), 'model':\n 'ir.sequence', 'module': 'pos_retail', 'res_id':\n new_sequence.id, 'noupdate': True})\n rounding_journal = Journal.sudo().create({'name': 'Rounding',\n 'code': 'RDJ', 'type': 'cash', 'pos_method_type':\n 'rounding', 'journal_user': True, 'sequence_id':\n new_sequence.id, 'company_id': user.company_id.id,\n 'default_debit_account_id': rounding_account.id,\n 'default_credit_account_id': rounding_account.id,\n 'sequence': 103})\n self.env['ir.model.data'].sudo().create({'name': \n 'rounding_journal_' + str(rounding_journal.id), 'model':\n 'account.journal', 'module': 'pos_retail', 'res_id': int(\n rounding_journal.id), 'noupdate': True})\n config = self\n config.sudo().write({'journal_ids': [(4, rounding_journal.id)]})\n statement = [(0, 0, {'journal_id': rounding_journal.id, 'user_id':\n user.id, 'company_id': user.company_id.id})]\n current_session = config.current_session_id\n current_session.sudo().write({'statement_ids': statement})\n return True\n\n @api.multi\n def open_ui(self):\n res = super(pos_config, self).open_ui()\n self.init_voucher_journal()\n self.init_wallet_journal()\n self.init_credit_journal()\n self.init_return_order_journal()\n self.init_rounding_journal()\n return res\n\n @api.multi\n def open_session_cb(self):\n res = super(pos_config, self).open_session_cb()\n self.init_voucher_journal()\n self.init_wallet_journal()\n self.init_credit_journal()\n self.init_return_order_journal()\n self.init_rounding_journal()\n return res\n",
"<import token>\n<code token>\n<assignment token>\n<class token>\n\n\nclass pos_config(models.Model):\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n\n @api.multi\n def remove_database(self):\n for config in self:\n sessions = self.env['pos.session'].search([('config_id', '=',\n config.id)])\n for session in sessions:\n self.env['bus.bus'].sendmany([[(self.env.cr.dbname,\n 'pos.indexed_db', session.user_id.id), json.dumps({'db':\n self.env.cr.dbname})]])\n self.env['pos.cache.database'].search([]).unlink()\n self.env['pos.call.log'].search([]).unlink()\n return {'type': 'ir.actions.act_url', 'url': '/pos/web/',\n 'target': 'self'}\n\n @api.multi\n def remove_caches(self):\n for config in self:\n sessions = self.env['pos.session'].search([('config_id', '=',\n config.id)])\n for session in sessions:\n self.env['bus.bus'].sendmany([[(self.env.cr.dbname,\n 'pos.indexed_db', session.user_id.id), json.dumps({'db':\n self.env.cr.dbname})]])\n if session.state != 'closed':\n session.action_pos_session_closing_control()\n return {'type': 'ir.actions.act_url', 'url': '/pos/web/',\n 'target': 'self'}\n\n @api.model\n def store_cached_file(self, datas):\n start = timeit.default_timer()\n _logger.info('==> begin cached_file')\n os.chdir(os.path.dirname(__file__))\n path = os.getcwd()\n file_name = path + '/pos.json'\n if os.path.exists(file_name):\n os.remove(file_name)\n with io.open(file_name, 'w', encoding='utf8') as outfile:\n str_ = json.dumps(datas, indent=4, sort_keys=True, separators=(\n ',', ': '), ensure_ascii=False)\n outfile.write(to_unicode(str_))\n stop = timeit.default_timer()\n _logger.info(stop - start)\n return True\n\n @api.model\n def get_cached_file(self):\n start = timeit.default_timer()\n _logger.info('==> begin get_cached_file')\n os.chdir(os.path.dirname(__file__))\n path = os.getcwd()\n file_name = path + '/pos.json'\n if not os.path.exists(file_name):\n return False\n else:\n with open(file_name) as f:\n datas = json.load(f)\n stop = timeit.default_timer()\n _logger.info(stop - start)\n return datas\n\n def get_fields_by_model(self, model):\n all_fields = self.env[model].fields_get()\n fields_list = []\n for field, value in all_fields.items():\n if field == 'model' or all_fields[field]['type'] in ['one2many',\n 'binary']:\n continue\n else:\n fields_list.append(field)\n return fields_list\n\n @api.model\n def install_data(self, model_name=None, min_id=0, max_id=1999):\n cache_obj = self.env['pos.cache.database'].with_context(prefetch_fields\n =False)\n log_obj = self.env['pos.call.log'].with_context(prefetch_fields=False)\n domain = [('id', '>=', min_id), ('id', '<=', max_id)]\n if model_name == 'product.product':\n domain.append(('available_in_pos', '=', True))\n field_list = cache_obj.get_fields_by_model(model_name)\n self.env.cr.execute(\n \"select id from pos_call_log where min_id=%s and max_id=%s and call_model='%s'\"\n % (min_id, max_id, model_name))\n old_logs = self.env.cr.fetchall()\n datas = None\n if len(old_logs) == 0:\n _logger.info('installing %s from %s to %s' % (model_name,\n min_id, max_id))\n datas = self.env[model_name].with_context(prefetch_fields=False\n ).search_read(domain, field_list)\n version_info = odoo.release.version_info[0]\n if version_info == 12:\n all_fields = self.env[model_name].fields_get()\n for data in datas:\n for field, value in data.items():\n if field == 'model':\n continue\n if all_fields[field] and all_fields[field]['type'] in [\n 'date', 'datetime'] and value:\n data[field] = value.strftime(\n DEFAULT_SERVER_DATETIME_FORMAT)\n vals = {'active': True, 'min_id': min_id, 'max_id': max_id,\n 'call_fields': json.dumps(field_list), 'call_results': json\n .dumps(datas), 'call_model': model_name, 'call_domain':\n json.dumps(domain)}\n log_obj.create(vals)\n else:\n old_log_id = old_logs[0][0]\n old_log = log_obj.browse(old_log_id)\n datas = old_log.call_results\n self.env.cr.commit()\n return datas\n\n @api.onchange('lock_print_invoice_on_pos')\n def _onchange_lock_print_invoice_on_pos(self):\n if self.lock_print_invoice_on_pos == True:\n self.receipt_invoice_number = False\n self.send_invoice_email = True\n else:\n self.receipt_invoice_number = True\n self.send_invoice_email = False\n <function token>\n\n @api.onchange('pos_auto_invoice')\n def _onchange_pos_auto_invoice(self):\n if self.pos_auto_invoice == True:\n self.iface_invoicing = True\n else:\n self.iface_invoicing = False\n\n @api.onchange('staff_level')\n def on_change_staff_level(self):\n if self.staff_level and self.staff_level == 'manager':\n self.lock_order_printed_receipt = False\n\n @api.multi\n def write(self, vals):\n if vals.get('allow_discount', False) or vals.get('allow_qty', False\n ) or vals.get('allow_price', False):\n vals['allow_numpad'] = True\n if vals.get('expired_days_voucher', None) and vals.get(\n 'expired_days_voucher') < 0:\n raise UserError('Expired days of voucher could not smaller than 0')\n for config in self:\n if vals.get('management_session', False) and not vals.get(\n 'default_cashbox_lines_ids'):\n if (not config.default_cashbox_lines_ids and not config.\n cash_control):\n raise UserError(\n 'Please go to Cash control and add Default Opening')\n res = super(pos_config, self).write(vals)\n for config in self:\n if (config.validate_by_user_id and not config.\n validate_by_user_id.pos_security_pin):\n raise UserError(\n 'Validate user %s have not set pos security pin, please go to Users menu and input security password'\n % config.validate_by_user_id.name)\n if (config.discount_unlock_limit_user_id and not config.\n discount_unlock_limit_user_id.pos_security_pin):\n raise UserError(\n 'User Unlock limit discount: %s ,have not set pos security pin, please go to Users menu and input security password'\n % config.discount_unlock_limit_user_id.name)\n return res\n\n @api.model\n def create(self, vals):\n if vals.get('allow_discount', False) or vals.get('allow_qty', False\n ) or vals.get('allow_price', False):\n vals['allow_numpad'] = True\n if vals.get('expired_days_voucher', 0) < 0:\n raise UserError('Expired days of voucher could not smaller than 0')\n config = super(pos_config, self).create(vals)\n if (config.management_session and not config.\n default_cashbox_lines_ids and not config.cash_control):\n raise UserError('Please go to Cash control and add Default Opening'\n )\n if (config.validate_by_user_id and not config.validate_by_user_id.\n pos_security_pin):\n raise UserError(\n 'Validate user %s have not set pos security pin, please go to Users menu and input security password'\n % config.validate_by_user_id.name)\n if (config.discount_unlock_limit_user_id and not config.\n discount_unlock_limit_user_id.pos_security_pin):\n raise UserError(\n 'User Unlock limit discount: %s ,have not set pos security pin, please go to Users menu and input security password'\n % config.discount_unlock_limit_user_id.name)\n return config\n\n def init_wallet_journal(self):\n Journal = self.env['account.journal']\n user = self.env.user\n wallet_journal = Journal.sudo().search([('code', '=', 'UWJ'), (\n 'company_id', '=', user.company_id.id)])\n if wallet_journal:\n return wallet_journal.sudo().write({'pos_method_type': 'wallet'})\n Account = self.env['account.account']\n wallet_account_old_version = Account.sudo().search([('code', '=',\n 'AUW'), ('company_id', '=', user.company_id.id)])\n if wallet_account_old_version:\n wallet_account = wallet_account_old_version[0]\n else:\n wallet_account = Account.sudo().create({'name':\n 'Account wallet', 'code': 'AUW', 'user_type_id': self.env.\n ref('account.data_account_type_current_assets').id,\n 'company_id': user.company_id.id, 'note':\n 'code \"AUW\" auto give wallet amount of customers'})\n self.env['ir.model.data'].sudo().create({'name': \n 'account_use_wallet' + str(user.company_id.id), 'model':\n 'account.account', 'module': 'pos_retail', 'res_id':\n wallet_account.id, 'noupdate': True})\n wallet_journal_inactive = Journal.sudo().search([('code', '=',\n 'UWJ'), ('company_id', '=', user.company_id.id), (\n 'pos_method_type', '=', 'wallet')])\n if wallet_journal_inactive:\n wallet_journal_inactive.sudo().write({\n 'default_debit_account_id': wallet_account.id,\n 'default_credit_account_id': wallet_account.id,\n 'pos_method_type': 'wallet', 'sequence': 100})\n wallet_journal = wallet_journal_inactive\n else:\n new_sequence = self.env['ir.sequence'].sudo().create({'name': \n 'Account Default Wallet Journal ' + str(user.company_id.id),\n 'padding': 3, 'prefix': 'UW ' + str(user.company_id.id)})\n self.env['ir.model.data'].sudo().create({'name': \n 'journal_sequence' + str(new_sequence.id), 'model':\n 'ir.sequence', 'module': 'pos_retail', 'res_id':\n new_sequence.id, 'noupdate': True})\n wallet_journal = Journal.sudo().create({'name': 'Wallet',\n 'code': 'UWJ', 'type': 'cash', 'pos_method_type': 'wallet',\n 'journal_user': True, 'sequence_id': new_sequence.id,\n 'company_id': user.company_id.id,\n 'default_debit_account_id': wallet_account.id,\n 'default_credit_account_id': wallet_account.id, 'sequence':\n 100})\n self.env['ir.model.data'].sudo().create({'name': \n 'use_wallet_journal_' + str(wallet_journal.id), 'model':\n 'account.journal', 'module': 'pos_retail', 'res_id': int(\n wallet_journal.id), 'noupdate': True})\n config = self\n config.sudo().write({'journal_ids': [(4, wallet_journal.id)]})\n statement = [(0, 0, {'journal_id': wallet_journal.id, 'user_id':\n user.id, 'company_id': user.company_id.id})]\n current_session = config.current_session_id\n current_session.sudo().write({'statement_ids': statement})\n return\n <function token>\n\n def init_credit_journal(self):\n Journal = self.env['account.journal']\n user = self.env.user\n voucher_journal = Journal.sudo().search([('code', '=', 'CJ'), (\n 'company_id', '=', user.company_id.id)])\n if voucher_journal:\n return voucher_journal.sudo().write({'pos_method_type': 'credit'})\n Account = self.env['account.account']\n credit_account_old_version = Account.sudo().search([('code', '=',\n 'ACJ'), ('company_id', '=', user.company_id.id)])\n if credit_account_old_version:\n credit_account = credit_account_old_version[0]\n else:\n credit_account = Account.sudo().create({'name':\n 'Credit Account', 'code': 'CA', 'user_type_id': self.env.\n ref('account.data_account_type_current_assets').id,\n 'company_id': user.company_id.id, 'note':\n 'code \"CA\" give credit payment customer'})\n self.env['ir.model.data'].sudo().create({'name': \n 'account_credit' + str(user.company_id.id), 'model':\n 'account.account', 'module': 'pos_retail', 'res_id':\n credit_account.id, 'noupdate': True})\n credit_journal = Journal.sudo().search([('code', '=', 'CJ'), (\n 'company_id', '=', user.company_id.id), ('pos_method_type', '=',\n 'credit')])\n if credit_journal:\n credit_journal[0].sudo().write({'credit': True,\n 'default_debit_account_id': credit_account.id,\n 'default_credit_account_id': credit_account.id,\n 'pos_method_type': 'credit', 'sequence': 102})\n credit_journal = credit_journal[0]\n else:\n new_sequence = self.env['ir.sequence'].sudo().create({'name': \n 'Credit account ' + str(user.company_id.id), 'padding': 3,\n 'prefix': 'CA ' + str(user.company_id.id)})\n self.env['ir.model.data'].sudo().create({'name': \n 'journal_sequence' + str(new_sequence.id), 'model':\n 'ir.sequence', 'module': 'pos_retail', 'res_id':\n new_sequence.id, 'noupdate': True})\n credit_journal = Journal.sudo().create({'name':\n 'Customer Credit', 'code': 'CJ', 'type': 'cash',\n 'pos_method_type': 'credit', 'journal_user': True,\n 'sequence_id': new_sequence.id, 'company_id': user.\n company_id.id, 'default_debit_account_id': credit_account.\n id, 'default_credit_account_id': credit_account.id,\n 'sequence': 102})\n self.env['ir.model.data'].sudo().create({'name': \n 'credit_journal_' + str(credit_journal.id), 'model':\n 'account.journal', 'module': 'pos_retail', 'res_id': int(\n credit_journal.id), 'noupdate': True})\n config = self\n config.sudo().write({'journal_ids': [(4, credit_journal.id)]})\n statement = [(0, 0, {'journal_id': credit_journal.id, 'user_id':\n user.id, 'company_id': user.company_id.id})]\n current_session = config.current_session_id\n current_session.sudo().write({'statement_ids': statement})\n return True\n\n def init_return_order_journal(self):\n Journal = self.env['account.journal']\n user = self.env.user\n return_journal = Journal.sudo().search([('code', '=', 'ROJ'), (\n 'company_id', '=', user.company_id.id)])\n if return_journal:\n return return_journal.sudo().write({'pos_method_type': 'return'})\n Account = self.env['account.account']\n return_account_old_version = Account.sudo().search([('code', '=',\n 'ARO'), ('company_id', '=', user.company_id.id)])\n if return_account_old_version:\n return_account = return_account_old_version[0]\n else:\n return_account = Account.sudo().create({'name':\n 'Return Order Account', 'code': 'ARO', 'user_type_id': self\n .env.ref('account.data_account_type_current_assets').id,\n 'company_id': user.company_id.id, 'note':\n 'code \"ARO\" give return order from customer'})\n self.env['ir.model.data'].sudo().create({'name': \n 'return_account' + str(user.company_id.id), 'model':\n 'account.account', 'module': 'pos_retail', 'res_id':\n return_account.id, 'noupdate': True})\n return_journal = Journal.sudo().search([('code', '=', 'ROJ'), (\n 'company_id', '=', user.company_id.id)])\n if return_journal:\n return_journal[0].sudo().write({'default_debit_account_id':\n return_account.id, 'default_credit_account_id':\n return_account.id, 'pos_method_type': 'return'})\n return_journal = return_journal[0]\n else:\n new_sequence = self.env['ir.sequence'].sudo().create({'name': \n 'Return account ' + str(user.company_id.id), 'padding': 3,\n 'prefix': 'RA ' + str(user.company_id.id)})\n self.env['ir.model.data'].sudo().create({'name': \n 'journal_sequence' + str(new_sequence.id), 'model':\n 'ir.sequence', 'module': 'pos_retail', 'res_id':\n new_sequence.id, 'noupdate': True})\n return_journal = Journal.sudo().create({'name':\n 'Return Order Customer', 'code': 'ROJ', 'type': 'cash',\n 'pos_method_type': 'return', 'journal_user': True,\n 'sequence_id': new_sequence.id, 'company_id': user.\n company_id.id, 'default_debit_account_id': return_account.\n id, 'default_credit_account_id': return_account.id,\n 'sequence': 103})\n self.env['ir.model.data'].sudo().create({'name': \n 'return_journal_' + str(return_journal.id), 'model':\n 'account.journal', 'module': 'pos_retail', 'res_id': int(\n return_journal.id), 'noupdate': True})\n config = self\n config.sudo().write({'journal_ids': [(4, return_journal.id)]})\n statement = [(0, 0, {'journal_id': return_journal.id, 'user_id':\n user.id, 'company_id': user.company_id.id})]\n current_session = config.current_session_id\n current_session.sudo().write({'statement_ids': statement})\n return True\n <function token>\n\n @api.multi\n def open_ui(self):\n res = super(pos_config, self).open_ui()\n self.init_voucher_journal()\n self.init_wallet_journal()\n self.init_credit_journal()\n self.init_return_order_journal()\n self.init_rounding_journal()\n return res\n\n @api.multi\n def open_session_cb(self):\n res = super(pos_config, self).open_session_cb()\n self.init_voucher_journal()\n self.init_wallet_journal()\n self.init_credit_journal()\n self.init_return_order_journal()\n self.init_rounding_journal()\n return res\n",
"<import token>\n<code token>\n<assignment token>\n<class token>\n\n\nclass pos_config(models.Model):\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n\n @api.multi\n def remove_database(self):\n for config in self:\n sessions = self.env['pos.session'].search([('config_id', '=',\n config.id)])\n for session in sessions:\n self.env['bus.bus'].sendmany([[(self.env.cr.dbname,\n 'pos.indexed_db', session.user_id.id), json.dumps({'db':\n self.env.cr.dbname})]])\n self.env['pos.cache.database'].search([]).unlink()\n self.env['pos.call.log'].search([]).unlink()\n return {'type': 'ir.actions.act_url', 'url': '/pos/web/',\n 'target': 'self'}\n\n @api.multi\n def remove_caches(self):\n for config in self:\n sessions = self.env['pos.session'].search([('config_id', '=',\n config.id)])\n for session in sessions:\n self.env['bus.bus'].sendmany([[(self.env.cr.dbname,\n 'pos.indexed_db', session.user_id.id), json.dumps({'db':\n self.env.cr.dbname})]])\n if session.state != 'closed':\n session.action_pos_session_closing_control()\n return {'type': 'ir.actions.act_url', 'url': '/pos/web/',\n 'target': 'self'}\n\n @api.model\n def store_cached_file(self, datas):\n start = timeit.default_timer()\n _logger.info('==> begin cached_file')\n os.chdir(os.path.dirname(__file__))\n path = os.getcwd()\n file_name = path + '/pos.json'\n if os.path.exists(file_name):\n os.remove(file_name)\n with io.open(file_name, 'w', encoding='utf8') as outfile:\n str_ = json.dumps(datas, indent=4, sort_keys=True, separators=(\n ',', ': '), ensure_ascii=False)\n outfile.write(to_unicode(str_))\n stop = timeit.default_timer()\n _logger.info(stop - start)\n return True\n\n @api.model\n def get_cached_file(self):\n start = timeit.default_timer()\n _logger.info('==> begin get_cached_file')\n os.chdir(os.path.dirname(__file__))\n path = os.getcwd()\n file_name = path + '/pos.json'\n if not os.path.exists(file_name):\n return False\n else:\n with open(file_name) as f:\n datas = json.load(f)\n stop = timeit.default_timer()\n _logger.info(stop - start)\n return datas\n\n def get_fields_by_model(self, model):\n all_fields = self.env[model].fields_get()\n fields_list = []\n for field, value in all_fields.items():\n if field == 'model' or all_fields[field]['type'] in ['one2many',\n 'binary']:\n continue\n else:\n fields_list.append(field)\n return fields_list\n\n @api.model\n def install_data(self, model_name=None, min_id=0, max_id=1999):\n cache_obj = self.env['pos.cache.database'].with_context(prefetch_fields\n =False)\n log_obj = self.env['pos.call.log'].with_context(prefetch_fields=False)\n domain = [('id', '>=', min_id), ('id', '<=', max_id)]\n if model_name == 'product.product':\n domain.append(('available_in_pos', '=', True))\n field_list = cache_obj.get_fields_by_model(model_name)\n self.env.cr.execute(\n \"select id from pos_call_log where min_id=%s and max_id=%s and call_model='%s'\"\n % (min_id, max_id, model_name))\n old_logs = self.env.cr.fetchall()\n datas = None\n if len(old_logs) == 0:\n _logger.info('installing %s from %s to %s' % (model_name,\n min_id, max_id))\n datas = self.env[model_name].with_context(prefetch_fields=False\n ).search_read(domain, field_list)\n version_info = odoo.release.version_info[0]\n if version_info == 12:\n all_fields = self.env[model_name].fields_get()\n for data in datas:\n for field, value in data.items():\n if field == 'model':\n continue\n if all_fields[field] and all_fields[field]['type'] in [\n 'date', 'datetime'] and value:\n data[field] = value.strftime(\n DEFAULT_SERVER_DATETIME_FORMAT)\n vals = {'active': True, 'min_id': min_id, 'max_id': max_id,\n 'call_fields': json.dumps(field_list), 'call_results': json\n .dumps(datas), 'call_model': model_name, 'call_domain':\n json.dumps(domain)}\n log_obj.create(vals)\n else:\n old_log_id = old_logs[0][0]\n old_log = log_obj.browse(old_log_id)\n datas = old_log.call_results\n self.env.cr.commit()\n return datas\n\n @api.onchange('lock_print_invoice_on_pos')\n def _onchange_lock_print_invoice_on_pos(self):\n if self.lock_print_invoice_on_pos == True:\n self.receipt_invoice_number = False\n self.send_invoice_email = True\n else:\n self.receipt_invoice_number = True\n self.send_invoice_email = False\n <function token>\n\n @api.onchange('pos_auto_invoice')\n def _onchange_pos_auto_invoice(self):\n if self.pos_auto_invoice == True:\n self.iface_invoicing = True\n else:\n self.iface_invoicing = False\n\n @api.onchange('staff_level')\n def on_change_staff_level(self):\n if self.staff_level and self.staff_level == 'manager':\n self.lock_order_printed_receipt = False\n\n @api.multi\n def write(self, vals):\n if vals.get('allow_discount', False) or vals.get('allow_qty', False\n ) or vals.get('allow_price', False):\n vals['allow_numpad'] = True\n if vals.get('expired_days_voucher', None) and vals.get(\n 'expired_days_voucher') < 0:\n raise UserError('Expired days of voucher could not smaller than 0')\n for config in self:\n if vals.get('management_session', False) and not vals.get(\n 'default_cashbox_lines_ids'):\n if (not config.default_cashbox_lines_ids and not config.\n cash_control):\n raise UserError(\n 'Please go to Cash control and add Default Opening')\n res = super(pos_config, self).write(vals)\n for config in self:\n if (config.validate_by_user_id and not config.\n validate_by_user_id.pos_security_pin):\n raise UserError(\n 'Validate user %s have not set pos security pin, please go to Users menu and input security password'\n % config.validate_by_user_id.name)\n if (config.discount_unlock_limit_user_id and not config.\n discount_unlock_limit_user_id.pos_security_pin):\n raise UserError(\n 'User Unlock limit discount: %s ,have not set pos security pin, please go to Users menu and input security password'\n % config.discount_unlock_limit_user_id.name)\n return res\n\n @api.model\n def create(self, vals):\n if vals.get('allow_discount', False) or vals.get('allow_qty', False\n ) or vals.get('allow_price', False):\n vals['allow_numpad'] = True\n if vals.get('expired_days_voucher', 0) < 0:\n raise UserError('Expired days of voucher could not smaller than 0')\n config = super(pos_config, self).create(vals)\n if (config.management_session and not config.\n default_cashbox_lines_ids and not config.cash_control):\n raise UserError('Please go to Cash control and add Default Opening'\n )\n if (config.validate_by_user_id and not config.validate_by_user_id.\n pos_security_pin):\n raise UserError(\n 'Validate user %s have not set pos security pin, please go to Users menu and input security password'\n % config.validate_by_user_id.name)\n if (config.discount_unlock_limit_user_id and not config.\n discount_unlock_limit_user_id.pos_security_pin):\n raise UserError(\n 'User Unlock limit discount: %s ,have not set pos security pin, please go to Users menu and input security password'\n % config.discount_unlock_limit_user_id.name)\n return config\n\n def init_wallet_journal(self):\n Journal = self.env['account.journal']\n user = self.env.user\n wallet_journal = Journal.sudo().search([('code', '=', 'UWJ'), (\n 'company_id', '=', user.company_id.id)])\n if wallet_journal:\n return wallet_journal.sudo().write({'pos_method_type': 'wallet'})\n Account = self.env['account.account']\n wallet_account_old_version = Account.sudo().search([('code', '=',\n 'AUW'), ('company_id', '=', user.company_id.id)])\n if wallet_account_old_version:\n wallet_account = wallet_account_old_version[0]\n else:\n wallet_account = Account.sudo().create({'name':\n 'Account wallet', 'code': 'AUW', 'user_type_id': self.env.\n ref('account.data_account_type_current_assets').id,\n 'company_id': user.company_id.id, 'note':\n 'code \"AUW\" auto give wallet amount of customers'})\n self.env['ir.model.data'].sudo().create({'name': \n 'account_use_wallet' + str(user.company_id.id), 'model':\n 'account.account', 'module': 'pos_retail', 'res_id':\n wallet_account.id, 'noupdate': True})\n wallet_journal_inactive = Journal.sudo().search([('code', '=',\n 'UWJ'), ('company_id', '=', user.company_id.id), (\n 'pos_method_type', '=', 'wallet')])\n if wallet_journal_inactive:\n wallet_journal_inactive.sudo().write({\n 'default_debit_account_id': wallet_account.id,\n 'default_credit_account_id': wallet_account.id,\n 'pos_method_type': 'wallet', 'sequence': 100})\n wallet_journal = wallet_journal_inactive\n else:\n new_sequence = self.env['ir.sequence'].sudo().create({'name': \n 'Account Default Wallet Journal ' + str(user.company_id.id),\n 'padding': 3, 'prefix': 'UW ' + str(user.company_id.id)})\n self.env['ir.model.data'].sudo().create({'name': \n 'journal_sequence' + str(new_sequence.id), 'model':\n 'ir.sequence', 'module': 'pos_retail', 'res_id':\n new_sequence.id, 'noupdate': True})\n wallet_journal = Journal.sudo().create({'name': 'Wallet',\n 'code': 'UWJ', 'type': 'cash', 'pos_method_type': 'wallet',\n 'journal_user': True, 'sequence_id': new_sequence.id,\n 'company_id': user.company_id.id,\n 'default_debit_account_id': wallet_account.id,\n 'default_credit_account_id': wallet_account.id, 'sequence':\n 100})\n self.env['ir.model.data'].sudo().create({'name': \n 'use_wallet_journal_' + str(wallet_journal.id), 'model':\n 'account.journal', 'module': 'pos_retail', 'res_id': int(\n wallet_journal.id), 'noupdate': True})\n config = self\n config.sudo().write({'journal_ids': [(4, wallet_journal.id)]})\n statement = [(0, 0, {'journal_id': wallet_journal.id, 'user_id':\n user.id, 'company_id': user.company_id.id})]\n current_session = config.current_session_id\n current_session.sudo().write({'statement_ids': statement})\n return\n <function token>\n <function token>\n\n def init_return_order_journal(self):\n Journal = self.env['account.journal']\n user = self.env.user\n return_journal = Journal.sudo().search([('code', '=', 'ROJ'), (\n 'company_id', '=', user.company_id.id)])\n if return_journal:\n return return_journal.sudo().write({'pos_method_type': 'return'})\n Account = self.env['account.account']\n return_account_old_version = Account.sudo().search([('code', '=',\n 'ARO'), ('company_id', '=', user.company_id.id)])\n if return_account_old_version:\n return_account = return_account_old_version[0]\n else:\n return_account = Account.sudo().create({'name':\n 'Return Order Account', 'code': 'ARO', 'user_type_id': self\n .env.ref('account.data_account_type_current_assets').id,\n 'company_id': user.company_id.id, 'note':\n 'code \"ARO\" give return order from customer'})\n self.env['ir.model.data'].sudo().create({'name': \n 'return_account' + str(user.company_id.id), 'model':\n 'account.account', 'module': 'pos_retail', 'res_id':\n return_account.id, 'noupdate': True})\n return_journal = Journal.sudo().search([('code', '=', 'ROJ'), (\n 'company_id', '=', user.company_id.id)])\n if return_journal:\n return_journal[0].sudo().write({'default_debit_account_id':\n return_account.id, 'default_credit_account_id':\n return_account.id, 'pos_method_type': 'return'})\n return_journal = return_journal[0]\n else:\n new_sequence = self.env['ir.sequence'].sudo().create({'name': \n 'Return account ' + str(user.company_id.id), 'padding': 3,\n 'prefix': 'RA ' + str(user.company_id.id)})\n self.env['ir.model.data'].sudo().create({'name': \n 'journal_sequence' + str(new_sequence.id), 'model':\n 'ir.sequence', 'module': 'pos_retail', 'res_id':\n new_sequence.id, 'noupdate': True})\n return_journal = Journal.sudo().create({'name':\n 'Return Order Customer', 'code': 'ROJ', 'type': 'cash',\n 'pos_method_type': 'return', 'journal_user': True,\n 'sequence_id': new_sequence.id, 'company_id': user.\n company_id.id, 'default_debit_account_id': return_account.\n id, 'default_credit_account_id': return_account.id,\n 'sequence': 103})\n self.env['ir.model.data'].sudo().create({'name': \n 'return_journal_' + str(return_journal.id), 'model':\n 'account.journal', 'module': 'pos_retail', 'res_id': int(\n return_journal.id), 'noupdate': True})\n config = self\n config.sudo().write({'journal_ids': [(4, return_journal.id)]})\n statement = [(0, 0, {'journal_id': return_journal.id, 'user_id':\n user.id, 'company_id': user.company_id.id})]\n current_session = config.current_session_id\n current_session.sudo().write({'statement_ids': statement})\n return True\n <function token>\n\n @api.multi\n def open_ui(self):\n res = super(pos_config, self).open_ui()\n self.init_voucher_journal()\n self.init_wallet_journal()\n self.init_credit_journal()\n self.init_return_order_journal()\n self.init_rounding_journal()\n return res\n\n @api.multi\n def open_session_cb(self):\n res = super(pos_config, self).open_session_cb()\n self.init_voucher_journal()\n self.init_wallet_journal()\n self.init_credit_journal()\n self.init_return_order_journal()\n self.init_rounding_journal()\n return res\n",
"<import token>\n<code token>\n<assignment token>\n<class token>\n\n\nclass pos_config(models.Model):\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n\n @api.multi\n def remove_database(self):\n for config in self:\n sessions = self.env['pos.session'].search([('config_id', '=',\n config.id)])\n for session in sessions:\n self.env['bus.bus'].sendmany([[(self.env.cr.dbname,\n 'pos.indexed_db', session.user_id.id), json.dumps({'db':\n self.env.cr.dbname})]])\n self.env['pos.cache.database'].search([]).unlink()\n self.env['pos.call.log'].search([]).unlink()\n return {'type': 'ir.actions.act_url', 'url': '/pos/web/',\n 'target': 'self'}\n\n @api.multi\n def remove_caches(self):\n for config in self:\n sessions = self.env['pos.session'].search([('config_id', '=',\n config.id)])\n for session in sessions:\n self.env['bus.bus'].sendmany([[(self.env.cr.dbname,\n 'pos.indexed_db', session.user_id.id), json.dumps({'db':\n self.env.cr.dbname})]])\n if session.state != 'closed':\n session.action_pos_session_closing_control()\n return {'type': 'ir.actions.act_url', 'url': '/pos/web/',\n 'target': 'self'}\n\n @api.model\n def store_cached_file(self, datas):\n start = timeit.default_timer()\n _logger.info('==> begin cached_file')\n os.chdir(os.path.dirname(__file__))\n path = os.getcwd()\n file_name = path + '/pos.json'\n if os.path.exists(file_name):\n os.remove(file_name)\n with io.open(file_name, 'w', encoding='utf8') as outfile:\n str_ = json.dumps(datas, indent=4, sort_keys=True, separators=(\n ',', ': '), ensure_ascii=False)\n outfile.write(to_unicode(str_))\n stop = timeit.default_timer()\n _logger.info(stop - start)\n return True\n\n @api.model\n def get_cached_file(self):\n start = timeit.default_timer()\n _logger.info('==> begin get_cached_file')\n os.chdir(os.path.dirname(__file__))\n path = os.getcwd()\n file_name = path + '/pos.json'\n if not os.path.exists(file_name):\n return False\n else:\n with open(file_name) as f:\n datas = json.load(f)\n stop = timeit.default_timer()\n _logger.info(stop - start)\n return datas\n\n def get_fields_by_model(self, model):\n all_fields = self.env[model].fields_get()\n fields_list = []\n for field, value in all_fields.items():\n if field == 'model' or all_fields[field]['type'] in ['one2many',\n 'binary']:\n continue\n else:\n fields_list.append(field)\n return fields_list\n\n @api.model\n def install_data(self, model_name=None, min_id=0, max_id=1999):\n cache_obj = self.env['pos.cache.database'].with_context(prefetch_fields\n =False)\n log_obj = self.env['pos.call.log'].with_context(prefetch_fields=False)\n domain = [('id', '>=', min_id), ('id', '<=', max_id)]\n if model_name == 'product.product':\n domain.append(('available_in_pos', '=', True))\n field_list = cache_obj.get_fields_by_model(model_name)\n self.env.cr.execute(\n \"select id from pos_call_log where min_id=%s and max_id=%s and call_model='%s'\"\n % (min_id, max_id, model_name))\n old_logs = self.env.cr.fetchall()\n datas = None\n if len(old_logs) == 0:\n _logger.info('installing %s from %s to %s' % (model_name,\n min_id, max_id))\n datas = self.env[model_name].with_context(prefetch_fields=False\n ).search_read(domain, field_list)\n version_info = odoo.release.version_info[0]\n if version_info == 12:\n all_fields = self.env[model_name].fields_get()\n for data in datas:\n for field, value in data.items():\n if field == 'model':\n continue\n if all_fields[field] and all_fields[field]['type'] in [\n 'date', 'datetime'] and value:\n data[field] = value.strftime(\n DEFAULT_SERVER_DATETIME_FORMAT)\n vals = {'active': True, 'min_id': min_id, 'max_id': max_id,\n 'call_fields': json.dumps(field_list), 'call_results': json\n .dumps(datas), 'call_model': model_name, 'call_domain':\n json.dumps(domain)}\n log_obj.create(vals)\n else:\n old_log_id = old_logs[0][0]\n old_log = log_obj.browse(old_log_id)\n datas = old_log.call_results\n self.env.cr.commit()\n return datas\n\n @api.onchange('lock_print_invoice_on_pos')\n def _onchange_lock_print_invoice_on_pos(self):\n if self.lock_print_invoice_on_pos == True:\n self.receipt_invoice_number = False\n self.send_invoice_email = True\n else:\n self.receipt_invoice_number = True\n self.send_invoice_email = False\n <function token>\n\n @api.onchange('pos_auto_invoice')\n def _onchange_pos_auto_invoice(self):\n if self.pos_auto_invoice == True:\n self.iface_invoicing = True\n else:\n self.iface_invoicing = False\n\n @api.onchange('staff_level')\n def on_change_staff_level(self):\n if self.staff_level and self.staff_level == 'manager':\n self.lock_order_printed_receipt = False\n\n @api.multi\n def write(self, vals):\n if vals.get('allow_discount', False) or vals.get('allow_qty', False\n ) or vals.get('allow_price', False):\n vals['allow_numpad'] = True\n if vals.get('expired_days_voucher', None) and vals.get(\n 'expired_days_voucher') < 0:\n raise UserError('Expired days of voucher could not smaller than 0')\n for config in self:\n if vals.get('management_session', False) and not vals.get(\n 'default_cashbox_lines_ids'):\n if (not config.default_cashbox_lines_ids and not config.\n cash_control):\n raise UserError(\n 'Please go to Cash control and add Default Opening')\n res = super(pos_config, self).write(vals)\n for config in self:\n if (config.validate_by_user_id and not config.\n validate_by_user_id.pos_security_pin):\n raise UserError(\n 'Validate user %s have not set pos security pin, please go to Users menu and input security password'\n % config.validate_by_user_id.name)\n if (config.discount_unlock_limit_user_id and not config.\n discount_unlock_limit_user_id.pos_security_pin):\n raise UserError(\n 'User Unlock limit discount: %s ,have not set pos security pin, please go to Users menu and input security password'\n % config.discount_unlock_limit_user_id.name)\n return res\n <function token>\n\n def init_wallet_journal(self):\n Journal = self.env['account.journal']\n user = self.env.user\n wallet_journal = Journal.sudo().search([('code', '=', 'UWJ'), (\n 'company_id', '=', user.company_id.id)])\n if wallet_journal:\n return wallet_journal.sudo().write({'pos_method_type': 'wallet'})\n Account = self.env['account.account']\n wallet_account_old_version = Account.sudo().search([('code', '=',\n 'AUW'), ('company_id', '=', user.company_id.id)])\n if wallet_account_old_version:\n wallet_account = wallet_account_old_version[0]\n else:\n wallet_account = Account.sudo().create({'name':\n 'Account wallet', 'code': 'AUW', 'user_type_id': self.env.\n ref('account.data_account_type_current_assets').id,\n 'company_id': user.company_id.id, 'note':\n 'code \"AUW\" auto give wallet amount of customers'})\n self.env['ir.model.data'].sudo().create({'name': \n 'account_use_wallet' + str(user.company_id.id), 'model':\n 'account.account', 'module': 'pos_retail', 'res_id':\n wallet_account.id, 'noupdate': True})\n wallet_journal_inactive = Journal.sudo().search([('code', '=',\n 'UWJ'), ('company_id', '=', user.company_id.id), (\n 'pos_method_type', '=', 'wallet')])\n if wallet_journal_inactive:\n wallet_journal_inactive.sudo().write({\n 'default_debit_account_id': wallet_account.id,\n 'default_credit_account_id': wallet_account.id,\n 'pos_method_type': 'wallet', 'sequence': 100})\n wallet_journal = wallet_journal_inactive\n else:\n new_sequence = self.env['ir.sequence'].sudo().create({'name': \n 'Account Default Wallet Journal ' + str(user.company_id.id),\n 'padding': 3, 'prefix': 'UW ' + str(user.company_id.id)})\n self.env['ir.model.data'].sudo().create({'name': \n 'journal_sequence' + str(new_sequence.id), 'model':\n 'ir.sequence', 'module': 'pos_retail', 'res_id':\n new_sequence.id, 'noupdate': True})\n wallet_journal = Journal.sudo().create({'name': 'Wallet',\n 'code': 'UWJ', 'type': 'cash', 'pos_method_type': 'wallet',\n 'journal_user': True, 'sequence_id': new_sequence.id,\n 'company_id': user.company_id.id,\n 'default_debit_account_id': wallet_account.id,\n 'default_credit_account_id': wallet_account.id, 'sequence':\n 100})\n self.env['ir.model.data'].sudo().create({'name': \n 'use_wallet_journal_' + str(wallet_journal.id), 'model':\n 'account.journal', 'module': 'pos_retail', 'res_id': int(\n wallet_journal.id), 'noupdate': True})\n config = self\n config.sudo().write({'journal_ids': [(4, wallet_journal.id)]})\n statement = [(0, 0, {'journal_id': wallet_journal.id, 'user_id':\n user.id, 'company_id': user.company_id.id})]\n current_session = config.current_session_id\n current_session.sudo().write({'statement_ids': statement})\n return\n <function token>\n <function token>\n\n def init_return_order_journal(self):\n Journal = self.env['account.journal']\n user = self.env.user\n return_journal = Journal.sudo().search([('code', '=', 'ROJ'), (\n 'company_id', '=', user.company_id.id)])\n if return_journal:\n return return_journal.sudo().write({'pos_method_type': 'return'})\n Account = self.env['account.account']\n return_account_old_version = Account.sudo().search([('code', '=',\n 'ARO'), ('company_id', '=', user.company_id.id)])\n if return_account_old_version:\n return_account = return_account_old_version[0]\n else:\n return_account = Account.sudo().create({'name':\n 'Return Order Account', 'code': 'ARO', 'user_type_id': self\n .env.ref('account.data_account_type_current_assets').id,\n 'company_id': user.company_id.id, 'note':\n 'code \"ARO\" give return order from customer'})\n self.env['ir.model.data'].sudo().create({'name': \n 'return_account' + str(user.company_id.id), 'model':\n 'account.account', 'module': 'pos_retail', 'res_id':\n return_account.id, 'noupdate': True})\n return_journal = Journal.sudo().search([('code', '=', 'ROJ'), (\n 'company_id', '=', user.company_id.id)])\n if return_journal:\n return_journal[0].sudo().write({'default_debit_account_id':\n return_account.id, 'default_credit_account_id':\n return_account.id, 'pos_method_type': 'return'})\n return_journal = return_journal[0]\n else:\n new_sequence = self.env['ir.sequence'].sudo().create({'name': \n 'Return account ' + str(user.company_id.id), 'padding': 3,\n 'prefix': 'RA ' + str(user.company_id.id)})\n self.env['ir.model.data'].sudo().create({'name': \n 'journal_sequence' + str(new_sequence.id), 'model':\n 'ir.sequence', 'module': 'pos_retail', 'res_id':\n new_sequence.id, 'noupdate': True})\n return_journal = Journal.sudo().create({'name':\n 'Return Order Customer', 'code': 'ROJ', 'type': 'cash',\n 'pos_method_type': 'return', 'journal_user': True,\n 'sequence_id': new_sequence.id, 'company_id': user.\n company_id.id, 'default_debit_account_id': return_account.\n id, 'default_credit_account_id': return_account.id,\n 'sequence': 103})\n self.env['ir.model.data'].sudo().create({'name': \n 'return_journal_' + str(return_journal.id), 'model':\n 'account.journal', 'module': 'pos_retail', 'res_id': int(\n return_journal.id), 'noupdate': True})\n config = self\n config.sudo().write({'journal_ids': [(4, return_journal.id)]})\n statement = [(0, 0, {'journal_id': return_journal.id, 'user_id':\n user.id, 'company_id': user.company_id.id})]\n current_session = config.current_session_id\n current_session.sudo().write({'statement_ids': statement})\n return True\n <function token>\n\n @api.multi\n def open_ui(self):\n res = super(pos_config, self).open_ui()\n self.init_voucher_journal()\n self.init_wallet_journal()\n self.init_credit_journal()\n self.init_return_order_journal()\n self.init_rounding_journal()\n return res\n\n @api.multi\n def open_session_cb(self):\n res = super(pos_config, self).open_session_cb()\n self.init_voucher_journal()\n self.init_wallet_journal()\n self.init_credit_journal()\n self.init_return_order_journal()\n self.init_rounding_journal()\n return res\n",
"<import token>\n<code token>\n<assignment token>\n<class token>\n\n\nclass pos_config(models.Model):\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n <function token>\n\n @api.multi\n def remove_caches(self):\n for config in self:\n sessions = self.env['pos.session'].search([('config_id', '=',\n config.id)])\n for session in sessions:\n self.env['bus.bus'].sendmany([[(self.env.cr.dbname,\n 'pos.indexed_db', session.user_id.id), json.dumps({'db':\n self.env.cr.dbname})]])\n if session.state != 'closed':\n session.action_pos_session_closing_control()\n return {'type': 'ir.actions.act_url', 'url': '/pos/web/',\n 'target': 'self'}\n\n @api.model\n def store_cached_file(self, datas):\n start = timeit.default_timer()\n _logger.info('==> begin cached_file')\n os.chdir(os.path.dirname(__file__))\n path = os.getcwd()\n file_name = path + '/pos.json'\n if os.path.exists(file_name):\n os.remove(file_name)\n with io.open(file_name, 'w', encoding='utf8') as outfile:\n str_ = json.dumps(datas, indent=4, sort_keys=True, separators=(\n ',', ': '), ensure_ascii=False)\n outfile.write(to_unicode(str_))\n stop = timeit.default_timer()\n _logger.info(stop - start)\n return True\n\n @api.model\n def get_cached_file(self):\n start = timeit.default_timer()\n _logger.info('==> begin get_cached_file')\n os.chdir(os.path.dirname(__file__))\n path = os.getcwd()\n file_name = path + '/pos.json'\n if not os.path.exists(file_name):\n return False\n else:\n with open(file_name) as f:\n datas = json.load(f)\n stop = timeit.default_timer()\n _logger.info(stop - start)\n return datas\n\n def get_fields_by_model(self, model):\n all_fields = self.env[model].fields_get()\n fields_list = []\n for field, value in all_fields.items():\n if field == 'model' or all_fields[field]['type'] in ['one2many',\n 'binary']:\n continue\n else:\n fields_list.append(field)\n return fields_list\n\n @api.model\n def install_data(self, model_name=None, min_id=0, max_id=1999):\n cache_obj = self.env['pos.cache.database'].with_context(prefetch_fields\n =False)\n log_obj = self.env['pos.call.log'].with_context(prefetch_fields=False)\n domain = [('id', '>=', min_id), ('id', '<=', max_id)]\n if model_name == 'product.product':\n domain.append(('available_in_pos', '=', True))\n field_list = cache_obj.get_fields_by_model(model_name)\n self.env.cr.execute(\n \"select id from pos_call_log where min_id=%s and max_id=%s and call_model='%s'\"\n % (min_id, max_id, model_name))\n old_logs = self.env.cr.fetchall()\n datas = None\n if len(old_logs) == 0:\n _logger.info('installing %s from %s to %s' % (model_name,\n min_id, max_id))\n datas = self.env[model_name].with_context(prefetch_fields=False\n ).search_read(domain, field_list)\n version_info = odoo.release.version_info[0]\n if version_info == 12:\n all_fields = self.env[model_name].fields_get()\n for data in datas:\n for field, value in data.items():\n if field == 'model':\n continue\n if all_fields[field] and all_fields[field]['type'] in [\n 'date', 'datetime'] and value:\n data[field] = value.strftime(\n DEFAULT_SERVER_DATETIME_FORMAT)\n vals = {'active': True, 'min_id': min_id, 'max_id': max_id,\n 'call_fields': json.dumps(field_list), 'call_results': json\n .dumps(datas), 'call_model': model_name, 'call_domain':\n json.dumps(domain)}\n log_obj.create(vals)\n else:\n old_log_id = old_logs[0][0]\n old_log = log_obj.browse(old_log_id)\n datas = old_log.call_results\n self.env.cr.commit()\n return datas\n\n @api.onchange('lock_print_invoice_on_pos')\n def _onchange_lock_print_invoice_on_pos(self):\n if self.lock_print_invoice_on_pos == True:\n self.receipt_invoice_number = False\n self.send_invoice_email = True\n else:\n self.receipt_invoice_number = True\n self.send_invoice_email = False\n <function token>\n\n @api.onchange('pos_auto_invoice')\n def _onchange_pos_auto_invoice(self):\n if self.pos_auto_invoice == True:\n self.iface_invoicing = True\n else:\n self.iface_invoicing = False\n\n @api.onchange('staff_level')\n def on_change_staff_level(self):\n if self.staff_level and self.staff_level == 'manager':\n self.lock_order_printed_receipt = False\n\n @api.multi\n def write(self, vals):\n if vals.get('allow_discount', False) or vals.get('allow_qty', False\n ) or vals.get('allow_price', False):\n vals['allow_numpad'] = True\n if vals.get('expired_days_voucher', None) and vals.get(\n 'expired_days_voucher') < 0:\n raise UserError('Expired days of voucher could not smaller than 0')\n for config in self:\n if vals.get('management_session', False) and not vals.get(\n 'default_cashbox_lines_ids'):\n if (not config.default_cashbox_lines_ids and not config.\n cash_control):\n raise UserError(\n 'Please go to Cash control and add Default Opening')\n res = super(pos_config, self).write(vals)\n for config in self:\n if (config.validate_by_user_id and not config.\n validate_by_user_id.pos_security_pin):\n raise UserError(\n 'Validate user %s have not set pos security pin, please go to Users menu and input security password'\n % config.validate_by_user_id.name)\n if (config.discount_unlock_limit_user_id and not config.\n discount_unlock_limit_user_id.pos_security_pin):\n raise UserError(\n 'User Unlock limit discount: %s ,have not set pos security pin, please go to Users menu and input security password'\n % config.discount_unlock_limit_user_id.name)\n return res\n <function token>\n\n def init_wallet_journal(self):\n Journal = self.env['account.journal']\n user = self.env.user\n wallet_journal = Journal.sudo().search([('code', '=', 'UWJ'), (\n 'company_id', '=', user.company_id.id)])\n if wallet_journal:\n return wallet_journal.sudo().write({'pos_method_type': 'wallet'})\n Account = self.env['account.account']\n wallet_account_old_version = Account.sudo().search([('code', '=',\n 'AUW'), ('company_id', '=', user.company_id.id)])\n if wallet_account_old_version:\n wallet_account = wallet_account_old_version[0]\n else:\n wallet_account = Account.sudo().create({'name':\n 'Account wallet', 'code': 'AUW', 'user_type_id': self.env.\n ref('account.data_account_type_current_assets').id,\n 'company_id': user.company_id.id, 'note':\n 'code \"AUW\" auto give wallet amount of customers'})\n self.env['ir.model.data'].sudo().create({'name': \n 'account_use_wallet' + str(user.company_id.id), 'model':\n 'account.account', 'module': 'pos_retail', 'res_id':\n wallet_account.id, 'noupdate': True})\n wallet_journal_inactive = Journal.sudo().search([('code', '=',\n 'UWJ'), ('company_id', '=', user.company_id.id), (\n 'pos_method_type', '=', 'wallet')])\n if wallet_journal_inactive:\n wallet_journal_inactive.sudo().write({\n 'default_debit_account_id': wallet_account.id,\n 'default_credit_account_id': wallet_account.id,\n 'pos_method_type': 'wallet', 'sequence': 100})\n wallet_journal = wallet_journal_inactive\n else:\n new_sequence = self.env['ir.sequence'].sudo().create({'name': \n 'Account Default Wallet Journal ' + str(user.company_id.id),\n 'padding': 3, 'prefix': 'UW ' + str(user.company_id.id)})\n self.env['ir.model.data'].sudo().create({'name': \n 'journal_sequence' + str(new_sequence.id), 'model':\n 'ir.sequence', 'module': 'pos_retail', 'res_id':\n new_sequence.id, 'noupdate': True})\n wallet_journal = Journal.sudo().create({'name': 'Wallet',\n 'code': 'UWJ', 'type': 'cash', 'pos_method_type': 'wallet',\n 'journal_user': True, 'sequence_id': new_sequence.id,\n 'company_id': user.company_id.id,\n 'default_debit_account_id': wallet_account.id,\n 'default_credit_account_id': wallet_account.id, 'sequence':\n 100})\n self.env['ir.model.data'].sudo().create({'name': \n 'use_wallet_journal_' + str(wallet_journal.id), 'model':\n 'account.journal', 'module': 'pos_retail', 'res_id': int(\n wallet_journal.id), 'noupdate': True})\n config = self\n config.sudo().write({'journal_ids': [(4, wallet_journal.id)]})\n statement = [(0, 0, {'journal_id': wallet_journal.id, 'user_id':\n user.id, 'company_id': user.company_id.id})]\n current_session = config.current_session_id\n current_session.sudo().write({'statement_ids': statement})\n return\n <function token>\n <function token>\n\n def init_return_order_journal(self):\n Journal = self.env['account.journal']\n user = self.env.user\n return_journal = Journal.sudo().search([('code', '=', 'ROJ'), (\n 'company_id', '=', user.company_id.id)])\n if return_journal:\n return return_journal.sudo().write({'pos_method_type': 'return'})\n Account = self.env['account.account']\n return_account_old_version = Account.sudo().search([('code', '=',\n 'ARO'), ('company_id', '=', user.company_id.id)])\n if return_account_old_version:\n return_account = return_account_old_version[0]\n else:\n return_account = Account.sudo().create({'name':\n 'Return Order Account', 'code': 'ARO', 'user_type_id': self\n .env.ref('account.data_account_type_current_assets').id,\n 'company_id': user.company_id.id, 'note':\n 'code \"ARO\" give return order from customer'})\n self.env['ir.model.data'].sudo().create({'name': \n 'return_account' + str(user.company_id.id), 'model':\n 'account.account', 'module': 'pos_retail', 'res_id':\n return_account.id, 'noupdate': True})\n return_journal = Journal.sudo().search([('code', '=', 'ROJ'), (\n 'company_id', '=', user.company_id.id)])\n if return_journal:\n return_journal[0].sudo().write({'default_debit_account_id':\n return_account.id, 'default_credit_account_id':\n return_account.id, 'pos_method_type': 'return'})\n return_journal = return_journal[0]\n else:\n new_sequence = self.env['ir.sequence'].sudo().create({'name': \n 'Return account ' + str(user.company_id.id), 'padding': 3,\n 'prefix': 'RA ' + str(user.company_id.id)})\n self.env['ir.model.data'].sudo().create({'name': \n 'journal_sequence' + str(new_sequence.id), 'model':\n 'ir.sequence', 'module': 'pos_retail', 'res_id':\n new_sequence.id, 'noupdate': True})\n return_journal = Journal.sudo().create({'name':\n 'Return Order Customer', 'code': 'ROJ', 'type': 'cash',\n 'pos_method_type': 'return', 'journal_user': True,\n 'sequence_id': new_sequence.id, 'company_id': user.\n company_id.id, 'default_debit_account_id': return_account.\n id, 'default_credit_account_id': return_account.id,\n 'sequence': 103})\n self.env['ir.model.data'].sudo().create({'name': \n 'return_journal_' + str(return_journal.id), 'model':\n 'account.journal', 'module': 'pos_retail', 'res_id': int(\n return_journal.id), 'noupdate': True})\n config = self\n config.sudo().write({'journal_ids': [(4, return_journal.id)]})\n statement = [(0, 0, {'journal_id': return_journal.id, 'user_id':\n user.id, 'company_id': user.company_id.id})]\n current_session = config.current_session_id\n current_session.sudo().write({'statement_ids': statement})\n return True\n <function token>\n\n @api.multi\n def open_ui(self):\n res = super(pos_config, self).open_ui()\n self.init_voucher_journal()\n self.init_wallet_journal()\n self.init_credit_journal()\n self.init_return_order_journal()\n self.init_rounding_journal()\n return res\n\n @api.multi\n def open_session_cb(self):\n res = super(pos_config, self).open_session_cb()\n self.init_voucher_journal()\n self.init_wallet_journal()\n self.init_credit_journal()\n self.init_return_order_journal()\n self.init_rounding_journal()\n return res\n",
"<import token>\n<code token>\n<assignment token>\n<class token>\n\n\nclass pos_config(models.Model):\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n <function token>\n\n @api.multi\n def remove_caches(self):\n for config in self:\n sessions = self.env['pos.session'].search([('config_id', '=',\n config.id)])\n for session in sessions:\n self.env['bus.bus'].sendmany([[(self.env.cr.dbname,\n 'pos.indexed_db', session.user_id.id), json.dumps({'db':\n self.env.cr.dbname})]])\n if session.state != 'closed':\n session.action_pos_session_closing_control()\n return {'type': 'ir.actions.act_url', 'url': '/pos/web/',\n 'target': 'self'}\n\n @api.model\n def store_cached_file(self, datas):\n start = timeit.default_timer()\n _logger.info('==> begin cached_file')\n os.chdir(os.path.dirname(__file__))\n path = os.getcwd()\n file_name = path + '/pos.json'\n if os.path.exists(file_name):\n os.remove(file_name)\n with io.open(file_name, 'w', encoding='utf8') as outfile:\n str_ = json.dumps(datas, indent=4, sort_keys=True, separators=(\n ',', ': '), ensure_ascii=False)\n outfile.write(to_unicode(str_))\n stop = timeit.default_timer()\n _logger.info(stop - start)\n return True\n\n @api.model\n def get_cached_file(self):\n start = timeit.default_timer()\n _logger.info('==> begin get_cached_file')\n os.chdir(os.path.dirname(__file__))\n path = os.getcwd()\n file_name = path + '/pos.json'\n if not os.path.exists(file_name):\n return False\n else:\n with open(file_name) as f:\n datas = json.load(f)\n stop = timeit.default_timer()\n _logger.info(stop - start)\n return datas\n\n def get_fields_by_model(self, model):\n all_fields = self.env[model].fields_get()\n fields_list = []\n for field, value in all_fields.items():\n if field == 'model' or all_fields[field]['type'] in ['one2many',\n 'binary']:\n continue\n else:\n fields_list.append(field)\n return fields_list\n\n @api.model\n def install_data(self, model_name=None, min_id=0, max_id=1999):\n cache_obj = self.env['pos.cache.database'].with_context(prefetch_fields\n =False)\n log_obj = self.env['pos.call.log'].with_context(prefetch_fields=False)\n domain = [('id', '>=', min_id), ('id', '<=', max_id)]\n if model_name == 'product.product':\n domain.append(('available_in_pos', '=', True))\n field_list = cache_obj.get_fields_by_model(model_name)\n self.env.cr.execute(\n \"select id from pos_call_log where min_id=%s and max_id=%s and call_model='%s'\"\n % (min_id, max_id, model_name))\n old_logs = self.env.cr.fetchall()\n datas = None\n if len(old_logs) == 0:\n _logger.info('installing %s from %s to %s' % (model_name,\n min_id, max_id))\n datas = self.env[model_name].with_context(prefetch_fields=False\n ).search_read(domain, field_list)\n version_info = odoo.release.version_info[0]\n if version_info == 12:\n all_fields = self.env[model_name].fields_get()\n for data in datas:\n for field, value in data.items():\n if field == 'model':\n continue\n if all_fields[field] and all_fields[field]['type'] in [\n 'date', 'datetime'] and value:\n data[field] = value.strftime(\n DEFAULT_SERVER_DATETIME_FORMAT)\n vals = {'active': True, 'min_id': min_id, 'max_id': max_id,\n 'call_fields': json.dumps(field_list), 'call_results': json\n .dumps(datas), 'call_model': model_name, 'call_domain':\n json.dumps(domain)}\n log_obj.create(vals)\n else:\n old_log_id = old_logs[0][0]\n old_log = log_obj.browse(old_log_id)\n datas = old_log.call_results\n self.env.cr.commit()\n return datas\n\n @api.onchange('lock_print_invoice_on_pos')\n def _onchange_lock_print_invoice_on_pos(self):\n if self.lock_print_invoice_on_pos == True:\n self.receipt_invoice_number = False\n self.send_invoice_email = True\n else:\n self.receipt_invoice_number = True\n self.send_invoice_email = False\n <function token>\n\n @api.onchange('pos_auto_invoice')\n def _onchange_pos_auto_invoice(self):\n if self.pos_auto_invoice == True:\n self.iface_invoicing = True\n else:\n self.iface_invoicing = False\n\n @api.onchange('staff_level')\n def on_change_staff_level(self):\n if self.staff_level and self.staff_level == 'manager':\n self.lock_order_printed_receipt = False\n\n @api.multi\n def write(self, vals):\n if vals.get('allow_discount', False) or vals.get('allow_qty', False\n ) or vals.get('allow_price', False):\n vals['allow_numpad'] = True\n if vals.get('expired_days_voucher', None) and vals.get(\n 'expired_days_voucher') < 0:\n raise UserError('Expired days of voucher could not smaller than 0')\n for config in self:\n if vals.get('management_session', False) and not vals.get(\n 'default_cashbox_lines_ids'):\n if (not config.default_cashbox_lines_ids and not config.\n cash_control):\n raise UserError(\n 'Please go to Cash control and add Default Opening')\n res = super(pos_config, self).write(vals)\n for config in self:\n if (config.validate_by_user_id and not config.\n validate_by_user_id.pos_security_pin):\n raise UserError(\n 'Validate user %s have not set pos security pin, please go to Users menu and input security password'\n % config.validate_by_user_id.name)\n if (config.discount_unlock_limit_user_id and not config.\n discount_unlock_limit_user_id.pos_security_pin):\n raise UserError(\n 'User Unlock limit discount: %s ,have not set pos security pin, please go to Users menu and input security password'\n % config.discount_unlock_limit_user_id.name)\n return res\n <function token>\n\n def init_wallet_journal(self):\n Journal = self.env['account.journal']\n user = self.env.user\n wallet_journal = Journal.sudo().search([('code', '=', 'UWJ'), (\n 'company_id', '=', user.company_id.id)])\n if wallet_journal:\n return wallet_journal.sudo().write({'pos_method_type': 'wallet'})\n Account = self.env['account.account']\n wallet_account_old_version = Account.sudo().search([('code', '=',\n 'AUW'), ('company_id', '=', user.company_id.id)])\n if wallet_account_old_version:\n wallet_account = wallet_account_old_version[0]\n else:\n wallet_account = Account.sudo().create({'name':\n 'Account wallet', 'code': 'AUW', 'user_type_id': self.env.\n ref('account.data_account_type_current_assets').id,\n 'company_id': user.company_id.id, 'note':\n 'code \"AUW\" auto give wallet amount of customers'})\n self.env['ir.model.data'].sudo().create({'name': \n 'account_use_wallet' + str(user.company_id.id), 'model':\n 'account.account', 'module': 'pos_retail', 'res_id':\n wallet_account.id, 'noupdate': True})\n wallet_journal_inactive = Journal.sudo().search([('code', '=',\n 'UWJ'), ('company_id', '=', user.company_id.id), (\n 'pos_method_type', '=', 'wallet')])\n if wallet_journal_inactive:\n wallet_journal_inactive.sudo().write({\n 'default_debit_account_id': wallet_account.id,\n 'default_credit_account_id': wallet_account.id,\n 'pos_method_type': 'wallet', 'sequence': 100})\n wallet_journal = wallet_journal_inactive\n else:\n new_sequence = self.env['ir.sequence'].sudo().create({'name': \n 'Account Default Wallet Journal ' + str(user.company_id.id),\n 'padding': 3, 'prefix': 'UW ' + str(user.company_id.id)})\n self.env['ir.model.data'].sudo().create({'name': \n 'journal_sequence' + str(new_sequence.id), 'model':\n 'ir.sequence', 'module': 'pos_retail', 'res_id':\n new_sequence.id, 'noupdate': True})\n wallet_journal = Journal.sudo().create({'name': 'Wallet',\n 'code': 'UWJ', 'type': 'cash', 'pos_method_type': 'wallet',\n 'journal_user': True, 'sequence_id': new_sequence.id,\n 'company_id': user.company_id.id,\n 'default_debit_account_id': wallet_account.id,\n 'default_credit_account_id': wallet_account.id, 'sequence':\n 100})\n self.env['ir.model.data'].sudo().create({'name': \n 'use_wallet_journal_' + str(wallet_journal.id), 'model':\n 'account.journal', 'module': 'pos_retail', 'res_id': int(\n wallet_journal.id), 'noupdate': True})\n config = self\n config.sudo().write({'journal_ids': [(4, wallet_journal.id)]})\n statement = [(0, 0, {'journal_id': wallet_journal.id, 'user_id':\n user.id, 'company_id': user.company_id.id})]\n current_session = config.current_session_id\n current_session.sudo().write({'statement_ids': statement})\n return\n <function token>\n <function token>\n\n def init_return_order_journal(self):\n Journal = self.env['account.journal']\n user = self.env.user\n return_journal = Journal.sudo().search([('code', '=', 'ROJ'), (\n 'company_id', '=', user.company_id.id)])\n if return_journal:\n return return_journal.sudo().write({'pos_method_type': 'return'})\n Account = self.env['account.account']\n return_account_old_version = Account.sudo().search([('code', '=',\n 'ARO'), ('company_id', '=', user.company_id.id)])\n if return_account_old_version:\n return_account = return_account_old_version[0]\n else:\n return_account = Account.sudo().create({'name':\n 'Return Order Account', 'code': 'ARO', 'user_type_id': self\n .env.ref('account.data_account_type_current_assets').id,\n 'company_id': user.company_id.id, 'note':\n 'code \"ARO\" give return order from customer'})\n self.env['ir.model.data'].sudo().create({'name': \n 'return_account' + str(user.company_id.id), 'model':\n 'account.account', 'module': 'pos_retail', 'res_id':\n return_account.id, 'noupdate': True})\n return_journal = Journal.sudo().search([('code', '=', 'ROJ'), (\n 'company_id', '=', user.company_id.id)])\n if return_journal:\n return_journal[0].sudo().write({'default_debit_account_id':\n return_account.id, 'default_credit_account_id':\n return_account.id, 'pos_method_type': 'return'})\n return_journal = return_journal[0]\n else:\n new_sequence = self.env['ir.sequence'].sudo().create({'name': \n 'Return account ' + str(user.company_id.id), 'padding': 3,\n 'prefix': 'RA ' + str(user.company_id.id)})\n self.env['ir.model.data'].sudo().create({'name': \n 'journal_sequence' + str(new_sequence.id), 'model':\n 'ir.sequence', 'module': 'pos_retail', 'res_id':\n new_sequence.id, 'noupdate': True})\n return_journal = Journal.sudo().create({'name':\n 'Return Order Customer', 'code': 'ROJ', 'type': 'cash',\n 'pos_method_type': 'return', 'journal_user': True,\n 'sequence_id': new_sequence.id, 'company_id': user.\n company_id.id, 'default_debit_account_id': return_account.\n id, 'default_credit_account_id': return_account.id,\n 'sequence': 103})\n self.env['ir.model.data'].sudo().create({'name': \n 'return_journal_' + str(return_journal.id), 'model':\n 'account.journal', 'module': 'pos_retail', 'res_id': int(\n return_journal.id), 'noupdate': True})\n config = self\n config.sudo().write({'journal_ids': [(4, return_journal.id)]})\n statement = [(0, 0, {'journal_id': return_journal.id, 'user_id':\n user.id, 'company_id': user.company_id.id})]\n current_session = config.current_session_id\n current_session.sudo().write({'statement_ids': statement})\n return True\n <function token>\n <function token>\n\n @api.multi\n def open_session_cb(self):\n res = super(pos_config, self).open_session_cb()\n self.init_voucher_journal()\n self.init_wallet_journal()\n self.init_credit_journal()\n self.init_return_order_journal()\n self.init_rounding_journal()\n return res\n",
"<import token>\n<code token>\n<assignment token>\n<class token>\n\n\nclass pos_config(models.Model):\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n <function token>\n\n @api.multi\n def remove_caches(self):\n for config in self:\n sessions = self.env['pos.session'].search([('config_id', '=',\n config.id)])\n for session in sessions:\n self.env['bus.bus'].sendmany([[(self.env.cr.dbname,\n 'pos.indexed_db', session.user_id.id), json.dumps({'db':\n self.env.cr.dbname})]])\n if session.state != 'closed':\n session.action_pos_session_closing_control()\n return {'type': 'ir.actions.act_url', 'url': '/pos/web/',\n 'target': 'self'}\n\n @api.model\n def store_cached_file(self, datas):\n start = timeit.default_timer()\n _logger.info('==> begin cached_file')\n os.chdir(os.path.dirname(__file__))\n path = os.getcwd()\n file_name = path + '/pos.json'\n if os.path.exists(file_name):\n os.remove(file_name)\n with io.open(file_name, 'w', encoding='utf8') as outfile:\n str_ = json.dumps(datas, indent=4, sort_keys=True, separators=(\n ',', ': '), ensure_ascii=False)\n outfile.write(to_unicode(str_))\n stop = timeit.default_timer()\n _logger.info(stop - start)\n return True\n\n @api.model\n def get_cached_file(self):\n start = timeit.default_timer()\n _logger.info('==> begin get_cached_file')\n os.chdir(os.path.dirname(__file__))\n path = os.getcwd()\n file_name = path + '/pos.json'\n if not os.path.exists(file_name):\n return False\n else:\n with open(file_name) as f:\n datas = json.load(f)\n stop = timeit.default_timer()\n _logger.info(stop - start)\n return datas\n\n def get_fields_by_model(self, model):\n all_fields = self.env[model].fields_get()\n fields_list = []\n for field, value in all_fields.items():\n if field == 'model' or all_fields[field]['type'] in ['one2many',\n 'binary']:\n continue\n else:\n fields_list.append(field)\n return fields_list\n\n @api.model\n def install_data(self, model_name=None, min_id=0, max_id=1999):\n cache_obj = self.env['pos.cache.database'].with_context(prefetch_fields\n =False)\n log_obj = self.env['pos.call.log'].with_context(prefetch_fields=False)\n domain = [('id', '>=', min_id), ('id', '<=', max_id)]\n if model_name == 'product.product':\n domain.append(('available_in_pos', '=', True))\n field_list = cache_obj.get_fields_by_model(model_name)\n self.env.cr.execute(\n \"select id from pos_call_log where min_id=%s and max_id=%s and call_model='%s'\"\n % (min_id, max_id, model_name))\n old_logs = self.env.cr.fetchall()\n datas = None\n if len(old_logs) == 0:\n _logger.info('installing %s from %s to %s' % (model_name,\n min_id, max_id))\n datas = self.env[model_name].with_context(prefetch_fields=False\n ).search_read(domain, field_list)\n version_info = odoo.release.version_info[0]\n if version_info == 12:\n all_fields = self.env[model_name].fields_get()\n for data in datas:\n for field, value in data.items():\n if field == 'model':\n continue\n if all_fields[field] and all_fields[field]['type'] in [\n 'date', 'datetime'] and value:\n data[field] = value.strftime(\n DEFAULT_SERVER_DATETIME_FORMAT)\n vals = {'active': True, 'min_id': min_id, 'max_id': max_id,\n 'call_fields': json.dumps(field_list), 'call_results': json\n .dumps(datas), 'call_model': model_name, 'call_domain':\n json.dumps(domain)}\n log_obj.create(vals)\n else:\n old_log_id = old_logs[0][0]\n old_log = log_obj.browse(old_log_id)\n datas = old_log.call_results\n self.env.cr.commit()\n return datas\n\n @api.onchange('lock_print_invoice_on_pos')\n def _onchange_lock_print_invoice_on_pos(self):\n if self.lock_print_invoice_on_pos == True:\n self.receipt_invoice_number = False\n self.send_invoice_email = True\n else:\n self.receipt_invoice_number = True\n self.send_invoice_email = False\n <function token>\n\n @api.onchange('pos_auto_invoice')\n def _onchange_pos_auto_invoice(self):\n if self.pos_auto_invoice == True:\n self.iface_invoicing = True\n else:\n self.iface_invoicing = False\n <function token>\n\n @api.multi\n def write(self, vals):\n if vals.get('allow_discount', False) or vals.get('allow_qty', False\n ) or vals.get('allow_price', False):\n vals['allow_numpad'] = True\n if vals.get('expired_days_voucher', None) and vals.get(\n 'expired_days_voucher') < 0:\n raise UserError('Expired days of voucher could not smaller than 0')\n for config in self:\n if vals.get('management_session', False) and not vals.get(\n 'default_cashbox_lines_ids'):\n if (not config.default_cashbox_lines_ids and not config.\n cash_control):\n raise UserError(\n 'Please go to Cash control and add Default Opening')\n res = super(pos_config, self).write(vals)\n for config in self:\n if (config.validate_by_user_id and not config.\n validate_by_user_id.pos_security_pin):\n raise UserError(\n 'Validate user %s have not set pos security pin, please go to Users menu and input security password'\n % config.validate_by_user_id.name)\n if (config.discount_unlock_limit_user_id and not config.\n discount_unlock_limit_user_id.pos_security_pin):\n raise UserError(\n 'User Unlock limit discount: %s ,have not set pos security pin, please go to Users menu and input security password'\n % config.discount_unlock_limit_user_id.name)\n return res\n <function token>\n\n def init_wallet_journal(self):\n Journal = self.env['account.journal']\n user = self.env.user\n wallet_journal = Journal.sudo().search([('code', '=', 'UWJ'), (\n 'company_id', '=', user.company_id.id)])\n if wallet_journal:\n return wallet_journal.sudo().write({'pos_method_type': 'wallet'})\n Account = self.env['account.account']\n wallet_account_old_version = Account.sudo().search([('code', '=',\n 'AUW'), ('company_id', '=', user.company_id.id)])\n if wallet_account_old_version:\n wallet_account = wallet_account_old_version[0]\n else:\n wallet_account = Account.sudo().create({'name':\n 'Account wallet', 'code': 'AUW', 'user_type_id': self.env.\n ref('account.data_account_type_current_assets').id,\n 'company_id': user.company_id.id, 'note':\n 'code \"AUW\" auto give wallet amount of customers'})\n self.env['ir.model.data'].sudo().create({'name': \n 'account_use_wallet' + str(user.company_id.id), 'model':\n 'account.account', 'module': 'pos_retail', 'res_id':\n wallet_account.id, 'noupdate': True})\n wallet_journal_inactive = Journal.sudo().search([('code', '=',\n 'UWJ'), ('company_id', '=', user.company_id.id), (\n 'pos_method_type', '=', 'wallet')])\n if wallet_journal_inactive:\n wallet_journal_inactive.sudo().write({\n 'default_debit_account_id': wallet_account.id,\n 'default_credit_account_id': wallet_account.id,\n 'pos_method_type': 'wallet', 'sequence': 100})\n wallet_journal = wallet_journal_inactive\n else:\n new_sequence = self.env['ir.sequence'].sudo().create({'name': \n 'Account Default Wallet Journal ' + str(user.company_id.id),\n 'padding': 3, 'prefix': 'UW ' + str(user.company_id.id)})\n self.env['ir.model.data'].sudo().create({'name': \n 'journal_sequence' + str(new_sequence.id), 'model':\n 'ir.sequence', 'module': 'pos_retail', 'res_id':\n new_sequence.id, 'noupdate': True})\n wallet_journal = Journal.sudo().create({'name': 'Wallet',\n 'code': 'UWJ', 'type': 'cash', 'pos_method_type': 'wallet',\n 'journal_user': True, 'sequence_id': new_sequence.id,\n 'company_id': user.company_id.id,\n 'default_debit_account_id': wallet_account.id,\n 'default_credit_account_id': wallet_account.id, 'sequence':\n 100})\n self.env['ir.model.data'].sudo().create({'name': \n 'use_wallet_journal_' + str(wallet_journal.id), 'model':\n 'account.journal', 'module': 'pos_retail', 'res_id': int(\n wallet_journal.id), 'noupdate': True})\n config = self\n config.sudo().write({'journal_ids': [(4, wallet_journal.id)]})\n statement = [(0, 0, {'journal_id': wallet_journal.id, 'user_id':\n user.id, 'company_id': user.company_id.id})]\n current_session = config.current_session_id\n current_session.sudo().write({'statement_ids': statement})\n return\n <function token>\n <function token>\n\n def init_return_order_journal(self):\n Journal = self.env['account.journal']\n user = self.env.user\n return_journal = Journal.sudo().search([('code', '=', 'ROJ'), (\n 'company_id', '=', user.company_id.id)])\n if return_journal:\n return return_journal.sudo().write({'pos_method_type': 'return'})\n Account = self.env['account.account']\n return_account_old_version = Account.sudo().search([('code', '=',\n 'ARO'), ('company_id', '=', user.company_id.id)])\n if return_account_old_version:\n return_account = return_account_old_version[0]\n else:\n return_account = Account.sudo().create({'name':\n 'Return Order Account', 'code': 'ARO', 'user_type_id': self\n .env.ref('account.data_account_type_current_assets').id,\n 'company_id': user.company_id.id, 'note':\n 'code \"ARO\" give return order from customer'})\n self.env['ir.model.data'].sudo().create({'name': \n 'return_account' + str(user.company_id.id), 'model':\n 'account.account', 'module': 'pos_retail', 'res_id':\n return_account.id, 'noupdate': True})\n return_journal = Journal.sudo().search([('code', '=', 'ROJ'), (\n 'company_id', '=', user.company_id.id)])\n if return_journal:\n return_journal[0].sudo().write({'default_debit_account_id':\n return_account.id, 'default_credit_account_id':\n return_account.id, 'pos_method_type': 'return'})\n return_journal = return_journal[0]\n else:\n new_sequence = self.env['ir.sequence'].sudo().create({'name': \n 'Return account ' + str(user.company_id.id), 'padding': 3,\n 'prefix': 'RA ' + str(user.company_id.id)})\n self.env['ir.model.data'].sudo().create({'name': \n 'journal_sequence' + str(new_sequence.id), 'model':\n 'ir.sequence', 'module': 'pos_retail', 'res_id':\n new_sequence.id, 'noupdate': True})\n return_journal = Journal.sudo().create({'name':\n 'Return Order Customer', 'code': 'ROJ', 'type': 'cash',\n 'pos_method_type': 'return', 'journal_user': True,\n 'sequence_id': new_sequence.id, 'company_id': user.\n company_id.id, 'default_debit_account_id': return_account.\n id, 'default_credit_account_id': return_account.id,\n 'sequence': 103})\n self.env['ir.model.data'].sudo().create({'name': \n 'return_journal_' + str(return_journal.id), 'model':\n 'account.journal', 'module': 'pos_retail', 'res_id': int(\n return_journal.id), 'noupdate': True})\n config = self\n config.sudo().write({'journal_ids': [(4, return_journal.id)]})\n statement = [(0, 0, {'journal_id': return_journal.id, 'user_id':\n user.id, 'company_id': user.company_id.id})]\n current_session = config.current_session_id\n current_session.sudo().write({'statement_ids': statement})\n return True\n <function token>\n <function token>\n\n @api.multi\n def open_session_cb(self):\n res = super(pos_config, self).open_session_cb()\n self.init_voucher_journal()\n self.init_wallet_journal()\n self.init_credit_journal()\n self.init_return_order_journal()\n self.init_rounding_journal()\n return res\n",
"<import token>\n<code token>\n<assignment token>\n<class token>\n\n\nclass pos_config(models.Model):\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n <function token>\n <function token>\n\n @api.model\n def store_cached_file(self, datas):\n start = timeit.default_timer()\n _logger.info('==> begin cached_file')\n os.chdir(os.path.dirname(__file__))\n path = os.getcwd()\n file_name = path + '/pos.json'\n if os.path.exists(file_name):\n os.remove(file_name)\n with io.open(file_name, 'w', encoding='utf8') as outfile:\n str_ = json.dumps(datas, indent=4, sort_keys=True, separators=(\n ',', ': '), ensure_ascii=False)\n outfile.write(to_unicode(str_))\n stop = timeit.default_timer()\n _logger.info(stop - start)\n return True\n\n @api.model\n def get_cached_file(self):\n start = timeit.default_timer()\n _logger.info('==> begin get_cached_file')\n os.chdir(os.path.dirname(__file__))\n path = os.getcwd()\n file_name = path + '/pos.json'\n if not os.path.exists(file_name):\n return False\n else:\n with open(file_name) as f:\n datas = json.load(f)\n stop = timeit.default_timer()\n _logger.info(stop - start)\n return datas\n\n def get_fields_by_model(self, model):\n all_fields = self.env[model].fields_get()\n fields_list = []\n for field, value in all_fields.items():\n if field == 'model' or all_fields[field]['type'] in ['one2many',\n 'binary']:\n continue\n else:\n fields_list.append(field)\n return fields_list\n\n @api.model\n def install_data(self, model_name=None, min_id=0, max_id=1999):\n cache_obj = self.env['pos.cache.database'].with_context(prefetch_fields\n =False)\n log_obj = self.env['pos.call.log'].with_context(prefetch_fields=False)\n domain = [('id', '>=', min_id), ('id', '<=', max_id)]\n if model_name == 'product.product':\n domain.append(('available_in_pos', '=', True))\n field_list = cache_obj.get_fields_by_model(model_name)\n self.env.cr.execute(\n \"select id from pos_call_log where min_id=%s and max_id=%s and call_model='%s'\"\n % (min_id, max_id, model_name))\n old_logs = self.env.cr.fetchall()\n datas = None\n if len(old_logs) == 0:\n _logger.info('installing %s from %s to %s' % (model_name,\n min_id, max_id))\n datas = self.env[model_name].with_context(prefetch_fields=False\n ).search_read(domain, field_list)\n version_info = odoo.release.version_info[0]\n if version_info == 12:\n all_fields = self.env[model_name].fields_get()\n for data in datas:\n for field, value in data.items():\n if field == 'model':\n continue\n if all_fields[field] and all_fields[field]['type'] in [\n 'date', 'datetime'] and value:\n data[field] = value.strftime(\n DEFAULT_SERVER_DATETIME_FORMAT)\n vals = {'active': True, 'min_id': min_id, 'max_id': max_id,\n 'call_fields': json.dumps(field_list), 'call_results': json\n .dumps(datas), 'call_model': model_name, 'call_domain':\n json.dumps(domain)}\n log_obj.create(vals)\n else:\n old_log_id = old_logs[0][0]\n old_log = log_obj.browse(old_log_id)\n datas = old_log.call_results\n self.env.cr.commit()\n return datas\n\n @api.onchange('lock_print_invoice_on_pos')\n def _onchange_lock_print_invoice_on_pos(self):\n if self.lock_print_invoice_on_pos == True:\n self.receipt_invoice_number = False\n self.send_invoice_email = True\n else:\n self.receipt_invoice_number = True\n self.send_invoice_email = False\n <function token>\n\n @api.onchange('pos_auto_invoice')\n def _onchange_pos_auto_invoice(self):\n if self.pos_auto_invoice == True:\n self.iface_invoicing = True\n else:\n self.iface_invoicing = False\n <function token>\n\n @api.multi\n def write(self, vals):\n if vals.get('allow_discount', False) or vals.get('allow_qty', False\n ) or vals.get('allow_price', False):\n vals['allow_numpad'] = True\n if vals.get('expired_days_voucher', None) and vals.get(\n 'expired_days_voucher') < 0:\n raise UserError('Expired days of voucher could not smaller than 0')\n for config in self:\n if vals.get('management_session', False) and not vals.get(\n 'default_cashbox_lines_ids'):\n if (not config.default_cashbox_lines_ids and not config.\n cash_control):\n raise UserError(\n 'Please go to Cash control and add Default Opening')\n res = super(pos_config, self).write(vals)\n for config in self:\n if (config.validate_by_user_id and not config.\n validate_by_user_id.pos_security_pin):\n raise UserError(\n 'Validate user %s have not set pos security pin, please go to Users menu and input security password'\n % config.validate_by_user_id.name)\n if (config.discount_unlock_limit_user_id and not config.\n discount_unlock_limit_user_id.pos_security_pin):\n raise UserError(\n 'User Unlock limit discount: %s ,have not set pos security pin, please go to Users menu and input security password'\n % config.discount_unlock_limit_user_id.name)\n return res\n <function token>\n\n def init_wallet_journal(self):\n Journal = self.env['account.journal']\n user = self.env.user\n wallet_journal = Journal.sudo().search([('code', '=', 'UWJ'), (\n 'company_id', '=', user.company_id.id)])\n if wallet_journal:\n return wallet_journal.sudo().write({'pos_method_type': 'wallet'})\n Account = self.env['account.account']\n wallet_account_old_version = Account.sudo().search([('code', '=',\n 'AUW'), ('company_id', '=', user.company_id.id)])\n if wallet_account_old_version:\n wallet_account = wallet_account_old_version[0]\n else:\n wallet_account = Account.sudo().create({'name':\n 'Account wallet', 'code': 'AUW', 'user_type_id': self.env.\n ref('account.data_account_type_current_assets').id,\n 'company_id': user.company_id.id, 'note':\n 'code \"AUW\" auto give wallet amount of customers'})\n self.env['ir.model.data'].sudo().create({'name': \n 'account_use_wallet' + str(user.company_id.id), 'model':\n 'account.account', 'module': 'pos_retail', 'res_id':\n wallet_account.id, 'noupdate': True})\n wallet_journal_inactive = Journal.sudo().search([('code', '=',\n 'UWJ'), ('company_id', '=', user.company_id.id), (\n 'pos_method_type', '=', 'wallet')])\n if wallet_journal_inactive:\n wallet_journal_inactive.sudo().write({\n 'default_debit_account_id': wallet_account.id,\n 'default_credit_account_id': wallet_account.id,\n 'pos_method_type': 'wallet', 'sequence': 100})\n wallet_journal = wallet_journal_inactive\n else:\n new_sequence = self.env['ir.sequence'].sudo().create({'name': \n 'Account Default Wallet Journal ' + str(user.company_id.id),\n 'padding': 3, 'prefix': 'UW ' + str(user.company_id.id)})\n self.env['ir.model.data'].sudo().create({'name': \n 'journal_sequence' + str(new_sequence.id), 'model':\n 'ir.sequence', 'module': 'pos_retail', 'res_id':\n new_sequence.id, 'noupdate': True})\n wallet_journal = Journal.sudo().create({'name': 'Wallet',\n 'code': 'UWJ', 'type': 'cash', 'pos_method_type': 'wallet',\n 'journal_user': True, 'sequence_id': new_sequence.id,\n 'company_id': user.company_id.id,\n 'default_debit_account_id': wallet_account.id,\n 'default_credit_account_id': wallet_account.id, 'sequence':\n 100})\n self.env['ir.model.data'].sudo().create({'name': \n 'use_wallet_journal_' + str(wallet_journal.id), 'model':\n 'account.journal', 'module': 'pos_retail', 'res_id': int(\n wallet_journal.id), 'noupdate': True})\n config = self\n config.sudo().write({'journal_ids': [(4, wallet_journal.id)]})\n statement = [(0, 0, {'journal_id': wallet_journal.id, 'user_id':\n user.id, 'company_id': user.company_id.id})]\n current_session = config.current_session_id\n current_session.sudo().write({'statement_ids': statement})\n return\n <function token>\n <function token>\n\n def init_return_order_journal(self):\n Journal = self.env['account.journal']\n user = self.env.user\n return_journal = Journal.sudo().search([('code', '=', 'ROJ'), (\n 'company_id', '=', user.company_id.id)])\n if return_journal:\n return return_journal.sudo().write({'pos_method_type': 'return'})\n Account = self.env['account.account']\n return_account_old_version = Account.sudo().search([('code', '=',\n 'ARO'), ('company_id', '=', user.company_id.id)])\n if return_account_old_version:\n return_account = return_account_old_version[0]\n else:\n return_account = Account.sudo().create({'name':\n 'Return Order Account', 'code': 'ARO', 'user_type_id': self\n .env.ref('account.data_account_type_current_assets').id,\n 'company_id': user.company_id.id, 'note':\n 'code \"ARO\" give return order from customer'})\n self.env['ir.model.data'].sudo().create({'name': \n 'return_account' + str(user.company_id.id), 'model':\n 'account.account', 'module': 'pos_retail', 'res_id':\n return_account.id, 'noupdate': True})\n return_journal = Journal.sudo().search([('code', '=', 'ROJ'), (\n 'company_id', '=', user.company_id.id)])\n if return_journal:\n return_journal[0].sudo().write({'default_debit_account_id':\n return_account.id, 'default_credit_account_id':\n return_account.id, 'pos_method_type': 'return'})\n return_journal = return_journal[0]\n else:\n new_sequence = self.env['ir.sequence'].sudo().create({'name': \n 'Return account ' + str(user.company_id.id), 'padding': 3,\n 'prefix': 'RA ' + str(user.company_id.id)})\n self.env['ir.model.data'].sudo().create({'name': \n 'journal_sequence' + str(new_sequence.id), 'model':\n 'ir.sequence', 'module': 'pos_retail', 'res_id':\n new_sequence.id, 'noupdate': True})\n return_journal = Journal.sudo().create({'name':\n 'Return Order Customer', 'code': 'ROJ', 'type': 'cash',\n 'pos_method_type': 'return', 'journal_user': True,\n 'sequence_id': new_sequence.id, 'company_id': user.\n company_id.id, 'default_debit_account_id': return_account.\n id, 'default_credit_account_id': return_account.id,\n 'sequence': 103})\n self.env['ir.model.data'].sudo().create({'name': \n 'return_journal_' + str(return_journal.id), 'model':\n 'account.journal', 'module': 'pos_retail', 'res_id': int(\n return_journal.id), 'noupdate': True})\n config = self\n config.sudo().write({'journal_ids': [(4, return_journal.id)]})\n statement = [(0, 0, {'journal_id': return_journal.id, 'user_id':\n user.id, 'company_id': user.company_id.id})]\n current_session = config.current_session_id\n current_session.sudo().write({'statement_ids': statement})\n return True\n <function token>\n <function token>\n\n @api.multi\n def open_session_cb(self):\n res = super(pos_config, self).open_session_cb()\n self.init_voucher_journal()\n self.init_wallet_journal()\n self.init_credit_journal()\n self.init_return_order_journal()\n self.init_rounding_journal()\n return res\n",
"<import token>\n<code token>\n<assignment token>\n<class token>\n\n\nclass pos_config(models.Model):\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n <function token>\n <function token>\n\n @api.model\n def store_cached_file(self, datas):\n start = timeit.default_timer()\n _logger.info('==> begin cached_file')\n os.chdir(os.path.dirname(__file__))\n path = os.getcwd()\n file_name = path + '/pos.json'\n if os.path.exists(file_name):\n os.remove(file_name)\n with io.open(file_name, 'w', encoding='utf8') as outfile:\n str_ = json.dumps(datas, indent=4, sort_keys=True, separators=(\n ',', ': '), ensure_ascii=False)\n outfile.write(to_unicode(str_))\n stop = timeit.default_timer()\n _logger.info(stop - start)\n return True\n\n @api.model\n def get_cached_file(self):\n start = timeit.default_timer()\n _logger.info('==> begin get_cached_file')\n os.chdir(os.path.dirname(__file__))\n path = os.getcwd()\n file_name = path + '/pos.json'\n if not os.path.exists(file_name):\n return False\n else:\n with open(file_name) as f:\n datas = json.load(f)\n stop = timeit.default_timer()\n _logger.info(stop - start)\n return datas\n\n def get_fields_by_model(self, model):\n all_fields = self.env[model].fields_get()\n fields_list = []\n for field, value in all_fields.items():\n if field == 'model' or all_fields[field]['type'] in ['one2many',\n 'binary']:\n continue\n else:\n fields_list.append(field)\n return fields_list\n\n @api.model\n def install_data(self, model_name=None, min_id=0, max_id=1999):\n cache_obj = self.env['pos.cache.database'].with_context(prefetch_fields\n =False)\n log_obj = self.env['pos.call.log'].with_context(prefetch_fields=False)\n domain = [('id', '>=', min_id), ('id', '<=', max_id)]\n if model_name == 'product.product':\n domain.append(('available_in_pos', '=', True))\n field_list = cache_obj.get_fields_by_model(model_name)\n self.env.cr.execute(\n \"select id from pos_call_log where min_id=%s and max_id=%s and call_model='%s'\"\n % (min_id, max_id, model_name))\n old_logs = self.env.cr.fetchall()\n datas = None\n if len(old_logs) == 0:\n _logger.info('installing %s from %s to %s' % (model_name,\n min_id, max_id))\n datas = self.env[model_name].with_context(prefetch_fields=False\n ).search_read(domain, field_list)\n version_info = odoo.release.version_info[0]\n if version_info == 12:\n all_fields = self.env[model_name].fields_get()\n for data in datas:\n for field, value in data.items():\n if field == 'model':\n continue\n if all_fields[field] and all_fields[field]['type'] in [\n 'date', 'datetime'] and value:\n data[field] = value.strftime(\n DEFAULT_SERVER_DATETIME_FORMAT)\n vals = {'active': True, 'min_id': min_id, 'max_id': max_id,\n 'call_fields': json.dumps(field_list), 'call_results': json\n .dumps(datas), 'call_model': model_name, 'call_domain':\n json.dumps(domain)}\n log_obj.create(vals)\n else:\n old_log_id = old_logs[0][0]\n old_log = log_obj.browse(old_log_id)\n datas = old_log.call_results\n self.env.cr.commit()\n return datas\n\n @api.onchange('lock_print_invoice_on_pos')\n def _onchange_lock_print_invoice_on_pos(self):\n if self.lock_print_invoice_on_pos == True:\n self.receipt_invoice_number = False\n self.send_invoice_email = True\n else:\n self.receipt_invoice_number = True\n self.send_invoice_email = False\n <function token>\n\n @api.onchange('pos_auto_invoice')\n def _onchange_pos_auto_invoice(self):\n if self.pos_auto_invoice == True:\n self.iface_invoicing = True\n else:\n self.iface_invoicing = False\n <function token>\n <function token>\n <function token>\n\n def init_wallet_journal(self):\n Journal = self.env['account.journal']\n user = self.env.user\n wallet_journal = Journal.sudo().search([('code', '=', 'UWJ'), (\n 'company_id', '=', user.company_id.id)])\n if wallet_journal:\n return wallet_journal.sudo().write({'pos_method_type': 'wallet'})\n Account = self.env['account.account']\n wallet_account_old_version = Account.sudo().search([('code', '=',\n 'AUW'), ('company_id', '=', user.company_id.id)])\n if wallet_account_old_version:\n wallet_account = wallet_account_old_version[0]\n else:\n wallet_account = Account.sudo().create({'name':\n 'Account wallet', 'code': 'AUW', 'user_type_id': self.env.\n ref('account.data_account_type_current_assets').id,\n 'company_id': user.company_id.id, 'note':\n 'code \"AUW\" auto give wallet amount of customers'})\n self.env['ir.model.data'].sudo().create({'name': \n 'account_use_wallet' + str(user.company_id.id), 'model':\n 'account.account', 'module': 'pos_retail', 'res_id':\n wallet_account.id, 'noupdate': True})\n wallet_journal_inactive = Journal.sudo().search([('code', '=',\n 'UWJ'), ('company_id', '=', user.company_id.id), (\n 'pos_method_type', '=', 'wallet')])\n if wallet_journal_inactive:\n wallet_journal_inactive.sudo().write({\n 'default_debit_account_id': wallet_account.id,\n 'default_credit_account_id': wallet_account.id,\n 'pos_method_type': 'wallet', 'sequence': 100})\n wallet_journal = wallet_journal_inactive\n else:\n new_sequence = self.env['ir.sequence'].sudo().create({'name': \n 'Account Default Wallet Journal ' + str(user.company_id.id),\n 'padding': 3, 'prefix': 'UW ' + str(user.company_id.id)})\n self.env['ir.model.data'].sudo().create({'name': \n 'journal_sequence' + str(new_sequence.id), 'model':\n 'ir.sequence', 'module': 'pos_retail', 'res_id':\n new_sequence.id, 'noupdate': True})\n wallet_journal = Journal.sudo().create({'name': 'Wallet',\n 'code': 'UWJ', 'type': 'cash', 'pos_method_type': 'wallet',\n 'journal_user': True, 'sequence_id': new_sequence.id,\n 'company_id': user.company_id.id,\n 'default_debit_account_id': wallet_account.id,\n 'default_credit_account_id': wallet_account.id, 'sequence':\n 100})\n self.env['ir.model.data'].sudo().create({'name': \n 'use_wallet_journal_' + str(wallet_journal.id), 'model':\n 'account.journal', 'module': 'pos_retail', 'res_id': int(\n wallet_journal.id), 'noupdate': True})\n config = self\n config.sudo().write({'journal_ids': [(4, wallet_journal.id)]})\n statement = [(0, 0, {'journal_id': wallet_journal.id, 'user_id':\n user.id, 'company_id': user.company_id.id})]\n current_session = config.current_session_id\n current_session.sudo().write({'statement_ids': statement})\n return\n <function token>\n <function token>\n\n def init_return_order_journal(self):\n Journal = self.env['account.journal']\n user = self.env.user\n return_journal = Journal.sudo().search([('code', '=', 'ROJ'), (\n 'company_id', '=', user.company_id.id)])\n if return_journal:\n return return_journal.sudo().write({'pos_method_type': 'return'})\n Account = self.env['account.account']\n return_account_old_version = Account.sudo().search([('code', '=',\n 'ARO'), ('company_id', '=', user.company_id.id)])\n if return_account_old_version:\n return_account = return_account_old_version[0]\n else:\n return_account = Account.sudo().create({'name':\n 'Return Order Account', 'code': 'ARO', 'user_type_id': self\n .env.ref('account.data_account_type_current_assets').id,\n 'company_id': user.company_id.id, 'note':\n 'code \"ARO\" give return order from customer'})\n self.env['ir.model.data'].sudo().create({'name': \n 'return_account' + str(user.company_id.id), 'model':\n 'account.account', 'module': 'pos_retail', 'res_id':\n return_account.id, 'noupdate': True})\n return_journal = Journal.sudo().search([('code', '=', 'ROJ'), (\n 'company_id', '=', user.company_id.id)])\n if return_journal:\n return_journal[0].sudo().write({'default_debit_account_id':\n return_account.id, 'default_credit_account_id':\n return_account.id, 'pos_method_type': 'return'})\n return_journal = return_journal[0]\n else:\n new_sequence = self.env['ir.sequence'].sudo().create({'name': \n 'Return account ' + str(user.company_id.id), 'padding': 3,\n 'prefix': 'RA ' + str(user.company_id.id)})\n self.env['ir.model.data'].sudo().create({'name': \n 'journal_sequence' + str(new_sequence.id), 'model':\n 'ir.sequence', 'module': 'pos_retail', 'res_id':\n new_sequence.id, 'noupdate': True})\n return_journal = Journal.sudo().create({'name':\n 'Return Order Customer', 'code': 'ROJ', 'type': 'cash',\n 'pos_method_type': 'return', 'journal_user': True,\n 'sequence_id': new_sequence.id, 'company_id': user.\n company_id.id, 'default_debit_account_id': return_account.\n id, 'default_credit_account_id': return_account.id,\n 'sequence': 103})\n self.env['ir.model.data'].sudo().create({'name': \n 'return_journal_' + str(return_journal.id), 'model':\n 'account.journal', 'module': 'pos_retail', 'res_id': int(\n return_journal.id), 'noupdate': True})\n config = self\n config.sudo().write({'journal_ids': [(4, return_journal.id)]})\n statement = [(0, 0, {'journal_id': return_journal.id, 'user_id':\n user.id, 'company_id': user.company_id.id})]\n current_session = config.current_session_id\n current_session.sudo().write({'statement_ids': statement})\n return True\n <function token>\n <function token>\n\n @api.multi\n def open_session_cb(self):\n res = super(pos_config, self).open_session_cb()\n self.init_voucher_journal()\n self.init_wallet_journal()\n self.init_credit_journal()\n self.init_return_order_journal()\n self.init_rounding_journal()\n return res\n",
"<import token>\n<code token>\n<assignment token>\n<class token>\n\n\nclass pos_config(models.Model):\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n <function token>\n <function token>\n\n @api.model\n def store_cached_file(self, datas):\n start = timeit.default_timer()\n _logger.info('==> begin cached_file')\n os.chdir(os.path.dirname(__file__))\n path = os.getcwd()\n file_name = path + '/pos.json'\n if os.path.exists(file_name):\n os.remove(file_name)\n with io.open(file_name, 'w', encoding='utf8') as outfile:\n str_ = json.dumps(datas, indent=4, sort_keys=True, separators=(\n ',', ': '), ensure_ascii=False)\n outfile.write(to_unicode(str_))\n stop = timeit.default_timer()\n _logger.info(stop - start)\n return True\n\n @api.model\n def get_cached_file(self):\n start = timeit.default_timer()\n _logger.info('==> begin get_cached_file')\n os.chdir(os.path.dirname(__file__))\n path = os.getcwd()\n file_name = path + '/pos.json'\n if not os.path.exists(file_name):\n return False\n else:\n with open(file_name) as f:\n datas = json.load(f)\n stop = timeit.default_timer()\n _logger.info(stop - start)\n return datas\n\n def get_fields_by_model(self, model):\n all_fields = self.env[model].fields_get()\n fields_list = []\n for field, value in all_fields.items():\n if field == 'model' or all_fields[field]['type'] in ['one2many',\n 'binary']:\n continue\n else:\n fields_list.append(field)\n return fields_list\n\n @api.model\n def install_data(self, model_name=None, min_id=0, max_id=1999):\n cache_obj = self.env['pos.cache.database'].with_context(prefetch_fields\n =False)\n log_obj = self.env['pos.call.log'].with_context(prefetch_fields=False)\n domain = [('id', '>=', min_id), ('id', '<=', max_id)]\n if model_name == 'product.product':\n domain.append(('available_in_pos', '=', True))\n field_list = cache_obj.get_fields_by_model(model_name)\n self.env.cr.execute(\n \"select id from pos_call_log where min_id=%s and max_id=%s and call_model='%s'\"\n % (min_id, max_id, model_name))\n old_logs = self.env.cr.fetchall()\n datas = None\n if len(old_logs) == 0:\n _logger.info('installing %s from %s to %s' % (model_name,\n min_id, max_id))\n datas = self.env[model_name].with_context(prefetch_fields=False\n ).search_read(domain, field_list)\n version_info = odoo.release.version_info[0]\n if version_info == 12:\n all_fields = self.env[model_name].fields_get()\n for data in datas:\n for field, value in data.items():\n if field == 'model':\n continue\n if all_fields[field] and all_fields[field]['type'] in [\n 'date', 'datetime'] and value:\n data[field] = value.strftime(\n DEFAULT_SERVER_DATETIME_FORMAT)\n vals = {'active': True, 'min_id': min_id, 'max_id': max_id,\n 'call_fields': json.dumps(field_list), 'call_results': json\n .dumps(datas), 'call_model': model_name, 'call_domain':\n json.dumps(domain)}\n log_obj.create(vals)\n else:\n old_log_id = old_logs[0][0]\n old_log = log_obj.browse(old_log_id)\n datas = old_log.call_results\n self.env.cr.commit()\n return datas\n <function token>\n <function token>\n\n @api.onchange('pos_auto_invoice')\n def _onchange_pos_auto_invoice(self):\n if self.pos_auto_invoice == True:\n self.iface_invoicing = True\n else:\n self.iface_invoicing = False\n <function token>\n <function token>\n <function token>\n\n def init_wallet_journal(self):\n Journal = self.env['account.journal']\n user = self.env.user\n wallet_journal = Journal.sudo().search([('code', '=', 'UWJ'), (\n 'company_id', '=', user.company_id.id)])\n if wallet_journal:\n return wallet_journal.sudo().write({'pos_method_type': 'wallet'})\n Account = self.env['account.account']\n wallet_account_old_version = Account.sudo().search([('code', '=',\n 'AUW'), ('company_id', '=', user.company_id.id)])\n if wallet_account_old_version:\n wallet_account = wallet_account_old_version[0]\n else:\n wallet_account = Account.sudo().create({'name':\n 'Account wallet', 'code': 'AUW', 'user_type_id': self.env.\n ref('account.data_account_type_current_assets').id,\n 'company_id': user.company_id.id, 'note':\n 'code \"AUW\" auto give wallet amount of customers'})\n self.env['ir.model.data'].sudo().create({'name': \n 'account_use_wallet' + str(user.company_id.id), 'model':\n 'account.account', 'module': 'pos_retail', 'res_id':\n wallet_account.id, 'noupdate': True})\n wallet_journal_inactive = Journal.sudo().search([('code', '=',\n 'UWJ'), ('company_id', '=', user.company_id.id), (\n 'pos_method_type', '=', 'wallet')])\n if wallet_journal_inactive:\n wallet_journal_inactive.sudo().write({\n 'default_debit_account_id': wallet_account.id,\n 'default_credit_account_id': wallet_account.id,\n 'pos_method_type': 'wallet', 'sequence': 100})\n wallet_journal = wallet_journal_inactive\n else:\n new_sequence = self.env['ir.sequence'].sudo().create({'name': \n 'Account Default Wallet Journal ' + str(user.company_id.id),\n 'padding': 3, 'prefix': 'UW ' + str(user.company_id.id)})\n self.env['ir.model.data'].sudo().create({'name': \n 'journal_sequence' + str(new_sequence.id), 'model':\n 'ir.sequence', 'module': 'pos_retail', 'res_id':\n new_sequence.id, 'noupdate': True})\n wallet_journal = Journal.sudo().create({'name': 'Wallet',\n 'code': 'UWJ', 'type': 'cash', 'pos_method_type': 'wallet',\n 'journal_user': True, 'sequence_id': new_sequence.id,\n 'company_id': user.company_id.id,\n 'default_debit_account_id': wallet_account.id,\n 'default_credit_account_id': wallet_account.id, 'sequence':\n 100})\n self.env['ir.model.data'].sudo().create({'name': \n 'use_wallet_journal_' + str(wallet_journal.id), 'model':\n 'account.journal', 'module': 'pos_retail', 'res_id': int(\n wallet_journal.id), 'noupdate': True})\n config = self\n config.sudo().write({'journal_ids': [(4, wallet_journal.id)]})\n statement = [(0, 0, {'journal_id': wallet_journal.id, 'user_id':\n user.id, 'company_id': user.company_id.id})]\n current_session = config.current_session_id\n current_session.sudo().write({'statement_ids': statement})\n return\n <function token>\n <function token>\n\n def init_return_order_journal(self):\n Journal = self.env['account.journal']\n user = self.env.user\n return_journal = Journal.sudo().search([('code', '=', 'ROJ'), (\n 'company_id', '=', user.company_id.id)])\n if return_journal:\n return return_journal.sudo().write({'pos_method_type': 'return'})\n Account = self.env['account.account']\n return_account_old_version = Account.sudo().search([('code', '=',\n 'ARO'), ('company_id', '=', user.company_id.id)])\n if return_account_old_version:\n return_account = return_account_old_version[0]\n else:\n return_account = Account.sudo().create({'name':\n 'Return Order Account', 'code': 'ARO', 'user_type_id': self\n .env.ref('account.data_account_type_current_assets').id,\n 'company_id': user.company_id.id, 'note':\n 'code \"ARO\" give return order from customer'})\n self.env['ir.model.data'].sudo().create({'name': \n 'return_account' + str(user.company_id.id), 'model':\n 'account.account', 'module': 'pos_retail', 'res_id':\n return_account.id, 'noupdate': True})\n return_journal = Journal.sudo().search([('code', '=', 'ROJ'), (\n 'company_id', '=', user.company_id.id)])\n if return_journal:\n return_journal[0].sudo().write({'default_debit_account_id':\n return_account.id, 'default_credit_account_id':\n return_account.id, 'pos_method_type': 'return'})\n return_journal = return_journal[0]\n else:\n new_sequence = self.env['ir.sequence'].sudo().create({'name': \n 'Return account ' + str(user.company_id.id), 'padding': 3,\n 'prefix': 'RA ' + str(user.company_id.id)})\n self.env['ir.model.data'].sudo().create({'name': \n 'journal_sequence' + str(new_sequence.id), 'model':\n 'ir.sequence', 'module': 'pos_retail', 'res_id':\n new_sequence.id, 'noupdate': True})\n return_journal = Journal.sudo().create({'name':\n 'Return Order Customer', 'code': 'ROJ', 'type': 'cash',\n 'pos_method_type': 'return', 'journal_user': True,\n 'sequence_id': new_sequence.id, 'company_id': user.\n company_id.id, 'default_debit_account_id': return_account.\n id, 'default_credit_account_id': return_account.id,\n 'sequence': 103})\n self.env['ir.model.data'].sudo().create({'name': \n 'return_journal_' + str(return_journal.id), 'model':\n 'account.journal', 'module': 'pos_retail', 'res_id': int(\n return_journal.id), 'noupdate': True})\n config = self\n config.sudo().write({'journal_ids': [(4, return_journal.id)]})\n statement = [(0, 0, {'journal_id': return_journal.id, 'user_id':\n user.id, 'company_id': user.company_id.id})]\n current_session = config.current_session_id\n current_session.sudo().write({'statement_ids': statement})\n return True\n <function token>\n <function token>\n\n @api.multi\n def open_session_cb(self):\n res = super(pos_config, self).open_session_cb()\n self.init_voucher_journal()\n self.init_wallet_journal()\n self.init_credit_journal()\n self.init_return_order_journal()\n self.init_rounding_journal()\n return res\n",
"<import token>\n<code token>\n<assignment token>\n<class token>\n\n\nclass pos_config(models.Model):\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n <function token>\n <function token>\n\n @api.model\n def store_cached_file(self, datas):\n start = timeit.default_timer()\n _logger.info('==> begin cached_file')\n os.chdir(os.path.dirname(__file__))\n path = os.getcwd()\n file_name = path + '/pos.json'\n if os.path.exists(file_name):\n os.remove(file_name)\n with io.open(file_name, 'w', encoding='utf8') as outfile:\n str_ = json.dumps(datas, indent=4, sort_keys=True, separators=(\n ',', ': '), ensure_ascii=False)\n outfile.write(to_unicode(str_))\n stop = timeit.default_timer()\n _logger.info(stop - start)\n return True\n\n @api.model\n def get_cached_file(self):\n start = timeit.default_timer()\n _logger.info('==> begin get_cached_file')\n os.chdir(os.path.dirname(__file__))\n path = os.getcwd()\n file_name = path + '/pos.json'\n if not os.path.exists(file_name):\n return False\n else:\n with open(file_name) as f:\n datas = json.load(f)\n stop = timeit.default_timer()\n _logger.info(stop - start)\n return datas\n\n def get_fields_by_model(self, model):\n all_fields = self.env[model].fields_get()\n fields_list = []\n for field, value in all_fields.items():\n if field == 'model' or all_fields[field]['type'] in ['one2many',\n 'binary']:\n continue\n else:\n fields_list.append(field)\n return fields_list\n\n @api.model\n def install_data(self, model_name=None, min_id=0, max_id=1999):\n cache_obj = self.env['pos.cache.database'].with_context(prefetch_fields\n =False)\n log_obj = self.env['pos.call.log'].with_context(prefetch_fields=False)\n domain = [('id', '>=', min_id), ('id', '<=', max_id)]\n if model_name == 'product.product':\n domain.append(('available_in_pos', '=', True))\n field_list = cache_obj.get_fields_by_model(model_name)\n self.env.cr.execute(\n \"select id from pos_call_log where min_id=%s and max_id=%s and call_model='%s'\"\n % (min_id, max_id, model_name))\n old_logs = self.env.cr.fetchall()\n datas = None\n if len(old_logs) == 0:\n _logger.info('installing %s from %s to %s' % (model_name,\n min_id, max_id))\n datas = self.env[model_name].with_context(prefetch_fields=False\n ).search_read(domain, field_list)\n version_info = odoo.release.version_info[0]\n if version_info == 12:\n all_fields = self.env[model_name].fields_get()\n for data in datas:\n for field, value in data.items():\n if field == 'model':\n continue\n if all_fields[field] and all_fields[field]['type'] in [\n 'date', 'datetime'] and value:\n data[field] = value.strftime(\n DEFAULT_SERVER_DATETIME_FORMAT)\n vals = {'active': True, 'min_id': min_id, 'max_id': max_id,\n 'call_fields': json.dumps(field_list), 'call_results': json\n .dumps(datas), 'call_model': model_name, 'call_domain':\n json.dumps(domain)}\n log_obj.create(vals)\n else:\n old_log_id = old_logs[0][0]\n old_log = log_obj.browse(old_log_id)\n datas = old_log.call_results\n self.env.cr.commit()\n return datas\n <function token>\n <function token>\n\n @api.onchange('pos_auto_invoice')\n def _onchange_pos_auto_invoice(self):\n if self.pos_auto_invoice == True:\n self.iface_invoicing = True\n else:\n self.iface_invoicing = False\n <function token>\n <function token>\n <function token>\n\n def init_wallet_journal(self):\n Journal = self.env['account.journal']\n user = self.env.user\n wallet_journal = Journal.sudo().search([('code', '=', 'UWJ'), (\n 'company_id', '=', user.company_id.id)])\n if wallet_journal:\n return wallet_journal.sudo().write({'pos_method_type': 'wallet'})\n Account = self.env['account.account']\n wallet_account_old_version = Account.sudo().search([('code', '=',\n 'AUW'), ('company_id', '=', user.company_id.id)])\n if wallet_account_old_version:\n wallet_account = wallet_account_old_version[0]\n else:\n wallet_account = Account.sudo().create({'name':\n 'Account wallet', 'code': 'AUW', 'user_type_id': self.env.\n ref('account.data_account_type_current_assets').id,\n 'company_id': user.company_id.id, 'note':\n 'code \"AUW\" auto give wallet amount of customers'})\n self.env['ir.model.data'].sudo().create({'name': \n 'account_use_wallet' + str(user.company_id.id), 'model':\n 'account.account', 'module': 'pos_retail', 'res_id':\n wallet_account.id, 'noupdate': True})\n wallet_journal_inactive = Journal.sudo().search([('code', '=',\n 'UWJ'), ('company_id', '=', user.company_id.id), (\n 'pos_method_type', '=', 'wallet')])\n if wallet_journal_inactive:\n wallet_journal_inactive.sudo().write({\n 'default_debit_account_id': wallet_account.id,\n 'default_credit_account_id': wallet_account.id,\n 'pos_method_type': 'wallet', 'sequence': 100})\n wallet_journal = wallet_journal_inactive\n else:\n new_sequence = self.env['ir.sequence'].sudo().create({'name': \n 'Account Default Wallet Journal ' + str(user.company_id.id),\n 'padding': 3, 'prefix': 'UW ' + str(user.company_id.id)})\n self.env['ir.model.data'].sudo().create({'name': \n 'journal_sequence' + str(new_sequence.id), 'model':\n 'ir.sequence', 'module': 'pos_retail', 'res_id':\n new_sequence.id, 'noupdate': True})\n wallet_journal = Journal.sudo().create({'name': 'Wallet',\n 'code': 'UWJ', 'type': 'cash', 'pos_method_type': 'wallet',\n 'journal_user': True, 'sequence_id': new_sequence.id,\n 'company_id': user.company_id.id,\n 'default_debit_account_id': wallet_account.id,\n 'default_credit_account_id': wallet_account.id, 'sequence':\n 100})\n self.env['ir.model.data'].sudo().create({'name': \n 'use_wallet_journal_' + str(wallet_journal.id), 'model':\n 'account.journal', 'module': 'pos_retail', 'res_id': int(\n wallet_journal.id), 'noupdate': True})\n config = self\n config.sudo().write({'journal_ids': [(4, wallet_journal.id)]})\n statement = [(0, 0, {'journal_id': wallet_journal.id, 'user_id':\n user.id, 'company_id': user.company_id.id})]\n current_session = config.current_session_id\n current_session.sudo().write({'statement_ids': statement})\n return\n <function token>\n <function token>\n\n def init_return_order_journal(self):\n Journal = self.env['account.journal']\n user = self.env.user\n return_journal = Journal.sudo().search([('code', '=', 'ROJ'), (\n 'company_id', '=', user.company_id.id)])\n if return_journal:\n return return_journal.sudo().write({'pos_method_type': 'return'})\n Account = self.env['account.account']\n return_account_old_version = Account.sudo().search([('code', '=',\n 'ARO'), ('company_id', '=', user.company_id.id)])\n if return_account_old_version:\n return_account = return_account_old_version[0]\n else:\n return_account = Account.sudo().create({'name':\n 'Return Order Account', 'code': 'ARO', 'user_type_id': self\n .env.ref('account.data_account_type_current_assets').id,\n 'company_id': user.company_id.id, 'note':\n 'code \"ARO\" give return order from customer'})\n self.env['ir.model.data'].sudo().create({'name': \n 'return_account' + str(user.company_id.id), 'model':\n 'account.account', 'module': 'pos_retail', 'res_id':\n return_account.id, 'noupdate': True})\n return_journal = Journal.sudo().search([('code', '=', 'ROJ'), (\n 'company_id', '=', user.company_id.id)])\n if return_journal:\n return_journal[0].sudo().write({'default_debit_account_id':\n return_account.id, 'default_credit_account_id':\n return_account.id, 'pos_method_type': 'return'})\n return_journal = return_journal[0]\n else:\n new_sequence = self.env['ir.sequence'].sudo().create({'name': \n 'Return account ' + str(user.company_id.id), 'padding': 3,\n 'prefix': 'RA ' + str(user.company_id.id)})\n self.env['ir.model.data'].sudo().create({'name': \n 'journal_sequence' + str(new_sequence.id), 'model':\n 'ir.sequence', 'module': 'pos_retail', 'res_id':\n new_sequence.id, 'noupdate': True})\n return_journal = Journal.sudo().create({'name':\n 'Return Order Customer', 'code': 'ROJ', 'type': 'cash',\n 'pos_method_type': 'return', 'journal_user': True,\n 'sequence_id': new_sequence.id, 'company_id': user.\n company_id.id, 'default_debit_account_id': return_account.\n id, 'default_credit_account_id': return_account.id,\n 'sequence': 103})\n self.env['ir.model.data'].sudo().create({'name': \n 'return_journal_' + str(return_journal.id), 'model':\n 'account.journal', 'module': 'pos_retail', 'res_id': int(\n return_journal.id), 'noupdate': True})\n config = self\n config.sudo().write({'journal_ids': [(4, return_journal.id)]})\n statement = [(0, 0, {'journal_id': return_journal.id, 'user_id':\n user.id, 'company_id': user.company_id.id})]\n current_session = config.current_session_id\n current_session.sudo().write({'statement_ids': statement})\n return True\n <function token>\n <function token>\n <function token>\n",
"<import token>\n<code token>\n<assignment token>\n<class token>\n\n\nclass pos_config(models.Model):\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n <function token>\n <function token>\n\n @api.model\n def store_cached_file(self, datas):\n start = timeit.default_timer()\n _logger.info('==> begin cached_file')\n os.chdir(os.path.dirname(__file__))\n path = os.getcwd()\n file_name = path + '/pos.json'\n if os.path.exists(file_name):\n os.remove(file_name)\n with io.open(file_name, 'w', encoding='utf8') as outfile:\n str_ = json.dumps(datas, indent=4, sort_keys=True, separators=(\n ',', ': '), ensure_ascii=False)\n outfile.write(to_unicode(str_))\n stop = timeit.default_timer()\n _logger.info(stop - start)\n return True\n\n @api.model\n def get_cached_file(self):\n start = timeit.default_timer()\n _logger.info('==> begin get_cached_file')\n os.chdir(os.path.dirname(__file__))\n path = os.getcwd()\n file_name = path + '/pos.json'\n if not os.path.exists(file_name):\n return False\n else:\n with open(file_name) as f:\n datas = json.load(f)\n stop = timeit.default_timer()\n _logger.info(stop - start)\n return datas\n\n def get_fields_by_model(self, model):\n all_fields = self.env[model].fields_get()\n fields_list = []\n for field, value in all_fields.items():\n if field == 'model' or all_fields[field]['type'] in ['one2many',\n 'binary']:\n continue\n else:\n fields_list.append(field)\n return fields_list\n <function token>\n <function token>\n <function token>\n\n @api.onchange('pos_auto_invoice')\n def _onchange_pos_auto_invoice(self):\n if self.pos_auto_invoice == True:\n self.iface_invoicing = True\n else:\n self.iface_invoicing = False\n <function token>\n <function token>\n <function token>\n\n def init_wallet_journal(self):\n Journal = self.env['account.journal']\n user = self.env.user\n wallet_journal = Journal.sudo().search([('code', '=', 'UWJ'), (\n 'company_id', '=', user.company_id.id)])\n if wallet_journal:\n return wallet_journal.sudo().write({'pos_method_type': 'wallet'})\n Account = self.env['account.account']\n wallet_account_old_version = Account.sudo().search([('code', '=',\n 'AUW'), ('company_id', '=', user.company_id.id)])\n if wallet_account_old_version:\n wallet_account = wallet_account_old_version[0]\n else:\n wallet_account = Account.sudo().create({'name':\n 'Account wallet', 'code': 'AUW', 'user_type_id': self.env.\n ref('account.data_account_type_current_assets').id,\n 'company_id': user.company_id.id, 'note':\n 'code \"AUW\" auto give wallet amount of customers'})\n self.env['ir.model.data'].sudo().create({'name': \n 'account_use_wallet' + str(user.company_id.id), 'model':\n 'account.account', 'module': 'pos_retail', 'res_id':\n wallet_account.id, 'noupdate': True})\n wallet_journal_inactive = Journal.sudo().search([('code', '=',\n 'UWJ'), ('company_id', '=', user.company_id.id), (\n 'pos_method_type', '=', 'wallet')])\n if wallet_journal_inactive:\n wallet_journal_inactive.sudo().write({\n 'default_debit_account_id': wallet_account.id,\n 'default_credit_account_id': wallet_account.id,\n 'pos_method_type': 'wallet', 'sequence': 100})\n wallet_journal = wallet_journal_inactive\n else:\n new_sequence = self.env['ir.sequence'].sudo().create({'name': \n 'Account Default Wallet Journal ' + str(user.company_id.id),\n 'padding': 3, 'prefix': 'UW ' + str(user.company_id.id)})\n self.env['ir.model.data'].sudo().create({'name': \n 'journal_sequence' + str(new_sequence.id), 'model':\n 'ir.sequence', 'module': 'pos_retail', 'res_id':\n new_sequence.id, 'noupdate': True})\n wallet_journal = Journal.sudo().create({'name': 'Wallet',\n 'code': 'UWJ', 'type': 'cash', 'pos_method_type': 'wallet',\n 'journal_user': True, 'sequence_id': new_sequence.id,\n 'company_id': user.company_id.id,\n 'default_debit_account_id': wallet_account.id,\n 'default_credit_account_id': wallet_account.id, 'sequence':\n 100})\n self.env['ir.model.data'].sudo().create({'name': \n 'use_wallet_journal_' + str(wallet_journal.id), 'model':\n 'account.journal', 'module': 'pos_retail', 'res_id': int(\n wallet_journal.id), 'noupdate': True})\n config = self\n config.sudo().write({'journal_ids': [(4, wallet_journal.id)]})\n statement = [(0, 0, {'journal_id': wallet_journal.id, 'user_id':\n user.id, 'company_id': user.company_id.id})]\n current_session = config.current_session_id\n current_session.sudo().write({'statement_ids': statement})\n return\n <function token>\n <function token>\n\n def init_return_order_journal(self):\n Journal = self.env['account.journal']\n user = self.env.user\n return_journal = Journal.sudo().search([('code', '=', 'ROJ'), (\n 'company_id', '=', user.company_id.id)])\n if return_journal:\n return return_journal.sudo().write({'pos_method_type': 'return'})\n Account = self.env['account.account']\n return_account_old_version = Account.sudo().search([('code', '=',\n 'ARO'), ('company_id', '=', user.company_id.id)])\n if return_account_old_version:\n return_account = return_account_old_version[0]\n else:\n return_account = Account.sudo().create({'name':\n 'Return Order Account', 'code': 'ARO', 'user_type_id': self\n .env.ref('account.data_account_type_current_assets').id,\n 'company_id': user.company_id.id, 'note':\n 'code \"ARO\" give return order from customer'})\n self.env['ir.model.data'].sudo().create({'name': \n 'return_account' + str(user.company_id.id), 'model':\n 'account.account', 'module': 'pos_retail', 'res_id':\n return_account.id, 'noupdate': True})\n return_journal = Journal.sudo().search([('code', '=', 'ROJ'), (\n 'company_id', '=', user.company_id.id)])\n if return_journal:\n return_journal[0].sudo().write({'default_debit_account_id':\n return_account.id, 'default_credit_account_id':\n return_account.id, 'pos_method_type': 'return'})\n return_journal = return_journal[0]\n else:\n new_sequence = self.env['ir.sequence'].sudo().create({'name': \n 'Return account ' + str(user.company_id.id), 'padding': 3,\n 'prefix': 'RA ' + str(user.company_id.id)})\n self.env['ir.model.data'].sudo().create({'name': \n 'journal_sequence' + str(new_sequence.id), 'model':\n 'ir.sequence', 'module': 'pos_retail', 'res_id':\n new_sequence.id, 'noupdate': True})\n return_journal = Journal.sudo().create({'name':\n 'Return Order Customer', 'code': 'ROJ', 'type': 'cash',\n 'pos_method_type': 'return', 'journal_user': True,\n 'sequence_id': new_sequence.id, 'company_id': user.\n company_id.id, 'default_debit_account_id': return_account.\n id, 'default_credit_account_id': return_account.id,\n 'sequence': 103})\n self.env['ir.model.data'].sudo().create({'name': \n 'return_journal_' + str(return_journal.id), 'model':\n 'account.journal', 'module': 'pos_retail', 'res_id': int(\n return_journal.id), 'noupdate': True})\n config = self\n config.sudo().write({'journal_ids': [(4, return_journal.id)]})\n statement = [(0, 0, {'journal_id': return_journal.id, 'user_id':\n user.id, 'company_id': user.company_id.id})]\n current_session = config.current_session_id\n current_session.sudo().write({'statement_ids': statement})\n return True\n <function token>\n <function token>\n <function token>\n",
"<import token>\n<code token>\n<assignment token>\n<class token>\n\n\nclass pos_config(models.Model):\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n <function token>\n <function token>\n\n @api.model\n def store_cached_file(self, datas):\n start = timeit.default_timer()\n _logger.info('==> begin cached_file')\n os.chdir(os.path.dirname(__file__))\n path = os.getcwd()\n file_name = path + '/pos.json'\n if os.path.exists(file_name):\n os.remove(file_name)\n with io.open(file_name, 'w', encoding='utf8') as outfile:\n str_ = json.dumps(datas, indent=4, sort_keys=True, separators=(\n ',', ': '), ensure_ascii=False)\n outfile.write(to_unicode(str_))\n stop = timeit.default_timer()\n _logger.info(stop - start)\n return True\n\n @api.model\n def get_cached_file(self):\n start = timeit.default_timer()\n _logger.info('==> begin get_cached_file')\n os.chdir(os.path.dirname(__file__))\n path = os.getcwd()\n file_name = path + '/pos.json'\n if not os.path.exists(file_name):\n return False\n else:\n with open(file_name) as f:\n datas = json.load(f)\n stop = timeit.default_timer()\n _logger.info(stop - start)\n return datas\n\n def get_fields_by_model(self, model):\n all_fields = self.env[model].fields_get()\n fields_list = []\n for field, value in all_fields.items():\n if field == 'model' or all_fields[field]['type'] in ['one2many',\n 'binary']:\n continue\n else:\n fields_list.append(field)\n return fields_list\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def init_wallet_journal(self):\n Journal = self.env['account.journal']\n user = self.env.user\n wallet_journal = Journal.sudo().search([('code', '=', 'UWJ'), (\n 'company_id', '=', user.company_id.id)])\n if wallet_journal:\n return wallet_journal.sudo().write({'pos_method_type': 'wallet'})\n Account = self.env['account.account']\n wallet_account_old_version = Account.sudo().search([('code', '=',\n 'AUW'), ('company_id', '=', user.company_id.id)])\n if wallet_account_old_version:\n wallet_account = wallet_account_old_version[0]\n else:\n wallet_account = Account.sudo().create({'name':\n 'Account wallet', 'code': 'AUW', 'user_type_id': self.env.\n ref('account.data_account_type_current_assets').id,\n 'company_id': user.company_id.id, 'note':\n 'code \"AUW\" auto give wallet amount of customers'})\n self.env['ir.model.data'].sudo().create({'name': \n 'account_use_wallet' + str(user.company_id.id), 'model':\n 'account.account', 'module': 'pos_retail', 'res_id':\n wallet_account.id, 'noupdate': True})\n wallet_journal_inactive = Journal.sudo().search([('code', '=',\n 'UWJ'), ('company_id', '=', user.company_id.id), (\n 'pos_method_type', '=', 'wallet')])\n if wallet_journal_inactive:\n wallet_journal_inactive.sudo().write({\n 'default_debit_account_id': wallet_account.id,\n 'default_credit_account_id': wallet_account.id,\n 'pos_method_type': 'wallet', 'sequence': 100})\n wallet_journal = wallet_journal_inactive\n else:\n new_sequence = self.env['ir.sequence'].sudo().create({'name': \n 'Account Default Wallet Journal ' + str(user.company_id.id),\n 'padding': 3, 'prefix': 'UW ' + str(user.company_id.id)})\n self.env['ir.model.data'].sudo().create({'name': \n 'journal_sequence' + str(new_sequence.id), 'model':\n 'ir.sequence', 'module': 'pos_retail', 'res_id':\n new_sequence.id, 'noupdate': True})\n wallet_journal = Journal.sudo().create({'name': 'Wallet',\n 'code': 'UWJ', 'type': 'cash', 'pos_method_type': 'wallet',\n 'journal_user': True, 'sequence_id': new_sequence.id,\n 'company_id': user.company_id.id,\n 'default_debit_account_id': wallet_account.id,\n 'default_credit_account_id': wallet_account.id, 'sequence':\n 100})\n self.env['ir.model.data'].sudo().create({'name': \n 'use_wallet_journal_' + str(wallet_journal.id), 'model':\n 'account.journal', 'module': 'pos_retail', 'res_id': int(\n wallet_journal.id), 'noupdate': True})\n config = self\n config.sudo().write({'journal_ids': [(4, wallet_journal.id)]})\n statement = [(0, 0, {'journal_id': wallet_journal.id, 'user_id':\n user.id, 'company_id': user.company_id.id})]\n current_session = config.current_session_id\n current_session.sudo().write({'statement_ids': statement})\n return\n <function token>\n <function token>\n\n def init_return_order_journal(self):\n Journal = self.env['account.journal']\n user = self.env.user\n return_journal = Journal.sudo().search([('code', '=', 'ROJ'), (\n 'company_id', '=', user.company_id.id)])\n if return_journal:\n return return_journal.sudo().write({'pos_method_type': 'return'})\n Account = self.env['account.account']\n return_account_old_version = Account.sudo().search([('code', '=',\n 'ARO'), ('company_id', '=', user.company_id.id)])\n if return_account_old_version:\n return_account = return_account_old_version[0]\n else:\n return_account = Account.sudo().create({'name':\n 'Return Order Account', 'code': 'ARO', 'user_type_id': self\n .env.ref('account.data_account_type_current_assets').id,\n 'company_id': user.company_id.id, 'note':\n 'code \"ARO\" give return order from customer'})\n self.env['ir.model.data'].sudo().create({'name': \n 'return_account' + str(user.company_id.id), 'model':\n 'account.account', 'module': 'pos_retail', 'res_id':\n return_account.id, 'noupdate': True})\n return_journal = Journal.sudo().search([('code', '=', 'ROJ'), (\n 'company_id', '=', user.company_id.id)])\n if return_journal:\n return_journal[0].sudo().write({'default_debit_account_id':\n return_account.id, 'default_credit_account_id':\n return_account.id, 'pos_method_type': 'return'})\n return_journal = return_journal[0]\n else:\n new_sequence = self.env['ir.sequence'].sudo().create({'name': \n 'Return account ' + str(user.company_id.id), 'padding': 3,\n 'prefix': 'RA ' + str(user.company_id.id)})\n self.env['ir.model.data'].sudo().create({'name': \n 'journal_sequence' + str(new_sequence.id), 'model':\n 'ir.sequence', 'module': 'pos_retail', 'res_id':\n new_sequence.id, 'noupdate': True})\n return_journal = Journal.sudo().create({'name':\n 'Return Order Customer', 'code': 'ROJ', 'type': 'cash',\n 'pos_method_type': 'return', 'journal_user': True,\n 'sequence_id': new_sequence.id, 'company_id': user.\n company_id.id, 'default_debit_account_id': return_account.\n id, 'default_credit_account_id': return_account.id,\n 'sequence': 103})\n self.env['ir.model.data'].sudo().create({'name': \n 'return_journal_' + str(return_journal.id), 'model':\n 'account.journal', 'module': 'pos_retail', 'res_id': int(\n return_journal.id), 'noupdate': True})\n config = self\n config.sudo().write({'journal_ids': [(4, return_journal.id)]})\n statement = [(0, 0, {'journal_id': return_journal.id, 'user_id':\n user.id, 'company_id': user.company_id.id})]\n current_session = config.current_session_id\n current_session.sudo().write({'statement_ids': statement})\n return True\n <function token>\n <function token>\n <function token>\n",
"<import token>\n<code token>\n<assignment token>\n<class token>\n\n\nclass pos_config(models.Model):\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n <function token>\n <function token>\n\n @api.model\n def store_cached_file(self, datas):\n start = timeit.default_timer()\n _logger.info('==> begin cached_file')\n os.chdir(os.path.dirname(__file__))\n path = os.getcwd()\n file_name = path + '/pos.json'\n if os.path.exists(file_name):\n os.remove(file_name)\n with io.open(file_name, 'w', encoding='utf8') as outfile:\n str_ = json.dumps(datas, indent=4, sort_keys=True, separators=(\n ',', ': '), ensure_ascii=False)\n outfile.write(to_unicode(str_))\n stop = timeit.default_timer()\n _logger.info(stop - start)\n return True\n\n @api.model\n def get_cached_file(self):\n start = timeit.default_timer()\n _logger.info('==> begin get_cached_file')\n os.chdir(os.path.dirname(__file__))\n path = os.getcwd()\n file_name = path + '/pos.json'\n if not os.path.exists(file_name):\n return False\n else:\n with open(file_name) as f:\n datas = json.load(f)\n stop = timeit.default_timer()\n _logger.info(stop - start)\n return datas\n\n def get_fields_by_model(self, model):\n all_fields = self.env[model].fields_get()\n fields_list = []\n for field, value in all_fields.items():\n if field == 'model' or all_fields[field]['type'] in ['one2many',\n 'binary']:\n continue\n else:\n fields_list.append(field)\n return fields_list\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def init_wallet_journal(self):\n Journal = self.env['account.journal']\n user = self.env.user\n wallet_journal = Journal.sudo().search([('code', '=', 'UWJ'), (\n 'company_id', '=', user.company_id.id)])\n if wallet_journal:\n return wallet_journal.sudo().write({'pos_method_type': 'wallet'})\n Account = self.env['account.account']\n wallet_account_old_version = Account.sudo().search([('code', '=',\n 'AUW'), ('company_id', '=', user.company_id.id)])\n if wallet_account_old_version:\n wallet_account = wallet_account_old_version[0]\n else:\n wallet_account = Account.sudo().create({'name':\n 'Account wallet', 'code': 'AUW', 'user_type_id': self.env.\n ref('account.data_account_type_current_assets').id,\n 'company_id': user.company_id.id, 'note':\n 'code \"AUW\" auto give wallet amount of customers'})\n self.env['ir.model.data'].sudo().create({'name': \n 'account_use_wallet' + str(user.company_id.id), 'model':\n 'account.account', 'module': 'pos_retail', 'res_id':\n wallet_account.id, 'noupdate': True})\n wallet_journal_inactive = Journal.sudo().search([('code', '=',\n 'UWJ'), ('company_id', '=', user.company_id.id), (\n 'pos_method_type', '=', 'wallet')])\n if wallet_journal_inactive:\n wallet_journal_inactive.sudo().write({\n 'default_debit_account_id': wallet_account.id,\n 'default_credit_account_id': wallet_account.id,\n 'pos_method_type': 'wallet', 'sequence': 100})\n wallet_journal = wallet_journal_inactive\n else:\n new_sequence = self.env['ir.sequence'].sudo().create({'name': \n 'Account Default Wallet Journal ' + str(user.company_id.id),\n 'padding': 3, 'prefix': 'UW ' + str(user.company_id.id)})\n self.env['ir.model.data'].sudo().create({'name': \n 'journal_sequence' + str(new_sequence.id), 'model':\n 'ir.sequence', 'module': 'pos_retail', 'res_id':\n new_sequence.id, 'noupdate': True})\n wallet_journal = Journal.sudo().create({'name': 'Wallet',\n 'code': 'UWJ', 'type': 'cash', 'pos_method_type': 'wallet',\n 'journal_user': True, 'sequence_id': new_sequence.id,\n 'company_id': user.company_id.id,\n 'default_debit_account_id': wallet_account.id,\n 'default_credit_account_id': wallet_account.id, 'sequence':\n 100})\n self.env['ir.model.data'].sudo().create({'name': \n 'use_wallet_journal_' + str(wallet_journal.id), 'model':\n 'account.journal', 'module': 'pos_retail', 'res_id': int(\n wallet_journal.id), 'noupdate': True})\n config = self\n config.sudo().write({'journal_ids': [(4, wallet_journal.id)]})\n statement = [(0, 0, {'journal_id': wallet_journal.id, 'user_id':\n user.id, 'company_id': user.company_id.id})]\n current_session = config.current_session_id\n current_session.sudo().write({'statement_ids': statement})\n return\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n",
"<import token>\n<code token>\n<assignment token>\n<class token>\n\n\nclass pos_config(models.Model):\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n <function token>\n <function token>\n\n @api.model\n def store_cached_file(self, datas):\n start = timeit.default_timer()\n _logger.info('==> begin cached_file')\n os.chdir(os.path.dirname(__file__))\n path = os.getcwd()\n file_name = path + '/pos.json'\n if os.path.exists(file_name):\n os.remove(file_name)\n with io.open(file_name, 'w', encoding='utf8') as outfile:\n str_ = json.dumps(datas, indent=4, sort_keys=True, separators=(\n ',', ': '), ensure_ascii=False)\n outfile.write(to_unicode(str_))\n stop = timeit.default_timer()\n _logger.info(stop - start)\n return True\n <function token>\n\n def get_fields_by_model(self, model):\n all_fields = self.env[model].fields_get()\n fields_list = []\n for field, value in all_fields.items():\n if field == 'model' or all_fields[field]['type'] in ['one2many',\n 'binary']:\n continue\n else:\n fields_list.append(field)\n return fields_list\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def init_wallet_journal(self):\n Journal = self.env['account.journal']\n user = self.env.user\n wallet_journal = Journal.sudo().search([('code', '=', 'UWJ'), (\n 'company_id', '=', user.company_id.id)])\n if wallet_journal:\n return wallet_journal.sudo().write({'pos_method_type': 'wallet'})\n Account = self.env['account.account']\n wallet_account_old_version = Account.sudo().search([('code', '=',\n 'AUW'), ('company_id', '=', user.company_id.id)])\n if wallet_account_old_version:\n wallet_account = wallet_account_old_version[0]\n else:\n wallet_account = Account.sudo().create({'name':\n 'Account wallet', 'code': 'AUW', 'user_type_id': self.env.\n ref('account.data_account_type_current_assets').id,\n 'company_id': user.company_id.id, 'note':\n 'code \"AUW\" auto give wallet amount of customers'})\n self.env['ir.model.data'].sudo().create({'name': \n 'account_use_wallet' + str(user.company_id.id), 'model':\n 'account.account', 'module': 'pos_retail', 'res_id':\n wallet_account.id, 'noupdate': True})\n wallet_journal_inactive = Journal.sudo().search([('code', '=',\n 'UWJ'), ('company_id', '=', user.company_id.id), (\n 'pos_method_type', '=', 'wallet')])\n if wallet_journal_inactive:\n wallet_journal_inactive.sudo().write({\n 'default_debit_account_id': wallet_account.id,\n 'default_credit_account_id': wallet_account.id,\n 'pos_method_type': 'wallet', 'sequence': 100})\n wallet_journal = wallet_journal_inactive\n else:\n new_sequence = self.env['ir.sequence'].sudo().create({'name': \n 'Account Default Wallet Journal ' + str(user.company_id.id),\n 'padding': 3, 'prefix': 'UW ' + str(user.company_id.id)})\n self.env['ir.model.data'].sudo().create({'name': \n 'journal_sequence' + str(new_sequence.id), 'model':\n 'ir.sequence', 'module': 'pos_retail', 'res_id':\n new_sequence.id, 'noupdate': True})\n wallet_journal = Journal.sudo().create({'name': 'Wallet',\n 'code': 'UWJ', 'type': 'cash', 'pos_method_type': 'wallet',\n 'journal_user': True, 'sequence_id': new_sequence.id,\n 'company_id': user.company_id.id,\n 'default_debit_account_id': wallet_account.id,\n 'default_credit_account_id': wallet_account.id, 'sequence':\n 100})\n self.env['ir.model.data'].sudo().create({'name': \n 'use_wallet_journal_' + str(wallet_journal.id), 'model':\n 'account.journal', 'module': 'pos_retail', 'res_id': int(\n wallet_journal.id), 'noupdate': True})\n config = self\n config.sudo().write({'journal_ids': [(4, wallet_journal.id)]})\n statement = [(0, 0, {'journal_id': wallet_journal.id, 'user_id':\n user.id, 'company_id': user.company_id.id})]\n current_session = config.current_session_id\n current_session.sudo().write({'statement_ids': statement})\n return\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n",
"<import token>\n<code token>\n<assignment token>\n<class token>\n\n\nclass pos_config(models.Model):\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n <function token>\n <function token>\n\n @api.model\n def store_cached_file(self, datas):\n start = timeit.default_timer()\n _logger.info('==> begin cached_file')\n os.chdir(os.path.dirname(__file__))\n path = os.getcwd()\n file_name = path + '/pos.json'\n if os.path.exists(file_name):\n os.remove(file_name)\n with io.open(file_name, 'w', encoding='utf8') as outfile:\n str_ = json.dumps(datas, indent=4, sort_keys=True, separators=(\n ',', ': '), ensure_ascii=False)\n outfile.write(to_unicode(str_))\n stop = timeit.default_timer()\n _logger.info(stop - start)\n return True\n <function token>\n\n def get_fields_by_model(self, model):\n all_fields = self.env[model].fields_get()\n fields_list = []\n for field, value in all_fields.items():\n if field == 'model' or all_fields[field]['type'] in ['one2many',\n 'binary']:\n continue\n else:\n fields_list.append(field)\n return fields_list\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n",
"<import token>\n<code token>\n<assignment token>\n<class token>\n\n\nclass pos_config(models.Model):\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n <function token>\n <function token>\n\n @api.model\n def store_cached_file(self, datas):\n start = timeit.default_timer()\n _logger.info('==> begin cached_file')\n os.chdir(os.path.dirname(__file__))\n path = os.getcwd()\n file_name = path + '/pos.json'\n if os.path.exists(file_name):\n os.remove(file_name)\n with io.open(file_name, 'w', encoding='utf8') as outfile:\n str_ = json.dumps(datas, indent=4, sort_keys=True, separators=(\n ',', ': '), ensure_ascii=False)\n outfile.write(to_unicode(str_))\n stop = timeit.default_timer()\n _logger.info(stop - start)\n return True\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n",
"<import token>\n<code token>\n<assignment token>\n<class token>\n\n\nclass pos_config(models.Model):\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n",
"<import token>\n<code token>\n<assignment token>\n<class token>\n<class token>\n"
] | false |
98,358 |
06a26ffbad2fb15f6f73c00ff2e69027c4267d10
|
# -*- coding: utf-8 -*-
"Representation of the reference value of a function."
# Copyright (C) 2008-2016 Martin Sandve Alnæs
#
# This file is part of UFL.
#
# UFL is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# UFL is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with UFL. If not, see <http://www.gnu.org/licenses/>.
from ufl.core.ufl_type import ufl_type
from ufl.core.operator import Operator
from ufl.core.terminal import FormArgument
from ufl.log import error
@ufl_type(num_ops=1,
is_index_free=True,
is_terminal_modifier=True,
is_in_reference_frame=True)
class ReferenceValue(Operator):
"Representation of the reference cell value of a form argument."
__slots__ = ()
def __init__(self, f):
if not isinstance(f, FormArgument):
error("Can only take reference value of form arguments.")
Operator.__init__(self, (f,))
@property
def ufl_shape(self):
return self.ufl_operands[0].ufl_element().reference_value_shape()
def evaluate(self, x, mapping, component, index_values, derivatives=()):
"Get child from mapping and return the component asked for."
error("Evaluate not implemented.")
def __str__(self):
return "reference_value(%s)" % self.ufl_operands[0]
|
[
"# -*- coding: utf-8 -*-\n\"Representation of the reference value of a function.\"\n\n# Copyright (C) 2008-2016 Martin Sandve Alnæs\n#\n# This file is part of UFL.\n#\n# UFL is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# UFL is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public License\n# along with UFL. If not, see <http://www.gnu.org/licenses/>.\n\nfrom ufl.core.ufl_type import ufl_type\nfrom ufl.core.operator import Operator\nfrom ufl.core.terminal import FormArgument\nfrom ufl.log import error\n\n\n@ufl_type(num_ops=1,\n is_index_free=True,\n is_terminal_modifier=True,\n is_in_reference_frame=True)\nclass ReferenceValue(Operator):\n \"Representation of the reference cell value of a form argument.\"\n __slots__ = ()\n\n def __init__(self, f):\n if not isinstance(f, FormArgument):\n error(\"Can only take reference value of form arguments.\")\n Operator.__init__(self, (f,))\n\n @property\n def ufl_shape(self):\n return self.ufl_operands[0].ufl_element().reference_value_shape()\n\n def evaluate(self, x, mapping, component, index_values, derivatives=()):\n \"Get child from mapping and return the component asked for.\"\n error(\"Evaluate not implemented.\")\n\n def __str__(self):\n return \"reference_value(%s)\" % self.ufl_operands[0]\n",
"<docstring token>\nfrom ufl.core.ufl_type import ufl_type\nfrom ufl.core.operator import Operator\nfrom ufl.core.terminal import FormArgument\nfrom ufl.log import error\n\n\n@ufl_type(num_ops=1, is_index_free=True, is_terminal_modifier=True,\n is_in_reference_frame=True)\nclass ReferenceValue(Operator):\n \"\"\"Representation of the reference cell value of a form argument.\"\"\"\n __slots__ = ()\n\n def __init__(self, f):\n if not isinstance(f, FormArgument):\n error('Can only take reference value of form arguments.')\n Operator.__init__(self, (f,))\n\n @property\n def ufl_shape(self):\n return self.ufl_operands[0].ufl_element().reference_value_shape()\n\n def evaluate(self, x, mapping, component, index_values, derivatives=()):\n \"\"\"Get child from mapping and return the component asked for.\"\"\"\n error('Evaluate not implemented.')\n\n def __str__(self):\n return 'reference_value(%s)' % self.ufl_operands[0]\n",
"<docstring token>\n<import token>\n\n\n@ufl_type(num_ops=1, is_index_free=True, is_terminal_modifier=True,\n is_in_reference_frame=True)\nclass ReferenceValue(Operator):\n \"\"\"Representation of the reference cell value of a form argument.\"\"\"\n __slots__ = ()\n\n def __init__(self, f):\n if not isinstance(f, FormArgument):\n error('Can only take reference value of form arguments.')\n Operator.__init__(self, (f,))\n\n @property\n def ufl_shape(self):\n return self.ufl_operands[0].ufl_element().reference_value_shape()\n\n def evaluate(self, x, mapping, component, index_values, derivatives=()):\n \"\"\"Get child from mapping and return the component asked for.\"\"\"\n error('Evaluate not implemented.')\n\n def __str__(self):\n return 'reference_value(%s)' % self.ufl_operands[0]\n",
"<docstring token>\n<import token>\n\n\n@ufl_type(num_ops=1, is_index_free=True, is_terminal_modifier=True,\n is_in_reference_frame=True)\nclass ReferenceValue(Operator):\n <docstring token>\n __slots__ = ()\n\n def __init__(self, f):\n if not isinstance(f, FormArgument):\n error('Can only take reference value of form arguments.')\n Operator.__init__(self, (f,))\n\n @property\n def ufl_shape(self):\n return self.ufl_operands[0].ufl_element().reference_value_shape()\n\n def evaluate(self, x, mapping, component, index_values, derivatives=()):\n \"\"\"Get child from mapping and return the component asked for.\"\"\"\n error('Evaluate not implemented.')\n\n def __str__(self):\n return 'reference_value(%s)' % self.ufl_operands[0]\n",
"<docstring token>\n<import token>\n\n\n@ufl_type(num_ops=1, is_index_free=True, is_terminal_modifier=True,\n is_in_reference_frame=True)\nclass ReferenceValue(Operator):\n <docstring token>\n <assignment token>\n\n def __init__(self, f):\n if not isinstance(f, FormArgument):\n error('Can only take reference value of form arguments.')\n Operator.__init__(self, (f,))\n\n @property\n def ufl_shape(self):\n return self.ufl_operands[0].ufl_element().reference_value_shape()\n\n def evaluate(self, x, mapping, component, index_values, derivatives=()):\n \"\"\"Get child from mapping and return the component asked for.\"\"\"\n error('Evaluate not implemented.')\n\n def __str__(self):\n return 'reference_value(%s)' % self.ufl_operands[0]\n",
"<docstring token>\n<import token>\n\n\n@ufl_type(num_ops=1, is_index_free=True, is_terminal_modifier=True,\n is_in_reference_frame=True)\nclass ReferenceValue(Operator):\n <docstring token>\n <assignment token>\n <function token>\n\n @property\n def ufl_shape(self):\n return self.ufl_operands[0].ufl_element().reference_value_shape()\n\n def evaluate(self, x, mapping, component, index_values, derivatives=()):\n \"\"\"Get child from mapping and return the component asked for.\"\"\"\n error('Evaluate not implemented.')\n\n def __str__(self):\n return 'reference_value(%s)' % self.ufl_operands[0]\n",
"<docstring token>\n<import token>\n\n\n@ufl_type(num_ops=1, is_index_free=True, is_terminal_modifier=True,\n is_in_reference_frame=True)\nclass ReferenceValue(Operator):\n <docstring token>\n <assignment token>\n <function token>\n\n @property\n def ufl_shape(self):\n return self.ufl_operands[0].ufl_element().reference_value_shape()\n <function token>\n\n def __str__(self):\n return 'reference_value(%s)' % self.ufl_operands[0]\n",
"<docstring token>\n<import token>\n\n\n@ufl_type(num_ops=1, is_index_free=True, is_terminal_modifier=True,\n is_in_reference_frame=True)\nclass ReferenceValue(Operator):\n <docstring token>\n <assignment token>\n <function token>\n <function token>\n <function token>\n\n def __str__(self):\n return 'reference_value(%s)' % self.ufl_operands[0]\n",
"<docstring token>\n<import token>\n\n\n@ufl_type(num_ops=1, is_index_free=True, is_terminal_modifier=True,\n is_in_reference_frame=True)\nclass ReferenceValue(Operator):\n <docstring token>\n <assignment token>\n <function token>\n <function token>\n <function token>\n <function token>\n",
"<docstring token>\n<import token>\n<class token>\n"
] | false |
98,359 |
46deb53260477c4eddae31f1eac5f41842238ed1
|
import csv
import datetime
name = input('What country are you interested in (e.g. USA)? ')
year_list = []
count_list = []
YEAR_INDEX = 0
COUNTRY_INDEX = 5
MF_INDEX = 6
CATEGORY_INDEX = 2
EVENT_INDEX = 3
years = {}
with open('summer.csv', 'r', encoding="utf-8") as csvfile:
reader = csv.reader(csvfile)
next(reader, None)
for row in reader:
mf = row[MF_INDEX]
event = row[EVENT_INDEX]
country = row[COUNTRY_INDEX]
if country == name and event == 'Swimming':
dt = int(row[YEAR_INDEX])
full_date = datetime.date(dt, 1, 1)
if not years.get(full_date):
years[full_date] = 0
years[full_date] += 1
year_list = sorted(years)
count_list = []
for y in year_list:
count_list.append(years[y])
print(str(y) + ": " + str(years[y]))
|
[
"import csv\nimport datetime\n\n\nname = input('What country are you interested in (e.g. USA)? ')\nyear_list = []\ncount_list = []\n\nYEAR_INDEX = 0\nCOUNTRY_INDEX = 5\nMF_INDEX = 6\nCATEGORY_INDEX = 2\nEVENT_INDEX = 3\nyears = {}\nwith open('summer.csv', 'r', encoding=\"utf-8\") as csvfile:\n reader = csv.reader(csvfile)\n next(reader, None)\n for row in reader:\n mf = row[MF_INDEX]\n event = row[EVENT_INDEX]\n country = row[COUNTRY_INDEX]\n if country == name and event == 'Swimming':\n dt = int(row[YEAR_INDEX])\n full_date = datetime.date(dt, 1, 1)\n if not years.get(full_date):\n years[full_date] = 0\n years[full_date] += 1\n\nyear_list = sorted(years)\ncount_list = []\nfor y in year_list:\n count_list.append(years[y])\n print(str(y) + \": \" + str(years[y]))\n",
"import csv\nimport datetime\nname = input('What country are you interested in (e.g. USA)? ')\nyear_list = []\ncount_list = []\nYEAR_INDEX = 0\nCOUNTRY_INDEX = 5\nMF_INDEX = 6\nCATEGORY_INDEX = 2\nEVENT_INDEX = 3\nyears = {}\nwith open('summer.csv', 'r', encoding='utf-8') as csvfile:\n reader = csv.reader(csvfile)\n next(reader, None)\n for row in reader:\n mf = row[MF_INDEX]\n event = row[EVENT_INDEX]\n country = row[COUNTRY_INDEX]\n if country == name and event == 'Swimming':\n dt = int(row[YEAR_INDEX])\n full_date = datetime.date(dt, 1, 1)\n if not years.get(full_date):\n years[full_date] = 0\n years[full_date] += 1\nyear_list = sorted(years)\ncount_list = []\nfor y in year_list:\n count_list.append(years[y])\n print(str(y) + ': ' + str(years[y]))\n",
"<import token>\nname = input('What country are you interested in (e.g. USA)? ')\nyear_list = []\ncount_list = []\nYEAR_INDEX = 0\nCOUNTRY_INDEX = 5\nMF_INDEX = 6\nCATEGORY_INDEX = 2\nEVENT_INDEX = 3\nyears = {}\nwith open('summer.csv', 'r', encoding='utf-8') as csvfile:\n reader = csv.reader(csvfile)\n next(reader, None)\n for row in reader:\n mf = row[MF_INDEX]\n event = row[EVENT_INDEX]\n country = row[COUNTRY_INDEX]\n if country == name and event == 'Swimming':\n dt = int(row[YEAR_INDEX])\n full_date = datetime.date(dt, 1, 1)\n if not years.get(full_date):\n years[full_date] = 0\n years[full_date] += 1\nyear_list = sorted(years)\ncount_list = []\nfor y in year_list:\n count_list.append(years[y])\n print(str(y) + ': ' + str(years[y]))\n",
"<import token>\n<assignment token>\nwith open('summer.csv', 'r', encoding='utf-8') as csvfile:\n reader = csv.reader(csvfile)\n next(reader, None)\n for row in reader:\n mf = row[MF_INDEX]\n event = row[EVENT_INDEX]\n country = row[COUNTRY_INDEX]\n if country == name and event == 'Swimming':\n dt = int(row[YEAR_INDEX])\n full_date = datetime.date(dt, 1, 1)\n if not years.get(full_date):\n years[full_date] = 0\n years[full_date] += 1\n<assignment token>\nfor y in year_list:\n count_list.append(years[y])\n print(str(y) + ': ' + str(years[y]))\n",
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n"
] | false |
98,360 |
ac9e3ab1f3be57baaa36175adfb5ca4a29d79e91
|
"""
This module provides a minimal type system, and ways to promote types, as
well as ways to convert to an LLVM type system. A set of predefined types are
defined. Types may be sliced to turn them into array types, in the same way
as the memoryview syntax.
>>> char
char
>>> int8[:, :, :]
int8[:, :, :]
>>> int8.signed
True
>>> uint8
uint8
>>> uint8.signed
False
>>> char.pointer()
char *
>>> int_[:, ::1]
int[:, ::1]
>>> int_[::1, :]
int[::1, :]
>>> double[:, ::1, :]
Traceback (most recent call last):
...
InvalidTypeSpecification: Step may only be provided once, and only in the first or last dimension.
"""
__all__ = ['Py_ssize_t', 'void', 'char', 'uchar', 'int_', 'long_', 'bool_', 'object_',
'float_', 'double', 'longdouble', 'float32', 'float64', 'float128',
'int8', 'int16', 'int32', 'int64', 'uint8', 'uint16', 'uint32', 'uint64',
'complex64', 'complex128', 'complex256', 'npy_intp']
import sys
import math
import ctypes
try:
import llvm.core
from llvm import core as lc
except ImportError:
llvm = None
import miniutils
import minierror
# Check below taken from Numba
if sys.maxint > 2**33:
_plat_bits = 64
else:
_plat_bits = 32
class TypeMapper(object):
"""
>>> import miniast
>>> context = miniast.Context()
>>> miniast.typemapper = TypeMapper(context)
>>> tm = context.typemapper
>>> tm.promote_types(int8, double)
double
>>> tm.promote_types(int8, uint8)
uint8
>>> tm.promote_types(int8, complex128)
complex128
>>> tm.promote_types(int8, object_)
PyObject *
>>> tm.promote_types(int64, float32)
float
>>> tm.promote_types(int64, complex64)
complex64
>>> tm.promote_types(float32, float64)
double
>>> tm.promote_types(float32, complex64)
complex64
>>> tm.promote_types(complex64, complex128)
complex128
>>> tm.promote_types(complex256, object_)
PyObject *
>>> tm.promote_types(float32.pointer(), Py_ssize_t)
float *
>>> tm.promote_types(float32.pointer(), Py_ssize_t)
float *
>>> tm.promote_types(float32.pointer(), uint8)
float *
>>> tm.promote_types(float32.pointer(), float64.pointer())
Traceback (most recent call last):
...
UnpromotableTypeError: (float *, double *)
>>> tm.promote_types(float32[:, ::1], float32[:, ::1])
float[:, ::1]
>>> tm.promote_types(float32[:, ::1], float64[:, ::1])
double[:, ::1]
>>> tm.promote_types(float32[:, ::1], float64[::1, :])
double[:, :]
>>> tm.promote_types(float32[:, :], complex128[:, :])
complex128[:, :]
>>> tm.promote_types(int_[:, :], object_[:, ::1])
PyObject *[:, :]
"""
def __init__(self, context):
self.context = context
def map_type(self, opaque_type):
if opaque_type.is_int:
return int_
elif opaque_type.is_float:
return float_
elif opaque_type.is_double:
return double
elif opaque_type.is_pointer:
return PointerType(self.map_type(opaque_type.base_type))
elif opaque_type.is_py_ssize_t:
return Py_ssize_t
elif opaque_type.is_char:
return char
else:
raise minierror.UnmappableTypeError(opaque_type)
def to_llvm(self, type):
"Return an LLVM type for the given type."
raise NotImplementedError
def from_python(self, value):
"Get a type from a python value"
np = sys.modules.get('numpy', None)
if isinstance(value, float):
return double
elif isinstance(value, (int, long)):
return int_
elif isinstance(value, complex):
return complex128
elif np and isinstance(value, np.ndarray):
dtype = map_dtype(value.dtype)
return ArrayType(dtype, value.ndim,
is_c_contig=value.flags['C_CONTIGUOUS'],
is_f_contig=value.flags['F_CONTIGUOUS'])
else:
return object_
# raise minierror.UnmappableTypeError(type(value))
def promote_numeric(self, type1, type2):
"Promote two numeric types"
return max([type1, type2], key=lambda type: type.rank)
def promote_arrays(self, type1, type2):
"Promote two array types in an expression to a new array type"
equal_ndim = type1.ndim == type2.ndim
return ArrayType(self.promote_types(type1.dtype, type2.dtype),
ndim=max(type1.ndim, type2.ndim),
is_c_contig=(equal_ndim and type1.is_c_contig and
type2.is_c_contig),
is_f_contig=(equal_ndim and type1.is_f_contig and
type2.is_f_contig))
def promote_types(self, type1, type2):
"Promote two arbitrary types"
if type1.is_pointer and type2.is_int_like:
return type1
elif type2.is_pointer and type2.is_int_like:
return type2
elif type1.is_object or type2.is_object:
return object_
elif type1.is_numeric and type2.is_numeric:
return self.promote_numeric(type1, type2)
elif type1.is_array and type2:
return self.promote_arrays(type1, type2)
else:
raise minierror.UnpromotableTypeError((type1, type2))
def map_dtype(dtype):
"""
>>> _map_dtype(np.dtype(np.int32))
int32
>>> _map_dtype(np.dtype(np.int64))
int64
>>> _map_dtype(np.dtype(np.object))
PyObject *
>>> _map_dtype(np.dtype(np.float64))
double
>>> _map_dtype(np.dtype(np.complex128))
complex128
"""
item_idx = int(math.log(dtype.itemsize, 2))
if dtype.kind == 'i':
return [int8, int16, int32, int64][item_idx]
elif dtype.kind == 'u':
return [uint8, uint16, uint32, uint64][item_idx]
elif dtype.kind == 'f':
if dtype.itemsize == 2:
pass # half floats not supported yet
elif dtype.itemsize == 4:
return float32
elif dtype.itemsize == 8:
return float64
elif dtype.itemsize == 16:
return float128
elif dtype.kind == 'b':
return int8
elif dtype.kind == 'c':
if dtype.itemsize == 8:
return complex64
elif dtype.itemsize == 16:
return complex128
elif dtype.itemsize == 32:
return complex256
elif dtype.kind == 'O':
return object_
NONE_KIND = 0
INT_KIND = 1
FLOAT_KIND = 2
COMPLEX_KIND = 3
class Type(miniutils.ComparableObjectMixin):
"""
Base class for all types.
.. attribute:: subtypes
The list of subtypes to allow comparing and hashing them recursively
"""
is_array = False
is_pointer = False
is_typewrapper = False
is_bool = False
is_numeric = False
is_py_ssize_t = False
is_char = False
is_int = False
is_float = False
is_c_string = False
is_object = False
is_function = False
is_int_like = False
is_complex = False
is_void = False
kind = NONE_KIND
subtypes = []
def __init__(self, **kwds):
vars(self).update(kwds)
self.qualifiers = kwds.get('qualifiers', frozenset())
def qualify(self, *qualifiers):
"Qualify this type with a qualifier such as ``const`` or ``restrict``"
qualifiers = list(qualifiers)
qualifiers.extend(self.qualifiers)
attribs = dict(vars(self), qualifiers=qualifiers)
return type(self)(**attribs)
def unqualify(self, *unqualifiers):
"Remove the given qualifiers from the type"
unqualifiers = set(unqualifiers)
qualifiers = [q for q in self.qualifiers if q not in unqualifiers]
attribs = dict(vars(self), qualifiers=qualifiers)
return type(self)(**attribs)
def pointer(self):
"Get a pointer to this type"
return PointerType(self)
@property
def subtype_list(self):
return [getattr(self, subtype) for subtype in self.subtypes]
@property
def comparison_type_list(self):
return self.subtype_list
def __eq__(self, other):
# Don't use isinstance here, compare on exact type to be consistent
# with __hash__. Override where sensible
return (type(self) is type(other) and
self.comparison_type_list == other.comparison_type_list)
def __ne__(self, other):
return not self == other
def __hash__(self):
h = hash(type(self))
for subtype in self.comparison_type_list:
h = h ^ hash(subtype)
return h
def __getitem__(self, item):
assert isinstance(item, (tuple, slice))
def verify_slice(s):
if s.start or s.stop or s.step not in (None, 1):
raise minierror.InvalidTypeSpecification(
"Only a step of 1 may be provided to indicate C or "
"Fortran contiguity")
if isinstance(item, tuple):
step_idx = None
for idx, s in enumerate(item):
verify_slice(s)
if s.step and (step_idx or idx not in (0, len(item) - 1)):
raise minierror.InvalidTypeSpecification(
"Step may only be provided once, and only in the "
"first or last dimension.")
if s.step == 1:
step_idx = idx
return ArrayType(self, len(item),
is_c_contig=step_idx == len(item) - 1,
is_f_contig=step_idx == 0)
else:
verify_slice(item)
return ArrayType(self, 1, is_c_contig=bool(item.step))
def to_llvm(self, context):
"Get a corresponding llvm type from this type"
return context.to_llvm(self)
def __getattr__(self, attr):
if attr.startswith('is_'):
return False
return getattr(type(self), attr)
class ArrayType(Type):
is_array = True
subtypes = ['dtype']
def __init__(self, dtype, ndim, is_c_contig=False, is_f_contig=False,
inner_contig=False, broadcasting=None):
super(ArrayType, self).__init__()
self.dtype = dtype
self.ndim = ndim
self.is_c_contig = is_c_contig
self.is_f_contig = is_f_contig
self.inner_contig = inner_contig or is_c_contig or is_f_contig
self.broadcasting = broadcasting or (True,) * ndim
@property
def comparison_type_list(self):
return [self.dtype, self.is_c_contig, self.is_f_contig, self.inner_contig]
def pointer(self):
raise Exception("You probably want a pointer type to the dtype")
def to_llvm(self, context):
# raise Exception("Obtain a pointer to the dtype and convert that "
# "to an LLVM type")
return context.to_llvm(self)
def __repr__(self):
axes = [":"] * self.ndim
if self.is_c_contig:
axes[-1] = "::1"
elif self.is_f_contig:
axes[0] = "::1"
return "%s[%s]" % (self.dtype, ", ".join(axes))
class PointerType(Type):
is_pointer = True
subtypes = ['base_type']
def __init__(self, base_type, **kwds):
super(PointerType, self).__init__(**kwds)
self.base_type = base_type
def __repr__(self):
return "%s *%s" % (self.base_type, " ".join(self.qualifiers))
def to_llvm(self, context):
return llvm.core.Type.pointer(self.base_type.to_llvm(context))
class CArrayType(Type):
is_carray = True
subtypes = ['base_type']
def __init__(self, base_type, size, **kwds):
super(CArrayType, self).__init__(**kwds)
self.base_type = base_type
self.size = size
def __repr__(self):
return "%s[%d]" % (self.base_type, self.length)
def to_llvm(self, context):
return llvm.core.Type.array(self.base_type.to_llvm(context), self.size)
class TypeWrapper(Type):
is_typewrapper = True
subtypes = ['opaque_type']
def __init__(self, opaque_type, context, **kwds):
super(TypeWrapper, self).__init__(**kwds)
self.opaque_type = opaque_type
self.context = context
def __repr__(self):
return self.context.declare_type(self)
def __deepcopy__(self, memo):
return self
class NamedType(Type):
name = None
def __eq__(self, other):
return isinstance(other, NamedType) and self.name == other.name
def __repr__(self):
if self.qualifiers:
return "%s %s" % (self.name, " ".join(self.qualifiers))
return self.name
class BoolType(NamedType):
is_bool = True
name = "bool"
def __repr__(self):
return "int %s" % " ".join(self.qualifiers)
def to_llvm(self, context):
return int8.to_llvm(context)
class NumericType(NamedType):
"""
Base class for numeric types.
.. attribute:: name
name of the type
.. attribute:: itemsize
sizeof(type)
.. attribute:: rank
ordering of numeric types
"""
is_numeric = True
class IntType(NumericType):
is_int = True
is_int_like = True
name = "int"
signed = True
rank = 4
itemsize = 4
kind = INT_KIND
def to_llvm(self, context):
if self.itemsize == 1:
return lc.Type.int(8)
elif self.itemsize == 2:
return lc.Type.int(16)
elif self.itemsize == 4:
return lc.Type.int(32)
else:
assert self.itemsize == 8, self
return lc.Type.int(64)
class FloatType(NumericType):
is_float = True
kind = FLOAT_KIND
@property
def comparison_type_list(self):
return self.subtype_list + [self.itemsize]
def to_llvm(self, context):
if self.itemsize == 4:
return lc.Type.float()
elif self.itemsize == 8:
return lc.Type.double()
else:
# Note: what about fp80/fp96?
assert self.itemsize == 16
return lc.Type.fp128()
class ComplexType(NumericType):
is_complex = True
subtypes = ['base_type']
kind = COMPLEX_KIND
class Py_ssize_t_Type(IntType):
is_py_ssize_t = True
name = "Py_ssize_t"
rank = 9
signed = True
def __init__(self, **kwds):
super(Py_ssize_t_Type, self).__init__(**kwds)
self.itemsize = _plat_bits / 8
class NPyIntp(IntType):
is_numpy_intp = True
name = "npy_intp"
def __init__(self, **kwds):
super(NPyIntp, self).__init__(**kwds)
import numpy as np
ctypes_array = np.empty(0).ctypes.strides
self.itemsize = ctypes.sizeof(ctypes_array._type_)
class CharType(IntType):
is_char = True
name = "char"
rank = 1
signed = True
def to_llvm(self, context):
return lc.Type.int(8)
class CStringType(Type):
is_c_string = True
def __repr__(self):
return "const char *"
def to_llvm(self, context):
return char.pointer().to_llvm(context)
class VoidType(NamedType):
is_void = True
name = "void"
def to_llvm(self, context):
return lc.Type.void()
class ObjectType(Type):
is_object = True
def __repr__(self):
return "PyObject *"
class FunctionType(Type):
subtypes = ['return_type', 'args']
is_function = True
is_vararg = False
def to_llvm(self, context):
return lc.Type.function(self.return_type.to_llvm(context),
[arg_type.to_llvm(context)
for arg_type in self.args],
self.is_vararg)
def __str__(self):
args = map(str, self.args)
if self.is_vararg:
args.append("...")
return "%s (*)(%s)" % (self.return_type, ", ".join(args))
class VectorType(Type):
subtypes = ['element_type']
is_vector = True
vector_size = None
def __init__(self, element_type, vector_size, **kwds):
super(VectorType, self).__init__(**kwds)
assert ((element_type.is_int or element_type.is_float) and
element_type.itemsize in (4, 8)), element_type
self.element_type = element_type
self.vector_size = vector_size
def to_llvm(self, context):
return lc.Type.vector(self.element_type.to_llvm(context),
self.vector_size)
@property
def comparison_type_list(self):
return self.subtype_list + [self.vector_size]
def __str__(self):
itemsize = self.element_type.itemsize
if self.element_type.is_float:
if itemsize == 4:
return '__m128'
else:
return '__m128d'
else:
if itemsize == 4:
return '__m128i'
else:
raise NotImplementedError
#
### Internal types
#
c_string_type = CStringType()
void = VoidType()
#
### Public types
#
Py_ssize_t = Py_ssize_t_Type()
npy_intp = NPyIntp()
size_t = IntType(name="size_t", rank=8.5, itemsize=8, signed=False)
char = CharType(name="char")
short = IntType(name="short", rank=2, itemsize=2)
int_ = IntType(name="int", rank=4, itemsize=4)
long_ = IntType(name="long", rank=5, itemsize=4)
longlong = IntType(name="PY_LONG_LONG", rank=8, itemsize=8)
uchar = CharType(name="unsigned char", signed=False)
ushort = IntType(name="unsigned short", rank=2.5, itemsize=2, signed=False)
uint = IntType(name="unsigned int", rank=4.5, itemsize=4, signed=False)
ulong = IntType(name="unsigned long", rank=5.5, itemsize=4, signed=False)
ulonglong = IntType(name="unsigned PY_LONG_LONG", rank=8.5, itemsize=8,
signed=False)
bool_ = BoolType()
object_ = ObjectType()
int8 = IntType(name="int8", rank=1, itemsize=1)
int16 = IntType(name="int16", rank=2, itemsize=2)
int32 = IntType(name="int32", rank=4, itemsize=4)
int64 = IntType(name="int64", rank=8, itemsize=8)
uint8 = IntType(name="uint8", rank=1.5, signed=False, itemsize=1)
uint16 = IntType(name="uint16", rank=2.5, signed=False, itemsize=2)
uint32 = IntType(name="uint32", rank=4.5, signed=False, itemsize=4)
uint64 = IntType(name="uint64", rank=8.5, signed=False, itemsize=8)
float32 = float_ = FloatType(name="float", rank=10, itemsize=4)
float64 = double = FloatType(name="double", rank=12, itemsize=8)
float128 = longdouble = FloatType(name="long double", rank=14, itemsize=16)
complex64 = ComplexType(name="complex64", base_type=float32,
rank=16, itemsize=8)
complex128 = ComplexType(name="complex128", base_type=float64,
rank=18, itemsize=16)
complex256 = ComplexType(name="complex256", base_type=float128,
rank=20, itemsize=32)
if __name__ == '__main__':
import doctest
doctest.testmod()
|
[
"\"\"\"\nThis module provides a minimal type system, and ways to promote types, as\nwell as ways to convert to an LLVM type system. A set of predefined types are\ndefined. Types may be sliced to turn them into array types, in the same way\nas the memoryview syntax.\n\n>>> char\nchar\n>>> int8[:, :, :]\nint8[:, :, :]\n>>> int8.signed\nTrue\n>>> uint8\nuint8\n>>> uint8.signed\nFalse\n\n>>> char.pointer()\nchar *\n>>> int_[:, ::1]\nint[:, ::1]\n>>> int_[::1, :]\nint[::1, :]\n>>> double[:, ::1, :]\nTraceback (most recent call last):\n ...\nInvalidTypeSpecification: Step may only be provided once, and only in the first or last dimension.\n\"\"\"\n\n__all__ = ['Py_ssize_t', 'void', 'char', 'uchar', 'int_', 'long_', 'bool_', 'object_',\n 'float_', 'double', 'longdouble', 'float32', 'float64', 'float128',\n 'int8', 'int16', 'int32', 'int64', 'uint8', 'uint16', 'uint32', 'uint64',\n 'complex64', 'complex128', 'complex256', 'npy_intp']\n\nimport sys\nimport math\nimport ctypes\n\ntry:\n import llvm.core\n from llvm import core as lc\nexcept ImportError:\n llvm = None\n\nimport miniutils\nimport minierror\n\n# Check below taken from Numba\nif sys.maxint > 2**33:\n _plat_bits = 64\nelse:\n _plat_bits = 32\n\n\nclass TypeMapper(object):\n \"\"\"\n >>> import miniast\n >>> context = miniast.Context()\n >>> miniast.typemapper = TypeMapper(context)\n >>> tm = context.typemapper\n\n >>> tm.promote_types(int8, double)\n double\n >>> tm.promote_types(int8, uint8)\n uint8\n >>> tm.promote_types(int8, complex128)\n complex128\n >>> tm.promote_types(int8, object_)\n PyObject *\n\n >>> tm.promote_types(int64, float32)\n float\n >>> tm.promote_types(int64, complex64)\n complex64\n >>> tm.promote_types(float32, float64)\n double\n >>> tm.promote_types(float32, complex64)\n complex64\n >>> tm.promote_types(complex64, complex128)\n complex128\n >>> tm.promote_types(complex256, object_)\n PyObject *\n\n >>> tm.promote_types(float32.pointer(), Py_ssize_t)\n float *\n >>> tm.promote_types(float32.pointer(), Py_ssize_t)\n float *\n >>> tm.promote_types(float32.pointer(), uint8)\n float *\n\n >>> tm.promote_types(float32.pointer(), float64.pointer())\n Traceback (most recent call last):\n ...\n UnpromotableTypeError: (float *, double *)\n\n >>> tm.promote_types(float32[:, ::1], float32[:, ::1])\n float[:, ::1]\n >>> tm.promote_types(float32[:, ::1], float64[:, ::1])\n double[:, ::1]\n >>> tm.promote_types(float32[:, ::1], float64[::1, :])\n double[:, :]\n >>> tm.promote_types(float32[:, :], complex128[:, :])\n complex128[:, :]\n >>> tm.promote_types(int_[:, :], object_[:, ::1])\n PyObject *[:, :]\n \"\"\"\n\n def __init__(self, context):\n self.context = context\n\n def map_type(self, opaque_type):\n if opaque_type.is_int:\n return int_\n elif opaque_type.is_float:\n return float_\n elif opaque_type.is_double:\n return double\n elif opaque_type.is_pointer:\n return PointerType(self.map_type(opaque_type.base_type))\n elif opaque_type.is_py_ssize_t:\n return Py_ssize_t\n elif opaque_type.is_char:\n return char\n else:\n raise minierror.UnmappableTypeError(opaque_type)\n\n def to_llvm(self, type):\n \"Return an LLVM type for the given type.\"\n raise NotImplementedError\n\n def from_python(self, value):\n \"Get a type from a python value\"\n np = sys.modules.get('numpy', None)\n\n if isinstance(value, float):\n return double\n elif isinstance(value, (int, long)):\n return int_\n elif isinstance(value, complex):\n return complex128\n elif np and isinstance(value, np.ndarray):\n dtype = map_dtype(value.dtype)\n return ArrayType(dtype, value.ndim,\n is_c_contig=value.flags['C_CONTIGUOUS'],\n is_f_contig=value.flags['F_CONTIGUOUS'])\n else:\n return object_\n # raise minierror.UnmappableTypeError(type(value))\n\n def promote_numeric(self, type1, type2):\n \"Promote two numeric types\"\n return max([type1, type2], key=lambda type: type.rank)\n\n def promote_arrays(self, type1, type2):\n \"Promote two array types in an expression to a new array type\"\n equal_ndim = type1.ndim == type2.ndim\n return ArrayType(self.promote_types(type1.dtype, type2.dtype),\n ndim=max(type1.ndim, type2.ndim),\n is_c_contig=(equal_ndim and type1.is_c_contig and\n type2.is_c_contig),\n is_f_contig=(equal_ndim and type1.is_f_contig and\n type2.is_f_contig))\n\n def promote_types(self, type1, type2):\n \"Promote two arbitrary types\"\n if type1.is_pointer and type2.is_int_like:\n return type1\n elif type2.is_pointer and type2.is_int_like:\n return type2\n elif type1.is_object or type2.is_object:\n return object_\n elif type1.is_numeric and type2.is_numeric:\n return self.promote_numeric(type1, type2)\n elif type1.is_array and type2:\n return self.promote_arrays(type1, type2)\n else:\n raise minierror.UnpromotableTypeError((type1, type2))\n\ndef map_dtype(dtype):\n \"\"\"\n >>> _map_dtype(np.dtype(np.int32))\n int32\n >>> _map_dtype(np.dtype(np.int64))\n int64\n >>> _map_dtype(np.dtype(np.object))\n PyObject *\n >>> _map_dtype(np.dtype(np.float64))\n double\n >>> _map_dtype(np.dtype(np.complex128))\n complex128\n \"\"\"\n item_idx = int(math.log(dtype.itemsize, 2))\n if dtype.kind == 'i':\n return [int8, int16, int32, int64][item_idx]\n elif dtype.kind == 'u':\n return [uint8, uint16, uint32, uint64][item_idx]\n elif dtype.kind == 'f':\n if dtype.itemsize == 2:\n pass # half floats not supported yet\n elif dtype.itemsize == 4:\n return float32\n elif dtype.itemsize == 8:\n return float64\n elif dtype.itemsize == 16:\n return float128\n elif dtype.kind == 'b':\n return int8\n elif dtype.kind == 'c':\n if dtype.itemsize == 8:\n return complex64\n elif dtype.itemsize == 16:\n return complex128\n elif dtype.itemsize == 32:\n return complex256\n elif dtype.kind == 'O':\n return object_\n\nNONE_KIND = 0\nINT_KIND = 1\nFLOAT_KIND = 2\nCOMPLEX_KIND = 3\n\nclass Type(miniutils.ComparableObjectMixin):\n \"\"\"\n Base class for all types.\n\n .. attribute:: subtypes\n\n The list of subtypes to allow comparing and hashing them recursively\n \"\"\"\n\n is_array = False\n is_pointer = False\n is_typewrapper = False\n\n is_bool = False\n is_numeric = False\n is_py_ssize_t = False\n is_char = False\n is_int = False\n is_float = False\n is_c_string = False\n is_object = False\n is_function = False\n is_int_like = False\n is_complex = False\n is_void = False\n\n kind = NONE_KIND\n\n subtypes = []\n\n def __init__(self, **kwds):\n vars(self).update(kwds)\n self.qualifiers = kwds.get('qualifiers', frozenset())\n\n def qualify(self, *qualifiers):\n \"Qualify this type with a qualifier such as ``const`` or ``restrict``\"\n qualifiers = list(qualifiers)\n qualifiers.extend(self.qualifiers)\n attribs = dict(vars(self), qualifiers=qualifiers)\n return type(self)(**attribs)\n\n def unqualify(self, *unqualifiers):\n \"Remove the given qualifiers from the type\"\n unqualifiers = set(unqualifiers)\n qualifiers = [q for q in self.qualifiers if q not in unqualifiers]\n attribs = dict(vars(self), qualifiers=qualifiers)\n return type(self)(**attribs)\n\n def pointer(self):\n \"Get a pointer to this type\"\n return PointerType(self)\n\n @property\n def subtype_list(self):\n return [getattr(self, subtype) for subtype in self.subtypes]\n\n @property\n def comparison_type_list(self):\n return self.subtype_list\n\n def __eq__(self, other):\n # Don't use isinstance here, compare on exact type to be consistent\n # with __hash__. Override where sensible\n return (type(self) is type(other) and\n self.comparison_type_list == other.comparison_type_list)\n\n def __ne__(self, other):\n return not self == other\n\n def __hash__(self):\n h = hash(type(self))\n for subtype in self.comparison_type_list:\n h = h ^ hash(subtype)\n\n return h\n\n def __getitem__(self, item):\n assert isinstance(item, (tuple, slice))\n\n def verify_slice(s):\n if s.start or s.stop or s.step not in (None, 1):\n raise minierror.InvalidTypeSpecification(\n \"Only a step of 1 may be provided to indicate C or \"\n \"Fortran contiguity\")\n\n if isinstance(item, tuple):\n step_idx = None\n for idx, s in enumerate(item):\n verify_slice(s)\n if s.step and (step_idx or idx not in (0, len(item) - 1)):\n raise minierror.InvalidTypeSpecification(\n \"Step may only be provided once, and only in the \"\n \"first or last dimension.\")\n\n if s.step == 1:\n step_idx = idx\n\n return ArrayType(self, len(item),\n is_c_contig=step_idx == len(item) - 1,\n is_f_contig=step_idx == 0)\n else:\n verify_slice(item)\n return ArrayType(self, 1, is_c_contig=bool(item.step))\n\n def to_llvm(self, context):\n \"Get a corresponding llvm type from this type\"\n return context.to_llvm(self)\n\n def __getattr__(self, attr):\n if attr.startswith('is_'):\n return False\n return getattr(type(self), attr)\n\nclass ArrayType(Type):\n\n is_array = True\n subtypes = ['dtype']\n\n def __init__(self, dtype, ndim, is_c_contig=False, is_f_contig=False,\n inner_contig=False, broadcasting=None):\n super(ArrayType, self).__init__()\n self.dtype = dtype\n self.ndim = ndim\n self.is_c_contig = is_c_contig\n self.is_f_contig = is_f_contig\n self.inner_contig = inner_contig or is_c_contig or is_f_contig\n self.broadcasting = broadcasting or (True,) * ndim\n\n @property\n def comparison_type_list(self):\n return [self.dtype, self.is_c_contig, self.is_f_contig, self.inner_contig]\n\n def pointer(self):\n raise Exception(\"You probably want a pointer type to the dtype\")\n\n def to_llvm(self, context):\n # raise Exception(\"Obtain a pointer to the dtype and convert that \"\n # \"to an LLVM type\")\n return context.to_llvm(self)\n\n def __repr__(self):\n axes = [\":\"] * self.ndim\n if self.is_c_contig:\n axes[-1] = \"::1\"\n elif self.is_f_contig:\n axes[0] = \"::1\"\n\n return \"%s[%s]\" % (self.dtype, \", \".join(axes))\n\nclass PointerType(Type):\n is_pointer = True\n subtypes = ['base_type']\n\n def __init__(self, base_type, **kwds):\n super(PointerType, self).__init__(**kwds)\n self.base_type = base_type\n\n def __repr__(self):\n return \"%s *%s\" % (self.base_type, \" \".join(self.qualifiers))\n\n def to_llvm(self, context):\n return llvm.core.Type.pointer(self.base_type.to_llvm(context))\n\nclass CArrayType(Type):\n is_carray = True\n subtypes = ['base_type']\n\n def __init__(self, base_type, size, **kwds):\n super(CArrayType, self).__init__(**kwds)\n self.base_type = base_type\n self.size = size\n\n def __repr__(self):\n return \"%s[%d]\" % (self.base_type, self.length)\n\n def to_llvm(self, context):\n return llvm.core.Type.array(self.base_type.to_llvm(context), self.size)\n\nclass TypeWrapper(Type):\n is_typewrapper = True\n subtypes = ['opaque_type']\n\n def __init__(self, opaque_type, context, **kwds):\n super(TypeWrapper, self).__init__(**kwds)\n self.opaque_type = opaque_type\n self.context = context\n\n def __repr__(self):\n return self.context.declare_type(self)\n\n def __deepcopy__(self, memo):\n return self\n\nclass NamedType(Type):\n name = None\n\n def __eq__(self, other):\n return isinstance(other, NamedType) and self.name == other.name\n\n def __repr__(self):\n if self.qualifiers:\n return \"%s %s\" % (self.name, \" \".join(self.qualifiers))\n return self.name\n\nclass BoolType(NamedType):\n is_bool = True\n name = \"bool\"\n\n def __repr__(self):\n return \"int %s\" % \" \".join(self.qualifiers)\n\n def to_llvm(self, context):\n return int8.to_llvm(context)\n\nclass NumericType(NamedType):\n \"\"\"\n Base class for numeric types.\n\n .. attribute:: name\n\n name of the type\n\n .. attribute:: itemsize\n\n sizeof(type)\n\n .. attribute:: rank\n\n ordering of numeric types\n \"\"\"\n is_numeric = True\n\nclass IntType(NumericType):\n is_int = True\n is_int_like = True\n name = \"int\"\n signed = True\n rank = 4\n itemsize = 4\n\n kind = INT_KIND\n\n def to_llvm(self, context):\n if self.itemsize == 1:\n return lc.Type.int(8)\n elif self.itemsize == 2:\n return lc.Type.int(16)\n elif self.itemsize == 4:\n return lc.Type.int(32)\n else:\n assert self.itemsize == 8, self\n return lc.Type.int(64)\n\nclass FloatType(NumericType):\n is_float = True\n\n kind = FLOAT_KIND\n\n @property\n def comparison_type_list(self):\n return self.subtype_list + [self.itemsize]\n\n def to_llvm(self, context):\n if self.itemsize == 4:\n return lc.Type.float()\n elif self.itemsize == 8:\n return lc.Type.double()\n else:\n # Note: what about fp80/fp96?\n assert self.itemsize == 16\n return lc.Type.fp128()\n\nclass ComplexType(NumericType):\n is_complex = True\n subtypes = ['base_type']\n\n kind = COMPLEX_KIND\n\nclass Py_ssize_t_Type(IntType):\n is_py_ssize_t = True\n name = \"Py_ssize_t\"\n rank = 9\n signed = True\n\n def __init__(self, **kwds):\n super(Py_ssize_t_Type, self).__init__(**kwds)\n self.itemsize = _plat_bits / 8\n\nclass NPyIntp(IntType):\n is_numpy_intp = True\n name = \"npy_intp\"\n\n def __init__(self, **kwds):\n super(NPyIntp, self).__init__(**kwds)\n import numpy as np\n ctypes_array = np.empty(0).ctypes.strides\n self.itemsize = ctypes.sizeof(ctypes_array._type_)\n\nclass CharType(IntType):\n is_char = True\n name = \"char\"\n rank = 1\n signed = True\n\n def to_llvm(self, context):\n return lc.Type.int(8)\n\nclass CStringType(Type):\n is_c_string = True\n\n def __repr__(self):\n return \"const char *\"\n\n def to_llvm(self, context):\n return char.pointer().to_llvm(context)\n\nclass VoidType(NamedType):\n is_void = True\n name = \"void\"\n\n def to_llvm(self, context):\n return lc.Type.void()\n\nclass ObjectType(Type):\n is_object = True\n\n def __repr__(self):\n return \"PyObject *\"\n\nclass FunctionType(Type):\n subtypes = ['return_type', 'args']\n is_function = True\n is_vararg = False\n\n def to_llvm(self, context):\n return lc.Type.function(self.return_type.to_llvm(context),\n [arg_type.to_llvm(context)\n for arg_type in self.args],\n self.is_vararg)\n\n def __str__(self):\n args = map(str, self.args)\n if self.is_vararg:\n args.append(\"...\")\n\n return \"%s (*)(%s)\" % (self.return_type, \", \".join(args))\n\nclass VectorType(Type):\n subtypes = ['element_type']\n is_vector = True\n vector_size = None\n\n def __init__(self, element_type, vector_size, **kwds):\n super(VectorType, self).__init__(**kwds)\n assert ((element_type.is_int or element_type.is_float) and\n element_type.itemsize in (4, 8)), element_type\n self.element_type = element_type\n self.vector_size = vector_size\n\n def to_llvm(self, context):\n return lc.Type.vector(self.element_type.to_llvm(context),\n self.vector_size)\n\n @property\n def comparison_type_list(self):\n return self.subtype_list + [self.vector_size]\n\n def __str__(self):\n itemsize = self.element_type.itemsize\n if self.element_type.is_float:\n if itemsize == 4:\n return '__m128'\n else:\n return '__m128d'\n else:\n if itemsize == 4:\n return '__m128i'\n else:\n raise NotImplementedError\n\n#\n### Internal types\n#\nc_string_type = CStringType()\nvoid = VoidType()\n\n#\n### Public types\n#\nPy_ssize_t = Py_ssize_t_Type()\nnpy_intp = NPyIntp()\nsize_t = IntType(name=\"size_t\", rank=8.5, itemsize=8, signed=False)\nchar = CharType(name=\"char\")\nshort = IntType(name=\"short\", rank=2, itemsize=2)\nint_ = IntType(name=\"int\", rank=4, itemsize=4)\nlong_ = IntType(name=\"long\", rank=5, itemsize=4)\nlonglong = IntType(name=\"PY_LONG_LONG\", rank=8, itemsize=8)\n\nuchar = CharType(name=\"unsigned char\", signed=False)\nushort = IntType(name=\"unsigned short\", rank=2.5, itemsize=2, signed=False)\nuint = IntType(name=\"unsigned int\", rank=4.5, itemsize=4, signed=False)\nulong = IntType(name=\"unsigned long\", rank=5.5, itemsize=4, signed=False)\nulonglong = IntType(name=\"unsigned PY_LONG_LONG\", rank=8.5, itemsize=8,\n signed=False)\n\nbool_ = BoolType()\nobject_ = ObjectType()\n\nint8 = IntType(name=\"int8\", rank=1, itemsize=1)\nint16 = IntType(name=\"int16\", rank=2, itemsize=2)\nint32 = IntType(name=\"int32\", rank=4, itemsize=4)\nint64 = IntType(name=\"int64\", rank=8, itemsize=8)\n\nuint8 = IntType(name=\"uint8\", rank=1.5, signed=False, itemsize=1)\nuint16 = IntType(name=\"uint16\", rank=2.5, signed=False, itemsize=2)\nuint32 = IntType(name=\"uint32\", rank=4.5, signed=False, itemsize=4)\nuint64 = IntType(name=\"uint64\", rank=8.5, signed=False, itemsize=8)\n\nfloat32 = float_ = FloatType(name=\"float\", rank=10, itemsize=4)\nfloat64 = double = FloatType(name=\"double\", rank=12, itemsize=8)\nfloat128 = longdouble = FloatType(name=\"long double\", rank=14, itemsize=16)\n\ncomplex64 = ComplexType(name=\"complex64\", base_type=float32,\n rank=16, itemsize=8)\ncomplex128 = ComplexType(name=\"complex128\", base_type=float64,\n rank=18, itemsize=16)\ncomplex256 = ComplexType(name=\"complex256\", base_type=float128,\n rank=20, itemsize=32)\n\nif __name__ == '__main__':\n import doctest\n doctest.testmod()\n",
"<docstring token>\n__all__ = ['Py_ssize_t', 'void', 'char', 'uchar', 'int_', 'long_', 'bool_',\n 'object_', 'float_', 'double', 'longdouble', 'float32', 'float64',\n 'float128', 'int8', 'int16', 'int32', 'int64', 'uint8', 'uint16',\n 'uint32', 'uint64', 'complex64', 'complex128', 'complex256', 'npy_intp']\nimport sys\nimport math\nimport ctypes\ntry:\n import llvm.core\n from llvm import core as lc\nexcept ImportError:\n llvm = None\nimport miniutils\nimport minierror\nif sys.maxint > 2 ** 33:\n _plat_bits = 64\nelse:\n _plat_bits = 32\n\n\nclass TypeMapper(object):\n \"\"\"\n >>> import miniast\n >>> context = miniast.Context()\n >>> miniast.typemapper = TypeMapper(context)\n >>> tm = context.typemapper\n\n >>> tm.promote_types(int8, double)\n double\n >>> tm.promote_types(int8, uint8)\n uint8\n >>> tm.promote_types(int8, complex128)\n complex128\n >>> tm.promote_types(int8, object_)\n PyObject *\n\n >>> tm.promote_types(int64, float32)\n float\n >>> tm.promote_types(int64, complex64)\n complex64\n >>> tm.promote_types(float32, float64)\n double\n >>> tm.promote_types(float32, complex64)\n complex64\n >>> tm.promote_types(complex64, complex128)\n complex128\n >>> tm.promote_types(complex256, object_)\n PyObject *\n\n >>> tm.promote_types(float32.pointer(), Py_ssize_t)\n float *\n >>> tm.promote_types(float32.pointer(), Py_ssize_t)\n float *\n >>> tm.promote_types(float32.pointer(), uint8)\n float *\n\n >>> tm.promote_types(float32.pointer(), float64.pointer())\n Traceback (most recent call last):\n ...\n UnpromotableTypeError: (float *, double *)\n\n >>> tm.promote_types(float32[:, ::1], float32[:, ::1])\n float[:, ::1]\n >>> tm.promote_types(float32[:, ::1], float64[:, ::1])\n double[:, ::1]\n >>> tm.promote_types(float32[:, ::1], float64[::1, :])\n double[:, :]\n >>> tm.promote_types(float32[:, :], complex128[:, :])\n complex128[:, :]\n >>> tm.promote_types(int_[:, :], object_[:, ::1])\n PyObject *[:, :]\n \"\"\"\n\n def __init__(self, context):\n self.context = context\n\n def map_type(self, opaque_type):\n if opaque_type.is_int:\n return int_\n elif opaque_type.is_float:\n return float_\n elif opaque_type.is_double:\n return double\n elif opaque_type.is_pointer:\n return PointerType(self.map_type(opaque_type.base_type))\n elif opaque_type.is_py_ssize_t:\n return Py_ssize_t\n elif opaque_type.is_char:\n return char\n else:\n raise minierror.UnmappableTypeError(opaque_type)\n\n def to_llvm(self, type):\n \"\"\"Return an LLVM type for the given type.\"\"\"\n raise NotImplementedError\n\n def from_python(self, value):\n \"\"\"Get a type from a python value\"\"\"\n np = sys.modules.get('numpy', None)\n if isinstance(value, float):\n return double\n elif isinstance(value, (int, long)):\n return int_\n elif isinstance(value, complex):\n return complex128\n elif np and isinstance(value, np.ndarray):\n dtype = map_dtype(value.dtype)\n return ArrayType(dtype, value.ndim, is_c_contig=value.flags[\n 'C_CONTIGUOUS'], is_f_contig=value.flags['F_CONTIGUOUS'])\n else:\n return object_\n\n def promote_numeric(self, type1, type2):\n \"\"\"Promote two numeric types\"\"\"\n return max([type1, type2], key=lambda type: type.rank)\n\n def promote_arrays(self, type1, type2):\n \"\"\"Promote two array types in an expression to a new array type\"\"\"\n equal_ndim = type1.ndim == type2.ndim\n return ArrayType(self.promote_types(type1.dtype, type2.dtype), ndim\n =max(type1.ndim, type2.ndim), is_c_contig=equal_ndim and type1.\n is_c_contig and type2.is_c_contig, is_f_contig=equal_ndim and\n type1.is_f_contig and type2.is_f_contig)\n\n def promote_types(self, type1, type2):\n \"\"\"Promote two arbitrary types\"\"\"\n if type1.is_pointer and type2.is_int_like:\n return type1\n elif type2.is_pointer and type2.is_int_like:\n return type2\n elif type1.is_object or type2.is_object:\n return object_\n elif type1.is_numeric and type2.is_numeric:\n return self.promote_numeric(type1, type2)\n elif type1.is_array and type2:\n return self.promote_arrays(type1, type2)\n else:\n raise minierror.UnpromotableTypeError((type1, type2))\n\n\ndef map_dtype(dtype):\n \"\"\"\n >>> _map_dtype(np.dtype(np.int32))\n int32\n >>> _map_dtype(np.dtype(np.int64))\n int64\n >>> _map_dtype(np.dtype(np.object))\n PyObject *\n >>> _map_dtype(np.dtype(np.float64))\n double\n >>> _map_dtype(np.dtype(np.complex128))\n complex128\n \"\"\"\n item_idx = int(math.log(dtype.itemsize, 2))\n if dtype.kind == 'i':\n return [int8, int16, int32, int64][item_idx]\n elif dtype.kind == 'u':\n return [uint8, uint16, uint32, uint64][item_idx]\n elif dtype.kind == 'f':\n if dtype.itemsize == 2:\n pass\n elif dtype.itemsize == 4:\n return float32\n elif dtype.itemsize == 8:\n return float64\n elif dtype.itemsize == 16:\n return float128\n elif dtype.kind == 'b':\n return int8\n elif dtype.kind == 'c':\n if dtype.itemsize == 8:\n return complex64\n elif dtype.itemsize == 16:\n return complex128\n elif dtype.itemsize == 32:\n return complex256\n elif dtype.kind == 'O':\n return object_\n\n\nNONE_KIND = 0\nINT_KIND = 1\nFLOAT_KIND = 2\nCOMPLEX_KIND = 3\n\n\nclass Type(miniutils.ComparableObjectMixin):\n \"\"\"\n Base class for all types.\n\n .. attribute:: subtypes\n\n The list of subtypes to allow comparing and hashing them recursively\n \"\"\"\n is_array = False\n is_pointer = False\n is_typewrapper = False\n is_bool = False\n is_numeric = False\n is_py_ssize_t = False\n is_char = False\n is_int = False\n is_float = False\n is_c_string = False\n is_object = False\n is_function = False\n is_int_like = False\n is_complex = False\n is_void = False\n kind = NONE_KIND\n subtypes = []\n\n def __init__(self, **kwds):\n vars(self).update(kwds)\n self.qualifiers = kwds.get('qualifiers', frozenset())\n\n def qualify(self, *qualifiers):\n \"\"\"Qualify this type with a qualifier such as ``const`` or ``restrict``\"\"\"\n qualifiers = list(qualifiers)\n qualifiers.extend(self.qualifiers)\n attribs = dict(vars(self), qualifiers=qualifiers)\n return type(self)(**attribs)\n\n def unqualify(self, *unqualifiers):\n \"\"\"Remove the given qualifiers from the type\"\"\"\n unqualifiers = set(unqualifiers)\n qualifiers = [q for q in self.qualifiers if q not in unqualifiers]\n attribs = dict(vars(self), qualifiers=qualifiers)\n return type(self)(**attribs)\n\n def pointer(self):\n \"\"\"Get a pointer to this type\"\"\"\n return PointerType(self)\n\n @property\n def subtype_list(self):\n return [getattr(self, subtype) for subtype in self.subtypes]\n\n @property\n def comparison_type_list(self):\n return self.subtype_list\n\n def __eq__(self, other):\n return type(self) is type(other\n ) and self.comparison_type_list == other.comparison_type_list\n\n def __ne__(self, other):\n return not self == other\n\n def __hash__(self):\n h = hash(type(self))\n for subtype in self.comparison_type_list:\n h = h ^ hash(subtype)\n return h\n\n def __getitem__(self, item):\n assert isinstance(item, (tuple, slice))\n\n def verify_slice(s):\n if s.start or s.stop or s.step not in (None, 1):\n raise minierror.InvalidTypeSpecification(\n 'Only a step of 1 may be provided to indicate C or Fortran contiguity'\n )\n if isinstance(item, tuple):\n step_idx = None\n for idx, s in enumerate(item):\n verify_slice(s)\n if s.step and (step_idx or idx not in (0, len(item) - 1)):\n raise minierror.InvalidTypeSpecification(\n 'Step may only be provided once, and only in the first or last dimension.'\n )\n if s.step == 1:\n step_idx = idx\n return ArrayType(self, len(item), is_c_contig=step_idx == len(\n item) - 1, is_f_contig=step_idx == 0)\n else:\n verify_slice(item)\n return ArrayType(self, 1, is_c_contig=bool(item.step))\n\n def to_llvm(self, context):\n \"\"\"Get a corresponding llvm type from this type\"\"\"\n return context.to_llvm(self)\n\n def __getattr__(self, attr):\n if attr.startswith('is_'):\n return False\n return getattr(type(self), attr)\n\n\nclass ArrayType(Type):\n is_array = True\n subtypes = ['dtype']\n\n def __init__(self, dtype, ndim, is_c_contig=False, is_f_contig=False,\n inner_contig=False, broadcasting=None):\n super(ArrayType, self).__init__()\n self.dtype = dtype\n self.ndim = ndim\n self.is_c_contig = is_c_contig\n self.is_f_contig = is_f_contig\n self.inner_contig = inner_contig or is_c_contig or is_f_contig\n self.broadcasting = broadcasting or (True,) * ndim\n\n @property\n def comparison_type_list(self):\n return [self.dtype, self.is_c_contig, self.is_f_contig, self.\n inner_contig]\n\n def pointer(self):\n raise Exception('You probably want a pointer type to the dtype')\n\n def to_llvm(self, context):\n return context.to_llvm(self)\n\n def __repr__(self):\n axes = [':'] * self.ndim\n if self.is_c_contig:\n axes[-1] = '::1'\n elif self.is_f_contig:\n axes[0] = '::1'\n return '%s[%s]' % (self.dtype, ', '.join(axes))\n\n\nclass PointerType(Type):\n is_pointer = True\n subtypes = ['base_type']\n\n def __init__(self, base_type, **kwds):\n super(PointerType, self).__init__(**kwds)\n self.base_type = base_type\n\n def __repr__(self):\n return '%s *%s' % (self.base_type, ' '.join(self.qualifiers))\n\n def to_llvm(self, context):\n return llvm.core.Type.pointer(self.base_type.to_llvm(context))\n\n\nclass CArrayType(Type):\n is_carray = True\n subtypes = ['base_type']\n\n def __init__(self, base_type, size, **kwds):\n super(CArrayType, self).__init__(**kwds)\n self.base_type = base_type\n self.size = size\n\n def __repr__(self):\n return '%s[%d]' % (self.base_type, self.length)\n\n def to_llvm(self, context):\n return llvm.core.Type.array(self.base_type.to_llvm(context), self.size)\n\n\nclass TypeWrapper(Type):\n is_typewrapper = True\n subtypes = ['opaque_type']\n\n def __init__(self, opaque_type, context, **kwds):\n super(TypeWrapper, self).__init__(**kwds)\n self.opaque_type = opaque_type\n self.context = context\n\n def __repr__(self):\n return self.context.declare_type(self)\n\n def __deepcopy__(self, memo):\n return self\n\n\nclass NamedType(Type):\n name = None\n\n def __eq__(self, other):\n return isinstance(other, NamedType) and self.name == other.name\n\n def __repr__(self):\n if self.qualifiers:\n return '%s %s' % (self.name, ' '.join(self.qualifiers))\n return self.name\n\n\nclass BoolType(NamedType):\n is_bool = True\n name = 'bool'\n\n def __repr__(self):\n return 'int %s' % ' '.join(self.qualifiers)\n\n def to_llvm(self, context):\n return int8.to_llvm(context)\n\n\nclass NumericType(NamedType):\n \"\"\"\n Base class for numeric types.\n\n .. attribute:: name\n\n name of the type\n\n .. attribute:: itemsize\n\n sizeof(type)\n\n .. attribute:: rank\n\n ordering of numeric types\n \"\"\"\n is_numeric = True\n\n\nclass IntType(NumericType):\n is_int = True\n is_int_like = True\n name = 'int'\n signed = True\n rank = 4\n itemsize = 4\n kind = INT_KIND\n\n def to_llvm(self, context):\n if self.itemsize == 1:\n return lc.Type.int(8)\n elif self.itemsize == 2:\n return lc.Type.int(16)\n elif self.itemsize == 4:\n return lc.Type.int(32)\n else:\n assert self.itemsize == 8, self\n return lc.Type.int(64)\n\n\nclass FloatType(NumericType):\n is_float = True\n kind = FLOAT_KIND\n\n @property\n def comparison_type_list(self):\n return self.subtype_list + [self.itemsize]\n\n def to_llvm(self, context):\n if self.itemsize == 4:\n return lc.Type.float()\n elif self.itemsize == 8:\n return lc.Type.double()\n else:\n assert self.itemsize == 16\n return lc.Type.fp128()\n\n\nclass ComplexType(NumericType):\n is_complex = True\n subtypes = ['base_type']\n kind = COMPLEX_KIND\n\n\nclass Py_ssize_t_Type(IntType):\n is_py_ssize_t = True\n name = 'Py_ssize_t'\n rank = 9\n signed = True\n\n def __init__(self, **kwds):\n super(Py_ssize_t_Type, self).__init__(**kwds)\n self.itemsize = _plat_bits / 8\n\n\nclass NPyIntp(IntType):\n is_numpy_intp = True\n name = 'npy_intp'\n\n def __init__(self, **kwds):\n super(NPyIntp, self).__init__(**kwds)\n import numpy as np\n ctypes_array = np.empty(0).ctypes.strides\n self.itemsize = ctypes.sizeof(ctypes_array._type_)\n\n\nclass CharType(IntType):\n is_char = True\n name = 'char'\n rank = 1\n signed = True\n\n def to_llvm(self, context):\n return lc.Type.int(8)\n\n\nclass CStringType(Type):\n is_c_string = True\n\n def __repr__(self):\n return 'const char *'\n\n def to_llvm(self, context):\n return char.pointer().to_llvm(context)\n\n\nclass VoidType(NamedType):\n is_void = True\n name = 'void'\n\n def to_llvm(self, context):\n return lc.Type.void()\n\n\nclass ObjectType(Type):\n is_object = True\n\n def __repr__(self):\n return 'PyObject *'\n\n\nclass FunctionType(Type):\n subtypes = ['return_type', 'args']\n is_function = True\n is_vararg = False\n\n def to_llvm(self, context):\n return lc.Type.function(self.return_type.to_llvm(context), [\n arg_type.to_llvm(context) for arg_type in self.args], self.\n is_vararg)\n\n def __str__(self):\n args = map(str, self.args)\n if self.is_vararg:\n args.append('...')\n return '%s (*)(%s)' % (self.return_type, ', '.join(args))\n\n\nclass VectorType(Type):\n subtypes = ['element_type']\n is_vector = True\n vector_size = None\n\n def __init__(self, element_type, vector_size, **kwds):\n super(VectorType, self).__init__(**kwds)\n assert (element_type.is_int or element_type.is_float\n ) and element_type.itemsize in (4, 8), element_type\n self.element_type = element_type\n self.vector_size = vector_size\n\n def to_llvm(self, context):\n return lc.Type.vector(self.element_type.to_llvm(context), self.\n vector_size)\n\n @property\n def comparison_type_list(self):\n return self.subtype_list + [self.vector_size]\n\n def __str__(self):\n itemsize = self.element_type.itemsize\n if self.element_type.is_float:\n if itemsize == 4:\n return '__m128'\n else:\n return '__m128d'\n elif itemsize == 4:\n return '__m128i'\n else:\n raise NotImplementedError\n\n\nc_string_type = CStringType()\nvoid = VoidType()\nPy_ssize_t = Py_ssize_t_Type()\nnpy_intp = NPyIntp()\nsize_t = IntType(name='size_t', rank=8.5, itemsize=8, signed=False)\nchar = CharType(name='char')\nshort = IntType(name='short', rank=2, itemsize=2)\nint_ = IntType(name='int', rank=4, itemsize=4)\nlong_ = IntType(name='long', rank=5, itemsize=4)\nlonglong = IntType(name='PY_LONG_LONG', rank=8, itemsize=8)\nuchar = CharType(name='unsigned char', signed=False)\nushort = IntType(name='unsigned short', rank=2.5, itemsize=2, signed=False)\nuint = IntType(name='unsigned int', rank=4.5, itemsize=4, signed=False)\nulong = IntType(name='unsigned long', rank=5.5, itemsize=4, signed=False)\nulonglong = IntType(name='unsigned PY_LONG_LONG', rank=8.5, itemsize=8,\n signed=False)\nbool_ = BoolType()\nobject_ = ObjectType()\nint8 = IntType(name='int8', rank=1, itemsize=1)\nint16 = IntType(name='int16', rank=2, itemsize=2)\nint32 = IntType(name='int32', rank=4, itemsize=4)\nint64 = IntType(name='int64', rank=8, itemsize=8)\nuint8 = IntType(name='uint8', rank=1.5, signed=False, itemsize=1)\nuint16 = IntType(name='uint16', rank=2.5, signed=False, itemsize=2)\nuint32 = IntType(name='uint32', rank=4.5, signed=False, itemsize=4)\nuint64 = IntType(name='uint64', rank=8.5, signed=False, itemsize=8)\nfloat32 = float_ = FloatType(name='float', rank=10, itemsize=4)\nfloat64 = double = FloatType(name='double', rank=12, itemsize=8)\nfloat128 = longdouble = FloatType(name='long double', rank=14, itemsize=16)\ncomplex64 = ComplexType(name='complex64', base_type=float32, rank=16,\n itemsize=8)\ncomplex128 = ComplexType(name='complex128', base_type=float64, rank=18,\n itemsize=16)\ncomplex256 = ComplexType(name='complex256', base_type=float128, rank=20,\n itemsize=32)\nif __name__ == '__main__':\n import doctest\n doctest.testmod()\n",
"<docstring token>\n__all__ = ['Py_ssize_t', 'void', 'char', 'uchar', 'int_', 'long_', 'bool_',\n 'object_', 'float_', 'double', 'longdouble', 'float32', 'float64',\n 'float128', 'int8', 'int16', 'int32', 'int64', 'uint8', 'uint16',\n 'uint32', 'uint64', 'complex64', 'complex128', 'complex256', 'npy_intp']\n<import token>\ntry:\n import llvm.core\n from llvm import core as lc\nexcept ImportError:\n llvm = None\n<import token>\nif sys.maxint > 2 ** 33:\n _plat_bits = 64\nelse:\n _plat_bits = 32\n\n\nclass TypeMapper(object):\n \"\"\"\n >>> import miniast\n >>> context = miniast.Context()\n >>> miniast.typemapper = TypeMapper(context)\n >>> tm = context.typemapper\n\n >>> tm.promote_types(int8, double)\n double\n >>> tm.promote_types(int8, uint8)\n uint8\n >>> tm.promote_types(int8, complex128)\n complex128\n >>> tm.promote_types(int8, object_)\n PyObject *\n\n >>> tm.promote_types(int64, float32)\n float\n >>> tm.promote_types(int64, complex64)\n complex64\n >>> tm.promote_types(float32, float64)\n double\n >>> tm.promote_types(float32, complex64)\n complex64\n >>> tm.promote_types(complex64, complex128)\n complex128\n >>> tm.promote_types(complex256, object_)\n PyObject *\n\n >>> tm.promote_types(float32.pointer(), Py_ssize_t)\n float *\n >>> tm.promote_types(float32.pointer(), Py_ssize_t)\n float *\n >>> tm.promote_types(float32.pointer(), uint8)\n float *\n\n >>> tm.promote_types(float32.pointer(), float64.pointer())\n Traceback (most recent call last):\n ...\n UnpromotableTypeError: (float *, double *)\n\n >>> tm.promote_types(float32[:, ::1], float32[:, ::1])\n float[:, ::1]\n >>> tm.promote_types(float32[:, ::1], float64[:, ::1])\n double[:, ::1]\n >>> tm.promote_types(float32[:, ::1], float64[::1, :])\n double[:, :]\n >>> tm.promote_types(float32[:, :], complex128[:, :])\n complex128[:, :]\n >>> tm.promote_types(int_[:, :], object_[:, ::1])\n PyObject *[:, :]\n \"\"\"\n\n def __init__(self, context):\n self.context = context\n\n def map_type(self, opaque_type):\n if opaque_type.is_int:\n return int_\n elif opaque_type.is_float:\n return float_\n elif opaque_type.is_double:\n return double\n elif opaque_type.is_pointer:\n return PointerType(self.map_type(opaque_type.base_type))\n elif opaque_type.is_py_ssize_t:\n return Py_ssize_t\n elif opaque_type.is_char:\n return char\n else:\n raise minierror.UnmappableTypeError(opaque_type)\n\n def to_llvm(self, type):\n \"\"\"Return an LLVM type for the given type.\"\"\"\n raise NotImplementedError\n\n def from_python(self, value):\n \"\"\"Get a type from a python value\"\"\"\n np = sys.modules.get('numpy', None)\n if isinstance(value, float):\n return double\n elif isinstance(value, (int, long)):\n return int_\n elif isinstance(value, complex):\n return complex128\n elif np and isinstance(value, np.ndarray):\n dtype = map_dtype(value.dtype)\n return ArrayType(dtype, value.ndim, is_c_contig=value.flags[\n 'C_CONTIGUOUS'], is_f_contig=value.flags['F_CONTIGUOUS'])\n else:\n return object_\n\n def promote_numeric(self, type1, type2):\n \"\"\"Promote two numeric types\"\"\"\n return max([type1, type2], key=lambda type: type.rank)\n\n def promote_arrays(self, type1, type2):\n \"\"\"Promote two array types in an expression to a new array type\"\"\"\n equal_ndim = type1.ndim == type2.ndim\n return ArrayType(self.promote_types(type1.dtype, type2.dtype), ndim\n =max(type1.ndim, type2.ndim), is_c_contig=equal_ndim and type1.\n is_c_contig and type2.is_c_contig, is_f_contig=equal_ndim and\n type1.is_f_contig and type2.is_f_contig)\n\n def promote_types(self, type1, type2):\n \"\"\"Promote two arbitrary types\"\"\"\n if type1.is_pointer and type2.is_int_like:\n return type1\n elif type2.is_pointer and type2.is_int_like:\n return type2\n elif type1.is_object or type2.is_object:\n return object_\n elif type1.is_numeric and type2.is_numeric:\n return self.promote_numeric(type1, type2)\n elif type1.is_array and type2:\n return self.promote_arrays(type1, type2)\n else:\n raise minierror.UnpromotableTypeError((type1, type2))\n\n\ndef map_dtype(dtype):\n \"\"\"\n >>> _map_dtype(np.dtype(np.int32))\n int32\n >>> _map_dtype(np.dtype(np.int64))\n int64\n >>> _map_dtype(np.dtype(np.object))\n PyObject *\n >>> _map_dtype(np.dtype(np.float64))\n double\n >>> _map_dtype(np.dtype(np.complex128))\n complex128\n \"\"\"\n item_idx = int(math.log(dtype.itemsize, 2))\n if dtype.kind == 'i':\n return [int8, int16, int32, int64][item_idx]\n elif dtype.kind == 'u':\n return [uint8, uint16, uint32, uint64][item_idx]\n elif dtype.kind == 'f':\n if dtype.itemsize == 2:\n pass\n elif dtype.itemsize == 4:\n return float32\n elif dtype.itemsize == 8:\n return float64\n elif dtype.itemsize == 16:\n return float128\n elif dtype.kind == 'b':\n return int8\n elif dtype.kind == 'c':\n if dtype.itemsize == 8:\n return complex64\n elif dtype.itemsize == 16:\n return complex128\n elif dtype.itemsize == 32:\n return complex256\n elif dtype.kind == 'O':\n return object_\n\n\nNONE_KIND = 0\nINT_KIND = 1\nFLOAT_KIND = 2\nCOMPLEX_KIND = 3\n\n\nclass Type(miniutils.ComparableObjectMixin):\n \"\"\"\n Base class for all types.\n\n .. attribute:: subtypes\n\n The list of subtypes to allow comparing and hashing them recursively\n \"\"\"\n is_array = False\n is_pointer = False\n is_typewrapper = False\n is_bool = False\n is_numeric = False\n is_py_ssize_t = False\n is_char = False\n is_int = False\n is_float = False\n is_c_string = False\n is_object = False\n is_function = False\n is_int_like = False\n is_complex = False\n is_void = False\n kind = NONE_KIND\n subtypes = []\n\n def __init__(self, **kwds):\n vars(self).update(kwds)\n self.qualifiers = kwds.get('qualifiers', frozenset())\n\n def qualify(self, *qualifiers):\n \"\"\"Qualify this type with a qualifier such as ``const`` or ``restrict``\"\"\"\n qualifiers = list(qualifiers)\n qualifiers.extend(self.qualifiers)\n attribs = dict(vars(self), qualifiers=qualifiers)\n return type(self)(**attribs)\n\n def unqualify(self, *unqualifiers):\n \"\"\"Remove the given qualifiers from the type\"\"\"\n unqualifiers = set(unqualifiers)\n qualifiers = [q for q in self.qualifiers if q not in unqualifiers]\n attribs = dict(vars(self), qualifiers=qualifiers)\n return type(self)(**attribs)\n\n def pointer(self):\n \"\"\"Get a pointer to this type\"\"\"\n return PointerType(self)\n\n @property\n def subtype_list(self):\n return [getattr(self, subtype) for subtype in self.subtypes]\n\n @property\n def comparison_type_list(self):\n return self.subtype_list\n\n def __eq__(self, other):\n return type(self) is type(other\n ) and self.comparison_type_list == other.comparison_type_list\n\n def __ne__(self, other):\n return not self == other\n\n def __hash__(self):\n h = hash(type(self))\n for subtype in self.comparison_type_list:\n h = h ^ hash(subtype)\n return h\n\n def __getitem__(self, item):\n assert isinstance(item, (tuple, slice))\n\n def verify_slice(s):\n if s.start or s.stop or s.step not in (None, 1):\n raise minierror.InvalidTypeSpecification(\n 'Only a step of 1 may be provided to indicate C or Fortran contiguity'\n )\n if isinstance(item, tuple):\n step_idx = None\n for idx, s in enumerate(item):\n verify_slice(s)\n if s.step and (step_idx or idx not in (0, len(item) - 1)):\n raise minierror.InvalidTypeSpecification(\n 'Step may only be provided once, and only in the first or last dimension.'\n )\n if s.step == 1:\n step_idx = idx\n return ArrayType(self, len(item), is_c_contig=step_idx == len(\n item) - 1, is_f_contig=step_idx == 0)\n else:\n verify_slice(item)\n return ArrayType(self, 1, is_c_contig=bool(item.step))\n\n def to_llvm(self, context):\n \"\"\"Get a corresponding llvm type from this type\"\"\"\n return context.to_llvm(self)\n\n def __getattr__(self, attr):\n if attr.startswith('is_'):\n return False\n return getattr(type(self), attr)\n\n\nclass ArrayType(Type):\n is_array = True\n subtypes = ['dtype']\n\n def __init__(self, dtype, ndim, is_c_contig=False, is_f_contig=False,\n inner_contig=False, broadcasting=None):\n super(ArrayType, self).__init__()\n self.dtype = dtype\n self.ndim = ndim\n self.is_c_contig = is_c_contig\n self.is_f_contig = is_f_contig\n self.inner_contig = inner_contig or is_c_contig or is_f_contig\n self.broadcasting = broadcasting or (True,) * ndim\n\n @property\n def comparison_type_list(self):\n return [self.dtype, self.is_c_contig, self.is_f_contig, self.\n inner_contig]\n\n def pointer(self):\n raise Exception('You probably want a pointer type to the dtype')\n\n def to_llvm(self, context):\n return context.to_llvm(self)\n\n def __repr__(self):\n axes = [':'] * self.ndim\n if self.is_c_contig:\n axes[-1] = '::1'\n elif self.is_f_contig:\n axes[0] = '::1'\n return '%s[%s]' % (self.dtype, ', '.join(axes))\n\n\nclass PointerType(Type):\n is_pointer = True\n subtypes = ['base_type']\n\n def __init__(self, base_type, **kwds):\n super(PointerType, self).__init__(**kwds)\n self.base_type = base_type\n\n def __repr__(self):\n return '%s *%s' % (self.base_type, ' '.join(self.qualifiers))\n\n def to_llvm(self, context):\n return llvm.core.Type.pointer(self.base_type.to_llvm(context))\n\n\nclass CArrayType(Type):\n is_carray = True\n subtypes = ['base_type']\n\n def __init__(self, base_type, size, **kwds):\n super(CArrayType, self).__init__(**kwds)\n self.base_type = base_type\n self.size = size\n\n def __repr__(self):\n return '%s[%d]' % (self.base_type, self.length)\n\n def to_llvm(self, context):\n return llvm.core.Type.array(self.base_type.to_llvm(context), self.size)\n\n\nclass TypeWrapper(Type):\n is_typewrapper = True\n subtypes = ['opaque_type']\n\n def __init__(self, opaque_type, context, **kwds):\n super(TypeWrapper, self).__init__(**kwds)\n self.opaque_type = opaque_type\n self.context = context\n\n def __repr__(self):\n return self.context.declare_type(self)\n\n def __deepcopy__(self, memo):\n return self\n\n\nclass NamedType(Type):\n name = None\n\n def __eq__(self, other):\n return isinstance(other, NamedType) and self.name == other.name\n\n def __repr__(self):\n if self.qualifiers:\n return '%s %s' % (self.name, ' '.join(self.qualifiers))\n return self.name\n\n\nclass BoolType(NamedType):\n is_bool = True\n name = 'bool'\n\n def __repr__(self):\n return 'int %s' % ' '.join(self.qualifiers)\n\n def to_llvm(self, context):\n return int8.to_llvm(context)\n\n\nclass NumericType(NamedType):\n \"\"\"\n Base class for numeric types.\n\n .. attribute:: name\n\n name of the type\n\n .. attribute:: itemsize\n\n sizeof(type)\n\n .. attribute:: rank\n\n ordering of numeric types\n \"\"\"\n is_numeric = True\n\n\nclass IntType(NumericType):\n is_int = True\n is_int_like = True\n name = 'int'\n signed = True\n rank = 4\n itemsize = 4\n kind = INT_KIND\n\n def to_llvm(self, context):\n if self.itemsize == 1:\n return lc.Type.int(8)\n elif self.itemsize == 2:\n return lc.Type.int(16)\n elif self.itemsize == 4:\n return lc.Type.int(32)\n else:\n assert self.itemsize == 8, self\n return lc.Type.int(64)\n\n\nclass FloatType(NumericType):\n is_float = True\n kind = FLOAT_KIND\n\n @property\n def comparison_type_list(self):\n return self.subtype_list + [self.itemsize]\n\n def to_llvm(self, context):\n if self.itemsize == 4:\n return lc.Type.float()\n elif self.itemsize == 8:\n return lc.Type.double()\n else:\n assert self.itemsize == 16\n return lc.Type.fp128()\n\n\nclass ComplexType(NumericType):\n is_complex = True\n subtypes = ['base_type']\n kind = COMPLEX_KIND\n\n\nclass Py_ssize_t_Type(IntType):\n is_py_ssize_t = True\n name = 'Py_ssize_t'\n rank = 9\n signed = True\n\n def __init__(self, **kwds):\n super(Py_ssize_t_Type, self).__init__(**kwds)\n self.itemsize = _plat_bits / 8\n\n\nclass NPyIntp(IntType):\n is_numpy_intp = True\n name = 'npy_intp'\n\n def __init__(self, **kwds):\n super(NPyIntp, self).__init__(**kwds)\n import numpy as np\n ctypes_array = np.empty(0).ctypes.strides\n self.itemsize = ctypes.sizeof(ctypes_array._type_)\n\n\nclass CharType(IntType):\n is_char = True\n name = 'char'\n rank = 1\n signed = True\n\n def to_llvm(self, context):\n return lc.Type.int(8)\n\n\nclass CStringType(Type):\n is_c_string = True\n\n def __repr__(self):\n return 'const char *'\n\n def to_llvm(self, context):\n return char.pointer().to_llvm(context)\n\n\nclass VoidType(NamedType):\n is_void = True\n name = 'void'\n\n def to_llvm(self, context):\n return lc.Type.void()\n\n\nclass ObjectType(Type):\n is_object = True\n\n def __repr__(self):\n return 'PyObject *'\n\n\nclass FunctionType(Type):\n subtypes = ['return_type', 'args']\n is_function = True\n is_vararg = False\n\n def to_llvm(self, context):\n return lc.Type.function(self.return_type.to_llvm(context), [\n arg_type.to_llvm(context) for arg_type in self.args], self.\n is_vararg)\n\n def __str__(self):\n args = map(str, self.args)\n if self.is_vararg:\n args.append('...')\n return '%s (*)(%s)' % (self.return_type, ', '.join(args))\n\n\nclass VectorType(Type):\n subtypes = ['element_type']\n is_vector = True\n vector_size = None\n\n def __init__(self, element_type, vector_size, **kwds):\n super(VectorType, self).__init__(**kwds)\n assert (element_type.is_int or element_type.is_float\n ) and element_type.itemsize in (4, 8), element_type\n self.element_type = element_type\n self.vector_size = vector_size\n\n def to_llvm(self, context):\n return lc.Type.vector(self.element_type.to_llvm(context), self.\n vector_size)\n\n @property\n def comparison_type_list(self):\n return self.subtype_list + [self.vector_size]\n\n def __str__(self):\n itemsize = self.element_type.itemsize\n if self.element_type.is_float:\n if itemsize == 4:\n return '__m128'\n else:\n return '__m128d'\n elif itemsize == 4:\n return '__m128i'\n else:\n raise NotImplementedError\n\n\nc_string_type = CStringType()\nvoid = VoidType()\nPy_ssize_t = Py_ssize_t_Type()\nnpy_intp = NPyIntp()\nsize_t = IntType(name='size_t', rank=8.5, itemsize=8, signed=False)\nchar = CharType(name='char')\nshort = IntType(name='short', rank=2, itemsize=2)\nint_ = IntType(name='int', rank=4, itemsize=4)\nlong_ = IntType(name='long', rank=5, itemsize=4)\nlonglong = IntType(name='PY_LONG_LONG', rank=8, itemsize=8)\nuchar = CharType(name='unsigned char', signed=False)\nushort = IntType(name='unsigned short', rank=2.5, itemsize=2, signed=False)\nuint = IntType(name='unsigned int', rank=4.5, itemsize=4, signed=False)\nulong = IntType(name='unsigned long', rank=5.5, itemsize=4, signed=False)\nulonglong = IntType(name='unsigned PY_LONG_LONG', rank=8.5, itemsize=8,\n signed=False)\nbool_ = BoolType()\nobject_ = ObjectType()\nint8 = IntType(name='int8', rank=1, itemsize=1)\nint16 = IntType(name='int16', rank=2, itemsize=2)\nint32 = IntType(name='int32', rank=4, itemsize=4)\nint64 = IntType(name='int64', rank=8, itemsize=8)\nuint8 = IntType(name='uint8', rank=1.5, signed=False, itemsize=1)\nuint16 = IntType(name='uint16', rank=2.5, signed=False, itemsize=2)\nuint32 = IntType(name='uint32', rank=4.5, signed=False, itemsize=4)\nuint64 = IntType(name='uint64', rank=8.5, signed=False, itemsize=8)\nfloat32 = float_ = FloatType(name='float', rank=10, itemsize=4)\nfloat64 = double = FloatType(name='double', rank=12, itemsize=8)\nfloat128 = longdouble = FloatType(name='long double', rank=14, itemsize=16)\ncomplex64 = ComplexType(name='complex64', base_type=float32, rank=16,\n itemsize=8)\ncomplex128 = ComplexType(name='complex128', base_type=float64, rank=18,\n itemsize=16)\ncomplex256 = ComplexType(name='complex256', base_type=float128, rank=20,\n itemsize=32)\nif __name__ == '__main__':\n import doctest\n doctest.testmod()\n",
"<docstring token>\n<assignment token>\n<import token>\ntry:\n import llvm.core\n from llvm import core as lc\nexcept ImportError:\n llvm = None\n<import token>\nif sys.maxint > 2 ** 33:\n _plat_bits = 64\nelse:\n _plat_bits = 32\n\n\nclass TypeMapper(object):\n \"\"\"\n >>> import miniast\n >>> context = miniast.Context()\n >>> miniast.typemapper = TypeMapper(context)\n >>> tm = context.typemapper\n\n >>> tm.promote_types(int8, double)\n double\n >>> tm.promote_types(int8, uint8)\n uint8\n >>> tm.promote_types(int8, complex128)\n complex128\n >>> tm.promote_types(int8, object_)\n PyObject *\n\n >>> tm.promote_types(int64, float32)\n float\n >>> tm.promote_types(int64, complex64)\n complex64\n >>> tm.promote_types(float32, float64)\n double\n >>> tm.promote_types(float32, complex64)\n complex64\n >>> tm.promote_types(complex64, complex128)\n complex128\n >>> tm.promote_types(complex256, object_)\n PyObject *\n\n >>> tm.promote_types(float32.pointer(), Py_ssize_t)\n float *\n >>> tm.promote_types(float32.pointer(), Py_ssize_t)\n float *\n >>> tm.promote_types(float32.pointer(), uint8)\n float *\n\n >>> tm.promote_types(float32.pointer(), float64.pointer())\n Traceback (most recent call last):\n ...\n UnpromotableTypeError: (float *, double *)\n\n >>> tm.promote_types(float32[:, ::1], float32[:, ::1])\n float[:, ::1]\n >>> tm.promote_types(float32[:, ::1], float64[:, ::1])\n double[:, ::1]\n >>> tm.promote_types(float32[:, ::1], float64[::1, :])\n double[:, :]\n >>> tm.promote_types(float32[:, :], complex128[:, :])\n complex128[:, :]\n >>> tm.promote_types(int_[:, :], object_[:, ::1])\n PyObject *[:, :]\n \"\"\"\n\n def __init__(self, context):\n self.context = context\n\n def map_type(self, opaque_type):\n if opaque_type.is_int:\n return int_\n elif opaque_type.is_float:\n return float_\n elif opaque_type.is_double:\n return double\n elif opaque_type.is_pointer:\n return PointerType(self.map_type(opaque_type.base_type))\n elif opaque_type.is_py_ssize_t:\n return Py_ssize_t\n elif opaque_type.is_char:\n return char\n else:\n raise minierror.UnmappableTypeError(opaque_type)\n\n def to_llvm(self, type):\n \"\"\"Return an LLVM type for the given type.\"\"\"\n raise NotImplementedError\n\n def from_python(self, value):\n \"\"\"Get a type from a python value\"\"\"\n np = sys.modules.get('numpy', None)\n if isinstance(value, float):\n return double\n elif isinstance(value, (int, long)):\n return int_\n elif isinstance(value, complex):\n return complex128\n elif np and isinstance(value, np.ndarray):\n dtype = map_dtype(value.dtype)\n return ArrayType(dtype, value.ndim, is_c_contig=value.flags[\n 'C_CONTIGUOUS'], is_f_contig=value.flags['F_CONTIGUOUS'])\n else:\n return object_\n\n def promote_numeric(self, type1, type2):\n \"\"\"Promote two numeric types\"\"\"\n return max([type1, type2], key=lambda type: type.rank)\n\n def promote_arrays(self, type1, type2):\n \"\"\"Promote two array types in an expression to a new array type\"\"\"\n equal_ndim = type1.ndim == type2.ndim\n return ArrayType(self.promote_types(type1.dtype, type2.dtype), ndim\n =max(type1.ndim, type2.ndim), is_c_contig=equal_ndim and type1.\n is_c_contig and type2.is_c_contig, is_f_contig=equal_ndim and\n type1.is_f_contig and type2.is_f_contig)\n\n def promote_types(self, type1, type2):\n \"\"\"Promote two arbitrary types\"\"\"\n if type1.is_pointer and type2.is_int_like:\n return type1\n elif type2.is_pointer and type2.is_int_like:\n return type2\n elif type1.is_object or type2.is_object:\n return object_\n elif type1.is_numeric and type2.is_numeric:\n return self.promote_numeric(type1, type2)\n elif type1.is_array and type2:\n return self.promote_arrays(type1, type2)\n else:\n raise minierror.UnpromotableTypeError((type1, type2))\n\n\ndef map_dtype(dtype):\n \"\"\"\n >>> _map_dtype(np.dtype(np.int32))\n int32\n >>> _map_dtype(np.dtype(np.int64))\n int64\n >>> _map_dtype(np.dtype(np.object))\n PyObject *\n >>> _map_dtype(np.dtype(np.float64))\n double\n >>> _map_dtype(np.dtype(np.complex128))\n complex128\n \"\"\"\n item_idx = int(math.log(dtype.itemsize, 2))\n if dtype.kind == 'i':\n return [int8, int16, int32, int64][item_idx]\n elif dtype.kind == 'u':\n return [uint8, uint16, uint32, uint64][item_idx]\n elif dtype.kind == 'f':\n if dtype.itemsize == 2:\n pass\n elif dtype.itemsize == 4:\n return float32\n elif dtype.itemsize == 8:\n return float64\n elif dtype.itemsize == 16:\n return float128\n elif dtype.kind == 'b':\n return int8\n elif dtype.kind == 'c':\n if dtype.itemsize == 8:\n return complex64\n elif dtype.itemsize == 16:\n return complex128\n elif dtype.itemsize == 32:\n return complex256\n elif dtype.kind == 'O':\n return object_\n\n\n<assignment token>\n\n\nclass Type(miniutils.ComparableObjectMixin):\n \"\"\"\n Base class for all types.\n\n .. attribute:: subtypes\n\n The list of subtypes to allow comparing and hashing them recursively\n \"\"\"\n is_array = False\n is_pointer = False\n is_typewrapper = False\n is_bool = False\n is_numeric = False\n is_py_ssize_t = False\n is_char = False\n is_int = False\n is_float = False\n is_c_string = False\n is_object = False\n is_function = False\n is_int_like = False\n is_complex = False\n is_void = False\n kind = NONE_KIND\n subtypes = []\n\n def __init__(self, **kwds):\n vars(self).update(kwds)\n self.qualifiers = kwds.get('qualifiers', frozenset())\n\n def qualify(self, *qualifiers):\n \"\"\"Qualify this type with a qualifier such as ``const`` or ``restrict``\"\"\"\n qualifiers = list(qualifiers)\n qualifiers.extend(self.qualifiers)\n attribs = dict(vars(self), qualifiers=qualifiers)\n return type(self)(**attribs)\n\n def unqualify(self, *unqualifiers):\n \"\"\"Remove the given qualifiers from the type\"\"\"\n unqualifiers = set(unqualifiers)\n qualifiers = [q for q in self.qualifiers if q not in unqualifiers]\n attribs = dict(vars(self), qualifiers=qualifiers)\n return type(self)(**attribs)\n\n def pointer(self):\n \"\"\"Get a pointer to this type\"\"\"\n return PointerType(self)\n\n @property\n def subtype_list(self):\n return [getattr(self, subtype) for subtype in self.subtypes]\n\n @property\n def comparison_type_list(self):\n return self.subtype_list\n\n def __eq__(self, other):\n return type(self) is type(other\n ) and self.comparison_type_list == other.comparison_type_list\n\n def __ne__(self, other):\n return not self == other\n\n def __hash__(self):\n h = hash(type(self))\n for subtype in self.comparison_type_list:\n h = h ^ hash(subtype)\n return h\n\n def __getitem__(self, item):\n assert isinstance(item, (tuple, slice))\n\n def verify_slice(s):\n if s.start or s.stop or s.step not in (None, 1):\n raise minierror.InvalidTypeSpecification(\n 'Only a step of 1 may be provided to indicate C or Fortran contiguity'\n )\n if isinstance(item, tuple):\n step_idx = None\n for idx, s in enumerate(item):\n verify_slice(s)\n if s.step and (step_idx or idx not in (0, len(item) - 1)):\n raise minierror.InvalidTypeSpecification(\n 'Step may only be provided once, and only in the first or last dimension.'\n )\n if s.step == 1:\n step_idx = idx\n return ArrayType(self, len(item), is_c_contig=step_idx == len(\n item) - 1, is_f_contig=step_idx == 0)\n else:\n verify_slice(item)\n return ArrayType(self, 1, is_c_contig=bool(item.step))\n\n def to_llvm(self, context):\n \"\"\"Get a corresponding llvm type from this type\"\"\"\n return context.to_llvm(self)\n\n def __getattr__(self, attr):\n if attr.startswith('is_'):\n return False\n return getattr(type(self), attr)\n\n\nclass ArrayType(Type):\n is_array = True\n subtypes = ['dtype']\n\n def __init__(self, dtype, ndim, is_c_contig=False, is_f_contig=False,\n inner_contig=False, broadcasting=None):\n super(ArrayType, self).__init__()\n self.dtype = dtype\n self.ndim = ndim\n self.is_c_contig = is_c_contig\n self.is_f_contig = is_f_contig\n self.inner_contig = inner_contig or is_c_contig or is_f_contig\n self.broadcasting = broadcasting or (True,) * ndim\n\n @property\n def comparison_type_list(self):\n return [self.dtype, self.is_c_contig, self.is_f_contig, self.\n inner_contig]\n\n def pointer(self):\n raise Exception('You probably want a pointer type to the dtype')\n\n def to_llvm(self, context):\n return context.to_llvm(self)\n\n def __repr__(self):\n axes = [':'] * self.ndim\n if self.is_c_contig:\n axes[-1] = '::1'\n elif self.is_f_contig:\n axes[0] = '::1'\n return '%s[%s]' % (self.dtype, ', '.join(axes))\n\n\nclass PointerType(Type):\n is_pointer = True\n subtypes = ['base_type']\n\n def __init__(self, base_type, **kwds):\n super(PointerType, self).__init__(**kwds)\n self.base_type = base_type\n\n def __repr__(self):\n return '%s *%s' % (self.base_type, ' '.join(self.qualifiers))\n\n def to_llvm(self, context):\n return llvm.core.Type.pointer(self.base_type.to_llvm(context))\n\n\nclass CArrayType(Type):\n is_carray = True\n subtypes = ['base_type']\n\n def __init__(self, base_type, size, **kwds):\n super(CArrayType, self).__init__(**kwds)\n self.base_type = base_type\n self.size = size\n\n def __repr__(self):\n return '%s[%d]' % (self.base_type, self.length)\n\n def to_llvm(self, context):\n return llvm.core.Type.array(self.base_type.to_llvm(context), self.size)\n\n\nclass TypeWrapper(Type):\n is_typewrapper = True\n subtypes = ['opaque_type']\n\n def __init__(self, opaque_type, context, **kwds):\n super(TypeWrapper, self).__init__(**kwds)\n self.opaque_type = opaque_type\n self.context = context\n\n def __repr__(self):\n return self.context.declare_type(self)\n\n def __deepcopy__(self, memo):\n return self\n\n\nclass NamedType(Type):\n name = None\n\n def __eq__(self, other):\n return isinstance(other, NamedType) and self.name == other.name\n\n def __repr__(self):\n if self.qualifiers:\n return '%s %s' % (self.name, ' '.join(self.qualifiers))\n return self.name\n\n\nclass BoolType(NamedType):\n is_bool = True\n name = 'bool'\n\n def __repr__(self):\n return 'int %s' % ' '.join(self.qualifiers)\n\n def to_llvm(self, context):\n return int8.to_llvm(context)\n\n\nclass NumericType(NamedType):\n \"\"\"\n Base class for numeric types.\n\n .. attribute:: name\n\n name of the type\n\n .. attribute:: itemsize\n\n sizeof(type)\n\n .. attribute:: rank\n\n ordering of numeric types\n \"\"\"\n is_numeric = True\n\n\nclass IntType(NumericType):\n is_int = True\n is_int_like = True\n name = 'int'\n signed = True\n rank = 4\n itemsize = 4\n kind = INT_KIND\n\n def to_llvm(self, context):\n if self.itemsize == 1:\n return lc.Type.int(8)\n elif self.itemsize == 2:\n return lc.Type.int(16)\n elif self.itemsize == 4:\n return lc.Type.int(32)\n else:\n assert self.itemsize == 8, self\n return lc.Type.int(64)\n\n\nclass FloatType(NumericType):\n is_float = True\n kind = FLOAT_KIND\n\n @property\n def comparison_type_list(self):\n return self.subtype_list + [self.itemsize]\n\n def to_llvm(self, context):\n if self.itemsize == 4:\n return lc.Type.float()\n elif self.itemsize == 8:\n return lc.Type.double()\n else:\n assert self.itemsize == 16\n return lc.Type.fp128()\n\n\nclass ComplexType(NumericType):\n is_complex = True\n subtypes = ['base_type']\n kind = COMPLEX_KIND\n\n\nclass Py_ssize_t_Type(IntType):\n is_py_ssize_t = True\n name = 'Py_ssize_t'\n rank = 9\n signed = True\n\n def __init__(self, **kwds):\n super(Py_ssize_t_Type, self).__init__(**kwds)\n self.itemsize = _plat_bits / 8\n\n\nclass NPyIntp(IntType):\n is_numpy_intp = True\n name = 'npy_intp'\n\n def __init__(self, **kwds):\n super(NPyIntp, self).__init__(**kwds)\n import numpy as np\n ctypes_array = np.empty(0).ctypes.strides\n self.itemsize = ctypes.sizeof(ctypes_array._type_)\n\n\nclass CharType(IntType):\n is_char = True\n name = 'char'\n rank = 1\n signed = True\n\n def to_llvm(self, context):\n return lc.Type.int(8)\n\n\nclass CStringType(Type):\n is_c_string = True\n\n def __repr__(self):\n return 'const char *'\n\n def to_llvm(self, context):\n return char.pointer().to_llvm(context)\n\n\nclass VoidType(NamedType):\n is_void = True\n name = 'void'\n\n def to_llvm(self, context):\n return lc.Type.void()\n\n\nclass ObjectType(Type):\n is_object = True\n\n def __repr__(self):\n return 'PyObject *'\n\n\nclass FunctionType(Type):\n subtypes = ['return_type', 'args']\n is_function = True\n is_vararg = False\n\n def to_llvm(self, context):\n return lc.Type.function(self.return_type.to_llvm(context), [\n arg_type.to_llvm(context) for arg_type in self.args], self.\n is_vararg)\n\n def __str__(self):\n args = map(str, self.args)\n if self.is_vararg:\n args.append('...')\n return '%s (*)(%s)' % (self.return_type, ', '.join(args))\n\n\nclass VectorType(Type):\n subtypes = ['element_type']\n is_vector = True\n vector_size = None\n\n def __init__(self, element_type, vector_size, **kwds):\n super(VectorType, self).__init__(**kwds)\n assert (element_type.is_int or element_type.is_float\n ) and element_type.itemsize in (4, 8), element_type\n self.element_type = element_type\n self.vector_size = vector_size\n\n def to_llvm(self, context):\n return lc.Type.vector(self.element_type.to_llvm(context), self.\n vector_size)\n\n @property\n def comparison_type_list(self):\n return self.subtype_list + [self.vector_size]\n\n def __str__(self):\n itemsize = self.element_type.itemsize\n if self.element_type.is_float:\n if itemsize == 4:\n return '__m128'\n else:\n return '__m128d'\n elif itemsize == 4:\n return '__m128i'\n else:\n raise NotImplementedError\n\n\n<assignment token>\nif __name__ == '__main__':\n import doctest\n doctest.testmod()\n",
"<docstring token>\n<assignment token>\n<import token>\n<code token>\n<import token>\n<code token>\n\n\nclass TypeMapper(object):\n \"\"\"\n >>> import miniast\n >>> context = miniast.Context()\n >>> miniast.typemapper = TypeMapper(context)\n >>> tm = context.typemapper\n\n >>> tm.promote_types(int8, double)\n double\n >>> tm.promote_types(int8, uint8)\n uint8\n >>> tm.promote_types(int8, complex128)\n complex128\n >>> tm.promote_types(int8, object_)\n PyObject *\n\n >>> tm.promote_types(int64, float32)\n float\n >>> tm.promote_types(int64, complex64)\n complex64\n >>> tm.promote_types(float32, float64)\n double\n >>> tm.promote_types(float32, complex64)\n complex64\n >>> tm.promote_types(complex64, complex128)\n complex128\n >>> tm.promote_types(complex256, object_)\n PyObject *\n\n >>> tm.promote_types(float32.pointer(), Py_ssize_t)\n float *\n >>> tm.promote_types(float32.pointer(), Py_ssize_t)\n float *\n >>> tm.promote_types(float32.pointer(), uint8)\n float *\n\n >>> tm.promote_types(float32.pointer(), float64.pointer())\n Traceback (most recent call last):\n ...\n UnpromotableTypeError: (float *, double *)\n\n >>> tm.promote_types(float32[:, ::1], float32[:, ::1])\n float[:, ::1]\n >>> tm.promote_types(float32[:, ::1], float64[:, ::1])\n double[:, ::1]\n >>> tm.promote_types(float32[:, ::1], float64[::1, :])\n double[:, :]\n >>> tm.promote_types(float32[:, :], complex128[:, :])\n complex128[:, :]\n >>> tm.promote_types(int_[:, :], object_[:, ::1])\n PyObject *[:, :]\n \"\"\"\n\n def __init__(self, context):\n self.context = context\n\n def map_type(self, opaque_type):\n if opaque_type.is_int:\n return int_\n elif opaque_type.is_float:\n return float_\n elif opaque_type.is_double:\n return double\n elif opaque_type.is_pointer:\n return PointerType(self.map_type(opaque_type.base_type))\n elif opaque_type.is_py_ssize_t:\n return Py_ssize_t\n elif opaque_type.is_char:\n return char\n else:\n raise minierror.UnmappableTypeError(opaque_type)\n\n def to_llvm(self, type):\n \"\"\"Return an LLVM type for the given type.\"\"\"\n raise NotImplementedError\n\n def from_python(self, value):\n \"\"\"Get a type from a python value\"\"\"\n np = sys.modules.get('numpy', None)\n if isinstance(value, float):\n return double\n elif isinstance(value, (int, long)):\n return int_\n elif isinstance(value, complex):\n return complex128\n elif np and isinstance(value, np.ndarray):\n dtype = map_dtype(value.dtype)\n return ArrayType(dtype, value.ndim, is_c_contig=value.flags[\n 'C_CONTIGUOUS'], is_f_contig=value.flags['F_CONTIGUOUS'])\n else:\n return object_\n\n def promote_numeric(self, type1, type2):\n \"\"\"Promote two numeric types\"\"\"\n return max([type1, type2], key=lambda type: type.rank)\n\n def promote_arrays(self, type1, type2):\n \"\"\"Promote two array types in an expression to a new array type\"\"\"\n equal_ndim = type1.ndim == type2.ndim\n return ArrayType(self.promote_types(type1.dtype, type2.dtype), ndim\n =max(type1.ndim, type2.ndim), is_c_contig=equal_ndim and type1.\n is_c_contig and type2.is_c_contig, is_f_contig=equal_ndim and\n type1.is_f_contig and type2.is_f_contig)\n\n def promote_types(self, type1, type2):\n \"\"\"Promote two arbitrary types\"\"\"\n if type1.is_pointer and type2.is_int_like:\n return type1\n elif type2.is_pointer and type2.is_int_like:\n return type2\n elif type1.is_object or type2.is_object:\n return object_\n elif type1.is_numeric and type2.is_numeric:\n return self.promote_numeric(type1, type2)\n elif type1.is_array and type2:\n return self.promote_arrays(type1, type2)\n else:\n raise minierror.UnpromotableTypeError((type1, type2))\n\n\ndef map_dtype(dtype):\n \"\"\"\n >>> _map_dtype(np.dtype(np.int32))\n int32\n >>> _map_dtype(np.dtype(np.int64))\n int64\n >>> _map_dtype(np.dtype(np.object))\n PyObject *\n >>> _map_dtype(np.dtype(np.float64))\n double\n >>> _map_dtype(np.dtype(np.complex128))\n complex128\n \"\"\"\n item_idx = int(math.log(dtype.itemsize, 2))\n if dtype.kind == 'i':\n return [int8, int16, int32, int64][item_idx]\n elif dtype.kind == 'u':\n return [uint8, uint16, uint32, uint64][item_idx]\n elif dtype.kind == 'f':\n if dtype.itemsize == 2:\n pass\n elif dtype.itemsize == 4:\n return float32\n elif dtype.itemsize == 8:\n return float64\n elif dtype.itemsize == 16:\n return float128\n elif dtype.kind == 'b':\n return int8\n elif dtype.kind == 'c':\n if dtype.itemsize == 8:\n return complex64\n elif dtype.itemsize == 16:\n return complex128\n elif dtype.itemsize == 32:\n return complex256\n elif dtype.kind == 'O':\n return object_\n\n\n<assignment token>\n\n\nclass Type(miniutils.ComparableObjectMixin):\n \"\"\"\n Base class for all types.\n\n .. attribute:: subtypes\n\n The list of subtypes to allow comparing and hashing them recursively\n \"\"\"\n is_array = False\n is_pointer = False\n is_typewrapper = False\n is_bool = False\n is_numeric = False\n is_py_ssize_t = False\n is_char = False\n is_int = False\n is_float = False\n is_c_string = False\n is_object = False\n is_function = False\n is_int_like = False\n is_complex = False\n is_void = False\n kind = NONE_KIND\n subtypes = []\n\n def __init__(self, **kwds):\n vars(self).update(kwds)\n self.qualifiers = kwds.get('qualifiers', frozenset())\n\n def qualify(self, *qualifiers):\n \"\"\"Qualify this type with a qualifier such as ``const`` or ``restrict``\"\"\"\n qualifiers = list(qualifiers)\n qualifiers.extend(self.qualifiers)\n attribs = dict(vars(self), qualifiers=qualifiers)\n return type(self)(**attribs)\n\n def unqualify(self, *unqualifiers):\n \"\"\"Remove the given qualifiers from the type\"\"\"\n unqualifiers = set(unqualifiers)\n qualifiers = [q for q in self.qualifiers if q not in unqualifiers]\n attribs = dict(vars(self), qualifiers=qualifiers)\n return type(self)(**attribs)\n\n def pointer(self):\n \"\"\"Get a pointer to this type\"\"\"\n return PointerType(self)\n\n @property\n def subtype_list(self):\n return [getattr(self, subtype) for subtype in self.subtypes]\n\n @property\n def comparison_type_list(self):\n return self.subtype_list\n\n def __eq__(self, other):\n return type(self) is type(other\n ) and self.comparison_type_list == other.comparison_type_list\n\n def __ne__(self, other):\n return not self == other\n\n def __hash__(self):\n h = hash(type(self))\n for subtype in self.comparison_type_list:\n h = h ^ hash(subtype)\n return h\n\n def __getitem__(self, item):\n assert isinstance(item, (tuple, slice))\n\n def verify_slice(s):\n if s.start or s.stop or s.step not in (None, 1):\n raise minierror.InvalidTypeSpecification(\n 'Only a step of 1 may be provided to indicate C or Fortran contiguity'\n )\n if isinstance(item, tuple):\n step_idx = None\n for idx, s in enumerate(item):\n verify_slice(s)\n if s.step and (step_idx or idx not in (0, len(item) - 1)):\n raise minierror.InvalidTypeSpecification(\n 'Step may only be provided once, and only in the first or last dimension.'\n )\n if s.step == 1:\n step_idx = idx\n return ArrayType(self, len(item), is_c_contig=step_idx == len(\n item) - 1, is_f_contig=step_idx == 0)\n else:\n verify_slice(item)\n return ArrayType(self, 1, is_c_contig=bool(item.step))\n\n def to_llvm(self, context):\n \"\"\"Get a corresponding llvm type from this type\"\"\"\n return context.to_llvm(self)\n\n def __getattr__(self, attr):\n if attr.startswith('is_'):\n return False\n return getattr(type(self), attr)\n\n\nclass ArrayType(Type):\n is_array = True\n subtypes = ['dtype']\n\n def __init__(self, dtype, ndim, is_c_contig=False, is_f_contig=False,\n inner_contig=False, broadcasting=None):\n super(ArrayType, self).__init__()\n self.dtype = dtype\n self.ndim = ndim\n self.is_c_contig = is_c_contig\n self.is_f_contig = is_f_contig\n self.inner_contig = inner_contig or is_c_contig or is_f_contig\n self.broadcasting = broadcasting or (True,) * ndim\n\n @property\n def comparison_type_list(self):\n return [self.dtype, self.is_c_contig, self.is_f_contig, self.\n inner_contig]\n\n def pointer(self):\n raise Exception('You probably want a pointer type to the dtype')\n\n def to_llvm(self, context):\n return context.to_llvm(self)\n\n def __repr__(self):\n axes = [':'] * self.ndim\n if self.is_c_contig:\n axes[-1] = '::1'\n elif self.is_f_contig:\n axes[0] = '::1'\n return '%s[%s]' % (self.dtype, ', '.join(axes))\n\n\nclass PointerType(Type):\n is_pointer = True\n subtypes = ['base_type']\n\n def __init__(self, base_type, **kwds):\n super(PointerType, self).__init__(**kwds)\n self.base_type = base_type\n\n def __repr__(self):\n return '%s *%s' % (self.base_type, ' '.join(self.qualifiers))\n\n def to_llvm(self, context):\n return llvm.core.Type.pointer(self.base_type.to_llvm(context))\n\n\nclass CArrayType(Type):\n is_carray = True\n subtypes = ['base_type']\n\n def __init__(self, base_type, size, **kwds):\n super(CArrayType, self).__init__(**kwds)\n self.base_type = base_type\n self.size = size\n\n def __repr__(self):\n return '%s[%d]' % (self.base_type, self.length)\n\n def to_llvm(self, context):\n return llvm.core.Type.array(self.base_type.to_llvm(context), self.size)\n\n\nclass TypeWrapper(Type):\n is_typewrapper = True\n subtypes = ['opaque_type']\n\n def __init__(self, opaque_type, context, **kwds):\n super(TypeWrapper, self).__init__(**kwds)\n self.opaque_type = opaque_type\n self.context = context\n\n def __repr__(self):\n return self.context.declare_type(self)\n\n def __deepcopy__(self, memo):\n return self\n\n\nclass NamedType(Type):\n name = None\n\n def __eq__(self, other):\n return isinstance(other, NamedType) and self.name == other.name\n\n def __repr__(self):\n if self.qualifiers:\n return '%s %s' % (self.name, ' '.join(self.qualifiers))\n return self.name\n\n\nclass BoolType(NamedType):\n is_bool = True\n name = 'bool'\n\n def __repr__(self):\n return 'int %s' % ' '.join(self.qualifiers)\n\n def to_llvm(self, context):\n return int8.to_llvm(context)\n\n\nclass NumericType(NamedType):\n \"\"\"\n Base class for numeric types.\n\n .. attribute:: name\n\n name of the type\n\n .. attribute:: itemsize\n\n sizeof(type)\n\n .. attribute:: rank\n\n ordering of numeric types\n \"\"\"\n is_numeric = True\n\n\nclass IntType(NumericType):\n is_int = True\n is_int_like = True\n name = 'int'\n signed = True\n rank = 4\n itemsize = 4\n kind = INT_KIND\n\n def to_llvm(self, context):\n if self.itemsize == 1:\n return lc.Type.int(8)\n elif self.itemsize == 2:\n return lc.Type.int(16)\n elif self.itemsize == 4:\n return lc.Type.int(32)\n else:\n assert self.itemsize == 8, self\n return lc.Type.int(64)\n\n\nclass FloatType(NumericType):\n is_float = True\n kind = FLOAT_KIND\n\n @property\n def comparison_type_list(self):\n return self.subtype_list + [self.itemsize]\n\n def to_llvm(self, context):\n if self.itemsize == 4:\n return lc.Type.float()\n elif self.itemsize == 8:\n return lc.Type.double()\n else:\n assert self.itemsize == 16\n return lc.Type.fp128()\n\n\nclass ComplexType(NumericType):\n is_complex = True\n subtypes = ['base_type']\n kind = COMPLEX_KIND\n\n\nclass Py_ssize_t_Type(IntType):\n is_py_ssize_t = True\n name = 'Py_ssize_t'\n rank = 9\n signed = True\n\n def __init__(self, **kwds):\n super(Py_ssize_t_Type, self).__init__(**kwds)\n self.itemsize = _plat_bits / 8\n\n\nclass NPyIntp(IntType):\n is_numpy_intp = True\n name = 'npy_intp'\n\n def __init__(self, **kwds):\n super(NPyIntp, self).__init__(**kwds)\n import numpy as np\n ctypes_array = np.empty(0).ctypes.strides\n self.itemsize = ctypes.sizeof(ctypes_array._type_)\n\n\nclass CharType(IntType):\n is_char = True\n name = 'char'\n rank = 1\n signed = True\n\n def to_llvm(self, context):\n return lc.Type.int(8)\n\n\nclass CStringType(Type):\n is_c_string = True\n\n def __repr__(self):\n return 'const char *'\n\n def to_llvm(self, context):\n return char.pointer().to_llvm(context)\n\n\nclass VoidType(NamedType):\n is_void = True\n name = 'void'\n\n def to_llvm(self, context):\n return lc.Type.void()\n\n\nclass ObjectType(Type):\n is_object = True\n\n def __repr__(self):\n return 'PyObject *'\n\n\nclass FunctionType(Type):\n subtypes = ['return_type', 'args']\n is_function = True\n is_vararg = False\n\n def to_llvm(self, context):\n return lc.Type.function(self.return_type.to_llvm(context), [\n arg_type.to_llvm(context) for arg_type in self.args], self.\n is_vararg)\n\n def __str__(self):\n args = map(str, self.args)\n if self.is_vararg:\n args.append('...')\n return '%s (*)(%s)' % (self.return_type, ', '.join(args))\n\n\nclass VectorType(Type):\n subtypes = ['element_type']\n is_vector = True\n vector_size = None\n\n def __init__(self, element_type, vector_size, **kwds):\n super(VectorType, self).__init__(**kwds)\n assert (element_type.is_int or element_type.is_float\n ) and element_type.itemsize in (4, 8), element_type\n self.element_type = element_type\n self.vector_size = vector_size\n\n def to_llvm(self, context):\n return lc.Type.vector(self.element_type.to_llvm(context), self.\n vector_size)\n\n @property\n def comparison_type_list(self):\n return self.subtype_list + [self.vector_size]\n\n def __str__(self):\n itemsize = self.element_type.itemsize\n if self.element_type.is_float:\n if itemsize == 4:\n return '__m128'\n else:\n return '__m128d'\n elif itemsize == 4:\n return '__m128i'\n else:\n raise NotImplementedError\n\n\n<assignment token>\n<code token>\n",
"<docstring token>\n<assignment token>\n<import token>\n<code token>\n<import token>\n<code token>\n\n\nclass TypeMapper(object):\n \"\"\"\n >>> import miniast\n >>> context = miniast.Context()\n >>> miniast.typemapper = TypeMapper(context)\n >>> tm = context.typemapper\n\n >>> tm.promote_types(int8, double)\n double\n >>> tm.promote_types(int8, uint8)\n uint8\n >>> tm.promote_types(int8, complex128)\n complex128\n >>> tm.promote_types(int8, object_)\n PyObject *\n\n >>> tm.promote_types(int64, float32)\n float\n >>> tm.promote_types(int64, complex64)\n complex64\n >>> tm.promote_types(float32, float64)\n double\n >>> tm.promote_types(float32, complex64)\n complex64\n >>> tm.promote_types(complex64, complex128)\n complex128\n >>> tm.promote_types(complex256, object_)\n PyObject *\n\n >>> tm.promote_types(float32.pointer(), Py_ssize_t)\n float *\n >>> tm.promote_types(float32.pointer(), Py_ssize_t)\n float *\n >>> tm.promote_types(float32.pointer(), uint8)\n float *\n\n >>> tm.promote_types(float32.pointer(), float64.pointer())\n Traceback (most recent call last):\n ...\n UnpromotableTypeError: (float *, double *)\n\n >>> tm.promote_types(float32[:, ::1], float32[:, ::1])\n float[:, ::1]\n >>> tm.promote_types(float32[:, ::1], float64[:, ::1])\n double[:, ::1]\n >>> tm.promote_types(float32[:, ::1], float64[::1, :])\n double[:, :]\n >>> tm.promote_types(float32[:, :], complex128[:, :])\n complex128[:, :]\n >>> tm.promote_types(int_[:, :], object_[:, ::1])\n PyObject *[:, :]\n \"\"\"\n\n def __init__(self, context):\n self.context = context\n\n def map_type(self, opaque_type):\n if opaque_type.is_int:\n return int_\n elif opaque_type.is_float:\n return float_\n elif opaque_type.is_double:\n return double\n elif opaque_type.is_pointer:\n return PointerType(self.map_type(opaque_type.base_type))\n elif opaque_type.is_py_ssize_t:\n return Py_ssize_t\n elif opaque_type.is_char:\n return char\n else:\n raise minierror.UnmappableTypeError(opaque_type)\n\n def to_llvm(self, type):\n \"\"\"Return an LLVM type for the given type.\"\"\"\n raise NotImplementedError\n\n def from_python(self, value):\n \"\"\"Get a type from a python value\"\"\"\n np = sys.modules.get('numpy', None)\n if isinstance(value, float):\n return double\n elif isinstance(value, (int, long)):\n return int_\n elif isinstance(value, complex):\n return complex128\n elif np and isinstance(value, np.ndarray):\n dtype = map_dtype(value.dtype)\n return ArrayType(dtype, value.ndim, is_c_contig=value.flags[\n 'C_CONTIGUOUS'], is_f_contig=value.flags['F_CONTIGUOUS'])\n else:\n return object_\n\n def promote_numeric(self, type1, type2):\n \"\"\"Promote two numeric types\"\"\"\n return max([type1, type2], key=lambda type: type.rank)\n\n def promote_arrays(self, type1, type2):\n \"\"\"Promote two array types in an expression to a new array type\"\"\"\n equal_ndim = type1.ndim == type2.ndim\n return ArrayType(self.promote_types(type1.dtype, type2.dtype), ndim\n =max(type1.ndim, type2.ndim), is_c_contig=equal_ndim and type1.\n is_c_contig and type2.is_c_contig, is_f_contig=equal_ndim and\n type1.is_f_contig and type2.is_f_contig)\n\n def promote_types(self, type1, type2):\n \"\"\"Promote two arbitrary types\"\"\"\n if type1.is_pointer and type2.is_int_like:\n return type1\n elif type2.is_pointer and type2.is_int_like:\n return type2\n elif type1.is_object or type2.is_object:\n return object_\n elif type1.is_numeric and type2.is_numeric:\n return self.promote_numeric(type1, type2)\n elif type1.is_array and type2:\n return self.promote_arrays(type1, type2)\n else:\n raise minierror.UnpromotableTypeError((type1, type2))\n\n\n<function token>\n<assignment token>\n\n\nclass Type(miniutils.ComparableObjectMixin):\n \"\"\"\n Base class for all types.\n\n .. attribute:: subtypes\n\n The list of subtypes to allow comparing and hashing them recursively\n \"\"\"\n is_array = False\n is_pointer = False\n is_typewrapper = False\n is_bool = False\n is_numeric = False\n is_py_ssize_t = False\n is_char = False\n is_int = False\n is_float = False\n is_c_string = False\n is_object = False\n is_function = False\n is_int_like = False\n is_complex = False\n is_void = False\n kind = NONE_KIND\n subtypes = []\n\n def __init__(self, **kwds):\n vars(self).update(kwds)\n self.qualifiers = kwds.get('qualifiers', frozenset())\n\n def qualify(self, *qualifiers):\n \"\"\"Qualify this type with a qualifier such as ``const`` or ``restrict``\"\"\"\n qualifiers = list(qualifiers)\n qualifiers.extend(self.qualifiers)\n attribs = dict(vars(self), qualifiers=qualifiers)\n return type(self)(**attribs)\n\n def unqualify(self, *unqualifiers):\n \"\"\"Remove the given qualifiers from the type\"\"\"\n unqualifiers = set(unqualifiers)\n qualifiers = [q for q in self.qualifiers if q not in unqualifiers]\n attribs = dict(vars(self), qualifiers=qualifiers)\n return type(self)(**attribs)\n\n def pointer(self):\n \"\"\"Get a pointer to this type\"\"\"\n return PointerType(self)\n\n @property\n def subtype_list(self):\n return [getattr(self, subtype) for subtype in self.subtypes]\n\n @property\n def comparison_type_list(self):\n return self.subtype_list\n\n def __eq__(self, other):\n return type(self) is type(other\n ) and self.comparison_type_list == other.comparison_type_list\n\n def __ne__(self, other):\n return not self == other\n\n def __hash__(self):\n h = hash(type(self))\n for subtype in self.comparison_type_list:\n h = h ^ hash(subtype)\n return h\n\n def __getitem__(self, item):\n assert isinstance(item, (tuple, slice))\n\n def verify_slice(s):\n if s.start or s.stop or s.step not in (None, 1):\n raise minierror.InvalidTypeSpecification(\n 'Only a step of 1 may be provided to indicate C or Fortran contiguity'\n )\n if isinstance(item, tuple):\n step_idx = None\n for idx, s in enumerate(item):\n verify_slice(s)\n if s.step and (step_idx or idx not in (0, len(item) - 1)):\n raise minierror.InvalidTypeSpecification(\n 'Step may only be provided once, and only in the first or last dimension.'\n )\n if s.step == 1:\n step_idx = idx\n return ArrayType(self, len(item), is_c_contig=step_idx == len(\n item) - 1, is_f_contig=step_idx == 0)\n else:\n verify_slice(item)\n return ArrayType(self, 1, is_c_contig=bool(item.step))\n\n def to_llvm(self, context):\n \"\"\"Get a corresponding llvm type from this type\"\"\"\n return context.to_llvm(self)\n\n def __getattr__(self, attr):\n if attr.startswith('is_'):\n return False\n return getattr(type(self), attr)\n\n\nclass ArrayType(Type):\n is_array = True\n subtypes = ['dtype']\n\n def __init__(self, dtype, ndim, is_c_contig=False, is_f_contig=False,\n inner_contig=False, broadcasting=None):\n super(ArrayType, self).__init__()\n self.dtype = dtype\n self.ndim = ndim\n self.is_c_contig = is_c_contig\n self.is_f_contig = is_f_contig\n self.inner_contig = inner_contig or is_c_contig or is_f_contig\n self.broadcasting = broadcasting or (True,) * ndim\n\n @property\n def comparison_type_list(self):\n return [self.dtype, self.is_c_contig, self.is_f_contig, self.\n inner_contig]\n\n def pointer(self):\n raise Exception('You probably want a pointer type to the dtype')\n\n def to_llvm(self, context):\n return context.to_llvm(self)\n\n def __repr__(self):\n axes = [':'] * self.ndim\n if self.is_c_contig:\n axes[-1] = '::1'\n elif self.is_f_contig:\n axes[0] = '::1'\n return '%s[%s]' % (self.dtype, ', '.join(axes))\n\n\nclass PointerType(Type):\n is_pointer = True\n subtypes = ['base_type']\n\n def __init__(self, base_type, **kwds):\n super(PointerType, self).__init__(**kwds)\n self.base_type = base_type\n\n def __repr__(self):\n return '%s *%s' % (self.base_type, ' '.join(self.qualifiers))\n\n def to_llvm(self, context):\n return llvm.core.Type.pointer(self.base_type.to_llvm(context))\n\n\nclass CArrayType(Type):\n is_carray = True\n subtypes = ['base_type']\n\n def __init__(self, base_type, size, **kwds):\n super(CArrayType, self).__init__(**kwds)\n self.base_type = base_type\n self.size = size\n\n def __repr__(self):\n return '%s[%d]' % (self.base_type, self.length)\n\n def to_llvm(self, context):\n return llvm.core.Type.array(self.base_type.to_llvm(context), self.size)\n\n\nclass TypeWrapper(Type):\n is_typewrapper = True\n subtypes = ['opaque_type']\n\n def __init__(self, opaque_type, context, **kwds):\n super(TypeWrapper, self).__init__(**kwds)\n self.opaque_type = opaque_type\n self.context = context\n\n def __repr__(self):\n return self.context.declare_type(self)\n\n def __deepcopy__(self, memo):\n return self\n\n\nclass NamedType(Type):\n name = None\n\n def __eq__(self, other):\n return isinstance(other, NamedType) and self.name == other.name\n\n def __repr__(self):\n if self.qualifiers:\n return '%s %s' % (self.name, ' '.join(self.qualifiers))\n return self.name\n\n\nclass BoolType(NamedType):\n is_bool = True\n name = 'bool'\n\n def __repr__(self):\n return 'int %s' % ' '.join(self.qualifiers)\n\n def to_llvm(self, context):\n return int8.to_llvm(context)\n\n\nclass NumericType(NamedType):\n \"\"\"\n Base class for numeric types.\n\n .. attribute:: name\n\n name of the type\n\n .. attribute:: itemsize\n\n sizeof(type)\n\n .. attribute:: rank\n\n ordering of numeric types\n \"\"\"\n is_numeric = True\n\n\nclass IntType(NumericType):\n is_int = True\n is_int_like = True\n name = 'int'\n signed = True\n rank = 4\n itemsize = 4\n kind = INT_KIND\n\n def to_llvm(self, context):\n if self.itemsize == 1:\n return lc.Type.int(8)\n elif self.itemsize == 2:\n return lc.Type.int(16)\n elif self.itemsize == 4:\n return lc.Type.int(32)\n else:\n assert self.itemsize == 8, self\n return lc.Type.int(64)\n\n\nclass FloatType(NumericType):\n is_float = True\n kind = FLOAT_KIND\n\n @property\n def comparison_type_list(self):\n return self.subtype_list + [self.itemsize]\n\n def to_llvm(self, context):\n if self.itemsize == 4:\n return lc.Type.float()\n elif self.itemsize == 8:\n return lc.Type.double()\n else:\n assert self.itemsize == 16\n return lc.Type.fp128()\n\n\nclass ComplexType(NumericType):\n is_complex = True\n subtypes = ['base_type']\n kind = COMPLEX_KIND\n\n\nclass Py_ssize_t_Type(IntType):\n is_py_ssize_t = True\n name = 'Py_ssize_t'\n rank = 9\n signed = True\n\n def __init__(self, **kwds):\n super(Py_ssize_t_Type, self).__init__(**kwds)\n self.itemsize = _plat_bits / 8\n\n\nclass NPyIntp(IntType):\n is_numpy_intp = True\n name = 'npy_intp'\n\n def __init__(self, **kwds):\n super(NPyIntp, self).__init__(**kwds)\n import numpy as np\n ctypes_array = np.empty(0).ctypes.strides\n self.itemsize = ctypes.sizeof(ctypes_array._type_)\n\n\nclass CharType(IntType):\n is_char = True\n name = 'char'\n rank = 1\n signed = True\n\n def to_llvm(self, context):\n return lc.Type.int(8)\n\n\nclass CStringType(Type):\n is_c_string = True\n\n def __repr__(self):\n return 'const char *'\n\n def to_llvm(self, context):\n return char.pointer().to_llvm(context)\n\n\nclass VoidType(NamedType):\n is_void = True\n name = 'void'\n\n def to_llvm(self, context):\n return lc.Type.void()\n\n\nclass ObjectType(Type):\n is_object = True\n\n def __repr__(self):\n return 'PyObject *'\n\n\nclass FunctionType(Type):\n subtypes = ['return_type', 'args']\n is_function = True\n is_vararg = False\n\n def to_llvm(self, context):\n return lc.Type.function(self.return_type.to_llvm(context), [\n arg_type.to_llvm(context) for arg_type in self.args], self.\n is_vararg)\n\n def __str__(self):\n args = map(str, self.args)\n if self.is_vararg:\n args.append('...')\n return '%s (*)(%s)' % (self.return_type, ', '.join(args))\n\n\nclass VectorType(Type):\n subtypes = ['element_type']\n is_vector = True\n vector_size = None\n\n def __init__(self, element_type, vector_size, **kwds):\n super(VectorType, self).__init__(**kwds)\n assert (element_type.is_int or element_type.is_float\n ) and element_type.itemsize in (4, 8), element_type\n self.element_type = element_type\n self.vector_size = vector_size\n\n def to_llvm(self, context):\n return lc.Type.vector(self.element_type.to_llvm(context), self.\n vector_size)\n\n @property\n def comparison_type_list(self):\n return self.subtype_list + [self.vector_size]\n\n def __str__(self):\n itemsize = self.element_type.itemsize\n if self.element_type.is_float:\n if itemsize == 4:\n return '__m128'\n else:\n return '__m128d'\n elif itemsize == 4:\n return '__m128i'\n else:\n raise NotImplementedError\n\n\n<assignment token>\n<code token>\n",
"<docstring token>\n<assignment token>\n<import token>\n<code token>\n<import token>\n<code token>\n\n\nclass TypeMapper(object):\n <docstring token>\n\n def __init__(self, context):\n self.context = context\n\n def map_type(self, opaque_type):\n if opaque_type.is_int:\n return int_\n elif opaque_type.is_float:\n return float_\n elif opaque_type.is_double:\n return double\n elif opaque_type.is_pointer:\n return PointerType(self.map_type(opaque_type.base_type))\n elif opaque_type.is_py_ssize_t:\n return Py_ssize_t\n elif opaque_type.is_char:\n return char\n else:\n raise minierror.UnmappableTypeError(opaque_type)\n\n def to_llvm(self, type):\n \"\"\"Return an LLVM type for the given type.\"\"\"\n raise NotImplementedError\n\n def from_python(self, value):\n \"\"\"Get a type from a python value\"\"\"\n np = sys.modules.get('numpy', None)\n if isinstance(value, float):\n return double\n elif isinstance(value, (int, long)):\n return int_\n elif isinstance(value, complex):\n return complex128\n elif np and isinstance(value, np.ndarray):\n dtype = map_dtype(value.dtype)\n return ArrayType(dtype, value.ndim, is_c_contig=value.flags[\n 'C_CONTIGUOUS'], is_f_contig=value.flags['F_CONTIGUOUS'])\n else:\n return object_\n\n def promote_numeric(self, type1, type2):\n \"\"\"Promote two numeric types\"\"\"\n return max([type1, type2], key=lambda type: type.rank)\n\n def promote_arrays(self, type1, type2):\n \"\"\"Promote two array types in an expression to a new array type\"\"\"\n equal_ndim = type1.ndim == type2.ndim\n return ArrayType(self.promote_types(type1.dtype, type2.dtype), ndim\n =max(type1.ndim, type2.ndim), is_c_contig=equal_ndim and type1.\n is_c_contig and type2.is_c_contig, is_f_contig=equal_ndim and\n type1.is_f_contig and type2.is_f_contig)\n\n def promote_types(self, type1, type2):\n \"\"\"Promote two arbitrary types\"\"\"\n if type1.is_pointer and type2.is_int_like:\n return type1\n elif type2.is_pointer and type2.is_int_like:\n return type2\n elif type1.is_object or type2.is_object:\n return object_\n elif type1.is_numeric and type2.is_numeric:\n return self.promote_numeric(type1, type2)\n elif type1.is_array and type2:\n return self.promote_arrays(type1, type2)\n else:\n raise minierror.UnpromotableTypeError((type1, type2))\n\n\n<function token>\n<assignment token>\n\n\nclass Type(miniutils.ComparableObjectMixin):\n \"\"\"\n Base class for all types.\n\n .. attribute:: subtypes\n\n The list of subtypes to allow comparing and hashing them recursively\n \"\"\"\n is_array = False\n is_pointer = False\n is_typewrapper = False\n is_bool = False\n is_numeric = False\n is_py_ssize_t = False\n is_char = False\n is_int = False\n is_float = False\n is_c_string = False\n is_object = False\n is_function = False\n is_int_like = False\n is_complex = False\n is_void = False\n kind = NONE_KIND\n subtypes = []\n\n def __init__(self, **kwds):\n vars(self).update(kwds)\n self.qualifiers = kwds.get('qualifiers', frozenset())\n\n def qualify(self, *qualifiers):\n \"\"\"Qualify this type with a qualifier such as ``const`` or ``restrict``\"\"\"\n qualifiers = list(qualifiers)\n qualifiers.extend(self.qualifiers)\n attribs = dict(vars(self), qualifiers=qualifiers)\n return type(self)(**attribs)\n\n def unqualify(self, *unqualifiers):\n \"\"\"Remove the given qualifiers from the type\"\"\"\n unqualifiers = set(unqualifiers)\n qualifiers = [q for q in self.qualifiers if q not in unqualifiers]\n attribs = dict(vars(self), qualifiers=qualifiers)\n return type(self)(**attribs)\n\n def pointer(self):\n \"\"\"Get a pointer to this type\"\"\"\n return PointerType(self)\n\n @property\n def subtype_list(self):\n return [getattr(self, subtype) for subtype in self.subtypes]\n\n @property\n def comparison_type_list(self):\n return self.subtype_list\n\n def __eq__(self, other):\n return type(self) is type(other\n ) and self.comparison_type_list == other.comparison_type_list\n\n def __ne__(self, other):\n return not self == other\n\n def __hash__(self):\n h = hash(type(self))\n for subtype in self.comparison_type_list:\n h = h ^ hash(subtype)\n return h\n\n def __getitem__(self, item):\n assert isinstance(item, (tuple, slice))\n\n def verify_slice(s):\n if s.start or s.stop or s.step not in (None, 1):\n raise minierror.InvalidTypeSpecification(\n 'Only a step of 1 may be provided to indicate C or Fortran contiguity'\n )\n if isinstance(item, tuple):\n step_idx = None\n for idx, s in enumerate(item):\n verify_slice(s)\n if s.step and (step_idx or idx not in (0, len(item) - 1)):\n raise minierror.InvalidTypeSpecification(\n 'Step may only be provided once, and only in the first or last dimension.'\n )\n if s.step == 1:\n step_idx = idx\n return ArrayType(self, len(item), is_c_contig=step_idx == len(\n item) - 1, is_f_contig=step_idx == 0)\n else:\n verify_slice(item)\n return ArrayType(self, 1, is_c_contig=bool(item.step))\n\n def to_llvm(self, context):\n \"\"\"Get a corresponding llvm type from this type\"\"\"\n return context.to_llvm(self)\n\n def __getattr__(self, attr):\n if attr.startswith('is_'):\n return False\n return getattr(type(self), attr)\n\n\nclass ArrayType(Type):\n is_array = True\n subtypes = ['dtype']\n\n def __init__(self, dtype, ndim, is_c_contig=False, is_f_contig=False,\n inner_contig=False, broadcasting=None):\n super(ArrayType, self).__init__()\n self.dtype = dtype\n self.ndim = ndim\n self.is_c_contig = is_c_contig\n self.is_f_contig = is_f_contig\n self.inner_contig = inner_contig or is_c_contig or is_f_contig\n self.broadcasting = broadcasting or (True,) * ndim\n\n @property\n def comparison_type_list(self):\n return [self.dtype, self.is_c_contig, self.is_f_contig, self.\n inner_contig]\n\n def pointer(self):\n raise Exception('You probably want a pointer type to the dtype')\n\n def to_llvm(self, context):\n return context.to_llvm(self)\n\n def __repr__(self):\n axes = [':'] * self.ndim\n if self.is_c_contig:\n axes[-1] = '::1'\n elif self.is_f_contig:\n axes[0] = '::1'\n return '%s[%s]' % (self.dtype, ', '.join(axes))\n\n\nclass PointerType(Type):\n is_pointer = True\n subtypes = ['base_type']\n\n def __init__(self, base_type, **kwds):\n super(PointerType, self).__init__(**kwds)\n self.base_type = base_type\n\n def __repr__(self):\n return '%s *%s' % (self.base_type, ' '.join(self.qualifiers))\n\n def to_llvm(self, context):\n return llvm.core.Type.pointer(self.base_type.to_llvm(context))\n\n\nclass CArrayType(Type):\n is_carray = True\n subtypes = ['base_type']\n\n def __init__(self, base_type, size, **kwds):\n super(CArrayType, self).__init__(**kwds)\n self.base_type = base_type\n self.size = size\n\n def __repr__(self):\n return '%s[%d]' % (self.base_type, self.length)\n\n def to_llvm(self, context):\n return llvm.core.Type.array(self.base_type.to_llvm(context), self.size)\n\n\nclass TypeWrapper(Type):\n is_typewrapper = True\n subtypes = ['opaque_type']\n\n def __init__(self, opaque_type, context, **kwds):\n super(TypeWrapper, self).__init__(**kwds)\n self.opaque_type = opaque_type\n self.context = context\n\n def __repr__(self):\n return self.context.declare_type(self)\n\n def __deepcopy__(self, memo):\n return self\n\n\nclass NamedType(Type):\n name = None\n\n def __eq__(self, other):\n return isinstance(other, NamedType) and self.name == other.name\n\n def __repr__(self):\n if self.qualifiers:\n return '%s %s' % (self.name, ' '.join(self.qualifiers))\n return self.name\n\n\nclass BoolType(NamedType):\n is_bool = True\n name = 'bool'\n\n def __repr__(self):\n return 'int %s' % ' '.join(self.qualifiers)\n\n def to_llvm(self, context):\n return int8.to_llvm(context)\n\n\nclass NumericType(NamedType):\n \"\"\"\n Base class for numeric types.\n\n .. attribute:: name\n\n name of the type\n\n .. attribute:: itemsize\n\n sizeof(type)\n\n .. attribute:: rank\n\n ordering of numeric types\n \"\"\"\n is_numeric = True\n\n\nclass IntType(NumericType):\n is_int = True\n is_int_like = True\n name = 'int'\n signed = True\n rank = 4\n itemsize = 4\n kind = INT_KIND\n\n def to_llvm(self, context):\n if self.itemsize == 1:\n return lc.Type.int(8)\n elif self.itemsize == 2:\n return lc.Type.int(16)\n elif self.itemsize == 4:\n return lc.Type.int(32)\n else:\n assert self.itemsize == 8, self\n return lc.Type.int(64)\n\n\nclass FloatType(NumericType):\n is_float = True\n kind = FLOAT_KIND\n\n @property\n def comparison_type_list(self):\n return self.subtype_list + [self.itemsize]\n\n def to_llvm(self, context):\n if self.itemsize == 4:\n return lc.Type.float()\n elif self.itemsize == 8:\n return lc.Type.double()\n else:\n assert self.itemsize == 16\n return lc.Type.fp128()\n\n\nclass ComplexType(NumericType):\n is_complex = True\n subtypes = ['base_type']\n kind = COMPLEX_KIND\n\n\nclass Py_ssize_t_Type(IntType):\n is_py_ssize_t = True\n name = 'Py_ssize_t'\n rank = 9\n signed = True\n\n def __init__(self, **kwds):\n super(Py_ssize_t_Type, self).__init__(**kwds)\n self.itemsize = _plat_bits / 8\n\n\nclass NPyIntp(IntType):\n is_numpy_intp = True\n name = 'npy_intp'\n\n def __init__(self, **kwds):\n super(NPyIntp, self).__init__(**kwds)\n import numpy as np\n ctypes_array = np.empty(0).ctypes.strides\n self.itemsize = ctypes.sizeof(ctypes_array._type_)\n\n\nclass CharType(IntType):\n is_char = True\n name = 'char'\n rank = 1\n signed = True\n\n def to_llvm(self, context):\n return lc.Type.int(8)\n\n\nclass CStringType(Type):\n is_c_string = True\n\n def __repr__(self):\n return 'const char *'\n\n def to_llvm(self, context):\n return char.pointer().to_llvm(context)\n\n\nclass VoidType(NamedType):\n is_void = True\n name = 'void'\n\n def to_llvm(self, context):\n return lc.Type.void()\n\n\nclass ObjectType(Type):\n is_object = True\n\n def __repr__(self):\n return 'PyObject *'\n\n\nclass FunctionType(Type):\n subtypes = ['return_type', 'args']\n is_function = True\n is_vararg = False\n\n def to_llvm(self, context):\n return lc.Type.function(self.return_type.to_llvm(context), [\n arg_type.to_llvm(context) for arg_type in self.args], self.\n is_vararg)\n\n def __str__(self):\n args = map(str, self.args)\n if self.is_vararg:\n args.append('...')\n return '%s (*)(%s)' % (self.return_type, ', '.join(args))\n\n\nclass VectorType(Type):\n subtypes = ['element_type']\n is_vector = True\n vector_size = None\n\n def __init__(self, element_type, vector_size, **kwds):\n super(VectorType, self).__init__(**kwds)\n assert (element_type.is_int or element_type.is_float\n ) and element_type.itemsize in (4, 8), element_type\n self.element_type = element_type\n self.vector_size = vector_size\n\n def to_llvm(self, context):\n return lc.Type.vector(self.element_type.to_llvm(context), self.\n vector_size)\n\n @property\n def comparison_type_list(self):\n return self.subtype_list + [self.vector_size]\n\n def __str__(self):\n itemsize = self.element_type.itemsize\n if self.element_type.is_float:\n if itemsize == 4:\n return '__m128'\n else:\n return '__m128d'\n elif itemsize == 4:\n return '__m128i'\n else:\n raise NotImplementedError\n\n\n<assignment token>\n<code token>\n",
"<docstring token>\n<assignment token>\n<import token>\n<code token>\n<import token>\n<code token>\n\n\nclass TypeMapper(object):\n <docstring token>\n <function token>\n\n def map_type(self, opaque_type):\n if opaque_type.is_int:\n return int_\n elif opaque_type.is_float:\n return float_\n elif opaque_type.is_double:\n return double\n elif opaque_type.is_pointer:\n return PointerType(self.map_type(opaque_type.base_type))\n elif opaque_type.is_py_ssize_t:\n return Py_ssize_t\n elif opaque_type.is_char:\n return char\n else:\n raise minierror.UnmappableTypeError(opaque_type)\n\n def to_llvm(self, type):\n \"\"\"Return an LLVM type for the given type.\"\"\"\n raise NotImplementedError\n\n def from_python(self, value):\n \"\"\"Get a type from a python value\"\"\"\n np = sys.modules.get('numpy', None)\n if isinstance(value, float):\n return double\n elif isinstance(value, (int, long)):\n return int_\n elif isinstance(value, complex):\n return complex128\n elif np and isinstance(value, np.ndarray):\n dtype = map_dtype(value.dtype)\n return ArrayType(dtype, value.ndim, is_c_contig=value.flags[\n 'C_CONTIGUOUS'], is_f_contig=value.flags['F_CONTIGUOUS'])\n else:\n return object_\n\n def promote_numeric(self, type1, type2):\n \"\"\"Promote two numeric types\"\"\"\n return max([type1, type2], key=lambda type: type.rank)\n\n def promote_arrays(self, type1, type2):\n \"\"\"Promote two array types in an expression to a new array type\"\"\"\n equal_ndim = type1.ndim == type2.ndim\n return ArrayType(self.promote_types(type1.dtype, type2.dtype), ndim\n =max(type1.ndim, type2.ndim), is_c_contig=equal_ndim and type1.\n is_c_contig and type2.is_c_contig, is_f_contig=equal_ndim and\n type1.is_f_contig and type2.is_f_contig)\n\n def promote_types(self, type1, type2):\n \"\"\"Promote two arbitrary types\"\"\"\n if type1.is_pointer and type2.is_int_like:\n return type1\n elif type2.is_pointer and type2.is_int_like:\n return type2\n elif type1.is_object or type2.is_object:\n return object_\n elif type1.is_numeric and type2.is_numeric:\n return self.promote_numeric(type1, type2)\n elif type1.is_array and type2:\n return self.promote_arrays(type1, type2)\n else:\n raise minierror.UnpromotableTypeError((type1, type2))\n\n\n<function token>\n<assignment token>\n\n\nclass Type(miniutils.ComparableObjectMixin):\n \"\"\"\n Base class for all types.\n\n .. attribute:: subtypes\n\n The list of subtypes to allow comparing and hashing them recursively\n \"\"\"\n is_array = False\n is_pointer = False\n is_typewrapper = False\n is_bool = False\n is_numeric = False\n is_py_ssize_t = False\n is_char = False\n is_int = False\n is_float = False\n is_c_string = False\n is_object = False\n is_function = False\n is_int_like = False\n is_complex = False\n is_void = False\n kind = NONE_KIND\n subtypes = []\n\n def __init__(self, **kwds):\n vars(self).update(kwds)\n self.qualifiers = kwds.get('qualifiers', frozenset())\n\n def qualify(self, *qualifiers):\n \"\"\"Qualify this type with a qualifier such as ``const`` or ``restrict``\"\"\"\n qualifiers = list(qualifiers)\n qualifiers.extend(self.qualifiers)\n attribs = dict(vars(self), qualifiers=qualifiers)\n return type(self)(**attribs)\n\n def unqualify(self, *unqualifiers):\n \"\"\"Remove the given qualifiers from the type\"\"\"\n unqualifiers = set(unqualifiers)\n qualifiers = [q for q in self.qualifiers if q not in unqualifiers]\n attribs = dict(vars(self), qualifiers=qualifiers)\n return type(self)(**attribs)\n\n def pointer(self):\n \"\"\"Get a pointer to this type\"\"\"\n return PointerType(self)\n\n @property\n def subtype_list(self):\n return [getattr(self, subtype) for subtype in self.subtypes]\n\n @property\n def comparison_type_list(self):\n return self.subtype_list\n\n def __eq__(self, other):\n return type(self) is type(other\n ) and self.comparison_type_list == other.comparison_type_list\n\n def __ne__(self, other):\n return not self == other\n\n def __hash__(self):\n h = hash(type(self))\n for subtype in self.comparison_type_list:\n h = h ^ hash(subtype)\n return h\n\n def __getitem__(self, item):\n assert isinstance(item, (tuple, slice))\n\n def verify_slice(s):\n if s.start or s.stop or s.step not in (None, 1):\n raise minierror.InvalidTypeSpecification(\n 'Only a step of 1 may be provided to indicate C or Fortran contiguity'\n )\n if isinstance(item, tuple):\n step_idx = None\n for idx, s in enumerate(item):\n verify_slice(s)\n if s.step and (step_idx or idx not in (0, len(item) - 1)):\n raise minierror.InvalidTypeSpecification(\n 'Step may only be provided once, and only in the first or last dimension.'\n )\n if s.step == 1:\n step_idx = idx\n return ArrayType(self, len(item), is_c_contig=step_idx == len(\n item) - 1, is_f_contig=step_idx == 0)\n else:\n verify_slice(item)\n return ArrayType(self, 1, is_c_contig=bool(item.step))\n\n def to_llvm(self, context):\n \"\"\"Get a corresponding llvm type from this type\"\"\"\n return context.to_llvm(self)\n\n def __getattr__(self, attr):\n if attr.startswith('is_'):\n return False\n return getattr(type(self), attr)\n\n\nclass ArrayType(Type):\n is_array = True\n subtypes = ['dtype']\n\n def __init__(self, dtype, ndim, is_c_contig=False, is_f_contig=False,\n inner_contig=False, broadcasting=None):\n super(ArrayType, self).__init__()\n self.dtype = dtype\n self.ndim = ndim\n self.is_c_contig = is_c_contig\n self.is_f_contig = is_f_contig\n self.inner_contig = inner_contig or is_c_contig or is_f_contig\n self.broadcasting = broadcasting or (True,) * ndim\n\n @property\n def comparison_type_list(self):\n return [self.dtype, self.is_c_contig, self.is_f_contig, self.\n inner_contig]\n\n def pointer(self):\n raise Exception('You probably want a pointer type to the dtype')\n\n def to_llvm(self, context):\n return context.to_llvm(self)\n\n def __repr__(self):\n axes = [':'] * self.ndim\n if self.is_c_contig:\n axes[-1] = '::1'\n elif self.is_f_contig:\n axes[0] = '::1'\n return '%s[%s]' % (self.dtype, ', '.join(axes))\n\n\nclass PointerType(Type):\n is_pointer = True\n subtypes = ['base_type']\n\n def __init__(self, base_type, **kwds):\n super(PointerType, self).__init__(**kwds)\n self.base_type = base_type\n\n def __repr__(self):\n return '%s *%s' % (self.base_type, ' '.join(self.qualifiers))\n\n def to_llvm(self, context):\n return llvm.core.Type.pointer(self.base_type.to_llvm(context))\n\n\nclass CArrayType(Type):\n is_carray = True\n subtypes = ['base_type']\n\n def __init__(self, base_type, size, **kwds):\n super(CArrayType, self).__init__(**kwds)\n self.base_type = base_type\n self.size = size\n\n def __repr__(self):\n return '%s[%d]' % (self.base_type, self.length)\n\n def to_llvm(self, context):\n return llvm.core.Type.array(self.base_type.to_llvm(context), self.size)\n\n\nclass TypeWrapper(Type):\n is_typewrapper = True\n subtypes = ['opaque_type']\n\n def __init__(self, opaque_type, context, **kwds):\n super(TypeWrapper, self).__init__(**kwds)\n self.opaque_type = opaque_type\n self.context = context\n\n def __repr__(self):\n return self.context.declare_type(self)\n\n def __deepcopy__(self, memo):\n return self\n\n\nclass NamedType(Type):\n name = None\n\n def __eq__(self, other):\n return isinstance(other, NamedType) and self.name == other.name\n\n def __repr__(self):\n if self.qualifiers:\n return '%s %s' % (self.name, ' '.join(self.qualifiers))\n return self.name\n\n\nclass BoolType(NamedType):\n is_bool = True\n name = 'bool'\n\n def __repr__(self):\n return 'int %s' % ' '.join(self.qualifiers)\n\n def to_llvm(self, context):\n return int8.to_llvm(context)\n\n\nclass NumericType(NamedType):\n \"\"\"\n Base class for numeric types.\n\n .. attribute:: name\n\n name of the type\n\n .. attribute:: itemsize\n\n sizeof(type)\n\n .. attribute:: rank\n\n ordering of numeric types\n \"\"\"\n is_numeric = True\n\n\nclass IntType(NumericType):\n is_int = True\n is_int_like = True\n name = 'int'\n signed = True\n rank = 4\n itemsize = 4\n kind = INT_KIND\n\n def to_llvm(self, context):\n if self.itemsize == 1:\n return lc.Type.int(8)\n elif self.itemsize == 2:\n return lc.Type.int(16)\n elif self.itemsize == 4:\n return lc.Type.int(32)\n else:\n assert self.itemsize == 8, self\n return lc.Type.int(64)\n\n\nclass FloatType(NumericType):\n is_float = True\n kind = FLOAT_KIND\n\n @property\n def comparison_type_list(self):\n return self.subtype_list + [self.itemsize]\n\n def to_llvm(self, context):\n if self.itemsize == 4:\n return lc.Type.float()\n elif self.itemsize == 8:\n return lc.Type.double()\n else:\n assert self.itemsize == 16\n return lc.Type.fp128()\n\n\nclass ComplexType(NumericType):\n is_complex = True\n subtypes = ['base_type']\n kind = COMPLEX_KIND\n\n\nclass Py_ssize_t_Type(IntType):\n is_py_ssize_t = True\n name = 'Py_ssize_t'\n rank = 9\n signed = True\n\n def __init__(self, **kwds):\n super(Py_ssize_t_Type, self).__init__(**kwds)\n self.itemsize = _plat_bits / 8\n\n\nclass NPyIntp(IntType):\n is_numpy_intp = True\n name = 'npy_intp'\n\n def __init__(self, **kwds):\n super(NPyIntp, self).__init__(**kwds)\n import numpy as np\n ctypes_array = np.empty(0).ctypes.strides\n self.itemsize = ctypes.sizeof(ctypes_array._type_)\n\n\nclass CharType(IntType):\n is_char = True\n name = 'char'\n rank = 1\n signed = True\n\n def to_llvm(self, context):\n return lc.Type.int(8)\n\n\nclass CStringType(Type):\n is_c_string = True\n\n def __repr__(self):\n return 'const char *'\n\n def to_llvm(self, context):\n return char.pointer().to_llvm(context)\n\n\nclass VoidType(NamedType):\n is_void = True\n name = 'void'\n\n def to_llvm(self, context):\n return lc.Type.void()\n\n\nclass ObjectType(Type):\n is_object = True\n\n def __repr__(self):\n return 'PyObject *'\n\n\nclass FunctionType(Type):\n subtypes = ['return_type', 'args']\n is_function = True\n is_vararg = False\n\n def to_llvm(self, context):\n return lc.Type.function(self.return_type.to_llvm(context), [\n arg_type.to_llvm(context) for arg_type in self.args], self.\n is_vararg)\n\n def __str__(self):\n args = map(str, self.args)\n if self.is_vararg:\n args.append('...')\n return '%s (*)(%s)' % (self.return_type, ', '.join(args))\n\n\nclass VectorType(Type):\n subtypes = ['element_type']\n is_vector = True\n vector_size = None\n\n def __init__(self, element_type, vector_size, **kwds):\n super(VectorType, self).__init__(**kwds)\n assert (element_type.is_int or element_type.is_float\n ) and element_type.itemsize in (4, 8), element_type\n self.element_type = element_type\n self.vector_size = vector_size\n\n def to_llvm(self, context):\n return lc.Type.vector(self.element_type.to_llvm(context), self.\n vector_size)\n\n @property\n def comparison_type_list(self):\n return self.subtype_list + [self.vector_size]\n\n def __str__(self):\n itemsize = self.element_type.itemsize\n if self.element_type.is_float:\n if itemsize == 4:\n return '__m128'\n else:\n return '__m128d'\n elif itemsize == 4:\n return '__m128i'\n else:\n raise NotImplementedError\n\n\n<assignment token>\n<code token>\n",
"<docstring token>\n<assignment token>\n<import token>\n<code token>\n<import token>\n<code token>\n\n\nclass TypeMapper(object):\n <docstring token>\n <function token>\n\n def map_type(self, opaque_type):\n if opaque_type.is_int:\n return int_\n elif opaque_type.is_float:\n return float_\n elif opaque_type.is_double:\n return double\n elif opaque_type.is_pointer:\n return PointerType(self.map_type(opaque_type.base_type))\n elif opaque_type.is_py_ssize_t:\n return Py_ssize_t\n elif opaque_type.is_char:\n return char\n else:\n raise minierror.UnmappableTypeError(opaque_type)\n\n def to_llvm(self, type):\n \"\"\"Return an LLVM type for the given type.\"\"\"\n raise NotImplementedError\n <function token>\n\n def promote_numeric(self, type1, type2):\n \"\"\"Promote two numeric types\"\"\"\n return max([type1, type2], key=lambda type: type.rank)\n\n def promote_arrays(self, type1, type2):\n \"\"\"Promote two array types in an expression to a new array type\"\"\"\n equal_ndim = type1.ndim == type2.ndim\n return ArrayType(self.promote_types(type1.dtype, type2.dtype), ndim\n =max(type1.ndim, type2.ndim), is_c_contig=equal_ndim and type1.\n is_c_contig and type2.is_c_contig, is_f_contig=equal_ndim and\n type1.is_f_contig and type2.is_f_contig)\n\n def promote_types(self, type1, type2):\n \"\"\"Promote two arbitrary types\"\"\"\n if type1.is_pointer and type2.is_int_like:\n return type1\n elif type2.is_pointer and type2.is_int_like:\n return type2\n elif type1.is_object or type2.is_object:\n return object_\n elif type1.is_numeric and type2.is_numeric:\n return self.promote_numeric(type1, type2)\n elif type1.is_array and type2:\n return self.promote_arrays(type1, type2)\n else:\n raise minierror.UnpromotableTypeError((type1, type2))\n\n\n<function token>\n<assignment token>\n\n\nclass Type(miniutils.ComparableObjectMixin):\n \"\"\"\n Base class for all types.\n\n .. attribute:: subtypes\n\n The list of subtypes to allow comparing and hashing them recursively\n \"\"\"\n is_array = False\n is_pointer = False\n is_typewrapper = False\n is_bool = False\n is_numeric = False\n is_py_ssize_t = False\n is_char = False\n is_int = False\n is_float = False\n is_c_string = False\n is_object = False\n is_function = False\n is_int_like = False\n is_complex = False\n is_void = False\n kind = NONE_KIND\n subtypes = []\n\n def __init__(self, **kwds):\n vars(self).update(kwds)\n self.qualifiers = kwds.get('qualifiers', frozenset())\n\n def qualify(self, *qualifiers):\n \"\"\"Qualify this type with a qualifier such as ``const`` or ``restrict``\"\"\"\n qualifiers = list(qualifiers)\n qualifiers.extend(self.qualifiers)\n attribs = dict(vars(self), qualifiers=qualifiers)\n return type(self)(**attribs)\n\n def unqualify(self, *unqualifiers):\n \"\"\"Remove the given qualifiers from the type\"\"\"\n unqualifiers = set(unqualifiers)\n qualifiers = [q for q in self.qualifiers if q not in unqualifiers]\n attribs = dict(vars(self), qualifiers=qualifiers)\n return type(self)(**attribs)\n\n def pointer(self):\n \"\"\"Get a pointer to this type\"\"\"\n return PointerType(self)\n\n @property\n def subtype_list(self):\n return [getattr(self, subtype) for subtype in self.subtypes]\n\n @property\n def comparison_type_list(self):\n return self.subtype_list\n\n def __eq__(self, other):\n return type(self) is type(other\n ) and self.comparison_type_list == other.comparison_type_list\n\n def __ne__(self, other):\n return not self == other\n\n def __hash__(self):\n h = hash(type(self))\n for subtype in self.comparison_type_list:\n h = h ^ hash(subtype)\n return h\n\n def __getitem__(self, item):\n assert isinstance(item, (tuple, slice))\n\n def verify_slice(s):\n if s.start or s.stop or s.step not in (None, 1):\n raise minierror.InvalidTypeSpecification(\n 'Only a step of 1 may be provided to indicate C or Fortran contiguity'\n )\n if isinstance(item, tuple):\n step_idx = None\n for idx, s in enumerate(item):\n verify_slice(s)\n if s.step and (step_idx or idx not in (0, len(item) - 1)):\n raise minierror.InvalidTypeSpecification(\n 'Step may only be provided once, and only in the first or last dimension.'\n )\n if s.step == 1:\n step_idx = idx\n return ArrayType(self, len(item), is_c_contig=step_idx == len(\n item) - 1, is_f_contig=step_idx == 0)\n else:\n verify_slice(item)\n return ArrayType(self, 1, is_c_contig=bool(item.step))\n\n def to_llvm(self, context):\n \"\"\"Get a corresponding llvm type from this type\"\"\"\n return context.to_llvm(self)\n\n def __getattr__(self, attr):\n if attr.startswith('is_'):\n return False\n return getattr(type(self), attr)\n\n\nclass ArrayType(Type):\n is_array = True\n subtypes = ['dtype']\n\n def __init__(self, dtype, ndim, is_c_contig=False, is_f_contig=False,\n inner_contig=False, broadcasting=None):\n super(ArrayType, self).__init__()\n self.dtype = dtype\n self.ndim = ndim\n self.is_c_contig = is_c_contig\n self.is_f_contig = is_f_contig\n self.inner_contig = inner_contig or is_c_contig or is_f_contig\n self.broadcasting = broadcasting or (True,) * ndim\n\n @property\n def comparison_type_list(self):\n return [self.dtype, self.is_c_contig, self.is_f_contig, self.\n inner_contig]\n\n def pointer(self):\n raise Exception('You probably want a pointer type to the dtype')\n\n def to_llvm(self, context):\n return context.to_llvm(self)\n\n def __repr__(self):\n axes = [':'] * self.ndim\n if self.is_c_contig:\n axes[-1] = '::1'\n elif self.is_f_contig:\n axes[0] = '::1'\n return '%s[%s]' % (self.dtype, ', '.join(axes))\n\n\nclass PointerType(Type):\n is_pointer = True\n subtypes = ['base_type']\n\n def __init__(self, base_type, **kwds):\n super(PointerType, self).__init__(**kwds)\n self.base_type = base_type\n\n def __repr__(self):\n return '%s *%s' % (self.base_type, ' '.join(self.qualifiers))\n\n def to_llvm(self, context):\n return llvm.core.Type.pointer(self.base_type.to_llvm(context))\n\n\nclass CArrayType(Type):\n is_carray = True\n subtypes = ['base_type']\n\n def __init__(self, base_type, size, **kwds):\n super(CArrayType, self).__init__(**kwds)\n self.base_type = base_type\n self.size = size\n\n def __repr__(self):\n return '%s[%d]' % (self.base_type, self.length)\n\n def to_llvm(self, context):\n return llvm.core.Type.array(self.base_type.to_llvm(context), self.size)\n\n\nclass TypeWrapper(Type):\n is_typewrapper = True\n subtypes = ['opaque_type']\n\n def __init__(self, opaque_type, context, **kwds):\n super(TypeWrapper, self).__init__(**kwds)\n self.opaque_type = opaque_type\n self.context = context\n\n def __repr__(self):\n return self.context.declare_type(self)\n\n def __deepcopy__(self, memo):\n return self\n\n\nclass NamedType(Type):\n name = None\n\n def __eq__(self, other):\n return isinstance(other, NamedType) and self.name == other.name\n\n def __repr__(self):\n if self.qualifiers:\n return '%s %s' % (self.name, ' '.join(self.qualifiers))\n return self.name\n\n\nclass BoolType(NamedType):\n is_bool = True\n name = 'bool'\n\n def __repr__(self):\n return 'int %s' % ' '.join(self.qualifiers)\n\n def to_llvm(self, context):\n return int8.to_llvm(context)\n\n\nclass NumericType(NamedType):\n \"\"\"\n Base class for numeric types.\n\n .. attribute:: name\n\n name of the type\n\n .. attribute:: itemsize\n\n sizeof(type)\n\n .. attribute:: rank\n\n ordering of numeric types\n \"\"\"\n is_numeric = True\n\n\nclass IntType(NumericType):\n is_int = True\n is_int_like = True\n name = 'int'\n signed = True\n rank = 4\n itemsize = 4\n kind = INT_KIND\n\n def to_llvm(self, context):\n if self.itemsize == 1:\n return lc.Type.int(8)\n elif self.itemsize == 2:\n return lc.Type.int(16)\n elif self.itemsize == 4:\n return lc.Type.int(32)\n else:\n assert self.itemsize == 8, self\n return lc.Type.int(64)\n\n\nclass FloatType(NumericType):\n is_float = True\n kind = FLOAT_KIND\n\n @property\n def comparison_type_list(self):\n return self.subtype_list + [self.itemsize]\n\n def to_llvm(self, context):\n if self.itemsize == 4:\n return lc.Type.float()\n elif self.itemsize == 8:\n return lc.Type.double()\n else:\n assert self.itemsize == 16\n return lc.Type.fp128()\n\n\nclass ComplexType(NumericType):\n is_complex = True\n subtypes = ['base_type']\n kind = COMPLEX_KIND\n\n\nclass Py_ssize_t_Type(IntType):\n is_py_ssize_t = True\n name = 'Py_ssize_t'\n rank = 9\n signed = True\n\n def __init__(self, **kwds):\n super(Py_ssize_t_Type, self).__init__(**kwds)\n self.itemsize = _plat_bits / 8\n\n\nclass NPyIntp(IntType):\n is_numpy_intp = True\n name = 'npy_intp'\n\n def __init__(self, **kwds):\n super(NPyIntp, self).__init__(**kwds)\n import numpy as np\n ctypes_array = np.empty(0).ctypes.strides\n self.itemsize = ctypes.sizeof(ctypes_array._type_)\n\n\nclass CharType(IntType):\n is_char = True\n name = 'char'\n rank = 1\n signed = True\n\n def to_llvm(self, context):\n return lc.Type.int(8)\n\n\nclass CStringType(Type):\n is_c_string = True\n\n def __repr__(self):\n return 'const char *'\n\n def to_llvm(self, context):\n return char.pointer().to_llvm(context)\n\n\nclass VoidType(NamedType):\n is_void = True\n name = 'void'\n\n def to_llvm(self, context):\n return lc.Type.void()\n\n\nclass ObjectType(Type):\n is_object = True\n\n def __repr__(self):\n return 'PyObject *'\n\n\nclass FunctionType(Type):\n subtypes = ['return_type', 'args']\n is_function = True\n is_vararg = False\n\n def to_llvm(self, context):\n return lc.Type.function(self.return_type.to_llvm(context), [\n arg_type.to_llvm(context) for arg_type in self.args], self.\n is_vararg)\n\n def __str__(self):\n args = map(str, self.args)\n if self.is_vararg:\n args.append('...')\n return '%s (*)(%s)' % (self.return_type, ', '.join(args))\n\n\nclass VectorType(Type):\n subtypes = ['element_type']\n is_vector = True\n vector_size = None\n\n def __init__(self, element_type, vector_size, **kwds):\n super(VectorType, self).__init__(**kwds)\n assert (element_type.is_int or element_type.is_float\n ) and element_type.itemsize in (4, 8), element_type\n self.element_type = element_type\n self.vector_size = vector_size\n\n def to_llvm(self, context):\n return lc.Type.vector(self.element_type.to_llvm(context), self.\n vector_size)\n\n @property\n def comparison_type_list(self):\n return self.subtype_list + [self.vector_size]\n\n def __str__(self):\n itemsize = self.element_type.itemsize\n if self.element_type.is_float:\n if itemsize == 4:\n return '__m128'\n else:\n return '__m128d'\n elif itemsize == 4:\n return '__m128i'\n else:\n raise NotImplementedError\n\n\n<assignment token>\n<code token>\n",
"<docstring token>\n<assignment token>\n<import token>\n<code token>\n<import token>\n<code token>\n\n\nclass TypeMapper(object):\n <docstring token>\n <function token>\n <function token>\n\n def to_llvm(self, type):\n \"\"\"Return an LLVM type for the given type.\"\"\"\n raise NotImplementedError\n <function token>\n\n def promote_numeric(self, type1, type2):\n \"\"\"Promote two numeric types\"\"\"\n return max([type1, type2], key=lambda type: type.rank)\n\n def promote_arrays(self, type1, type2):\n \"\"\"Promote two array types in an expression to a new array type\"\"\"\n equal_ndim = type1.ndim == type2.ndim\n return ArrayType(self.promote_types(type1.dtype, type2.dtype), ndim\n =max(type1.ndim, type2.ndim), is_c_contig=equal_ndim and type1.\n is_c_contig and type2.is_c_contig, is_f_contig=equal_ndim and\n type1.is_f_contig and type2.is_f_contig)\n\n def promote_types(self, type1, type2):\n \"\"\"Promote two arbitrary types\"\"\"\n if type1.is_pointer and type2.is_int_like:\n return type1\n elif type2.is_pointer and type2.is_int_like:\n return type2\n elif type1.is_object or type2.is_object:\n return object_\n elif type1.is_numeric and type2.is_numeric:\n return self.promote_numeric(type1, type2)\n elif type1.is_array and type2:\n return self.promote_arrays(type1, type2)\n else:\n raise minierror.UnpromotableTypeError((type1, type2))\n\n\n<function token>\n<assignment token>\n\n\nclass Type(miniutils.ComparableObjectMixin):\n \"\"\"\n Base class for all types.\n\n .. attribute:: subtypes\n\n The list of subtypes to allow comparing and hashing them recursively\n \"\"\"\n is_array = False\n is_pointer = False\n is_typewrapper = False\n is_bool = False\n is_numeric = False\n is_py_ssize_t = False\n is_char = False\n is_int = False\n is_float = False\n is_c_string = False\n is_object = False\n is_function = False\n is_int_like = False\n is_complex = False\n is_void = False\n kind = NONE_KIND\n subtypes = []\n\n def __init__(self, **kwds):\n vars(self).update(kwds)\n self.qualifiers = kwds.get('qualifiers', frozenset())\n\n def qualify(self, *qualifiers):\n \"\"\"Qualify this type with a qualifier such as ``const`` or ``restrict``\"\"\"\n qualifiers = list(qualifiers)\n qualifiers.extend(self.qualifiers)\n attribs = dict(vars(self), qualifiers=qualifiers)\n return type(self)(**attribs)\n\n def unqualify(self, *unqualifiers):\n \"\"\"Remove the given qualifiers from the type\"\"\"\n unqualifiers = set(unqualifiers)\n qualifiers = [q for q in self.qualifiers if q not in unqualifiers]\n attribs = dict(vars(self), qualifiers=qualifiers)\n return type(self)(**attribs)\n\n def pointer(self):\n \"\"\"Get a pointer to this type\"\"\"\n return PointerType(self)\n\n @property\n def subtype_list(self):\n return [getattr(self, subtype) for subtype in self.subtypes]\n\n @property\n def comparison_type_list(self):\n return self.subtype_list\n\n def __eq__(self, other):\n return type(self) is type(other\n ) and self.comparison_type_list == other.comparison_type_list\n\n def __ne__(self, other):\n return not self == other\n\n def __hash__(self):\n h = hash(type(self))\n for subtype in self.comparison_type_list:\n h = h ^ hash(subtype)\n return h\n\n def __getitem__(self, item):\n assert isinstance(item, (tuple, slice))\n\n def verify_slice(s):\n if s.start or s.stop or s.step not in (None, 1):\n raise minierror.InvalidTypeSpecification(\n 'Only a step of 1 may be provided to indicate C or Fortran contiguity'\n )\n if isinstance(item, tuple):\n step_idx = None\n for idx, s in enumerate(item):\n verify_slice(s)\n if s.step and (step_idx or idx not in (0, len(item) - 1)):\n raise minierror.InvalidTypeSpecification(\n 'Step may only be provided once, and only in the first or last dimension.'\n )\n if s.step == 1:\n step_idx = idx\n return ArrayType(self, len(item), is_c_contig=step_idx == len(\n item) - 1, is_f_contig=step_idx == 0)\n else:\n verify_slice(item)\n return ArrayType(self, 1, is_c_contig=bool(item.step))\n\n def to_llvm(self, context):\n \"\"\"Get a corresponding llvm type from this type\"\"\"\n return context.to_llvm(self)\n\n def __getattr__(self, attr):\n if attr.startswith('is_'):\n return False\n return getattr(type(self), attr)\n\n\nclass ArrayType(Type):\n is_array = True\n subtypes = ['dtype']\n\n def __init__(self, dtype, ndim, is_c_contig=False, is_f_contig=False,\n inner_contig=False, broadcasting=None):\n super(ArrayType, self).__init__()\n self.dtype = dtype\n self.ndim = ndim\n self.is_c_contig = is_c_contig\n self.is_f_contig = is_f_contig\n self.inner_contig = inner_contig or is_c_contig or is_f_contig\n self.broadcasting = broadcasting or (True,) * ndim\n\n @property\n def comparison_type_list(self):\n return [self.dtype, self.is_c_contig, self.is_f_contig, self.\n inner_contig]\n\n def pointer(self):\n raise Exception('You probably want a pointer type to the dtype')\n\n def to_llvm(self, context):\n return context.to_llvm(self)\n\n def __repr__(self):\n axes = [':'] * self.ndim\n if self.is_c_contig:\n axes[-1] = '::1'\n elif self.is_f_contig:\n axes[0] = '::1'\n return '%s[%s]' % (self.dtype, ', '.join(axes))\n\n\nclass PointerType(Type):\n is_pointer = True\n subtypes = ['base_type']\n\n def __init__(self, base_type, **kwds):\n super(PointerType, self).__init__(**kwds)\n self.base_type = base_type\n\n def __repr__(self):\n return '%s *%s' % (self.base_type, ' '.join(self.qualifiers))\n\n def to_llvm(self, context):\n return llvm.core.Type.pointer(self.base_type.to_llvm(context))\n\n\nclass CArrayType(Type):\n is_carray = True\n subtypes = ['base_type']\n\n def __init__(self, base_type, size, **kwds):\n super(CArrayType, self).__init__(**kwds)\n self.base_type = base_type\n self.size = size\n\n def __repr__(self):\n return '%s[%d]' % (self.base_type, self.length)\n\n def to_llvm(self, context):\n return llvm.core.Type.array(self.base_type.to_llvm(context), self.size)\n\n\nclass TypeWrapper(Type):\n is_typewrapper = True\n subtypes = ['opaque_type']\n\n def __init__(self, opaque_type, context, **kwds):\n super(TypeWrapper, self).__init__(**kwds)\n self.opaque_type = opaque_type\n self.context = context\n\n def __repr__(self):\n return self.context.declare_type(self)\n\n def __deepcopy__(self, memo):\n return self\n\n\nclass NamedType(Type):\n name = None\n\n def __eq__(self, other):\n return isinstance(other, NamedType) and self.name == other.name\n\n def __repr__(self):\n if self.qualifiers:\n return '%s %s' % (self.name, ' '.join(self.qualifiers))\n return self.name\n\n\nclass BoolType(NamedType):\n is_bool = True\n name = 'bool'\n\n def __repr__(self):\n return 'int %s' % ' '.join(self.qualifiers)\n\n def to_llvm(self, context):\n return int8.to_llvm(context)\n\n\nclass NumericType(NamedType):\n \"\"\"\n Base class for numeric types.\n\n .. attribute:: name\n\n name of the type\n\n .. attribute:: itemsize\n\n sizeof(type)\n\n .. attribute:: rank\n\n ordering of numeric types\n \"\"\"\n is_numeric = True\n\n\nclass IntType(NumericType):\n is_int = True\n is_int_like = True\n name = 'int'\n signed = True\n rank = 4\n itemsize = 4\n kind = INT_KIND\n\n def to_llvm(self, context):\n if self.itemsize == 1:\n return lc.Type.int(8)\n elif self.itemsize == 2:\n return lc.Type.int(16)\n elif self.itemsize == 4:\n return lc.Type.int(32)\n else:\n assert self.itemsize == 8, self\n return lc.Type.int(64)\n\n\nclass FloatType(NumericType):\n is_float = True\n kind = FLOAT_KIND\n\n @property\n def comparison_type_list(self):\n return self.subtype_list + [self.itemsize]\n\n def to_llvm(self, context):\n if self.itemsize == 4:\n return lc.Type.float()\n elif self.itemsize == 8:\n return lc.Type.double()\n else:\n assert self.itemsize == 16\n return lc.Type.fp128()\n\n\nclass ComplexType(NumericType):\n is_complex = True\n subtypes = ['base_type']\n kind = COMPLEX_KIND\n\n\nclass Py_ssize_t_Type(IntType):\n is_py_ssize_t = True\n name = 'Py_ssize_t'\n rank = 9\n signed = True\n\n def __init__(self, **kwds):\n super(Py_ssize_t_Type, self).__init__(**kwds)\n self.itemsize = _plat_bits / 8\n\n\nclass NPyIntp(IntType):\n is_numpy_intp = True\n name = 'npy_intp'\n\n def __init__(self, **kwds):\n super(NPyIntp, self).__init__(**kwds)\n import numpy as np\n ctypes_array = np.empty(0).ctypes.strides\n self.itemsize = ctypes.sizeof(ctypes_array._type_)\n\n\nclass CharType(IntType):\n is_char = True\n name = 'char'\n rank = 1\n signed = True\n\n def to_llvm(self, context):\n return lc.Type.int(8)\n\n\nclass CStringType(Type):\n is_c_string = True\n\n def __repr__(self):\n return 'const char *'\n\n def to_llvm(self, context):\n return char.pointer().to_llvm(context)\n\n\nclass VoidType(NamedType):\n is_void = True\n name = 'void'\n\n def to_llvm(self, context):\n return lc.Type.void()\n\n\nclass ObjectType(Type):\n is_object = True\n\n def __repr__(self):\n return 'PyObject *'\n\n\nclass FunctionType(Type):\n subtypes = ['return_type', 'args']\n is_function = True\n is_vararg = False\n\n def to_llvm(self, context):\n return lc.Type.function(self.return_type.to_llvm(context), [\n arg_type.to_llvm(context) for arg_type in self.args], self.\n is_vararg)\n\n def __str__(self):\n args = map(str, self.args)\n if self.is_vararg:\n args.append('...')\n return '%s (*)(%s)' % (self.return_type, ', '.join(args))\n\n\nclass VectorType(Type):\n subtypes = ['element_type']\n is_vector = True\n vector_size = None\n\n def __init__(self, element_type, vector_size, **kwds):\n super(VectorType, self).__init__(**kwds)\n assert (element_type.is_int or element_type.is_float\n ) and element_type.itemsize in (4, 8), element_type\n self.element_type = element_type\n self.vector_size = vector_size\n\n def to_llvm(self, context):\n return lc.Type.vector(self.element_type.to_llvm(context), self.\n vector_size)\n\n @property\n def comparison_type_list(self):\n return self.subtype_list + [self.vector_size]\n\n def __str__(self):\n itemsize = self.element_type.itemsize\n if self.element_type.is_float:\n if itemsize == 4:\n return '__m128'\n else:\n return '__m128d'\n elif itemsize == 4:\n return '__m128i'\n else:\n raise NotImplementedError\n\n\n<assignment token>\n<code token>\n",
"<docstring token>\n<assignment token>\n<import token>\n<code token>\n<import token>\n<code token>\n\n\nclass TypeMapper(object):\n <docstring token>\n <function token>\n <function token>\n\n def to_llvm(self, type):\n \"\"\"Return an LLVM type for the given type.\"\"\"\n raise NotImplementedError\n <function token>\n <function token>\n\n def promote_arrays(self, type1, type2):\n \"\"\"Promote two array types in an expression to a new array type\"\"\"\n equal_ndim = type1.ndim == type2.ndim\n return ArrayType(self.promote_types(type1.dtype, type2.dtype), ndim\n =max(type1.ndim, type2.ndim), is_c_contig=equal_ndim and type1.\n is_c_contig and type2.is_c_contig, is_f_contig=equal_ndim and\n type1.is_f_contig and type2.is_f_contig)\n\n def promote_types(self, type1, type2):\n \"\"\"Promote two arbitrary types\"\"\"\n if type1.is_pointer and type2.is_int_like:\n return type1\n elif type2.is_pointer and type2.is_int_like:\n return type2\n elif type1.is_object or type2.is_object:\n return object_\n elif type1.is_numeric and type2.is_numeric:\n return self.promote_numeric(type1, type2)\n elif type1.is_array and type2:\n return self.promote_arrays(type1, type2)\n else:\n raise minierror.UnpromotableTypeError((type1, type2))\n\n\n<function token>\n<assignment token>\n\n\nclass Type(miniutils.ComparableObjectMixin):\n \"\"\"\n Base class for all types.\n\n .. attribute:: subtypes\n\n The list of subtypes to allow comparing and hashing them recursively\n \"\"\"\n is_array = False\n is_pointer = False\n is_typewrapper = False\n is_bool = False\n is_numeric = False\n is_py_ssize_t = False\n is_char = False\n is_int = False\n is_float = False\n is_c_string = False\n is_object = False\n is_function = False\n is_int_like = False\n is_complex = False\n is_void = False\n kind = NONE_KIND\n subtypes = []\n\n def __init__(self, **kwds):\n vars(self).update(kwds)\n self.qualifiers = kwds.get('qualifiers', frozenset())\n\n def qualify(self, *qualifiers):\n \"\"\"Qualify this type with a qualifier such as ``const`` or ``restrict``\"\"\"\n qualifiers = list(qualifiers)\n qualifiers.extend(self.qualifiers)\n attribs = dict(vars(self), qualifiers=qualifiers)\n return type(self)(**attribs)\n\n def unqualify(self, *unqualifiers):\n \"\"\"Remove the given qualifiers from the type\"\"\"\n unqualifiers = set(unqualifiers)\n qualifiers = [q for q in self.qualifiers if q not in unqualifiers]\n attribs = dict(vars(self), qualifiers=qualifiers)\n return type(self)(**attribs)\n\n def pointer(self):\n \"\"\"Get a pointer to this type\"\"\"\n return PointerType(self)\n\n @property\n def subtype_list(self):\n return [getattr(self, subtype) for subtype in self.subtypes]\n\n @property\n def comparison_type_list(self):\n return self.subtype_list\n\n def __eq__(self, other):\n return type(self) is type(other\n ) and self.comparison_type_list == other.comparison_type_list\n\n def __ne__(self, other):\n return not self == other\n\n def __hash__(self):\n h = hash(type(self))\n for subtype in self.comparison_type_list:\n h = h ^ hash(subtype)\n return h\n\n def __getitem__(self, item):\n assert isinstance(item, (tuple, slice))\n\n def verify_slice(s):\n if s.start or s.stop or s.step not in (None, 1):\n raise minierror.InvalidTypeSpecification(\n 'Only a step of 1 may be provided to indicate C or Fortran contiguity'\n )\n if isinstance(item, tuple):\n step_idx = None\n for idx, s in enumerate(item):\n verify_slice(s)\n if s.step and (step_idx or idx not in (0, len(item) - 1)):\n raise minierror.InvalidTypeSpecification(\n 'Step may only be provided once, and only in the first or last dimension.'\n )\n if s.step == 1:\n step_idx = idx\n return ArrayType(self, len(item), is_c_contig=step_idx == len(\n item) - 1, is_f_contig=step_idx == 0)\n else:\n verify_slice(item)\n return ArrayType(self, 1, is_c_contig=bool(item.step))\n\n def to_llvm(self, context):\n \"\"\"Get a corresponding llvm type from this type\"\"\"\n return context.to_llvm(self)\n\n def __getattr__(self, attr):\n if attr.startswith('is_'):\n return False\n return getattr(type(self), attr)\n\n\nclass ArrayType(Type):\n is_array = True\n subtypes = ['dtype']\n\n def __init__(self, dtype, ndim, is_c_contig=False, is_f_contig=False,\n inner_contig=False, broadcasting=None):\n super(ArrayType, self).__init__()\n self.dtype = dtype\n self.ndim = ndim\n self.is_c_contig = is_c_contig\n self.is_f_contig = is_f_contig\n self.inner_contig = inner_contig or is_c_contig or is_f_contig\n self.broadcasting = broadcasting or (True,) * ndim\n\n @property\n def comparison_type_list(self):\n return [self.dtype, self.is_c_contig, self.is_f_contig, self.\n inner_contig]\n\n def pointer(self):\n raise Exception('You probably want a pointer type to the dtype')\n\n def to_llvm(self, context):\n return context.to_llvm(self)\n\n def __repr__(self):\n axes = [':'] * self.ndim\n if self.is_c_contig:\n axes[-1] = '::1'\n elif self.is_f_contig:\n axes[0] = '::1'\n return '%s[%s]' % (self.dtype, ', '.join(axes))\n\n\nclass PointerType(Type):\n is_pointer = True\n subtypes = ['base_type']\n\n def __init__(self, base_type, **kwds):\n super(PointerType, self).__init__(**kwds)\n self.base_type = base_type\n\n def __repr__(self):\n return '%s *%s' % (self.base_type, ' '.join(self.qualifiers))\n\n def to_llvm(self, context):\n return llvm.core.Type.pointer(self.base_type.to_llvm(context))\n\n\nclass CArrayType(Type):\n is_carray = True\n subtypes = ['base_type']\n\n def __init__(self, base_type, size, **kwds):\n super(CArrayType, self).__init__(**kwds)\n self.base_type = base_type\n self.size = size\n\n def __repr__(self):\n return '%s[%d]' % (self.base_type, self.length)\n\n def to_llvm(self, context):\n return llvm.core.Type.array(self.base_type.to_llvm(context), self.size)\n\n\nclass TypeWrapper(Type):\n is_typewrapper = True\n subtypes = ['opaque_type']\n\n def __init__(self, opaque_type, context, **kwds):\n super(TypeWrapper, self).__init__(**kwds)\n self.opaque_type = opaque_type\n self.context = context\n\n def __repr__(self):\n return self.context.declare_type(self)\n\n def __deepcopy__(self, memo):\n return self\n\n\nclass NamedType(Type):\n name = None\n\n def __eq__(self, other):\n return isinstance(other, NamedType) and self.name == other.name\n\n def __repr__(self):\n if self.qualifiers:\n return '%s %s' % (self.name, ' '.join(self.qualifiers))\n return self.name\n\n\nclass BoolType(NamedType):\n is_bool = True\n name = 'bool'\n\n def __repr__(self):\n return 'int %s' % ' '.join(self.qualifiers)\n\n def to_llvm(self, context):\n return int8.to_llvm(context)\n\n\nclass NumericType(NamedType):\n \"\"\"\n Base class for numeric types.\n\n .. attribute:: name\n\n name of the type\n\n .. attribute:: itemsize\n\n sizeof(type)\n\n .. attribute:: rank\n\n ordering of numeric types\n \"\"\"\n is_numeric = True\n\n\nclass IntType(NumericType):\n is_int = True\n is_int_like = True\n name = 'int'\n signed = True\n rank = 4\n itemsize = 4\n kind = INT_KIND\n\n def to_llvm(self, context):\n if self.itemsize == 1:\n return lc.Type.int(8)\n elif self.itemsize == 2:\n return lc.Type.int(16)\n elif self.itemsize == 4:\n return lc.Type.int(32)\n else:\n assert self.itemsize == 8, self\n return lc.Type.int(64)\n\n\nclass FloatType(NumericType):\n is_float = True\n kind = FLOAT_KIND\n\n @property\n def comparison_type_list(self):\n return self.subtype_list + [self.itemsize]\n\n def to_llvm(self, context):\n if self.itemsize == 4:\n return lc.Type.float()\n elif self.itemsize == 8:\n return lc.Type.double()\n else:\n assert self.itemsize == 16\n return lc.Type.fp128()\n\n\nclass ComplexType(NumericType):\n is_complex = True\n subtypes = ['base_type']\n kind = COMPLEX_KIND\n\n\nclass Py_ssize_t_Type(IntType):\n is_py_ssize_t = True\n name = 'Py_ssize_t'\n rank = 9\n signed = True\n\n def __init__(self, **kwds):\n super(Py_ssize_t_Type, self).__init__(**kwds)\n self.itemsize = _plat_bits / 8\n\n\nclass NPyIntp(IntType):\n is_numpy_intp = True\n name = 'npy_intp'\n\n def __init__(self, **kwds):\n super(NPyIntp, self).__init__(**kwds)\n import numpy as np\n ctypes_array = np.empty(0).ctypes.strides\n self.itemsize = ctypes.sizeof(ctypes_array._type_)\n\n\nclass CharType(IntType):\n is_char = True\n name = 'char'\n rank = 1\n signed = True\n\n def to_llvm(self, context):\n return lc.Type.int(8)\n\n\nclass CStringType(Type):\n is_c_string = True\n\n def __repr__(self):\n return 'const char *'\n\n def to_llvm(self, context):\n return char.pointer().to_llvm(context)\n\n\nclass VoidType(NamedType):\n is_void = True\n name = 'void'\n\n def to_llvm(self, context):\n return lc.Type.void()\n\n\nclass ObjectType(Type):\n is_object = True\n\n def __repr__(self):\n return 'PyObject *'\n\n\nclass FunctionType(Type):\n subtypes = ['return_type', 'args']\n is_function = True\n is_vararg = False\n\n def to_llvm(self, context):\n return lc.Type.function(self.return_type.to_llvm(context), [\n arg_type.to_llvm(context) for arg_type in self.args], self.\n is_vararg)\n\n def __str__(self):\n args = map(str, self.args)\n if self.is_vararg:\n args.append('...')\n return '%s (*)(%s)' % (self.return_type, ', '.join(args))\n\n\nclass VectorType(Type):\n subtypes = ['element_type']\n is_vector = True\n vector_size = None\n\n def __init__(self, element_type, vector_size, **kwds):\n super(VectorType, self).__init__(**kwds)\n assert (element_type.is_int or element_type.is_float\n ) and element_type.itemsize in (4, 8), element_type\n self.element_type = element_type\n self.vector_size = vector_size\n\n def to_llvm(self, context):\n return lc.Type.vector(self.element_type.to_llvm(context), self.\n vector_size)\n\n @property\n def comparison_type_list(self):\n return self.subtype_list + [self.vector_size]\n\n def __str__(self):\n itemsize = self.element_type.itemsize\n if self.element_type.is_float:\n if itemsize == 4:\n return '__m128'\n else:\n return '__m128d'\n elif itemsize == 4:\n return '__m128i'\n else:\n raise NotImplementedError\n\n\n<assignment token>\n<code token>\n",
"<docstring token>\n<assignment token>\n<import token>\n<code token>\n<import token>\n<code token>\n\n\nclass TypeMapper(object):\n <docstring token>\n <function token>\n <function token>\n\n def to_llvm(self, type):\n \"\"\"Return an LLVM type for the given type.\"\"\"\n raise NotImplementedError\n <function token>\n <function token>\n <function token>\n\n def promote_types(self, type1, type2):\n \"\"\"Promote two arbitrary types\"\"\"\n if type1.is_pointer and type2.is_int_like:\n return type1\n elif type2.is_pointer and type2.is_int_like:\n return type2\n elif type1.is_object or type2.is_object:\n return object_\n elif type1.is_numeric and type2.is_numeric:\n return self.promote_numeric(type1, type2)\n elif type1.is_array and type2:\n return self.promote_arrays(type1, type2)\n else:\n raise minierror.UnpromotableTypeError((type1, type2))\n\n\n<function token>\n<assignment token>\n\n\nclass Type(miniutils.ComparableObjectMixin):\n \"\"\"\n Base class for all types.\n\n .. attribute:: subtypes\n\n The list of subtypes to allow comparing and hashing them recursively\n \"\"\"\n is_array = False\n is_pointer = False\n is_typewrapper = False\n is_bool = False\n is_numeric = False\n is_py_ssize_t = False\n is_char = False\n is_int = False\n is_float = False\n is_c_string = False\n is_object = False\n is_function = False\n is_int_like = False\n is_complex = False\n is_void = False\n kind = NONE_KIND\n subtypes = []\n\n def __init__(self, **kwds):\n vars(self).update(kwds)\n self.qualifiers = kwds.get('qualifiers', frozenset())\n\n def qualify(self, *qualifiers):\n \"\"\"Qualify this type with a qualifier such as ``const`` or ``restrict``\"\"\"\n qualifiers = list(qualifiers)\n qualifiers.extend(self.qualifiers)\n attribs = dict(vars(self), qualifiers=qualifiers)\n return type(self)(**attribs)\n\n def unqualify(self, *unqualifiers):\n \"\"\"Remove the given qualifiers from the type\"\"\"\n unqualifiers = set(unqualifiers)\n qualifiers = [q for q in self.qualifiers if q not in unqualifiers]\n attribs = dict(vars(self), qualifiers=qualifiers)\n return type(self)(**attribs)\n\n def pointer(self):\n \"\"\"Get a pointer to this type\"\"\"\n return PointerType(self)\n\n @property\n def subtype_list(self):\n return [getattr(self, subtype) for subtype in self.subtypes]\n\n @property\n def comparison_type_list(self):\n return self.subtype_list\n\n def __eq__(self, other):\n return type(self) is type(other\n ) and self.comparison_type_list == other.comparison_type_list\n\n def __ne__(self, other):\n return not self == other\n\n def __hash__(self):\n h = hash(type(self))\n for subtype in self.comparison_type_list:\n h = h ^ hash(subtype)\n return h\n\n def __getitem__(self, item):\n assert isinstance(item, (tuple, slice))\n\n def verify_slice(s):\n if s.start or s.stop or s.step not in (None, 1):\n raise minierror.InvalidTypeSpecification(\n 'Only a step of 1 may be provided to indicate C or Fortran contiguity'\n )\n if isinstance(item, tuple):\n step_idx = None\n for idx, s in enumerate(item):\n verify_slice(s)\n if s.step and (step_idx or idx not in (0, len(item) - 1)):\n raise minierror.InvalidTypeSpecification(\n 'Step may only be provided once, and only in the first or last dimension.'\n )\n if s.step == 1:\n step_idx = idx\n return ArrayType(self, len(item), is_c_contig=step_idx == len(\n item) - 1, is_f_contig=step_idx == 0)\n else:\n verify_slice(item)\n return ArrayType(self, 1, is_c_contig=bool(item.step))\n\n def to_llvm(self, context):\n \"\"\"Get a corresponding llvm type from this type\"\"\"\n return context.to_llvm(self)\n\n def __getattr__(self, attr):\n if attr.startswith('is_'):\n return False\n return getattr(type(self), attr)\n\n\nclass ArrayType(Type):\n is_array = True\n subtypes = ['dtype']\n\n def __init__(self, dtype, ndim, is_c_contig=False, is_f_contig=False,\n inner_contig=False, broadcasting=None):\n super(ArrayType, self).__init__()\n self.dtype = dtype\n self.ndim = ndim\n self.is_c_contig = is_c_contig\n self.is_f_contig = is_f_contig\n self.inner_contig = inner_contig or is_c_contig or is_f_contig\n self.broadcasting = broadcasting or (True,) * ndim\n\n @property\n def comparison_type_list(self):\n return [self.dtype, self.is_c_contig, self.is_f_contig, self.\n inner_contig]\n\n def pointer(self):\n raise Exception('You probably want a pointer type to the dtype')\n\n def to_llvm(self, context):\n return context.to_llvm(self)\n\n def __repr__(self):\n axes = [':'] * self.ndim\n if self.is_c_contig:\n axes[-1] = '::1'\n elif self.is_f_contig:\n axes[0] = '::1'\n return '%s[%s]' % (self.dtype, ', '.join(axes))\n\n\nclass PointerType(Type):\n is_pointer = True\n subtypes = ['base_type']\n\n def __init__(self, base_type, **kwds):\n super(PointerType, self).__init__(**kwds)\n self.base_type = base_type\n\n def __repr__(self):\n return '%s *%s' % (self.base_type, ' '.join(self.qualifiers))\n\n def to_llvm(self, context):\n return llvm.core.Type.pointer(self.base_type.to_llvm(context))\n\n\nclass CArrayType(Type):\n is_carray = True\n subtypes = ['base_type']\n\n def __init__(self, base_type, size, **kwds):\n super(CArrayType, self).__init__(**kwds)\n self.base_type = base_type\n self.size = size\n\n def __repr__(self):\n return '%s[%d]' % (self.base_type, self.length)\n\n def to_llvm(self, context):\n return llvm.core.Type.array(self.base_type.to_llvm(context), self.size)\n\n\nclass TypeWrapper(Type):\n is_typewrapper = True\n subtypes = ['opaque_type']\n\n def __init__(self, opaque_type, context, **kwds):\n super(TypeWrapper, self).__init__(**kwds)\n self.opaque_type = opaque_type\n self.context = context\n\n def __repr__(self):\n return self.context.declare_type(self)\n\n def __deepcopy__(self, memo):\n return self\n\n\nclass NamedType(Type):\n name = None\n\n def __eq__(self, other):\n return isinstance(other, NamedType) and self.name == other.name\n\n def __repr__(self):\n if self.qualifiers:\n return '%s %s' % (self.name, ' '.join(self.qualifiers))\n return self.name\n\n\nclass BoolType(NamedType):\n is_bool = True\n name = 'bool'\n\n def __repr__(self):\n return 'int %s' % ' '.join(self.qualifiers)\n\n def to_llvm(self, context):\n return int8.to_llvm(context)\n\n\nclass NumericType(NamedType):\n \"\"\"\n Base class for numeric types.\n\n .. attribute:: name\n\n name of the type\n\n .. attribute:: itemsize\n\n sizeof(type)\n\n .. attribute:: rank\n\n ordering of numeric types\n \"\"\"\n is_numeric = True\n\n\nclass IntType(NumericType):\n is_int = True\n is_int_like = True\n name = 'int'\n signed = True\n rank = 4\n itemsize = 4\n kind = INT_KIND\n\n def to_llvm(self, context):\n if self.itemsize == 1:\n return lc.Type.int(8)\n elif self.itemsize == 2:\n return lc.Type.int(16)\n elif self.itemsize == 4:\n return lc.Type.int(32)\n else:\n assert self.itemsize == 8, self\n return lc.Type.int(64)\n\n\nclass FloatType(NumericType):\n is_float = True\n kind = FLOAT_KIND\n\n @property\n def comparison_type_list(self):\n return self.subtype_list + [self.itemsize]\n\n def to_llvm(self, context):\n if self.itemsize == 4:\n return lc.Type.float()\n elif self.itemsize == 8:\n return lc.Type.double()\n else:\n assert self.itemsize == 16\n return lc.Type.fp128()\n\n\nclass ComplexType(NumericType):\n is_complex = True\n subtypes = ['base_type']\n kind = COMPLEX_KIND\n\n\nclass Py_ssize_t_Type(IntType):\n is_py_ssize_t = True\n name = 'Py_ssize_t'\n rank = 9\n signed = True\n\n def __init__(self, **kwds):\n super(Py_ssize_t_Type, self).__init__(**kwds)\n self.itemsize = _plat_bits / 8\n\n\nclass NPyIntp(IntType):\n is_numpy_intp = True\n name = 'npy_intp'\n\n def __init__(self, **kwds):\n super(NPyIntp, self).__init__(**kwds)\n import numpy as np\n ctypes_array = np.empty(0).ctypes.strides\n self.itemsize = ctypes.sizeof(ctypes_array._type_)\n\n\nclass CharType(IntType):\n is_char = True\n name = 'char'\n rank = 1\n signed = True\n\n def to_llvm(self, context):\n return lc.Type.int(8)\n\n\nclass CStringType(Type):\n is_c_string = True\n\n def __repr__(self):\n return 'const char *'\n\n def to_llvm(self, context):\n return char.pointer().to_llvm(context)\n\n\nclass VoidType(NamedType):\n is_void = True\n name = 'void'\n\n def to_llvm(self, context):\n return lc.Type.void()\n\n\nclass ObjectType(Type):\n is_object = True\n\n def __repr__(self):\n return 'PyObject *'\n\n\nclass FunctionType(Type):\n subtypes = ['return_type', 'args']\n is_function = True\n is_vararg = False\n\n def to_llvm(self, context):\n return lc.Type.function(self.return_type.to_llvm(context), [\n arg_type.to_llvm(context) for arg_type in self.args], self.\n is_vararg)\n\n def __str__(self):\n args = map(str, self.args)\n if self.is_vararg:\n args.append('...')\n return '%s (*)(%s)' % (self.return_type, ', '.join(args))\n\n\nclass VectorType(Type):\n subtypes = ['element_type']\n is_vector = True\n vector_size = None\n\n def __init__(self, element_type, vector_size, **kwds):\n super(VectorType, self).__init__(**kwds)\n assert (element_type.is_int or element_type.is_float\n ) and element_type.itemsize in (4, 8), element_type\n self.element_type = element_type\n self.vector_size = vector_size\n\n def to_llvm(self, context):\n return lc.Type.vector(self.element_type.to_llvm(context), self.\n vector_size)\n\n @property\n def comparison_type_list(self):\n return self.subtype_list + [self.vector_size]\n\n def __str__(self):\n itemsize = self.element_type.itemsize\n if self.element_type.is_float:\n if itemsize == 4:\n return '__m128'\n else:\n return '__m128d'\n elif itemsize == 4:\n return '__m128i'\n else:\n raise NotImplementedError\n\n\n<assignment token>\n<code token>\n",
"<docstring token>\n<assignment token>\n<import token>\n<code token>\n<import token>\n<code token>\n\n\nclass TypeMapper(object):\n <docstring token>\n <function token>\n <function token>\n\n def to_llvm(self, type):\n \"\"\"Return an LLVM type for the given type.\"\"\"\n raise NotImplementedError\n <function token>\n <function token>\n <function token>\n <function token>\n\n\n<function token>\n<assignment token>\n\n\nclass Type(miniutils.ComparableObjectMixin):\n \"\"\"\n Base class for all types.\n\n .. attribute:: subtypes\n\n The list of subtypes to allow comparing and hashing them recursively\n \"\"\"\n is_array = False\n is_pointer = False\n is_typewrapper = False\n is_bool = False\n is_numeric = False\n is_py_ssize_t = False\n is_char = False\n is_int = False\n is_float = False\n is_c_string = False\n is_object = False\n is_function = False\n is_int_like = False\n is_complex = False\n is_void = False\n kind = NONE_KIND\n subtypes = []\n\n def __init__(self, **kwds):\n vars(self).update(kwds)\n self.qualifiers = kwds.get('qualifiers', frozenset())\n\n def qualify(self, *qualifiers):\n \"\"\"Qualify this type with a qualifier such as ``const`` or ``restrict``\"\"\"\n qualifiers = list(qualifiers)\n qualifiers.extend(self.qualifiers)\n attribs = dict(vars(self), qualifiers=qualifiers)\n return type(self)(**attribs)\n\n def unqualify(self, *unqualifiers):\n \"\"\"Remove the given qualifiers from the type\"\"\"\n unqualifiers = set(unqualifiers)\n qualifiers = [q for q in self.qualifiers if q not in unqualifiers]\n attribs = dict(vars(self), qualifiers=qualifiers)\n return type(self)(**attribs)\n\n def pointer(self):\n \"\"\"Get a pointer to this type\"\"\"\n return PointerType(self)\n\n @property\n def subtype_list(self):\n return [getattr(self, subtype) for subtype in self.subtypes]\n\n @property\n def comparison_type_list(self):\n return self.subtype_list\n\n def __eq__(self, other):\n return type(self) is type(other\n ) and self.comparison_type_list == other.comparison_type_list\n\n def __ne__(self, other):\n return not self == other\n\n def __hash__(self):\n h = hash(type(self))\n for subtype in self.comparison_type_list:\n h = h ^ hash(subtype)\n return h\n\n def __getitem__(self, item):\n assert isinstance(item, (tuple, slice))\n\n def verify_slice(s):\n if s.start or s.stop or s.step not in (None, 1):\n raise minierror.InvalidTypeSpecification(\n 'Only a step of 1 may be provided to indicate C or Fortran contiguity'\n )\n if isinstance(item, tuple):\n step_idx = None\n for idx, s in enumerate(item):\n verify_slice(s)\n if s.step and (step_idx or idx not in (0, len(item) - 1)):\n raise minierror.InvalidTypeSpecification(\n 'Step may only be provided once, and only in the first or last dimension.'\n )\n if s.step == 1:\n step_idx = idx\n return ArrayType(self, len(item), is_c_contig=step_idx == len(\n item) - 1, is_f_contig=step_idx == 0)\n else:\n verify_slice(item)\n return ArrayType(self, 1, is_c_contig=bool(item.step))\n\n def to_llvm(self, context):\n \"\"\"Get a corresponding llvm type from this type\"\"\"\n return context.to_llvm(self)\n\n def __getattr__(self, attr):\n if attr.startswith('is_'):\n return False\n return getattr(type(self), attr)\n\n\nclass ArrayType(Type):\n is_array = True\n subtypes = ['dtype']\n\n def __init__(self, dtype, ndim, is_c_contig=False, is_f_contig=False,\n inner_contig=False, broadcasting=None):\n super(ArrayType, self).__init__()\n self.dtype = dtype\n self.ndim = ndim\n self.is_c_contig = is_c_contig\n self.is_f_contig = is_f_contig\n self.inner_contig = inner_contig or is_c_contig or is_f_contig\n self.broadcasting = broadcasting or (True,) * ndim\n\n @property\n def comparison_type_list(self):\n return [self.dtype, self.is_c_contig, self.is_f_contig, self.\n inner_contig]\n\n def pointer(self):\n raise Exception('You probably want a pointer type to the dtype')\n\n def to_llvm(self, context):\n return context.to_llvm(self)\n\n def __repr__(self):\n axes = [':'] * self.ndim\n if self.is_c_contig:\n axes[-1] = '::1'\n elif self.is_f_contig:\n axes[0] = '::1'\n return '%s[%s]' % (self.dtype, ', '.join(axes))\n\n\nclass PointerType(Type):\n is_pointer = True\n subtypes = ['base_type']\n\n def __init__(self, base_type, **kwds):\n super(PointerType, self).__init__(**kwds)\n self.base_type = base_type\n\n def __repr__(self):\n return '%s *%s' % (self.base_type, ' '.join(self.qualifiers))\n\n def to_llvm(self, context):\n return llvm.core.Type.pointer(self.base_type.to_llvm(context))\n\n\nclass CArrayType(Type):\n is_carray = True\n subtypes = ['base_type']\n\n def __init__(self, base_type, size, **kwds):\n super(CArrayType, self).__init__(**kwds)\n self.base_type = base_type\n self.size = size\n\n def __repr__(self):\n return '%s[%d]' % (self.base_type, self.length)\n\n def to_llvm(self, context):\n return llvm.core.Type.array(self.base_type.to_llvm(context), self.size)\n\n\nclass TypeWrapper(Type):\n is_typewrapper = True\n subtypes = ['opaque_type']\n\n def __init__(self, opaque_type, context, **kwds):\n super(TypeWrapper, self).__init__(**kwds)\n self.opaque_type = opaque_type\n self.context = context\n\n def __repr__(self):\n return self.context.declare_type(self)\n\n def __deepcopy__(self, memo):\n return self\n\n\nclass NamedType(Type):\n name = None\n\n def __eq__(self, other):\n return isinstance(other, NamedType) and self.name == other.name\n\n def __repr__(self):\n if self.qualifiers:\n return '%s %s' % (self.name, ' '.join(self.qualifiers))\n return self.name\n\n\nclass BoolType(NamedType):\n is_bool = True\n name = 'bool'\n\n def __repr__(self):\n return 'int %s' % ' '.join(self.qualifiers)\n\n def to_llvm(self, context):\n return int8.to_llvm(context)\n\n\nclass NumericType(NamedType):\n \"\"\"\n Base class for numeric types.\n\n .. attribute:: name\n\n name of the type\n\n .. attribute:: itemsize\n\n sizeof(type)\n\n .. attribute:: rank\n\n ordering of numeric types\n \"\"\"\n is_numeric = True\n\n\nclass IntType(NumericType):\n is_int = True\n is_int_like = True\n name = 'int'\n signed = True\n rank = 4\n itemsize = 4\n kind = INT_KIND\n\n def to_llvm(self, context):\n if self.itemsize == 1:\n return lc.Type.int(8)\n elif self.itemsize == 2:\n return lc.Type.int(16)\n elif self.itemsize == 4:\n return lc.Type.int(32)\n else:\n assert self.itemsize == 8, self\n return lc.Type.int(64)\n\n\nclass FloatType(NumericType):\n is_float = True\n kind = FLOAT_KIND\n\n @property\n def comparison_type_list(self):\n return self.subtype_list + [self.itemsize]\n\n def to_llvm(self, context):\n if self.itemsize == 4:\n return lc.Type.float()\n elif self.itemsize == 8:\n return lc.Type.double()\n else:\n assert self.itemsize == 16\n return lc.Type.fp128()\n\n\nclass ComplexType(NumericType):\n is_complex = True\n subtypes = ['base_type']\n kind = COMPLEX_KIND\n\n\nclass Py_ssize_t_Type(IntType):\n is_py_ssize_t = True\n name = 'Py_ssize_t'\n rank = 9\n signed = True\n\n def __init__(self, **kwds):\n super(Py_ssize_t_Type, self).__init__(**kwds)\n self.itemsize = _plat_bits / 8\n\n\nclass NPyIntp(IntType):\n is_numpy_intp = True\n name = 'npy_intp'\n\n def __init__(self, **kwds):\n super(NPyIntp, self).__init__(**kwds)\n import numpy as np\n ctypes_array = np.empty(0).ctypes.strides\n self.itemsize = ctypes.sizeof(ctypes_array._type_)\n\n\nclass CharType(IntType):\n is_char = True\n name = 'char'\n rank = 1\n signed = True\n\n def to_llvm(self, context):\n return lc.Type.int(8)\n\n\nclass CStringType(Type):\n is_c_string = True\n\n def __repr__(self):\n return 'const char *'\n\n def to_llvm(self, context):\n return char.pointer().to_llvm(context)\n\n\nclass VoidType(NamedType):\n is_void = True\n name = 'void'\n\n def to_llvm(self, context):\n return lc.Type.void()\n\n\nclass ObjectType(Type):\n is_object = True\n\n def __repr__(self):\n return 'PyObject *'\n\n\nclass FunctionType(Type):\n subtypes = ['return_type', 'args']\n is_function = True\n is_vararg = False\n\n def to_llvm(self, context):\n return lc.Type.function(self.return_type.to_llvm(context), [\n arg_type.to_llvm(context) for arg_type in self.args], self.\n is_vararg)\n\n def __str__(self):\n args = map(str, self.args)\n if self.is_vararg:\n args.append('...')\n return '%s (*)(%s)' % (self.return_type, ', '.join(args))\n\n\nclass VectorType(Type):\n subtypes = ['element_type']\n is_vector = True\n vector_size = None\n\n def __init__(self, element_type, vector_size, **kwds):\n super(VectorType, self).__init__(**kwds)\n assert (element_type.is_int or element_type.is_float\n ) and element_type.itemsize in (4, 8), element_type\n self.element_type = element_type\n self.vector_size = vector_size\n\n def to_llvm(self, context):\n return lc.Type.vector(self.element_type.to_llvm(context), self.\n vector_size)\n\n @property\n def comparison_type_list(self):\n return self.subtype_list + [self.vector_size]\n\n def __str__(self):\n itemsize = self.element_type.itemsize\n if self.element_type.is_float:\n if itemsize == 4:\n return '__m128'\n else:\n return '__m128d'\n elif itemsize == 4:\n return '__m128i'\n else:\n raise NotImplementedError\n\n\n<assignment token>\n<code token>\n",
"<docstring token>\n<assignment token>\n<import token>\n<code token>\n<import token>\n<code token>\n\n\nclass TypeMapper(object):\n <docstring token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n\n<function token>\n<assignment token>\n\n\nclass Type(miniutils.ComparableObjectMixin):\n \"\"\"\n Base class for all types.\n\n .. attribute:: subtypes\n\n The list of subtypes to allow comparing and hashing them recursively\n \"\"\"\n is_array = False\n is_pointer = False\n is_typewrapper = False\n is_bool = False\n is_numeric = False\n is_py_ssize_t = False\n is_char = False\n is_int = False\n is_float = False\n is_c_string = False\n is_object = False\n is_function = False\n is_int_like = False\n is_complex = False\n is_void = False\n kind = NONE_KIND\n subtypes = []\n\n def __init__(self, **kwds):\n vars(self).update(kwds)\n self.qualifiers = kwds.get('qualifiers', frozenset())\n\n def qualify(self, *qualifiers):\n \"\"\"Qualify this type with a qualifier such as ``const`` or ``restrict``\"\"\"\n qualifiers = list(qualifiers)\n qualifiers.extend(self.qualifiers)\n attribs = dict(vars(self), qualifiers=qualifiers)\n return type(self)(**attribs)\n\n def unqualify(self, *unqualifiers):\n \"\"\"Remove the given qualifiers from the type\"\"\"\n unqualifiers = set(unqualifiers)\n qualifiers = [q for q in self.qualifiers if q not in unqualifiers]\n attribs = dict(vars(self), qualifiers=qualifiers)\n return type(self)(**attribs)\n\n def pointer(self):\n \"\"\"Get a pointer to this type\"\"\"\n return PointerType(self)\n\n @property\n def subtype_list(self):\n return [getattr(self, subtype) for subtype in self.subtypes]\n\n @property\n def comparison_type_list(self):\n return self.subtype_list\n\n def __eq__(self, other):\n return type(self) is type(other\n ) and self.comparison_type_list == other.comparison_type_list\n\n def __ne__(self, other):\n return not self == other\n\n def __hash__(self):\n h = hash(type(self))\n for subtype in self.comparison_type_list:\n h = h ^ hash(subtype)\n return h\n\n def __getitem__(self, item):\n assert isinstance(item, (tuple, slice))\n\n def verify_slice(s):\n if s.start or s.stop or s.step not in (None, 1):\n raise minierror.InvalidTypeSpecification(\n 'Only a step of 1 may be provided to indicate C or Fortran contiguity'\n )\n if isinstance(item, tuple):\n step_idx = None\n for idx, s in enumerate(item):\n verify_slice(s)\n if s.step and (step_idx or idx not in (0, len(item) - 1)):\n raise minierror.InvalidTypeSpecification(\n 'Step may only be provided once, and only in the first or last dimension.'\n )\n if s.step == 1:\n step_idx = idx\n return ArrayType(self, len(item), is_c_contig=step_idx == len(\n item) - 1, is_f_contig=step_idx == 0)\n else:\n verify_slice(item)\n return ArrayType(self, 1, is_c_contig=bool(item.step))\n\n def to_llvm(self, context):\n \"\"\"Get a corresponding llvm type from this type\"\"\"\n return context.to_llvm(self)\n\n def __getattr__(self, attr):\n if attr.startswith('is_'):\n return False\n return getattr(type(self), attr)\n\n\nclass ArrayType(Type):\n is_array = True\n subtypes = ['dtype']\n\n def __init__(self, dtype, ndim, is_c_contig=False, is_f_contig=False,\n inner_contig=False, broadcasting=None):\n super(ArrayType, self).__init__()\n self.dtype = dtype\n self.ndim = ndim\n self.is_c_contig = is_c_contig\n self.is_f_contig = is_f_contig\n self.inner_contig = inner_contig or is_c_contig or is_f_contig\n self.broadcasting = broadcasting or (True,) * ndim\n\n @property\n def comparison_type_list(self):\n return [self.dtype, self.is_c_contig, self.is_f_contig, self.\n inner_contig]\n\n def pointer(self):\n raise Exception('You probably want a pointer type to the dtype')\n\n def to_llvm(self, context):\n return context.to_llvm(self)\n\n def __repr__(self):\n axes = [':'] * self.ndim\n if self.is_c_contig:\n axes[-1] = '::1'\n elif self.is_f_contig:\n axes[0] = '::1'\n return '%s[%s]' % (self.dtype, ', '.join(axes))\n\n\nclass PointerType(Type):\n is_pointer = True\n subtypes = ['base_type']\n\n def __init__(self, base_type, **kwds):\n super(PointerType, self).__init__(**kwds)\n self.base_type = base_type\n\n def __repr__(self):\n return '%s *%s' % (self.base_type, ' '.join(self.qualifiers))\n\n def to_llvm(self, context):\n return llvm.core.Type.pointer(self.base_type.to_llvm(context))\n\n\nclass CArrayType(Type):\n is_carray = True\n subtypes = ['base_type']\n\n def __init__(self, base_type, size, **kwds):\n super(CArrayType, self).__init__(**kwds)\n self.base_type = base_type\n self.size = size\n\n def __repr__(self):\n return '%s[%d]' % (self.base_type, self.length)\n\n def to_llvm(self, context):\n return llvm.core.Type.array(self.base_type.to_llvm(context), self.size)\n\n\nclass TypeWrapper(Type):\n is_typewrapper = True\n subtypes = ['opaque_type']\n\n def __init__(self, opaque_type, context, **kwds):\n super(TypeWrapper, self).__init__(**kwds)\n self.opaque_type = opaque_type\n self.context = context\n\n def __repr__(self):\n return self.context.declare_type(self)\n\n def __deepcopy__(self, memo):\n return self\n\n\nclass NamedType(Type):\n name = None\n\n def __eq__(self, other):\n return isinstance(other, NamedType) and self.name == other.name\n\n def __repr__(self):\n if self.qualifiers:\n return '%s %s' % (self.name, ' '.join(self.qualifiers))\n return self.name\n\n\nclass BoolType(NamedType):\n is_bool = True\n name = 'bool'\n\n def __repr__(self):\n return 'int %s' % ' '.join(self.qualifiers)\n\n def to_llvm(self, context):\n return int8.to_llvm(context)\n\n\nclass NumericType(NamedType):\n \"\"\"\n Base class for numeric types.\n\n .. attribute:: name\n\n name of the type\n\n .. attribute:: itemsize\n\n sizeof(type)\n\n .. attribute:: rank\n\n ordering of numeric types\n \"\"\"\n is_numeric = True\n\n\nclass IntType(NumericType):\n is_int = True\n is_int_like = True\n name = 'int'\n signed = True\n rank = 4\n itemsize = 4\n kind = INT_KIND\n\n def to_llvm(self, context):\n if self.itemsize == 1:\n return lc.Type.int(8)\n elif self.itemsize == 2:\n return lc.Type.int(16)\n elif self.itemsize == 4:\n return lc.Type.int(32)\n else:\n assert self.itemsize == 8, self\n return lc.Type.int(64)\n\n\nclass FloatType(NumericType):\n is_float = True\n kind = FLOAT_KIND\n\n @property\n def comparison_type_list(self):\n return self.subtype_list + [self.itemsize]\n\n def to_llvm(self, context):\n if self.itemsize == 4:\n return lc.Type.float()\n elif self.itemsize == 8:\n return lc.Type.double()\n else:\n assert self.itemsize == 16\n return lc.Type.fp128()\n\n\nclass ComplexType(NumericType):\n is_complex = True\n subtypes = ['base_type']\n kind = COMPLEX_KIND\n\n\nclass Py_ssize_t_Type(IntType):\n is_py_ssize_t = True\n name = 'Py_ssize_t'\n rank = 9\n signed = True\n\n def __init__(self, **kwds):\n super(Py_ssize_t_Type, self).__init__(**kwds)\n self.itemsize = _plat_bits / 8\n\n\nclass NPyIntp(IntType):\n is_numpy_intp = True\n name = 'npy_intp'\n\n def __init__(self, **kwds):\n super(NPyIntp, self).__init__(**kwds)\n import numpy as np\n ctypes_array = np.empty(0).ctypes.strides\n self.itemsize = ctypes.sizeof(ctypes_array._type_)\n\n\nclass CharType(IntType):\n is_char = True\n name = 'char'\n rank = 1\n signed = True\n\n def to_llvm(self, context):\n return lc.Type.int(8)\n\n\nclass CStringType(Type):\n is_c_string = True\n\n def __repr__(self):\n return 'const char *'\n\n def to_llvm(self, context):\n return char.pointer().to_llvm(context)\n\n\nclass VoidType(NamedType):\n is_void = True\n name = 'void'\n\n def to_llvm(self, context):\n return lc.Type.void()\n\n\nclass ObjectType(Type):\n is_object = True\n\n def __repr__(self):\n return 'PyObject *'\n\n\nclass FunctionType(Type):\n subtypes = ['return_type', 'args']\n is_function = True\n is_vararg = False\n\n def to_llvm(self, context):\n return lc.Type.function(self.return_type.to_llvm(context), [\n arg_type.to_llvm(context) for arg_type in self.args], self.\n is_vararg)\n\n def __str__(self):\n args = map(str, self.args)\n if self.is_vararg:\n args.append('...')\n return '%s (*)(%s)' % (self.return_type, ', '.join(args))\n\n\nclass VectorType(Type):\n subtypes = ['element_type']\n is_vector = True\n vector_size = None\n\n def __init__(self, element_type, vector_size, **kwds):\n super(VectorType, self).__init__(**kwds)\n assert (element_type.is_int or element_type.is_float\n ) and element_type.itemsize in (4, 8), element_type\n self.element_type = element_type\n self.vector_size = vector_size\n\n def to_llvm(self, context):\n return lc.Type.vector(self.element_type.to_llvm(context), self.\n vector_size)\n\n @property\n def comparison_type_list(self):\n return self.subtype_list + [self.vector_size]\n\n def __str__(self):\n itemsize = self.element_type.itemsize\n if self.element_type.is_float:\n if itemsize == 4:\n return '__m128'\n else:\n return '__m128d'\n elif itemsize == 4:\n return '__m128i'\n else:\n raise NotImplementedError\n\n\n<assignment token>\n<code token>\n",
"<docstring token>\n<assignment token>\n<import token>\n<code token>\n<import token>\n<code token>\n<class token>\n<function token>\n<assignment token>\n\n\nclass Type(miniutils.ComparableObjectMixin):\n \"\"\"\n Base class for all types.\n\n .. attribute:: subtypes\n\n The list of subtypes to allow comparing and hashing them recursively\n \"\"\"\n is_array = False\n is_pointer = False\n is_typewrapper = False\n is_bool = False\n is_numeric = False\n is_py_ssize_t = False\n is_char = False\n is_int = False\n is_float = False\n is_c_string = False\n is_object = False\n is_function = False\n is_int_like = False\n is_complex = False\n is_void = False\n kind = NONE_KIND\n subtypes = []\n\n def __init__(self, **kwds):\n vars(self).update(kwds)\n self.qualifiers = kwds.get('qualifiers', frozenset())\n\n def qualify(self, *qualifiers):\n \"\"\"Qualify this type with a qualifier such as ``const`` or ``restrict``\"\"\"\n qualifiers = list(qualifiers)\n qualifiers.extend(self.qualifiers)\n attribs = dict(vars(self), qualifiers=qualifiers)\n return type(self)(**attribs)\n\n def unqualify(self, *unqualifiers):\n \"\"\"Remove the given qualifiers from the type\"\"\"\n unqualifiers = set(unqualifiers)\n qualifiers = [q for q in self.qualifiers if q not in unqualifiers]\n attribs = dict(vars(self), qualifiers=qualifiers)\n return type(self)(**attribs)\n\n def pointer(self):\n \"\"\"Get a pointer to this type\"\"\"\n return PointerType(self)\n\n @property\n def subtype_list(self):\n return [getattr(self, subtype) for subtype in self.subtypes]\n\n @property\n def comparison_type_list(self):\n return self.subtype_list\n\n def __eq__(self, other):\n return type(self) is type(other\n ) and self.comparison_type_list == other.comparison_type_list\n\n def __ne__(self, other):\n return not self == other\n\n def __hash__(self):\n h = hash(type(self))\n for subtype in self.comparison_type_list:\n h = h ^ hash(subtype)\n return h\n\n def __getitem__(self, item):\n assert isinstance(item, (tuple, slice))\n\n def verify_slice(s):\n if s.start or s.stop or s.step not in (None, 1):\n raise minierror.InvalidTypeSpecification(\n 'Only a step of 1 may be provided to indicate C or Fortran contiguity'\n )\n if isinstance(item, tuple):\n step_idx = None\n for idx, s in enumerate(item):\n verify_slice(s)\n if s.step and (step_idx or idx not in (0, len(item) - 1)):\n raise minierror.InvalidTypeSpecification(\n 'Step may only be provided once, and only in the first or last dimension.'\n )\n if s.step == 1:\n step_idx = idx\n return ArrayType(self, len(item), is_c_contig=step_idx == len(\n item) - 1, is_f_contig=step_idx == 0)\n else:\n verify_slice(item)\n return ArrayType(self, 1, is_c_contig=bool(item.step))\n\n def to_llvm(self, context):\n \"\"\"Get a corresponding llvm type from this type\"\"\"\n return context.to_llvm(self)\n\n def __getattr__(self, attr):\n if attr.startswith('is_'):\n return False\n return getattr(type(self), attr)\n\n\nclass ArrayType(Type):\n is_array = True\n subtypes = ['dtype']\n\n def __init__(self, dtype, ndim, is_c_contig=False, is_f_contig=False,\n inner_contig=False, broadcasting=None):\n super(ArrayType, self).__init__()\n self.dtype = dtype\n self.ndim = ndim\n self.is_c_contig = is_c_contig\n self.is_f_contig = is_f_contig\n self.inner_contig = inner_contig or is_c_contig or is_f_contig\n self.broadcasting = broadcasting or (True,) * ndim\n\n @property\n def comparison_type_list(self):\n return [self.dtype, self.is_c_contig, self.is_f_contig, self.\n inner_contig]\n\n def pointer(self):\n raise Exception('You probably want a pointer type to the dtype')\n\n def to_llvm(self, context):\n return context.to_llvm(self)\n\n def __repr__(self):\n axes = [':'] * self.ndim\n if self.is_c_contig:\n axes[-1] = '::1'\n elif self.is_f_contig:\n axes[0] = '::1'\n return '%s[%s]' % (self.dtype, ', '.join(axes))\n\n\nclass PointerType(Type):\n is_pointer = True\n subtypes = ['base_type']\n\n def __init__(self, base_type, **kwds):\n super(PointerType, self).__init__(**kwds)\n self.base_type = base_type\n\n def __repr__(self):\n return '%s *%s' % (self.base_type, ' '.join(self.qualifiers))\n\n def to_llvm(self, context):\n return llvm.core.Type.pointer(self.base_type.to_llvm(context))\n\n\nclass CArrayType(Type):\n is_carray = True\n subtypes = ['base_type']\n\n def __init__(self, base_type, size, **kwds):\n super(CArrayType, self).__init__(**kwds)\n self.base_type = base_type\n self.size = size\n\n def __repr__(self):\n return '%s[%d]' % (self.base_type, self.length)\n\n def to_llvm(self, context):\n return llvm.core.Type.array(self.base_type.to_llvm(context), self.size)\n\n\nclass TypeWrapper(Type):\n is_typewrapper = True\n subtypes = ['opaque_type']\n\n def __init__(self, opaque_type, context, **kwds):\n super(TypeWrapper, self).__init__(**kwds)\n self.opaque_type = opaque_type\n self.context = context\n\n def __repr__(self):\n return self.context.declare_type(self)\n\n def __deepcopy__(self, memo):\n return self\n\n\nclass NamedType(Type):\n name = None\n\n def __eq__(self, other):\n return isinstance(other, NamedType) and self.name == other.name\n\n def __repr__(self):\n if self.qualifiers:\n return '%s %s' % (self.name, ' '.join(self.qualifiers))\n return self.name\n\n\nclass BoolType(NamedType):\n is_bool = True\n name = 'bool'\n\n def __repr__(self):\n return 'int %s' % ' '.join(self.qualifiers)\n\n def to_llvm(self, context):\n return int8.to_llvm(context)\n\n\nclass NumericType(NamedType):\n \"\"\"\n Base class for numeric types.\n\n .. attribute:: name\n\n name of the type\n\n .. attribute:: itemsize\n\n sizeof(type)\n\n .. attribute:: rank\n\n ordering of numeric types\n \"\"\"\n is_numeric = True\n\n\nclass IntType(NumericType):\n is_int = True\n is_int_like = True\n name = 'int'\n signed = True\n rank = 4\n itemsize = 4\n kind = INT_KIND\n\n def to_llvm(self, context):\n if self.itemsize == 1:\n return lc.Type.int(8)\n elif self.itemsize == 2:\n return lc.Type.int(16)\n elif self.itemsize == 4:\n return lc.Type.int(32)\n else:\n assert self.itemsize == 8, self\n return lc.Type.int(64)\n\n\nclass FloatType(NumericType):\n is_float = True\n kind = FLOAT_KIND\n\n @property\n def comparison_type_list(self):\n return self.subtype_list + [self.itemsize]\n\n def to_llvm(self, context):\n if self.itemsize == 4:\n return lc.Type.float()\n elif self.itemsize == 8:\n return lc.Type.double()\n else:\n assert self.itemsize == 16\n return lc.Type.fp128()\n\n\nclass ComplexType(NumericType):\n is_complex = True\n subtypes = ['base_type']\n kind = COMPLEX_KIND\n\n\nclass Py_ssize_t_Type(IntType):\n is_py_ssize_t = True\n name = 'Py_ssize_t'\n rank = 9\n signed = True\n\n def __init__(self, **kwds):\n super(Py_ssize_t_Type, self).__init__(**kwds)\n self.itemsize = _plat_bits / 8\n\n\nclass NPyIntp(IntType):\n is_numpy_intp = True\n name = 'npy_intp'\n\n def __init__(self, **kwds):\n super(NPyIntp, self).__init__(**kwds)\n import numpy as np\n ctypes_array = np.empty(0).ctypes.strides\n self.itemsize = ctypes.sizeof(ctypes_array._type_)\n\n\nclass CharType(IntType):\n is_char = True\n name = 'char'\n rank = 1\n signed = True\n\n def to_llvm(self, context):\n return lc.Type.int(8)\n\n\nclass CStringType(Type):\n is_c_string = True\n\n def __repr__(self):\n return 'const char *'\n\n def to_llvm(self, context):\n return char.pointer().to_llvm(context)\n\n\nclass VoidType(NamedType):\n is_void = True\n name = 'void'\n\n def to_llvm(self, context):\n return lc.Type.void()\n\n\nclass ObjectType(Type):\n is_object = True\n\n def __repr__(self):\n return 'PyObject *'\n\n\nclass FunctionType(Type):\n subtypes = ['return_type', 'args']\n is_function = True\n is_vararg = False\n\n def to_llvm(self, context):\n return lc.Type.function(self.return_type.to_llvm(context), [\n arg_type.to_llvm(context) for arg_type in self.args], self.\n is_vararg)\n\n def __str__(self):\n args = map(str, self.args)\n if self.is_vararg:\n args.append('...')\n return '%s (*)(%s)' % (self.return_type, ', '.join(args))\n\n\nclass VectorType(Type):\n subtypes = ['element_type']\n is_vector = True\n vector_size = None\n\n def __init__(self, element_type, vector_size, **kwds):\n super(VectorType, self).__init__(**kwds)\n assert (element_type.is_int or element_type.is_float\n ) and element_type.itemsize in (4, 8), element_type\n self.element_type = element_type\n self.vector_size = vector_size\n\n def to_llvm(self, context):\n return lc.Type.vector(self.element_type.to_llvm(context), self.\n vector_size)\n\n @property\n def comparison_type_list(self):\n return self.subtype_list + [self.vector_size]\n\n def __str__(self):\n itemsize = self.element_type.itemsize\n if self.element_type.is_float:\n if itemsize == 4:\n return '__m128'\n else:\n return '__m128d'\n elif itemsize == 4:\n return '__m128i'\n else:\n raise NotImplementedError\n\n\n<assignment token>\n<code token>\n",
"<docstring token>\n<assignment token>\n<import token>\n<code token>\n<import token>\n<code token>\n<class token>\n<function token>\n<assignment token>\n\n\nclass Type(miniutils.ComparableObjectMixin):\n <docstring token>\n is_array = False\n is_pointer = False\n is_typewrapper = False\n is_bool = False\n is_numeric = False\n is_py_ssize_t = False\n is_char = False\n is_int = False\n is_float = False\n is_c_string = False\n is_object = False\n is_function = False\n is_int_like = False\n is_complex = False\n is_void = False\n kind = NONE_KIND\n subtypes = []\n\n def __init__(self, **kwds):\n vars(self).update(kwds)\n self.qualifiers = kwds.get('qualifiers', frozenset())\n\n def qualify(self, *qualifiers):\n \"\"\"Qualify this type with a qualifier such as ``const`` or ``restrict``\"\"\"\n qualifiers = list(qualifiers)\n qualifiers.extend(self.qualifiers)\n attribs = dict(vars(self), qualifiers=qualifiers)\n return type(self)(**attribs)\n\n def unqualify(self, *unqualifiers):\n \"\"\"Remove the given qualifiers from the type\"\"\"\n unqualifiers = set(unqualifiers)\n qualifiers = [q for q in self.qualifiers if q not in unqualifiers]\n attribs = dict(vars(self), qualifiers=qualifiers)\n return type(self)(**attribs)\n\n def pointer(self):\n \"\"\"Get a pointer to this type\"\"\"\n return PointerType(self)\n\n @property\n def subtype_list(self):\n return [getattr(self, subtype) for subtype in self.subtypes]\n\n @property\n def comparison_type_list(self):\n return self.subtype_list\n\n def __eq__(self, other):\n return type(self) is type(other\n ) and self.comparison_type_list == other.comparison_type_list\n\n def __ne__(self, other):\n return not self == other\n\n def __hash__(self):\n h = hash(type(self))\n for subtype in self.comparison_type_list:\n h = h ^ hash(subtype)\n return h\n\n def __getitem__(self, item):\n assert isinstance(item, (tuple, slice))\n\n def verify_slice(s):\n if s.start or s.stop or s.step not in (None, 1):\n raise minierror.InvalidTypeSpecification(\n 'Only a step of 1 may be provided to indicate C or Fortran contiguity'\n )\n if isinstance(item, tuple):\n step_idx = None\n for idx, s in enumerate(item):\n verify_slice(s)\n if s.step and (step_idx or idx not in (0, len(item) - 1)):\n raise minierror.InvalidTypeSpecification(\n 'Step may only be provided once, and only in the first or last dimension.'\n )\n if s.step == 1:\n step_idx = idx\n return ArrayType(self, len(item), is_c_contig=step_idx == len(\n item) - 1, is_f_contig=step_idx == 0)\n else:\n verify_slice(item)\n return ArrayType(self, 1, is_c_contig=bool(item.step))\n\n def to_llvm(self, context):\n \"\"\"Get a corresponding llvm type from this type\"\"\"\n return context.to_llvm(self)\n\n def __getattr__(self, attr):\n if attr.startswith('is_'):\n return False\n return getattr(type(self), attr)\n\n\nclass ArrayType(Type):\n is_array = True\n subtypes = ['dtype']\n\n def __init__(self, dtype, ndim, is_c_contig=False, is_f_contig=False,\n inner_contig=False, broadcasting=None):\n super(ArrayType, self).__init__()\n self.dtype = dtype\n self.ndim = ndim\n self.is_c_contig = is_c_contig\n self.is_f_contig = is_f_contig\n self.inner_contig = inner_contig or is_c_contig or is_f_contig\n self.broadcasting = broadcasting or (True,) * ndim\n\n @property\n def comparison_type_list(self):\n return [self.dtype, self.is_c_contig, self.is_f_contig, self.\n inner_contig]\n\n def pointer(self):\n raise Exception('You probably want a pointer type to the dtype')\n\n def to_llvm(self, context):\n return context.to_llvm(self)\n\n def __repr__(self):\n axes = [':'] * self.ndim\n if self.is_c_contig:\n axes[-1] = '::1'\n elif self.is_f_contig:\n axes[0] = '::1'\n return '%s[%s]' % (self.dtype, ', '.join(axes))\n\n\nclass PointerType(Type):\n is_pointer = True\n subtypes = ['base_type']\n\n def __init__(self, base_type, **kwds):\n super(PointerType, self).__init__(**kwds)\n self.base_type = base_type\n\n def __repr__(self):\n return '%s *%s' % (self.base_type, ' '.join(self.qualifiers))\n\n def to_llvm(self, context):\n return llvm.core.Type.pointer(self.base_type.to_llvm(context))\n\n\nclass CArrayType(Type):\n is_carray = True\n subtypes = ['base_type']\n\n def __init__(self, base_type, size, **kwds):\n super(CArrayType, self).__init__(**kwds)\n self.base_type = base_type\n self.size = size\n\n def __repr__(self):\n return '%s[%d]' % (self.base_type, self.length)\n\n def to_llvm(self, context):\n return llvm.core.Type.array(self.base_type.to_llvm(context), self.size)\n\n\nclass TypeWrapper(Type):\n is_typewrapper = True\n subtypes = ['opaque_type']\n\n def __init__(self, opaque_type, context, **kwds):\n super(TypeWrapper, self).__init__(**kwds)\n self.opaque_type = opaque_type\n self.context = context\n\n def __repr__(self):\n return self.context.declare_type(self)\n\n def __deepcopy__(self, memo):\n return self\n\n\nclass NamedType(Type):\n name = None\n\n def __eq__(self, other):\n return isinstance(other, NamedType) and self.name == other.name\n\n def __repr__(self):\n if self.qualifiers:\n return '%s %s' % (self.name, ' '.join(self.qualifiers))\n return self.name\n\n\nclass BoolType(NamedType):\n is_bool = True\n name = 'bool'\n\n def __repr__(self):\n return 'int %s' % ' '.join(self.qualifiers)\n\n def to_llvm(self, context):\n return int8.to_llvm(context)\n\n\nclass NumericType(NamedType):\n \"\"\"\n Base class for numeric types.\n\n .. attribute:: name\n\n name of the type\n\n .. attribute:: itemsize\n\n sizeof(type)\n\n .. attribute:: rank\n\n ordering of numeric types\n \"\"\"\n is_numeric = True\n\n\nclass IntType(NumericType):\n is_int = True\n is_int_like = True\n name = 'int'\n signed = True\n rank = 4\n itemsize = 4\n kind = INT_KIND\n\n def to_llvm(self, context):\n if self.itemsize == 1:\n return lc.Type.int(8)\n elif self.itemsize == 2:\n return lc.Type.int(16)\n elif self.itemsize == 4:\n return lc.Type.int(32)\n else:\n assert self.itemsize == 8, self\n return lc.Type.int(64)\n\n\nclass FloatType(NumericType):\n is_float = True\n kind = FLOAT_KIND\n\n @property\n def comparison_type_list(self):\n return self.subtype_list + [self.itemsize]\n\n def to_llvm(self, context):\n if self.itemsize == 4:\n return lc.Type.float()\n elif self.itemsize == 8:\n return lc.Type.double()\n else:\n assert self.itemsize == 16\n return lc.Type.fp128()\n\n\nclass ComplexType(NumericType):\n is_complex = True\n subtypes = ['base_type']\n kind = COMPLEX_KIND\n\n\nclass Py_ssize_t_Type(IntType):\n is_py_ssize_t = True\n name = 'Py_ssize_t'\n rank = 9\n signed = True\n\n def __init__(self, **kwds):\n super(Py_ssize_t_Type, self).__init__(**kwds)\n self.itemsize = _plat_bits / 8\n\n\nclass NPyIntp(IntType):\n is_numpy_intp = True\n name = 'npy_intp'\n\n def __init__(self, **kwds):\n super(NPyIntp, self).__init__(**kwds)\n import numpy as np\n ctypes_array = np.empty(0).ctypes.strides\n self.itemsize = ctypes.sizeof(ctypes_array._type_)\n\n\nclass CharType(IntType):\n is_char = True\n name = 'char'\n rank = 1\n signed = True\n\n def to_llvm(self, context):\n return lc.Type.int(8)\n\n\nclass CStringType(Type):\n is_c_string = True\n\n def __repr__(self):\n return 'const char *'\n\n def to_llvm(self, context):\n return char.pointer().to_llvm(context)\n\n\nclass VoidType(NamedType):\n is_void = True\n name = 'void'\n\n def to_llvm(self, context):\n return lc.Type.void()\n\n\nclass ObjectType(Type):\n is_object = True\n\n def __repr__(self):\n return 'PyObject *'\n\n\nclass FunctionType(Type):\n subtypes = ['return_type', 'args']\n is_function = True\n is_vararg = False\n\n def to_llvm(self, context):\n return lc.Type.function(self.return_type.to_llvm(context), [\n arg_type.to_llvm(context) for arg_type in self.args], self.\n is_vararg)\n\n def __str__(self):\n args = map(str, self.args)\n if self.is_vararg:\n args.append('...')\n return '%s (*)(%s)' % (self.return_type, ', '.join(args))\n\n\nclass VectorType(Type):\n subtypes = ['element_type']\n is_vector = True\n vector_size = None\n\n def __init__(self, element_type, vector_size, **kwds):\n super(VectorType, self).__init__(**kwds)\n assert (element_type.is_int or element_type.is_float\n ) and element_type.itemsize in (4, 8), element_type\n self.element_type = element_type\n self.vector_size = vector_size\n\n def to_llvm(self, context):\n return lc.Type.vector(self.element_type.to_llvm(context), self.\n vector_size)\n\n @property\n def comparison_type_list(self):\n return self.subtype_list + [self.vector_size]\n\n def __str__(self):\n itemsize = self.element_type.itemsize\n if self.element_type.is_float:\n if itemsize == 4:\n return '__m128'\n else:\n return '__m128d'\n elif itemsize == 4:\n return '__m128i'\n else:\n raise NotImplementedError\n\n\n<assignment token>\n<code token>\n",
"<docstring token>\n<assignment token>\n<import token>\n<code token>\n<import token>\n<code token>\n<class token>\n<function token>\n<assignment token>\n\n\nclass Type(miniutils.ComparableObjectMixin):\n <docstring token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n\n def __init__(self, **kwds):\n vars(self).update(kwds)\n self.qualifiers = kwds.get('qualifiers', frozenset())\n\n def qualify(self, *qualifiers):\n \"\"\"Qualify this type with a qualifier such as ``const`` or ``restrict``\"\"\"\n qualifiers = list(qualifiers)\n qualifiers.extend(self.qualifiers)\n attribs = dict(vars(self), qualifiers=qualifiers)\n return type(self)(**attribs)\n\n def unqualify(self, *unqualifiers):\n \"\"\"Remove the given qualifiers from the type\"\"\"\n unqualifiers = set(unqualifiers)\n qualifiers = [q for q in self.qualifiers if q not in unqualifiers]\n attribs = dict(vars(self), qualifiers=qualifiers)\n return type(self)(**attribs)\n\n def pointer(self):\n \"\"\"Get a pointer to this type\"\"\"\n return PointerType(self)\n\n @property\n def subtype_list(self):\n return [getattr(self, subtype) for subtype in self.subtypes]\n\n @property\n def comparison_type_list(self):\n return self.subtype_list\n\n def __eq__(self, other):\n return type(self) is type(other\n ) and self.comparison_type_list == other.comparison_type_list\n\n def __ne__(self, other):\n return not self == other\n\n def __hash__(self):\n h = hash(type(self))\n for subtype in self.comparison_type_list:\n h = h ^ hash(subtype)\n return h\n\n def __getitem__(self, item):\n assert isinstance(item, (tuple, slice))\n\n def verify_slice(s):\n if s.start or s.stop or s.step not in (None, 1):\n raise minierror.InvalidTypeSpecification(\n 'Only a step of 1 may be provided to indicate C or Fortran contiguity'\n )\n if isinstance(item, tuple):\n step_idx = None\n for idx, s in enumerate(item):\n verify_slice(s)\n if s.step and (step_idx or idx not in (0, len(item) - 1)):\n raise minierror.InvalidTypeSpecification(\n 'Step may only be provided once, and only in the first or last dimension.'\n )\n if s.step == 1:\n step_idx = idx\n return ArrayType(self, len(item), is_c_contig=step_idx == len(\n item) - 1, is_f_contig=step_idx == 0)\n else:\n verify_slice(item)\n return ArrayType(self, 1, is_c_contig=bool(item.step))\n\n def to_llvm(self, context):\n \"\"\"Get a corresponding llvm type from this type\"\"\"\n return context.to_llvm(self)\n\n def __getattr__(self, attr):\n if attr.startswith('is_'):\n return False\n return getattr(type(self), attr)\n\n\nclass ArrayType(Type):\n is_array = True\n subtypes = ['dtype']\n\n def __init__(self, dtype, ndim, is_c_contig=False, is_f_contig=False,\n inner_contig=False, broadcasting=None):\n super(ArrayType, self).__init__()\n self.dtype = dtype\n self.ndim = ndim\n self.is_c_contig = is_c_contig\n self.is_f_contig = is_f_contig\n self.inner_contig = inner_contig or is_c_contig or is_f_contig\n self.broadcasting = broadcasting or (True,) * ndim\n\n @property\n def comparison_type_list(self):\n return [self.dtype, self.is_c_contig, self.is_f_contig, self.\n inner_contig]\n\n def pointer(self):\n raise Exception('You probably want a pointer type to the dtype')\n\n def to_llvm(self, context):\n return context.to_llvm(self)\n\n def __repr__(self):\n axes = [':'] * self.ndim\n if self.is_c_contig:\n axes[-1] = '::1'\n elif self.is_f_contig:\n axes[0] = '::1'\n return '%s[%s]' % (self.dtype, ', '.join(axes))\n\n\nclass PointerType(Type):\n is_pointer = True\n subtypes = ['base_type']\n\n def __init__(self, base_type, **kwds):\n super(PointerType, self).__init__(**kwds)\n self.base_type = base_type\n\n def __repr__(self):\n return '%s *%s' % (self.base_type, ' '.join(self.qualifiers))\n\n def to_llvm(self, context):\n return llvm.core.Type.pointer(self.base_type.to_llvm(context))\n\n\nclass CArrayType(Type):\n is_carray = True\n subtypes = ['base_type']\n\n def __init__(self, base_type, size, **kwds):\n super(CArrayType, self).__init__(**kwds)\n self.base_type = base_type\n self.size = size\n\n def __repr__(self):\n return '%s[%d]' % (self.base_type, self.length)\n\n def to_llvm(self, context):\n return llvm.core.Type.array(self.base_type.to_llvm(context), self.size)\n\n\nclass TypeWrapper(Type):\n is_typewrapper = True\n subtypes = ['opaque_type']\n\n def __init__(self, opaque_type, context, **kwds):\n super(TypeWrapper, self).__init__(**kwds)\n self.opaque_type = opaque_type\n self.context = context\n\n def __repr__(self):\n return self.context.declare_type(self)\n\n def __deepcopy__(self, memo):\n return self\n\n\nclass NamedType(Type):\n name = None\n\n def __eq__(self, other):\n return isinstance(other, NamedType) and self.name == other.name\n\n def __repr__(self):\n if self.qualifiers:\n return '%s %s' % (self.name, ' '.join(self.qualifiers))\n return self.name\n\n\nclass BoolType(NamedType):\n is_bool = True\n name = 'bool'\n\n def __repr__(self):\n return 'int %s' % ' '.join(self.qualifiers)\n\n def to_llvm(self, context):\n return int8.to_llvm(context)\n\n\nclass NumericType(NamedType):\n \"\"\"\n Base class for numeric types.\n\n .. attribute:: name\n\n name of the type\n\n .. attribute:: itemsize\n\n sizeof(type)\n\n .. attribute:: rank\n\n ordering of numeric types\n \"\"\"\n is_numeric = True\n\n\nclass IntType(NumericType):\n is_int = True\n is_int_like = True\n name = 'int'\n signed = True\n rank = 4\n itemsize = 4\n kind = INT_KIND\n\n def to_llvm(self, context):\n if self.itemsize == 1:\n return lc.Type.int(8)\n elif self.itemsize == 2:\n return lc.Type.int(16)\n elif self.itemsize == 4:\n return lc.Type.int(32)\n else:\n assert self.itemsize == 8, self\n return lc.Type.int(64)\n\n\nclass FloatType(NumericType):\n is_float = True\n kind = FLOAT_KIND\n\n @property\n def comparison_type_list(self):\n return self.subtype_list + [self.itemsize]\n\n def to_llvm(self, context):\n if self.itemsize == 4:\n return lc.Type.float()\n elif self.itemsize == 8:\n return lc.Type.double()\n else:\n assert self.itemsize == 16\n return lc.Type.fp128()\n\n\nclass ComplexType(NumericType):\n is_complex = True\n subtypes = ['base_type']\n kind = COMPLEX_KIND\n\n\nclass Py_ssize_t_Type(IntType):\n is_py_ssize_t = True\n name = 'Py_ssize_t'\n rank = 9\n signed = True\n\n def __init__(self, **kwds):\n super(Py_ssize_t_Type, self).__init__(**kwds)\n self.itemsize = _plat_bits / 8\n\n\nclass NPyIntp(IntType):\n is_numpy_intp = True\n name = 'npy_intp'\n\n def __init__(self, **kwds):\n super(NPyIntp, self).__init__(**kwds)\n import numpy as np\n ctypes_array = np.empty(0).ctypes.strides\n self.itemsize = ctypes.sizeof(ctypes_array._type_)\n\n\nclass CharType(IntType):\n is_char = True\n name = 'char'\n rank = 1\n signed = True\n\n def to_llvm(self, context):\n return lc.Type.int(8)\n\n\nclass CStringType(Type):\n is_c_string = True\n\n def __repr__(self):\n return 'const char *'\n\n def to_llvm(self, context):\n return char.pointer().to_llvm(context)\n\n\nclass VoidType(NamedType):\n is_void = True\n name = 'void'\n\n def to_llvm(self, context):\n return lc.Type.void()\n\n\nclass ObjectType(Type):\n is_object = True\n\n def __repr__(self):\n return 'PyObject *'\n\n\nclass FunctionType(Type):\n subtypes = ['return_type', 'args']\n is_function = True\n is_vararg = False\n\n def to_llvm(self, context):\n return lc.Type.function(self.return_type.to_llvm(context), [\n arg_type.to_llvm(context) for arg_type in self.args], self.\n is_vararg)\n\n def __str__(self):\n args = map(str, self.args)\n if self.is_vararg:\n args.append('...')\n return '%s (*)(%s)' % (self.return_type, ', '.join(args))\n\n\nclass VectorType(Type):\n subtypes = ['element_type']\n is_vector = True\n vector_size = None\n\n def __init__(self, element_type, vector_size, **kwds):\n super(VectorType, self).__init__(**kwds)\n assert (element_type.is_int or element_type.is_float\n ) and element_type.itemsize in (4, 8), element_type\n self.element_type = element_type\n self.vector_size = vector_size\n\n def to_llvm(self, context):\n return lc.Type.vector(self.element_type.to_llvm(context), self.\n vector_size)\n\n @property\n def comparison_type_list(self):\n return self.subtype_list + [self.vector_size]\n\n def __str__(self):\n itemsize = self.element_type.itemsize\n if self.element_type.is_float:\n if itemsize == 4:\n return '__m128'\n else:\n return '__m128d'\n elif itemsize == 4:\n return '__m128i'\n else:\n raise NotImplementedError\n\n\n<assignment token>\n<code token>\n",
"<docstring token>\n<assignment token>\n<import token>\n<code token>\n<import token>\n<code token>\n<class token>\n<function token>\n<assignment token>\n\n\nclass Type(miniutils.ComparableObjectMixin):\n <docstring token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n\n def __init__(self, **kwds):\n vars(self).update(kwds)\n self.qualifiers = kwds.get('qualifiers', frozenset())\n\n def qualify(self, *qualifiers):\n \"\"\"Qualify this type with a qualifier such as ``const`` or ``restrict``\"\"\"\n qualifiers = list(qualifiers)\n qualifiers.extend(self.qualifiers)\n attribs = dict(vars(self), qualifiers=qualifiers)\n return type(self)(**attribs)\n <function token>\n\n def pointer(self):\n \"\"\"Get a pointer to this type\"\"\"\n return PointerType(self)\n\n @property\n def subtype_list(self):\n return [getattr(self, subtype) for subtype in self.subtypes]\n\n @property\n def comparison_type_list(self):\n return self.subtype_list\n\n def __eq__(self, other):\n return type(self) is type(other\n ) and self.comparison_type_list == other.comparison_type_list\n\n def __ne__(self, other):\n return not self == other\n\n def __hash__(self):\n h = hash(type(self))\n for subtype in self.comparison_type_list:\n h = h ^ hash(subtype)\n return h\n\n def __getitem__(self, item):\n assert isinstance(item, (tuple, slice))\n\n def verify_slice(s):\n if s.start or s.stop or s.step not in (None, 1):\n raise minierror.InvalidTypeSpecification(\n 'Only a step of 1 may be provided to indicate C or Fortran contiguity'\n )\n if isinstance(item, tuple):\n step_idx = None\n for idx, s in enumerate(item):\n verify_slice(s)\n if s.step and (step_idx or idx not in (0, len(item) - 1)):\n raise minierror.InvalidTypeSpecification(\n 'Step may only be provided once, and only in the first or last dimension.'\n )\n if s.step == 1:\n step_idx = idx\n return ArrayType(self, len(item), is_c_contig=step_idx == len(\n item) - 1, is_f_contig=step_idx == 0)\n else:\n verify_slice(item)\n return ArrayType(self, 1, is_c_contig=bool(item.step))\n\n def to_llvm(self, context):\n \"\"\"Get a corresponding llvm type from this type\"\"\"\n return context.to_llvm(self)\n\n def __getattr__(self, attr):\n if attr.startswith('is_'):\n return False\n return getattr(type(self), attr)\n\n\nclass ArrayType(Type):\n is_array = True\n subtypes = ['dtype']\n\n def __init__(self, dtype, ndim, is_c_contig=False, is_f_contig=False,\n inner_contig=False, broadcasting=None):\n super(ArrayType, self).__init__()\n self.dtype = dtype\n self.ndim = ndim\n self.is_c_contig = is_c_contig\n self.is_f_contig = is_f_contig\n self.inner_contig = inner_contig or is_c_contig or is_f_contig\n self.broadcasting = broadcasting or (True,) * ndim\n\n @property\n def comparison_type_list(self):\n return [self.dtype, self.is_c_contig, self.is_f_contig, self.\n inner_contig]\n\n def pointer(self):\n raise Exception('You probably want a pointer type to the dtype')\n\n def to_llvm(self, context):\n return context.to_llvm(self)\n\n def __repr__(self):\n axes = [':'] * self.ndim\n if self.is_c_contig:\n axes[-1] = '::1'\n elif self.is_f_contig:\n axes[0] = '::1'\n return '%s[%s]' % (self.dtype, ', '.join(axes))\n\n\nclass PointerType(Type):\n is_pointer = True\n subtypes = ['base_type']\n\n def __init__(self, base_type, **kwds):\n super(PointerType, self).__init__(**kwds)\n self.base_type = base_type\n\n def __repr__(self):\n return '%s *%s' % (self.base_type, ' '.join(self.qualifiers))\n\n def to_llvm(self, context):\n return llvm.core.Type.pointer(self.base_type.to_llvm(context))\n\n\nclass CArrayType(Type):\n is_carray = True\n subtypes = ['base_type']\n\n def __init__(self, base_type, size, **kwds):\n super(CArrayType, self).__init__(**kwds)\n self.base_type = base_type\n self.size = size\n\n def __repr__(self):\n return '%s[%d]' % (self.base_type, self.length)\n\n def to_llvm(self, context):\n return llvm.core.Type.array(self.base_type.to_llvm(context), self.size)\n\n\nclass TypeWrapper(Type):\n is_typewrapper = True\n subtypes = ['opaque_type']\n\n def __init__(self, opaque_type, context, **kwds):\n super(TypeWrapper, self).__init__(**kwds)\n self.opaque_type = opaque_type\n self.context = context\n\n def __repr__(self):\n return self.context.declare_type(self)\n\n def __deepcopy__(self, memo):\n return self\n\n\nclass NamedType(Type):\n name = None\n\n def __eq__(self, other):\n return isinstance(other, NamedType) and self.name == other.name\n\n def __repr__(self):\n if self.qualifiers:\n return '%s %s' % (self.name, ' '.join(self.qualifiers))\n return self.name\n\n\nclass BoolType(NamedType):\n is_bool = True\n name = 'bool'\n\n def __repr__(self):\n return 'int %s' % ' '.join(self.qualifiers)\n\n def to_llvm(self, context):\n return int8.to_llvm(context)\n\n\nclass NumericType(NamedType):\n \"\"\"\n Base class for numeric types.\n\n .. attribute:: name\n\n name of the type\n\n .. attribute:: itemsize\n\n sizeof(type)\n\n .. attribute:: rank\n\n ordering of numeric types\n \"\"\"\n is_numeric = True\n\n\nclass IntType(NumericType):\n is_int = True\n is_int_like = True\n name = 'int'\n signed = True\n rank = 4\n itemsize = 4\n kind = INT_KIND\n\n def to_llvm(self, context):\n if self.itemsize == 1:\n return lc.Type.int(8)\n elif self.itemsize == 2:\n return lc.Type.int(16)\n elif self.itemsize == 4:\n return lc.Type.int(32)\n else:\n assert self.itemsize == 8, self\n return lc.Type.int(64)\n\n\nclass FloatType(NumericType):\n is_float = True\n kind = FLOAT_KIND\n\n @property\n def comparison_type_list(self):\n return self.subtype_list + [self.itemsize]\n\n def to_llvm(self, context):\n if self.itemsize == 4:\n return lc.Type.float()\n elif self.itemsize == 8:\n return lc.Type.double()\n else:\n assert self.itemsize == 16\n return lc.Type.fp128()\n\n\nclass ComplexType(NumericType):\n is_complex = True\n subtypes = ['base_type']\n kind = COMPLEX_KIND\n\n\nclass Py_ssize_t_Type(IntType):\n is_py_ssize_t = True\n name = 'Py_ssize_t'\n rank = 9\n signed = True\n\n def __init__(self, **kwds):\n super(Py_ssize_t_Type, self).__init__(**kwds)\n self.itemsize = _plat_bits / 8\n\n\nclass NPyIntp(IntType):\n is_numpy_intp = True\n name = 'npy_intp'\n\n def __init__(self, **kwds):\n super(NPyIntp, self).__init__(**kwds)\n import numpy as np\n ctypes_array = np.empty(0).ctypes.strides\n self.itemsize = ctypes.sizeof(ctypes_array._type_)\n\n\nclass CharType(IntType):\n is_char = True\n name = 'char'\n rank = 1\n signed = True\n\n def to_llvm(self, context):\n return lc.Type.int(8)\n\n\nclass CStringType(Type):\n is_c_string = True\n\n def __repr__(self):\n return 'const char *'\n\n def to_llvm(self, context):\n return char.pointer().to_llvm(context)\n\n\nclass VoidType(NamedType):\n is_void = True\n name = 'void'\n\n def to_llvm(self, context):\n return lc.Type.void()\n\n\nclass ObjectType(Type):\n is_object = True\n\n def __repr__(self):\n return 'PyObject *'\n\n\nclass FunctionType(Type):\n subtypes = ['return_type', 'args']\n is_function = True\n is_vararg = False\n\n def to_llvm(self, context):\n return lc.Type.function(self.return_type.to_llvm(context), [\n arg_type.to_llvm(context) for arg_type in self.args], self.\n is_vararg)\n\n def __str__(self):\n args = map(str, self.args)\n if self.is_vararg:\n args.append('...')\n return '%s (*)(%s)' % (self.return_type, ', '.join(args))\n\n\nclass VectorType(Type):\n subtypes = ['element_type']\n is_vector = True\n vector_size = None\n\n def __init__(self, element_type, vector_size, **kwds):\n super(VectorType, self).__init__(**kwds)\n assert (element_type.is_int or element_type.is_float\n ) and element_type.itemsize in (4, 8), element_type\n self.element_type = element_type\n self.vector_size = vector_size\n\n def to_llvm(self, context):\n return lc.Type.vector(self.element_type.to_llvm(context), self.\n vector_size)\n\n @property\n def comparison_type_list(self):\n return self.subtype_list + [self.vector_size]\n\n def __str__(self):\n itemsize = self.element_type.itemsize\n if self.element_type.is_float:\n if itemsize == 4:\n return '__m128'\n else:\n return '__m128d'\n elif itemsize == 4:\n return '__m128i'\n else:\n raise NotImplementedError\n\n\n<assignment token>\n<code token>\n",
"<docstring token>\n<assignment token>\n<import token>\n<code token>\n<import token>\n<code token>\n<class token>\n<function token>\n<assignment token>\n\n\nclass Type(miniutils.ComparableObjectMixin):\n <docstring token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n\n def __init__(self, **kwds):\n vars(self).update(kwds)\n self.qualifiers = kwds.get('qualifiers', frozenset())\n\n def qualify(self, *qualifiers):\n \"\"\"Qualify this type with a qualifier such as ``const`` or ``restrict``\"\"\"\n qualifiers = list(qualifiers)\n qualifiers.extend(self.qualifiers)\n attribs = dict(vars(self), qualifiers=qualifiers)\n return type(self)(**attribs)\n <function token>\n\n def pointer(self):\n \"\"\"Get a pointer to this type\"\"\"\n return PointerType(self)\n\n @property\n def subtype_list(self):\n return [getattr(self, subtype) for subtype in self.subtypes]\n\n @property\n def comparison_type_list(self):\n return self.subtype_list\n <function token>\n\n def __ne__(self, other):\n return not self == other\n\n def __hash__(self):\n h = hash(type(self))\n for subtype in self.comparison_type_list:\n h = h ^ hash(subtype)\n return h\n\n def __getitem__(self, item):\n assert isinstance(item, (tuple, slice))\n\n def verify_slice(s):\n if s.start or s.stop or s.step not in (None, 1):\n raise minierror.InvalidTypeSpecification(\n 'Only a step of 1 may be provided to indicate C or Fortran contiguity'\n )\n if isinstance(item, tuple):\n step_idx = None\n for idx, s in enumerate(item):\n verify_slice(s)\n if s.step and (step_idx or idx not in (0, len(item) - 1)):\n raise minierror.InvalidTypeSpecification(\n 'Step may only be provided once, and only in the first or last dimension.'\n )\n if s.step == 1:\n step_idx = idx\n return ArrayType(self, len(item), is_c_contig=step_idx == len(\n item) - 1, is_f_contig=step_idx == 0)\n else:\n verify_slice(item)\n return ArrayType(self, 1, is_c_contig=bool(item.step))\n\n def to_llvm(self, context):\n \"\"\"Get a corresponding llvm type from this type\"\"\"\n return context.to_llvm(self)\n\n def __getattr__(self, attr):\n if attr.startswith('is_'):\n return False\n return getattr(type(self), attr)\n\n\nclass ArrayType(Type):\n is_array = True\n subtypes = ['dtype']\n\n def __init__(self, dtype, ndim, is_c_contig=False, is_f_contig=False,\n inner_contig=False, broadcasting=None):\n super(ArrayType, self).__init__()\n self.dtype = dtype\n self.ndim = ndim\n self.is_c_contig = is_c_contig\n self.is_f_contig = is_f_contig\n self.inner_contig = inner_contig or is_c_contig or is_f_contig\n self.broadcasting = broadcasting or (True,) * ndim\n\n @property\n def comparison_type_list(self):\n return [self.dtype, self.is_c_contig, self.is_f_contig, self.\n inner_contig]\n\n def pointer(self):\n raise Exception('You probably want a pointer type to the dtype')\n\n def to_llvm(self, context):\n return context.to_llvm(self)\n\n def __repr__(self):\n axes = [':'] * self.ndim\n if self.is_c_contig:\n axes[-1] = '::1'\n elif self.is_f_contig:\n axes[0] = '::1'\n return '%s[%s]' % (self.dtype, ', '.join(axes))\n\n\nclass PointerType(Type):\n is_pointer = True\n subtypes = ['base_type']\n\n def __init__(self, base_type, **kwds):\n super(PointerType, self).__init__(**kwds)\n self.base_type = base_type\n\n def __repr__(self):\n return '%s *%s' % (self.base_type, ' '.join(self.qualifiers))\n\n def to_llvm(self, context):\n return llvm.core.Type.pointer(self.base_type.to_llvm(context))\n\n\nclass CArrayType(Type):\n is_carray = True\n subtypes = ['base_type']\n\n def __init__(self, base_type, size, **kwds):\n super(CArrayType, self).__init__(**kwds)\n self.base_type = base_type\n self.size = size\n\n def __repr__(self):\n return '%s[%d]' % (self.base_type, self.length)\n\n def to_llvm(self, context):\n return llvm.core.Type.array(self.base_type.to_llvm(context), self.size)\n\n\nclass TypeWrapper(Type):\n is_typewrapper = True\n subtypes = ['opaque_type']\n\n def __init__(self, opaque_type, context, **kwds):\n super(TypeWrapper, self).__init__(**kwds)\n self.opaque_type = opaque_type\n self.context = context\n\n def __repr__(self):\n return self.context.declare_type(self)\n\n def __deepcopy__(self, memo):\n return self\n\n\nclass NamedType(Type):\n name = None\n\n def __eq__(self, other):\n return isinstance(other, NamedType) and self.name == other.name\n\n def __repr__(self):\n if self.qualifiers:\n return '%s %s' % (self.name, ' '.join(self.qualifiers))\n return self.name\n\n\nclass BoolType(NamedType):\n is_bool = True\n name = 'bool'\n\n def __repr__(self):\n return 'int %s' % ' '.join(self.qualifiers)\n\n def to_llvm(self, context):\n return int8.to_llvm(context)\n\n\nclass NumericType(NamedType):\n \"\"\"\n Base class for numeric types.\n\n .. attribute:: name\n\n name of the type\n\n .. attribute:: itemsize\n\n sizeof(type)\n\n .. attribute:: rank\n\n ordering of numeric types\n \"\"\"\n is_numeric = True\n\n\nclass IntType(NumericType):\n is_int = True\n is_int_like = True\n name = 'int'\n signed = True\n rank = 4\n itemsize = 4\n kind = INT_KIND\n\n def to_llvm(self, context):\n if self.itemsize == 1:\n return lc.Type.int(8)\n elif self.itemsize == 2:\n return lc.Type.int(16)\n elif self.itemsize == 4:\n return lc.Type.int(32)\n else:\n assert self.itemsize == 8, self\n return lc.Type.int(64)\n\n\nclass FloatType(NumericType):\n is_float = True\n kind = FLOAT_KIND\n\n @property\n def comparison_type_list(self):\n return self.subtype_list + [self.itemsize]\n\n def to_llvm(self, context):\n if self.itemsize == 4:\n return lc.Type.float()\n elif self.itemsize == 8:\n return lc.Type.double()\n else:\n assert self.itemsize == 16\n return lc.Type.fp128()\n\n\nclass ComplexType(NumericType):\n is_complex = True\n subtypes = ['base_type']\n kind = COMPLEX_KIND\n\n\nclass Py_ssize_t_Type(IntType):\n is_py_ssize_t = True\n name = 'Py_ssize_t'\n rank = 9\n signed = True\n\n def __init__(self, **kwds):\n super(Py_ssize_t_Type, self).__init__(**kwds)\n self.itemsize = _plat_bits / 8\n\n\nclass NPyIntp(IntType):\n is_numpy_intp = True\n name = 'npy_intp'\n\n def __init__(self, **kwds):\n super(NPyIntp, self).__init__(**kwds)\n import numpy as np\n ctypes_array = np.empty(0).ctypes.strides\n self.itemsize = ctypes.sizeof(ctypes_array._type_)\n\n\nclass CharType(IntType):\n is_char = True\n name = 'char'\n rank = 1\n signed = True\n\n def to_llvm(self, context):\n return lc.Type.int(8)\n\n\nclass CStringType(Type):\n is_c_string = True\n\n def __repr__(self):\n return 'const char *'\n\n def to_llvm(self, context):\n return char.pointer().to_llvm(context)\n\n\nclass VoidType(NamedType):\n is_void = True\n name = 'void'\n\n def to_llvm(self, context):\n return lc.Type.void()\n\n\nclass ObjectType(Type):\n is_object = True\n\n def __repr__(self):\n return 'PyObject *'\n\n\nclass FunctionType(Type):\n subtypes = ['return_type', 'args']\n is_function = True\n is_vararg = False\n\n def to_llvm(self, context):\n return lc.Type.function(self.return_type.to_llvm(context), [\n arg_type.to_llvm(context) for arg_type in self.args], self.\n is_vararg)\n\n def __str__(self):\n args = map(str, self.args)\n if self.is_vararg:\n args.append('...')\n return '%s (*)(%s)' % (self.return_type, ', '.join(args))\n\n\nclass VectorType(Type):\n subtypes = ['element_type']\n is_vector = True\n vector_size = None\n\n def __init__(self, element_type, vector_size, **kwds):\n super(VectorType, self).__init__(**kwds)\n assert (element_type.is_int or element_type.is_float\n ) and element_type.itemsize in (4, 8), element_type\n self.element_type = element_type\n self.vector_size = vector_size\n\n def to_llvm(self, context):\n return lc.Type.vector(self.element_type.to_llvm(context), self.\n vector_size)\n\n @property\n def comparison_type_list(self):\n return self.subtype_list + [self.vector_size]\n\n def __str__(self):\n itemsize = self.element_type.itemsize\n if self.element_type.is_float:\n if itemsize == 4:\n return '__m128'\n else:\n return '__m128d'\n elif itemsize == 4:\n return '__m128i'\n else:\n raise NotImplementedError\n\n\n<assignment token>\n<code token>\n",
"<docstring token>\n<assignment token>\n<import token>\n<code token>\n<import token>\n<code token>\n<class token>\n<function token>\n<assignment token>\n\n\nclass Type(miniutils.ComparableObjectMixin):\n <docstring token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n\n def __init__(self, **kwds):\n vars(self).update(kwds)\n self.qualifiers = kwds.get('qualifiers', frozenset())\n\n def qualify(self, *qualifiers):\n \"\"\"Qualify this type with a qualifier such as ``const`` or ``restrict``\"\"\"\n qualifiers = list(qualifiers)\n qualifiers.extend(self.qualifiers)\n attribs = dict(vars(self), qualifiers=qualifiers)\n return type(self)(**attribs)\n <function token>\n\n def pointer(self):\n \"\"\"Get a pointer to this type\"\"\"\n return PointerType(self)\n <function token>\n\n @property\n def comparison_type_list(self):\n return self.subtype_list\n <function token>\n\n def __ne__(self, other):\n return not self == other\n\n def __hash__(self):\n h = hash(type(self))\n for subtype in self.comparison_type_list:\n h = h ^ hash(subtype)\n return h\n\n def __getitem__(self, item):\n assert isinstance(item, (tuple, slice))\n\n def verify_slice(s):\n if s.start or s.stop or s.step not in (None, 1):\n raise minierror.InvalidTypeSpecification(\n 'Only a step of 1 may be provided to indicate C or Fortran contiguity'\n )\n if isinstance(item, tuple):\n step_idx = None\n for idx, s in enumerate(item):\n verify_slice(s)\n if s.step and (step_idx or idx not in (0, len(item) - 1)):\n raise minierror.InvalidTypeSpecification(\n 'Step may only be provided once, and only in the first or last dimension.'\n )\n if s.step == 1:\n step_idx = idx\n return ArrayType(self, len(item), is_c_contig=step_idx == len(\n item) - 1, is_f_contig=step_idx == 0)\n else:\n verify_slice(item)\n return ArrayType(self, 1, is_c_contig=bool(item.step))\n\n def to_llvm(self, context):\n \"\"\"Get a corresponding llvm type from this type\"\"\"\n return context.to_llvm(self)\n\n def __getattr__(self, attr):\n if attr.startswith('is_'):\n return False\n return getattr(type(self), attr)\n\n\nclass ArrayType(Type):\n is_array = True\n subtypes = ['dtype']\n\n def __init__(self, dtype, ndim, is_c_contig=False, is_f_contig=False,\n inner_contig=False, broadcasting=None):\n super(ArrayType, self).__init__()\n self.dtype = dtype\n self.ndim = ndim\n self.is_c_contig = is_c_contig\n self.is_f_contig = is_f_contig\n self.inner_contig = inner_contig or is_c_contig or is_f_contig\n self.broadcasting = broadcasting or (True,) * ndim\n\n @property\n def comparison_type_list(self):\n return [self.dtype, self.is_c_contig, self.is_f_contig, self.\n inner_contig]\n\n def pointer(self):\n raise Exception('You probably want a pointer type to the dtype')\n\n def to_llvm(self, context):\n return context.to_llvm(self)\n\n def __repr__(self):\n axes = [':'] * self.ndim\n if self.is_c_contig:\n axes[-1] = '::1'\n elif self.is_f_contig:\n axes[0] = '::1'\n return '%s[%s]' % (self.dtype, ', '.join(axes))\n\n\nclass PointerType(Type):\n is_pointer = True\n subtypes = ['base_type']\n\n def __init__(self, base_type, **kwds):\n super(PointerType, self).__init__(**kwds)\n self.base_type = base_type\n\n def __repr__(self):\n return '%s *%s' % (self.base_type, ' '.join(self.qualifiers))\n\n def to_llvm(self, context):\n return llvm.core.Type.pointer(self.base_type.to_llvm(context))\n\n\nclass CArrayType(Type):\n is_carray = True\n subtypes = ['base_type']\n\n def __init__(self, base_type, size, **kwds):\n super(CArrayType, self).__init__(**kwds)\n self.base_type = base_type\n self.size = size\n\n def __repr__(self):\n return '%s[%d]' % (self.base_type, self.length)\n\n def to_llvm(self, context):\n return llvm.core.Type.array(self.base_type.to_llvm(context), self.size)\n\n\nclass TypeWrapper(Type):\n is_typewrapper = True\n subtypes = ['opaque_type']\n\n def __init__(self, opaque_type, context, **kwds):\n super(TypeWrapper, self).__init__(**kwds)\n self.opaque_type = opaque_type\n self.context = context\n\n def __repr__(self):\n return self.context.declare_type(self)\n\n def __deepcopy__(self, memo):\n return self\n\n\nclass NamedType(Type):\n name = None\n\n def __eq__(self, other):\n return isinstance(other, NamedType) and self.name == other.name\n\n def __repr__(self):\n if self.qualifiers:\n return '%s %s' % (self.name, ' '.join(self.qualifiers))\n return self.name\n\n\nclass BoolType(NamedType):\n is_bool = True\n name = 'bool'\n\n def __repr__(self):\n return 'int %s' % ' '.join(self.qualifiers)\n\n def to_llvm(self, context):\n return int8.to_llvm(context)\n\n\nclass NumericType(NamedType):\n \"\"\"\n Base class for numeric types.\n\n .. attribute:: name\n\n name of the type\n\n .. attribute:: itemsize\n\n sizeof(type)\n\n .. attribute:: rank\n\n ordering of numeric types\n \"\"\"\n is_numeric = True\n\n\nclass IntType(NumericType):\n is_int = True\n is_int_like = True\n name = 'int'\n signed = True\n rank = 4\n itemsize = 4\n kind = INT_KIND\n\n def to_llvm(self, context):\n if self.itemsize == 1:\n return lc.Type.int(8)\n elif self.itemsize == 2:\n return lc.Type.int(16)\n elif self.itemsize == 4:\n return lc.Type.int(32)\n else:\n assert self.itemsize == 8, self\n return lc.Type.int(64)\n\n\nclass FloatType(NumericType):\n is_float = True\n kind = FLOAT_KIND\n\n @property\n def comparison_type_list(self):\n return self.subtype_list + [self.itemsize]\n\n def to_llvm(self, context):\n if self.itemsize == 4:\n return lc.Type.float()\n elif self.itemsize == 8:\n return lc.Type.double()\n else:\n assert self.itemsize == 16\n return lc.Type.fp128()\n\n\nclass ComplexType(NumericType):\n is_complex = True\n subtypes = ['base_type']\n kind = COMPLEX_KIND\n\n\nclass Py_ssize_t_Type(IntType):\n is_py_ssize_t = True\n name = 'Py_ssize_t'\n rank = 9\n signed = True\n\n def __init__(self, **kwds):\n super(Py_ssize_t_Type, self).__init__(**kwds)\n self.itemsize = _plat_bits / 8\n\n\nclass NPyIntp(IntType):\n is_numpy_intp = True\n name = 'npy_intp'\n\n def __init__(self, **kwds):\n super(NPyIntp, self).__init__(**kwds)\n import numpy as np\n ctypes_array = np.empty(0).ctypes.strides\n self.itemsize = ctypes.sizeof(ctypes_array._type_)\n\n\nclass CharType(IntType):\n is_char = True\n name = 'char'\n rank = 1\n signed = True\n\n def to_llvm(self, context):\n return lc.Type.int(8)\n\n\nclass CStringType(Type):\n is_c_string = True\n\n def __repr__(self):\n return 'const char *'\n\n def to_llvm(self, context):\n return char.pointer().to_llvm(context)\n\n\nclass VoidType(NamedType):\n is_void = True\n name = 'void'\n\n def to_llvm(self, context):\n return lc.Type.void()\n\n\nclass ObjectType(Type):\n is_object = True\n\n def __repr__(self):\n return 'PyObject *'\n\n\nclass FunctionType(Type):\n subtypes = ['return_type', 'args']\n is_function = True\n is_vararg = False\n\n def to_llvm(self, context):\n return lc.Type.function(self.return_type.to_llvm(context), [\n arg_type.to_llvm(context) for arg_type in self.args], self.\n is_vararg)\n\n def __str__(self):\n args = map(str, self.args)\n if self.is_vararg:\n args.append('...')\n return '%s (*)(%s)' % (self.return_type, ', '.join(args))\n\n\nclass VectorType(Type):\n subtypes = ['element_type']\n is_vector = True\n vector_size = None\n\n def __init__(self, element_type, vector_size, **kwds):\n super(VectorType, self).__init__(**kwds)\n assert (element_type.is_int or element_type.is_float\n ) and element_type.itemsize in (4, 8), element_type\n self.element_type = element_type\n self.vector_size = vector_size\n\n def to_llvm(self, context):\n return lc.Type.vector(self.element_type.to_llvm(context), self.\n vector_size)\n\n @property\n def comparison_type_list(self):\n return self.subtype_list + [self.vector_size]\n\n def __str__(self):\n itemsize = self.element_type.itemsize\n if self.element_type.is_float:\n if itemsize == 4:\n return '__m128'\n else:\n return '__m128d'\n elif itemsize == 4:\n return '__m128i'\n else:\n raise NotImplementedError\n\n\n<assignment token>\n<code token>\n",
"<docstring token>\n<assignment token>\n<import token>\n<code token>\n<import token>\n<code token>\n<class token>\n<function token>\n<assignment token>\n\n\nclass Type(miniutils.ComparableObjectMixin):\n <docstring token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n\n def __init__(self, **kwds):\n vars(self).update(kwds)\n self.qualifiers = kwds.get('qualifiers', frozenset())\n\n def qualify(self, *qualifiers):\n \"\"\"Qualify this type with a qualifier such as ``const`` or ``restrict``\"\"\"\n qualifiers = list(qualifiers)\n qualifiers.extend(self.qualifiers)\n attribs = dict(vars(self), qualifiers=qualifiers)\n return type(self)(**attribs)\n <function token>\n <function token>\n <function token>\n\n @property\n def comparison_type_list(self):\n return self.subtype_list\n <function token>\n\n def __ne__(self, other):\n return not self == other\n\n def __hash__(self):\n h = hash(type(self))\n for subtype in self.comparison_type_list:\n h = h ^ hash(subtype)\n return h\n\n def __getitem__(self, item):\n assert isinstance(item, (tuple, slice))\n\n def verify_slice(s):\n if s.start or s.stop or s.step not in (None, 1):\n raise minierror.InvalidTypeSpecification(\n 'Only a step of 1 may be provided to indicate C or Fortran contiguity'\n )\n if isinstance(item, tuple):\n step_idx = None\n for idx, s in enumerate(item):\n verify_slice(s)\n if s.step and (step_idx or idx not in (0, len(item) - 1)):\n raise minierror.InvalidTypeSpecification(\n 'Step may only be provided once, and only in the first or last dimension.'\n )\n if s.step == 1:\n step_idx = idx\n return ArrayType(self, len(item), is_c_contig=step_idx == len(\n item) - 1, is_f_contig=step_idx == 0)\n else:\n verify_slice(item)\n return ArrayType(self, 1, is_c_contig=bool(item.step))\n\n def to_llvm(self, context):\n \"\"\"Get a corresponding llvm type from this type\"\"\"\n return context.to_llvm(self)\n\n def __getattr__(self, attr):\n if attr.startswith('is_'):\n return False\n return getattr(type(self), attr)\n\n\nclass ArrayType(Type):\n is_array = True\n subtypes = ['dtype']\n\n def __init__(self, dtype, ndim, is_c_contig=False, is_f_contig=False,\n inner_contig=False, broadcasting=None):\n super(ArrayType, self).__init__()\n self.dtype = dtype\n self.ndim = ndim\n self.is_c_contig = is_c_contig\n self.is_f_contig = is_f_contig\n self.inner_contig = inner_contig or is_c_contig or is_f_contig\n self.broadcasting = broadcasting or (True,) * ndim\n\n @property\n def comparison_type_list(self):\n return [self.dtype, self.is_c_contig, self.is_f_contig, self.\n inner_contig]\n\n def pointer(self):\n raise Exception('You probably want a pointer type to the dtype')\n\n def to_llvm(self, context):\n return context.to_llvm(self)\n\n def __repr__(self):\n axes = [':'] * self.ndim\n if self.is_c_contig:\n axes[-1] = '::1'\n elif self.is_f_contig:\n axes[0] = '::1'\n return '%s[%s]' % (self.dtype, ', '.join(axes))\n\n\nclass PointerType(Type):\n is_pointer = True\n subtypes = ['base_type']\n\n def __init__(self, base_type, **kwds):\n super(PointerType, self).__init__(**kwds)\n self.base_type = base_type\n\n def __repr__(self):\n return '%s *%s' % (self.base_type, ' '.join(self.qualifiers))\n\n def to_llvm(self, context):\n return llvm.core.Type.pointer(self.base_type.to_llvm(context))\n\n\nclass CArrayType(Type):\n is_carray = True\n subtypes = ['base_type']\n\n def __init__(self, base_type, size, **kwds):\n super(CArrayType, self).__init__(**kwds)\n self.base_type = base_type\n self.size = size\n\n def __repr__(self):\n return '%s[%d]' % (self.base_type, self.length)\n\n def to_llvm(self, context):\n return llvm.core.Type.array(self.base_type.to_llvm(context), self.size)\n\n\nclass TypeWrapper(Type):\n is_typewrapper = True\n subtypes = ['opaque_type']\n\n def __init__(self, opaque_type, context, **kwds):\n super(TypeWrapper, self).__init__(**kwds)\n self.opaque_type = opaque_type\n self.context = context\n\n def __repr__(self):\n return self.context.declare_type(self)\n\n def __deepcopy__(self, memo):\n return self\n\n\nclass NamedType(Type):\n name = None\n\n def __eq__(self, other):\n return isinstance(other, NamedType) and self.name == other.name\n\n def __repr__(self):\n if self.qualifiers:\n return '%s %s' % (self.name, ' '.join(self.qualifiers))\n return self.name\n\n\nclass BoolType(NamedType):\n is_bool = True\n name = 'bool'\n\n def __repr__(self):\n return 'int %s' % ' '.join(self.qualifiers)\n\n def to_llvm(self, context):\n return int8.to_llvm(context)\n\n\nclass NumericType(NamedType):\n \"\"\"\n Base class for numeric types.\n\n .. attribute:: name\n\n name of the type\n\n .. attribute:: itemsize\n\n sizeof(type)\n\n .. attribute:: rank\n\n ordering of numeric types\n \"\"\"\n is_numeric = True\n\n\nclass IntType(NumericType):\n is_int = True\n is_int_like = True\n name = 'int'\n signed = True\n rank = 4\n itemsize = 4\n kind = INT_KIND\n\n def to_llvm(self, context):\n if self.itemsize == 1:\n return lc.Type.int(8)\n elif self.itemsize == 2:\n return lc.Type.int(16)\n elif self.itemsize == 4:\n return lc.Type.int(32)\n else:\n assert self.itemsize == 8, self\n return lc.Type.int(64)\n\n\nclass FloatType(NumericType):\n is_float = True\n kind = FLOAT_KIND\n\n @property\n def comparison_type_list(self):\n return self.subtype_list + [self.itemsize]\n\n def to_llvm(self, context):\n if self.itemsize == 4:\n return lc.Type.float()\n elif self.itemsize == 8:\n return lc.Type.double()\n else:\n assert self.itemsize == 16\n return lc.Type.fp128()\n\n\nclass ComplexType(NumericType):\n is_complex = True\n subtypes = ['base_type']\n kind = COMPLEX_KIND\n\n\nclass Py_ssize_t_Type(IntType):\n is_py_ssize_t = True\n name = 'Py_ssize_t'\n rank = 9\n signed = True\n\n def __init__(self, **kwds):\n super(Py_ssize_t_Type, self).__init__(**kwds)\n self.itemsize = _plat_bits / 8\n\n\nclass NPyIntp(IntType):\n is_numpy_intp = True\n name = 'npy_intp'\n\n def __init__(self, **kwds):\n super(NPyIntp, self).__init__(**kwds)\n import numpy as np\n ctypes_array = np.empty(0).ctypes.strides\n self.itemsize = ctypes.sizeof(ctypes_array._type_)\n\n\nclass CharType(IntType):\n is_char = True\n name = 'char'\n rank = 1\n signed = True\n\n def to_llvm(self, context):\n return lc.Type.int(8)\n\n\nclass CStringType(Type):\n is_c_string = True\n\n def __repr__(self):\n return 'const char *'\n\n def to_llvm(self, context):\n return char.pointer().to_llvm(context)\n\n\nclass VoidType(NamedType):\n is_void = True\n name = 'void'\n\n def to_llvm(self, context):\n return lc.Type.void()\n\n\nclass ObjectType(Type):\n is_object = True\n\n def __repr__(self):\n return 'PyObject *'\n\n\nclass FunctionType(Type):\n subtypes = ['return_type', 'args']\n is_function = True\n is_vararg = False\n\n def to_llvm(self, context):\n return lc.Type.function(self.return_type.to_llvm(context), [\n arg_type.to_llvm(context) for arg_type in self.args], self.\n is_vararg)\n\n def __str__(self):\n args = map(str, self.args)\n if self.is_vararg:\n args.append('...')\n return '%s (*)(%s)' % (self.return_type, ', '.join(args))\n\n\nclass VectorType(Type):\n subtypes = ['element_type']\n is_vector = True\n vector_size = None\n\n def __init__(self, element_type, vector_size, **kwds):\n super(VectorType, self).__init__(**kwds)\n assert (element_type.is_int or element_type.is_float\n ) and element_type.itemsize in (4, 8), element_type\n self.element_type = element_type\n self.vector_size = vector_size\n\n def to_llvm(self, context):\n return lc.Type.vector(self.element_type.to_llvm(context), self.\n vector_size)\n\n @property\n def comparison_type_list(self):\n return self.subtype_list + [self.vector_size]\n\n def __str__(self):\n itemsize = self.element_type.itemsize\n if self.element_type.is_float:\n if itemsize == 4:\n return '__m128'\n else:\n return '__m128d'\n elif itemsize == 4:\n return '__m128i'\n else:\n raise NotImplementedError\n\n\n<assignment token>\n<code token>\n",
"<docstring token>\n<assignment token>\n<import token>\n<code token>\n<import token>\n<code token>\n<class token>\n<function token>\n<assignment token>\n\n\nclass Type(miniutils.ComparableObjectMixin):\n <docstring token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n\n def __init__(self, **kwds):\n vars(self).update(kwds)\n self.qualifiers = kwds.get('qualifiers', frozenset())\n\n def qualify(self, *qualifiers):\n \"\"\"Qualify this type with a qualifier such as ``const`` or ``restrict``\"\"\"\n qualifiers = list(qualifiers)\n qualifiers.extend(self.qualifiers)\n attribs = dict(vars(self), qualifiers=qualifiers)\n return type(self)(**attribs)\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def __ne__(self, other):\n return not self == other\n\n def __hash__(self):\n h = hash(type(self))\n for subtype in self.comparison_type_list:\n h = h ^ hash(subtype)\n return h\n\n def __getitem__(self, item):\n assert isinstance(item, (tuple, slice))\n\n def verify_slice(s):\n if s.start or s.stop or s.step not in (None, 1):\n raise minierror.InvalidTypeSpecification(\n 'Only a step of 1 may be provided to indicate C or Fortran contiguity'\n )\n if isinstance(item, tuple):\n step_idx = None\n for idx, s in enumerate(item):\n verify_slice(s)\n if s.step and (step_idx or idx not in (0, len(item) - 1)):\n raise minierror.InvalidTypeSpecification(\n 'Step may only be provided once, and only in the first or last dimension.'\n )\n if s.step == 1:\n step_idx = idx\n return ArrayType(self, len(item), is_c_contig=step_idx == len(\n item) - 1, is_f_contig=step_idx == 0)\n else:\n verify_slice(item)\n return ArrayType(self, 1, is_c_contig=bool(item.step))\n\n def to_llvm(self, context):\n \"\"\"Get a corresponding llvm type from this type\"\"\"\n return context.to_llvm(self)\n\n def __getattr__(self, attr):\n if attr.startswith('is_'):\n return False\n return getattr(type(self), attr)\n\n\nclass ArrayType(Type):\n is_array = True\n subtypes = ['dtype']\n\n def __init__(self, dtype, ndim, is_c_contig=False, is_f_contig=False,\n inner_contig=False, broadcasting=None):\n super(ArrayType, self).__init__()\n self.dtype = dtype\n self.ndim = ndim\n self.is_c_contig = is_c_contig\n self.is_f_contig = is_f_contig\n self.inner_contig = inner_contig or is_c_contig or is_f_contig\n self.broadcasting = broadcasting or (True,) * ndim\n\n @property\n def comparison_type_list(self):\n return [self.dtype, self.is_c_contig, self.is_f_contig, self.\n inner_contig]\n\n def pointer(self):\n raise Exception('You probably want a pointer type to the dtype')\n\n def to_llvm(self, context):\n return context.to_llvm(self)\n\n def __repr__(self):\n axes = [':'] * self.ndim\n if self.is_c_contig:\n axes[-1] = '::1'\n elif self.is_f_contig:\n axes[0] = '::1'\n return '%s[%s]' % (self.dtype, ', '.join(axes))\n\n\nclass PointerType(Type):\n is_pointer = True\n subtypes = ['base_type']\n\n def __init__(self, base_type, **kwds):\n super(PointerType, self).__init__(**kwds)\n self.base_type = base_type\n\n def __repr__(self):\n return '%s *%s' % (self.base_type, ' '.join(self.qualifiers))\n\n def to_llvm(self, context):\n return llvm.core.Type.pointer(self.base_type.to_llvm(context))\n\n\nclass CArrayType(Type):\n is_carray = True\n subtypes = ['base_type']\n\n def __init__(self, base_type, size, **kwds):\n super(CArrayType, self).__init__(**kwds)\n self.base_type = base_type\n self.size = size\n\n def __repr__(self):\n return '%s[%d]' % (self.base_type, self.length)\n\n def to_llvm(self, context):\n return llvm.core.Type.array(self.base_type.to_llvm(context), self.size)\n\n\nclass TypeWrapper(Type):\n is_typewrapper = True\n subtypes = ['opaque_type']\n\n def __init__(self, opaque_type, context, **kwds):\n super(TypeWrapper, self).__init__(**kwds)\n self.opaque_type = opaque_type\n self.context = context\n\n def __repr__(self):\n return self.context.declare_type(self)\n\n def __deepcopy__(self, memo):\n return self\n\n\nclass NamedType(Type):\n name = None\n\n def __eq__(self, other):\n return isinstance(other, NamedType) and self.name == other.name\n\n def __repr__(self):\n if self.qualifiers:\n return '%s %s' % (self.name, ' '.join(self.qualifiers))\n return self.name\n\n\nclass BoolType(NamedType):\n is_bool = True\n name = 'bool'\n\n def __repr__(self):\n return 'int %s' % ' '.join(self.qualifiers)\n\n def to_llvm(self, context):\n return int8.to_llvm(context)\n\n\nclass NumericType(NamedType):\n \"\"\"\n Base class for numeric types.\n\n .. attribute:: name\n\n name of the type\n\n .. attribute:: itemsize\n\n sizeof(type)\n\n .. attribute:: rank\n\n ordering of numeric types\n \"\"\"\n is_numeric = True\n\n\nclass IntType(NumericType):\n is_int = True\n is_int_like = True\n name = 'int'\n signed = True\n rank = 4\n itemsize = 4\n kind = INT_KIND\n\n def to_llvm(self, context):\n if self.itemsize == 1:\n return lc.Type.int(8)\n elif self.itemsize == 2:\n return lc.Type.int(16)\n elif self.itemsize == 4:\n return lc.Type.int(32)\n else:\n assert self.itemsize == 8, self\n return lc.Type.int(64)\n\n\nclass FloatType(NumericType):\n is_float = True\n kind = FLOAT_KIND\n\n @property\n def comparison_type_list(self):\n return self.subtype_list + [self.itemsize]\n\n def to_llvm(self, context):\n if self.itemsize == 4:\n return lc.Type.float()\n elif self.itemsize == 8:\n return lc.Type.double()\n else:\n assert self.itemsize == 16\n return lc.Type.fp128()\n\n\nclass ComplexType(NumericType):\n is_complex = True\n subtypes = ['base_type']\n kind = COMPLEX_KIND\n\n\nclass Py_ssize_t_Type(IntType):\n is_py_ssize_t = True\n name = 'Py_ssize_t'\n rank = 9\n signed = True\n\n def __init__(self, **kwds):\n super(Py_ssize_t_Type, self).__init__(**kwds)\n self.itemsize = _plat_bits / 8\n\n\nclass NPyIntp(IntType):\n is_numpy_intp = True\n name = 'npy_intp'\n\n def __init__(self, **kwds):\n super(NPyIntp, self).__init__(**kwds)\n import numpy as np\n ctypes_array = np.empty(0).ctypes.strides\n self.itemsize = ctypes.sizeof(ctypes_array._type_)\n\n\nclass CharType(IntType):\n is_char = True\n name = 'char'\n rank = 1\n signed = True\n\n def to_llvm(self, context):\n return lc.Type.int(8)\n\n\nclass CStringType(Type):\n is_c_string = True\n\n def __repr__(self):\n return 'const char *'\n\n def to_llvm(self, context):\n return char.pointer().to_llvm(context)\n\n\nclass VoidType(NamedType):\n is_void = True\n name = 'void'\n\n def to_llvm(self, context):\n return lc.Type.void()\n\n\nclass ObjectType(Type):\n is_object = True\n\n def __repr__(self):\n return 'PyObject *'\n\n\nclass FunctionType(Type):\n subtypes = ['return_type', 'args']\n is_function = True\n is_vararg = False\n\n def to_llvm(self, context):\n return lc.Type.function(self.return_type.to_llvm(context), [\n arg_type.to_llvm(context) for arg_type in self.args], self.\n is_vararg)\n\n def __str__(self):\n args = map(str, self.args)\n if self.is_vararg:\n args.append('...')\n return '%s (*)(%s)' % (self.return_type, ', '.join(args))\n\n\nclass VectorType(Type):\n subtypes = ['element_type']\n is_vector = True\n vector_size = None\n\n def __init__(self, element_type, vector_size, **kwds):\n super(VectorType, self).__init__(**kwds)\n assert (element_type.is_int or element_type.is_float\n ) and element_type.itemsize in (4, 8), element_type\n self.element_type = element_type\n self.vector_size = vector_size\n\n def to_llvm(self, context):\n return lc.Type.vector(self.element_type.to_llvm(context), self.\n vector_size)\n\n @property\n def comparison_type_list(self):\n return self.subtype_list + [self.vector_size]\n\n def __str__(self):\n itemsize = self.element_type.itemsize\n if self.element_type.is_float:\n if itemsize == 4:\n return '__m128'\n else:\n return '__m128d'\n elif itemsize == 4:\n return '__m128i'\n else:\n raise NotImplementedError\n\n\n<assignment token>\n<code token>\n",
"<docstring token>\n<assignment token>\n<import token>\n<code token>\n<import token>\n<code token>\n<class token>\n<function token>\n<assignment token>\n\n\nclass Type(miniutils.ComparableObjectMixin):\n <docstring token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n\n def __init__(self, **kwds):\n vars(self).update(kwds)\n self.qualifiers = kwds.get('qualifiers', frozenset())\n\n def qualify(self, *qualifiers):\n \"\"\"Qualify this type with a qualifier such as ``const`` or ``restrict``\"\"\"\n qualifiers = list(qualifiers)\n qualifiers.extend(self.qualifiers)\n attribs = dict(vars(self), qualifiers=qualifiers)\n return type(self)(**attribs)\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def __ne__(self, other):\n return not self == other\n\n def __hash__(self):\n h = hash(type(self))\n for subtype in self.comparison_type_list:\n h = h ^ hash(subtype)\n return h\n <function token>\n\n def to_llvm(self, context):\n \"\"\"Get a corresponding llvm type from this type\"\"\"\n return context.to_llvm(self)\n\n def __getattr__(self, attr):\n if attr.startswith('is_'):\n return False\n return getattr(type(self), attr)\n\n\nclass ArrayType(Type):\n is_array = True\n subtypes = ['dtype']\n\n def __init__(self, dtype, ndim, is_c_contig=False, is_f_contig=False,\n inner_contig=False, broadcasting=None):\n super(ArrayType, self).__init__()\n self.dtype = dtype\n self.ndim = ndim\n self.is_c_contig = is_c_contig\n self.is_f_contig = is_f_contig\n self.inner_contig = inner_contig or is_c_contig or is_f_contig\n self.broadcasting = broadcasting or (True,) * ndim\n\n @property\n def comparison_type_list(self):\n return [self.dtype, self.is_c_contig, self.is_f_contig, self.\n inner_contig]\n\n def pointer(self):\n raise Exception('You probably want a pointer type to the dtype')\n\n def to_llvm(self, context):\n return context.to_llvm(self)\n\n def __repr__(self):\n axes = [':'] * self.ndim\n if self.is_c_contig:\n axes[-1] = '::1'\n elif self.is_f_contig:\n axes[0] = '::1'\n return '%s[%s]' % (self.dtype, ', '.join(axes))\n\n\nclass PointerType(Type):\n is_pointer = True\n subtypes = ['base_type']\n\n def __init__(self, base_type, **kwds):\n super(PointerType, self).__init__(**kwds)\n self.base_type = base_type\n\n def __repr__(self):\n return '%s *%s' % (self.base_type, ' '.join(self.qualifiers))\n\n def to_llvm(self, context):\n return llvm.core.Type.pointer(self.base_type.to_llvm(context))\n\n\nclass CArrayType(Type):\n is_carray = True\n subtypes = ['base_type']\n\n def __init__(self, base_type, size, **kwds):\n super(CArrayType, self).__init__(**kwds)\n self.base_type = base_type\n self.size = size\n\n def __repr__(self):\n return '%s[%d]' % (self.base_type, self.length)\n\n def to_llvm(self, context):\n return llvm.core.Type.array(self.base_type.to_llvm(context), self.size)\n\n\nclass TypeWrapper(Type):\n is_typewrapper = True\n subtypes = ['opaque_type']\n\n def __init__(self, opaque_type, context, **kwds):\n super(TypeWrapper, self).__init__(**kwds)\n self.opaque_type = opaque_type\n self.context = context\n\n def __repr__(self):\n return self.context.declare_type(self)\n\n def __deepcopy__(self, memo):\n return self\n\n\nclass NamedType(Type):\n name = None\n\n def __eq__(self, other):\n return isinstance(other, NamedType) and self.name == other.name\n\n def __repr__(self):\n if self.qualifiers:\n return '%s %s' % (self.name, ' '.join(self.qualifiers))\n return self.name\n\n\nclass BoolType(NamedType):\n is_bool = True\n name = 'bool'\n\n def __repr__(self):\n return 'int %s' % ' '.join(self.qualifiers)\n\n def to_llvm(self, context):\n return int8.to_llvm(context)\n\n\nclass NumericType(NamedType):\n \"\"\"\n Base class for numeric types.\n\n .. attribute:: name\n\n name of the type\n\n .. attribute:: itemsize\n\n sizeof(type)\n\n .. attribute:: rank\n\n ordering of numeric types\n \"\"\"\n is_numeric = True\n\n\nclass IntType(NumericType):\n is_int = True\n is_int_like = True\n name = 'int'\n signed = True\n rank = 4\n itemsize = 4\n kind = INT_KIND\n\n def to_llvm(self, context):\n if self.itemsize == 1:\n return lc.Type.int(8)\n elif self.itemsize == 2:\n return lc.Type.int(16)\n elif self.itemsize == 4:\n return lc.Type.int(32)\n else:\n assert self.itemsize == 8, self\n return lc.Type.int(64)\n\n\nclass FloatType(NumericType):\n is_float = True\n kind = FLOAT_KIND\n\n @property\n def comparison_type_list(self):\n return self.subtype_list + [self.itemsize]\n\n def to_llvm(self, context):\n if self.itemsize == 4:\n return lc.Type.float()\n elif self.itemsize == 8:\n return lc.Type.double()\n else:\n assert self.itemsize == 16\n return lc.Type.fp128()\n\n\nclass ComplexType(NumericType):\n is_complex = True\n subtypes = ['base_type']\n kind = COMPLEX_KIND\n\n\nclass Py_ssize_t_Type(IntType):\n is_py_ssize_t = True\n name = 'Py_ssize_t'\n rank = 9\n signed = True\n\n def __init__(self, **kwds):\n super(Py_ssize_t_Type, self).__init__(**kwds)\n self.itemsize = _plat_bits / 8\n\n\nclass NPyIntp(IntType):\n is_numpy_intp = True\n name = 'npy_intp'\n\n def __init__(self, **kwds):\n super(NPyIntp, self).__init__(**kwds)\n import numpy as np\n ctypes_array = np.empty(0).ctypes.strides\n self.itemsize = ctypes.sizeof(ctypes_array._type_)\n\n\nclass CharType(IntType):\n is_char = True\n name = 'char'\n rank = 1\n signed = True\n\n def to_llvm(self, context):\n return lc.Type.int(8)\n\n\nclass CStringType(Type):\n is_c_string = True\n\n def __repr__(self):\n return 'const char *'\n\n def to_llvm(self, context):\n return char.pointer().to_llvm(context)\n\n\nclass VoidType(NamedType):\n is_void = True\n name = 'void'\n\n def to_llvm(self, context):\n return lc.Type.void()\n\n\nclass ObjectType(Type):\n is_object = True\n\n def __repr__(self):\n return 'PyObject *'\n\n\nclass FunctionType(Type):\n subtypes = ['return_type', 'args']\n is_function = True\n is_vararg = False\n\n def to_llvm(self, context):\n return lc.Type.function(self.return_type.to_llvm(context), [\n arg_type.to_llvm(context) for arg_type in self.args], self.\n is_vararg)\n\n def __str__(self):\n args = map(str, self.args)\n if self.is_vararg:\n args.append('...')\n return '%s (*)(%s)' % (self.return_type, ', '.join(args))\n\n\nclass VectorType(Type):\n subtypes = ['element_type']\n is_vector = True\n vector_size = None\n\n def __init__(self, element_type, vector_size, **kwds):\n super(VectorType, self).__init__(**kwds)\n assert (element_type.is_int or element_type.is_float\n ) and element_type.itemsize in (4, 8), element_type\n self.element_type = element_type\n self.vector_size = vector_size\n\n def to_llvm(self, context):\n return lc.Type.vector(self.element_type.to_llvm(context), self.\n vector_size)\n\n @property\n def comparison_type_list(self):\n return self.subtype_list + [self.vector_size]\n\n def __str__(self):\n itemsize = self.element_type.itemsize\n if self.element_type.is_float:\n if itemsize == 4:\n return '__m128'\n else:\n return '__m128d'\n elif itemsize == 4:\n return '__m128i'\n else:\n raise NotImplementedError\n\n\n<assignment token>\n<code token>\n",
"<docstring token>\n<assignment token>\n<import token>\n<code token>\n<import token>\n<code token>\n<class token>\n<function token>\n<assignment token>\n\n\nclass Type(miniutils.ComparableObjectMixin):\n <docstring token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n\n def __init__(self, **kwds):\n vars(self).update(kwds)\n self.qualifiers = kwds.get('qualifiers', frozenset())\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def __ne__(self, other):\n return not self == other\n\n def __hash__(self):\n h = hash(type(self))\n for subtype in self.comparison_type_list:\n h = h ^ hash(subtype)\n return h\n <function token>\n\n def to_llvm(self, context):\n \"\"\"Get a corresponding llvm type from this type\"\"\"\n return context.to_llvm(self)\n\n def __getattr__(self, attr):\n if attr.startswith('is_'):\n return False\n return getattr(type(self), attr)\n\n\nclass ArrayType(Type):\n is_array = True\n subtypes = ['dtype']\n\n def __init__(self, dtype, ndim, is_c_contig=False, is_f_contig=False,\n inner_contig=False, broadcasting=None):\n super(ArrayType, self).__init__()\n self.dtype = dtype\n self.ndim = ndim\n self.is_c_contig = is_c_contig\n self.is_f_contig = is_f_contig\n self.inner_contig = inner_contig or is_c_contig or is_f_contig\n self.broadcasting = broadcasting or (True,) * ndim\n\n @property\n def comparison_type_list(self):\n return [self.dtype, self.is_c_contig, self.is_f_contig, self.\n inner_contig]\n\n def pointer(self):\n raise Exception('You probably want a pointer type to the dtype')\n\n def to_llvm(self, context):\n return context.to_llvm(self)\n\n def __repr__(self):\n axes = [':'] * self.ndim\n if self.is_c_contig:\n axes[-1] = '::1'\n elif self.is_f_contig:\n axes[0] = '::1'\n return '%s[%s]' % (self.dtype, ', '.join(axes))\n\n\nclass PointerType(Type):\n is_pointer = True\n subtypes = ['base_type']\n\n def __init__(self, base_type, **kwds):\n super(PointerType, self).__init__(**kwds)\n self.base_type = base_type\n\n def __repr__(self):\n return '%s *%s' % (self.base_type, ' '.join(self.qualifiers))\n\n def to_llvm(self, context):\n return llvm.core.Type.pointer(self.base_type.to_llvm(context))\n\n\nclass CArrayType(Type):\n is_carray = True\n subtypes = ['base_type']\n\n def __init__(self, base_type, size, **kwds):\n super(CArrayType, self).__init__(**kwds)\n self.base_type = base_type\n self.size = size\n\n def __repr__(self):\n return '%s[%d]' % (self.base_type, self.length)\n\n def to_llvm(self, context):\n return llvm.core.Type.array(self.base_type.to_llvm(context), self.size)\n\n\nclass TypeWrapper(Type):\n is_typewrapper = True\n subtypes = ['opaque_type']\n\n def __init__(self, opaque_type, context, **kwds):\n super(TypeWrapper, self).__init__(**kwds)\n self.opaque_type = opaque_type\n self.context = context\n\n def __repr__(self):\n return self.context.declare_type(self)\n\n def __deepcopy__(self, memo):\n return self\n\n\nclass NamedType(Type):\n name = None\n\n def __eq__(self, other):\n return isinstance(other, NamedType) and self.name == other.name\n\n def __repr__(self):\n if self.qualifiers:\n return '%s %s' % (self.name, ' '.join(self.qualifiers))\n return self.name\n\n\nclass BoolType(NamedType):\n is_bool = True\n name = 'bool'\n\n def __repr__(self):\n return 'int %s' % ' '.join(self.qualifiers)\n\n def to_llvm(self, context):\n return int8.to_llvm(context)\n\n\nclass NumericType(NamedType):\n \"\"\"\n Base class for numeric types.\n\n .. attribute:: name\n\n name of the type\n\n .. attribute:: itemsize\n\n sizeof(type)\n\n .. attribute:: rank\n\n ordering of numeric types\n \"\"\"\n is_numeric = True\n\n\nclass IntType(NumericType):\n is_int = True\n is_int_like = True\n name = 'int'\n signed = True\n rank = 4\n itemsize = 4\n kind = INT_KIND\n\n def to_llvm(self, context):\n if self.itemsize == 1:\n return lc.Type.int(8)\n elif self.itemsize == 2:\n return lc.Type.int(16)\n elif self.itemsize == 4:\n return lc.Type.int(32)\n else:\n assert self.itemsize == 8, self\n return lc.Type.int(64)\n\n\nclass FloatType(NumericType):\n is_float = True\n kind = FLOAT_KIND\n\n @property\n def comparison_type_list(self):\n return self.subtype_list + [self.itemsize]\n\n def to_llvm(self, context):\n if self.itemsize == 4:\n return lc.Type.float()\n elif self.itemsize == 8:\n return lc.Type.double()\n else:\n assert self.itemsize == 16\n return lc.Type.fp128()\n\n\nclass ComplexType(NumericType):\n is_complex = True\n subtypes = ['base_type']\n kind = COMPLEX_KIND\n\n\nclass Py_ssize_t_Type(IntType):\n is_py_ssize_t = True\n name = 'Py_ssize_t'\n rank = 9\n signed = True\n\n def __init__(self, **kwds):\n super(Py_ssize_t_Type, self).__init__(**kwds)\n self.itemsize = _plat_bits / 8\n\n\nclass NPyIntp(IntType):\n is_numpy_intp = True\n name = 'npy_intp'\n\n def __init__(self, **kwds):\n super(NPyIntp, self).__init__(**kwds)\n import numpy as np\n ctypes_array = np.empty(0).ctypes.strides\n self.itemsize = ctypes.sizeof(ctypes_array._type_)\n\n\nclass CharType(IntType):\n is_char = True\n name = 'char'\n rank = 1\n signed = True\n\n def to_llvm(self, context):\n return lc.Type.int(8)\n\n\nclass CStringType(Type):\n is_c_string = True\n\n def __repr__(self):\n return 'const char *'\n\n def to_llvm(self, context):\n return char.pointer().to_llvm(context)\n\n\nclass VoidType(NamedType):\n is_void = True\n name = 'void'\n\n def to_llvm(self, context):\n return lc.Type.void()\n\n\nclass ObjectType(Type):\n is_object = True\n\n def __repr__(self):\n return 'PyObject *'\n\n\nclass FunctionType(Type):\n subtypes = ['return_type', 'args']\n is_function = True\n is_vararg = False\n\n def to_llvm(self, context):\n return lc.Type.function(self.return_type.to_llvm(context), [\n arg_type.to_llvm(context) for arg_type in self.args], self.\n is_vararg)\n\n def __str__(self):\n args = map(str, self.args)\n if self.is_vararg:\n args.append('...')\n return '%s (*)(%s)' % (self.return_type, ', '.join(args))\n\n\nclass VectorType(Type):\n subtypes = ['element_type']\n is_vector = True\n vector_size = None\n\n def __init__(self, element_type, vector_size, **kwds):\n super(VectorType, self).__init__(**kwds)\n assert (element_type.is_int or element_type.is_float\n ) and element_type.itemsize in (4, 8), element_type\n self.element_type = element_type\n self.vector_size = vector_size\n\n def to_llvm(self, context):\n return lc.Type.vector(self.element_type.to_llvm(context), self.\n vector_size)\n\n @property\n def comparison_type_list(self):\n return self.subtype_list + [self.vector_size]\n\n def __str__(self):\n itemsize = self.element_type.itemsize\n if self.element_type.is_float:\n if itemsize == 4:\n return '__m128'\n else:\n return '__m128d'\n elif itemsize == 4:\n return '__m128i'\n else:\n raise NotImplementedError\n\n\n<assignment token>\n<code token>\n",
"<docstring token>\n<assignment token>\n<import token>\n<code token>\n<import token>\n<code token>\n<class token>\n<function token>\n<assignment token>\n\n\nclass Type(miniutils.ComparableObjectMixin):\n <docstring token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n\n def __init__(self, **kwds):\n vars(self).update(kwds)\n self.qualifiers = kwds.get('qualifiers', frozenset())\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def __hash__(self):\n h = hash(type(self))\n for subtype in self.comparison_type_list:\n h = h ^ hash(subtype)\n return h\n <function token>\n\n def to_llvm(self, context):\n \"\"\"Get a corresponding llvm type from this type\"\"\"\n return context.to_llvm(self)\n\n def __getattr__(self, attr):\n if attr.startswith('is_'):\n return False\n return getattr(type(self), attr)\n\n\nclass ArrayType(Type):\n is_array = True\n subtypes = ['dtype']\n\n def __init__(self, dtype, ndim, is_c_contig=False, is_f_contig=False,\n inner_contig=False, broadcasting=None):\n super(ArrayType, self).__init__()\n self.dtype = dtype\n self.ndim = ndim\n self.is_c_contig = is_c_contig\n self.is_f_contig = is_f_contig\n self.inner_contig = inner_contig or is_c_contig or is_f_contig\n self.broadcasting = broadcasting or (True,) * ndim\n\n @property\n def comparison_type_list(self):\n return [self.dtype, self.is_c_contig, self.is_f_contig, self.\n inner_contig]\n\n def pointer(self):\n raise Exception('You probably want a pointer type to the dtype')\n\n def to_llvm(self, context):\n return context.to_llvm(self)\n\n def __repr__(self):\n axes = [':'] * self.ndim\n if self.is_c_contig:\n axes[-1] = '::1'\n elif self.is_f_contig:\n axes[0] = '::1'\n return '%s[%s]' % (self.dtype, ', '.join(axes))\n\n\nclass PointerType(Type):\n is_pointer = True\n subtypes = ['base_type']\n\n def __init__(self, base_type, **kwds):\n super(PointerType, self).__init__(**kwds)\n self.base_type = base_type\n\n def __repr__(self):\n return '%s *%s' % (self.base_type, ' '.join(self.qualifiers))\n\n def to_llvm(self, context):\n return llvm.core.Type.pointer(self.base_type.to_llvm(context))\n\n\nclass CArrayType(Type):\n is_carray = True\n subtypes = ['base_type']\n\n def __init__(self, base_type, size, **kwds):\n super(CArrayType, self).__init__(**kwds)\n self.base_type = base_type\n self.size = size\n\n def __repr__(self):\n return '%s[%d]' % (self.base_type, self.length)\n\n def to_llvm(self, context):\n return llvm.core.Type.array(self.base_type.to_llvm(context), self.size)\n\n\nclass TypeWrapper(Type):\n is_typewrapper = True\n subtypes = ['opaque_type']\n\n def __init__(self, opaque_type, context, **kwds):\n super(TypeWrapper, self).__init__(**kwds)\n self.opaque_type = opaque_type\n self.context = context\n\n def __repr__(self):\n return self.context.declare_type(self)\n\n def __deepcopy__(self, memo):\n return self\n\n\nclass NamedType(Type):\n name = None\n\n def __eq__(self, other):\n return isinstance(other, NamedType) and self.name == other.name\n\n def __repr__(self):\n if self.qualifiers:\n return '%s %s' % (self.name, ' '.join(self.qualifiers))\n return self.name\n\n\nclass BoolType(NamedType):\n is_bool = True\n name = 'bool'\n\n def __repr__(self):\n return 'int %s' % ' '.join(self.qualifiers)\n\n def to_llvm(self, context):\n return int8.to_llvm(context)\n\n\nclass NumericType(NamedType):\n \"\"\"\n Base class for numeric types.\n\n .. attribute:: name\n\n name of the type\n\n .. attribute:: itemsize\n\n sizeof(type)\n\n .. attribute:: rank\n\n ordering of numeric types\n \"\"\"\n is_numeric = True\n\n\nclass IntType(NumericType):\n is_int = True\n is_int_like = True\n name = 'int'\n signed = True\n rank = 4\n itemsize = 4\n kind = INT_KIND\n\n def to_llvm(self, context):\n if self.itemsize == 1:\n return lc.Type.int(8)\n elif self.itemsize == 2:\n return lc.Type.int(16)\n elif self.itemsize == 4:\n return lc.Type.int(32)\n else:\n assert self.itemsize == 8, self\n return lc.Type.int(64)\n\n\nclass FloatType(NumericType):\n is_float = True\n kind = FLOAT_KIND\n\n @property\n def comparison_type_list(self):\n return self.subtype_list + [self.itemsize]\n\n def to_llvm(self, context):\n if self.itemsize == 4:\n return lc.Type.float()\n elif self.itemsize == 8:\n return lc.Type.double()\n else:\n assert self.itemsize == 16\n return lc.Type.fp128()\n\n\nclass ComplexType(NumericType):\n is_complex = True\n subtypes = ['base_type']\n kind = COMPLEX_KIND\n\n\nclass Py_ssize_t_Type(IntType):\n is_py_ssize_t = True\n name = 'Py_ssize_t'\n rank = 9\n signed = True\n\n def __init__(self, **kwds):\n super(Py_ssize_t_Type, self).__init__(**kwds)\n self.itemsize = _plat_bits / 8\n\n\nclass NPyIntp(IntType):\n is_numpy_intp = True\n name = 'npy_intp'\n\n def __init__(self, **kwds):\n super(NPyIntp, self).__init__(**kwds)\n import numpy as np\n ctypes_array = np.empty(0).ctypes.strides\n self.itemsize = ctypes.sizeof(ctypes_array._type_)\n\n\nclass CharType(IntType):\n is_char = True\n name = 'char'\n rank = 1\n signed = True\n\n def to_llvm(self, context):\n return lc.Type.int(8)\n\n\nclass CStringType(Type):\n is_c_string = True\n\n def __repr__(self):\n return 'const char *'\n\n def to_llvm(self, context):\n return char.pointer().to_llvm(context)\n\n\nclass VoidType(NamedType):\n is_void = True\n name = 'void'\n\n def to_llvm(self, context):\n return lc.Type.void()\n\n\nclass ObjectType(Type):\n is_object = True\n\n def __repr__(self):\n return 'PyObject *'\n\n\nclass FunctionType(Type):\n subtypes = ['return_type', 'args']\n is_function = True\n is_vararg = False\n\n def to_llvm(self, context):\n return lc.Type.function(self.return_type.to_llvm(context), [\n arg_type.to_llvm(context) for arg_type in self.args], self.\n is_vararg)\n\n def __str__(self):\n args = map(str, self.args)\n if self.is_vararg:\n args.append('...')\n return '%s (*)(%s)' % (self.return_type, ', '.join(args))\n\n\nclass VectorType(Type):\n subtypes = ['element_type']\n is_vector = True\n vector_size = None\n\n def __init__(self, element_type, vector_size, **kwds):\n super(VectorType, self).__init__(**kwds)\n assert (element_type.is_int or element_type.is_float\n ) and element_type.itemsize in (4, 8), element_type\n self.element_type = element_type\n self.vector_size = vector_size\n\n def to_llvm(self, context):\n return lc.Type.vector(self.element_type.to_llvm(context), self.\n vector_size)\n\n @property\n def comparison_type_list(self):\n return self.subtype_list + [self.vector_size]\n\n def __str__(self):\n itemsize = self.element_type.itemsize\n if self.element_type.is_float:\n if itemsize == 4:\n return '__m128'\n else:\n return '__m128d'\n elif itemsize == 4:\n return '__m128i'\n else:\n raise NotImplementedError\n\n\n<assignment token>\n<code token>\n",
"<docstring token>\n<assignment token>\n<import token>\n<code token>\n<import token>\n<code token>\n<class token>\n<function token>\n<assignment token>\n\n\nclass Type(miniutils.ComparableObjectMixin):\n <docstring token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n\n def __init__(self, **kwds):\n vars(self).update(kwds)\n self.qualifiers = kwds.get('qualifiers', frozenset())\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def to_llvm(self, context):\n \"\"\"Get a corresponding llvm type from this type\"\"\"\n return context.to_llvm(self)\n\n def __getattr__(self, attr):\n if attr.startswith('is_'):\n return False\n return getattr(type(self), attr)\n\n\nclass ArrayType(Type):\n is_array = True\n subtypes = ['dtype']\n\n def __init__(self, dtype, ndim, is_c_contig=False, is_f_contig=False,\n inner_contig=False, broadcasting=None):\n super(ArrayType, self).__init__()\n self.dtype = dtype\n self.ndim = ndim\n self.is_c_contig = is_c_contig\n self.is_f_contig = is_f_contig\n self.inner_contig = inner_contig or is_c_contig or is_f_contig\n self.broadcasting = broadcasting or (True,) * ndim\n\n @property\n def comparison_type_list(self):\n return [self.dtype, self.is_c_contig, self.is_f_contig, self.\n inner_contig]\n\n def pointer(self):\n raise Exception('You probably want a pointer type to the dtype')\n\n def to_llvm(self, context):\n return context.to_llvm(self)\n\n def __repr__(self):\n axes = [':'] * self.ndim\n if self.is_c_contig:\n axes[-1] = '::1'\n elif self.is_f_contig:\n axes[0] = '::1'\n return '%s[%s]' % (self.dtype, ', '.join(axes))\n\n\nclass PointerType(Type):\n is_pointer = True\n subtypes = ['base_type']\n\n def __init__(self, base_type, **kwds):\n super(PointerType, self).__init__(**kwds)\n self.base_type = base_type\n\n def __repr__(self):\n return '%s *%s' % (self.base_type, ' '.join(self.qualifiers))\n\n def to_llvm(self, context):\n return llvm.core.Type.pointer(self.base_type.to_llvm(context))\n\n\nclass CArrayType(Type):\n is_carray = True\n subtypes = ['base_type']\n\n def __init__(self, base_type, size, **kwds):\n super(CArrayType, self).__init__(**kwds)\n self.base_type = base_type\n self.size = size\n\n def __repr__(self):\n return '%s[%d]' % (self.base_type, self.length)\n\n def to_llvm(self, context):\n return llvm.core.Type.array(self.base_type.to_llvm(context), self.size)\n\n\nclass TypeWrapper(Type):\n is_typewrapper = True\n subtypes = ['opaque_type']\n\n def __init__(self, opaque_type, context, **kwds):\n super(TypeWrapper, self).__init__(**kwds)\n self.opaque_type = opaque_type\n self.context = context\n\n def __repr__(self):\n return self.context.declare_type(self)\n\n def __deepcopy__(self, memo):\n return self\n\n\nclass NamedType(Type):\n name = None\n\n def __eq__(self, other):\n return isinstance(other, NamedType) and self.name == other.name\n\n def __repr__(self):\n if self.qualifiers:\n return '%s %s' % (self.name, ' '.join(self.qualifiers))\n return self.name\n\n\nclass BoolType(NamedType):\n is_bool = True\n name = 'bool'\n\n def __repr__(self):\n return 'int %s' % ' '.join(self.qualifiers)\n\n def to_llvm(self, context):\n return int8.to_llvm(context)\n\n\nclass NumericType(NamedType):\n \"\"\"\n Base class for numeric types.\n\n .. attribute:: name\n\n name of the type\n\n .. attribute:: itemsize\n\n sizeof(type)\n\n .. attribute:: rank\n\n ordering of numeric types\n \"\"\"\n is_numeric = True\n\n\nclass IntType(NumericType):\n is_int = True\n is_int_like = True\n name = 'int'\n signed = True\n rank = 4\n itemsize = 4\n kind = INT_KIND\n\n def to_llvm(self, context):\n if self.itemsize == 1:\n return lc.Type.int(8)\n elif self.itemsize == 2:\n return lc.Type.int(16)\n elif self.itemsize == 4:\n return lc.Type.int(32)\n else:\n assert self.itemsize == 8, self\n return lc.Type.int(64)\n\n\nclass FloatType(NumericType):\n is_float = True\n kind = FLOAT_KIND\n\n @property\n def comparison_type_list(self):\n return self.subtype_list + [self.itemsize]\n\n def to_llvm(self, context):\n if self.itemsize == 4:\n return lc.Type.float()\n elif self.itemsize == 8:\n return lc.Type.double()\n else:\n assert self.itemsize == 16\n return lc.Type.fp128()\n\n\nclass ComplexType(NumericType):\n is_complex = True\n subtypes = ['base_type']\n kind = COMPLEX_KIND\n\n\nclass Py_ssize_t_Type(IntType):\n is_py_ssize_t = True\n name = 'Py_ssize_t'\n rank = 9\n signed = True\n\n def __init__(self, **kwds):\n super(Py_ssize_t_Type, self).__init__(**kwds)\n self.itemsize = _plat_bits / 8\n\n\nclass NPyIntp(IntType):\n is_numpy_intp = True\n name = 'npy_intp'\n\n def __init__(self, **kwds):\n super(NPyIntp, self).__init__(**kwds)\n import numpy as np\n ctypes_array = np.empty(0).ctypes.strides\n self.itemsize = ctypes.sizeof(ctypes_array._type_)\n\n\nclass CharType(IntType):\n is_char = True\n name = 'char'\n rank = 1\n signed = True\n\n def to_llvm(self, context):\n return lc.Type.int(8)\n\n\nclass CStringType(Type):\n is_c_string = True\n\n def __repr__(self):\n return 'const char *'\n\n def to_llvm(self, context):\n return char.pointer().to_llvm(context)\n\n\nclass VoidType(NamedType):\n is_void = True\n name = 'void'\n\n def to_llvm(self, context):\n return lc.Type.void()\n\n\nclass ObjectType(Type):\n is_object = True\n\n def __repr__(self):\n return 'PyObject *'\n\n\nclass FunctionType(Type):\n subtypes = ['return_type', 'args']\n is_function = True\n is_vararg = False\n\n def to_llvm(self, context):\n return lc.Type.function(self.return_type.to_llvm(context), [\n arg_type.to_llvm(context) for arg_type in self.args], self.\n is_vararg)\n\n def __str__(self):\n args = map(str, self.args)\n if self.is_vararg:\n args.append('...')\n return '%s (*)(%s)' % (self.return_type, ', '.join(args))\n\n\nclass VectorType(Type):\n subtypes = ['element_type']\n is_vector = True\n vector_size = None\n\n def __init__(self, element_type, vector_size, **kwds):\n super(VectorType, self).__init__(**kwds)\n assert (element_type.is_int or element_type.is_float\n ) and element_type.itemsize in (4, 8), element_type\n self.element_type = element_type\n self.vector_size = vector_size\n\n def to_llvm(self, context):\n return lc.Type.vector(self.element_type.to_llvm(context), self.\n vector_size)\n\n @property\n def comparison_type_list(self):\n return self.subtype_list + [self.vector_size]\n\n def __str__(self):\n itemsize = self.element_type.itemsize\n if self.element_type.is_float:\n if itemsize == 4:\n return '__m128'\n else:\n return '__m128d'\n elif itemsize == 4:\n return '__m128i'\n else:\n raise NotImplementedError\n\n\n<assignment token>\n<code token>\n",
"<docstring token>\n<assignment token>\n<import token>\n<code token>\n<import token>\n<code token>\n<class token>\n<function token>\n<assignment token>\n\n\nclass Type(miniutils.ComparableObjectMixin):\n <docstring token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n\n def __init__(self, **kwds):\n vars(self).update(kwds)\n self.qualifiers = kwds.get('qualifiers', frozenset())\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def to_llvm(self, context):\n \"\"\"Get a corresponding llvm type from this type\"\"\"\n return context.to_llvm(self)\n <function token>\n\n\nclass ArrayType(Type):\n is_array = True\n subtypes = ['dtype']\n\n def __init__(self, dtype, ndim, is_c_contig=False, is_f_contig=False,\n inner_contig=False, broadcasting=None):\n super(ArrayType, self).__init__()\n self.dtype = dtype\n self.ndim = ndim\n self.is_c_contig = is_c_contig\n self.is_f_contig = is_f_contig\n self.inner_contig = inner_contig or is_c_contig or is_f_contig\n self.broadcasting = broadcasting or (True,) * ndim\n\n @property\n def comparison_type_list(self):\n return [self.dtype, self.is_c_contig, self.is_f_contig, self.\n inner_contig]\n\n def pointer(self):\n raise Exception('You probably want a pointer type to the dtype')\n\n def to_llvm(self, context):\n return context.to_llvm(self)\n\n def __repr__(self):\n axes = [':'] * self.ndim\n if self.is_c_contig:\n axes[-1] = '::1'\n elif self.is_f_contig:\n axes[0] = '::1'\n return '%s[%s]' % (self.dtype, ', '.join(axes))\n\n\nclass PointerType(Type):\n is_pointer = True\n subtypes = ['base_type']\n\n def __init__(self, base_type, **kwds):\n super(PointerType, self).__init__(**kwds)\n self.base_type = base_type\n\n def __repr__(self):\n return '%s *%s' % (self.base_type, ' '.join(self.qualifiers))\n\n def to_llvm(self, context):\n return llvm.core.Type.pointer(self.base_type.to_llvm(context))\n\n\nclass CArrayType(Type):\n is_carray = True\n subtypes = ['base_type']\n\n def __init__(self, base_type, size, **kwds):\n super(CArrayType, self).__init__(**kwds)\n self.base_type = base_type\n self.size = size\n\n def __repr__(self):\n return '%s[%d]' % (self.base_type, self.length)\n\n def to_llvm(self, context):\n return llvm.core.Type.array(self.base_type.to_llvm(context), self.size)\n\n\nclass TypeWrapper(Type):\n is_typewrapper = True\n subtypes = ['opaque_type']\n\n def __init__(self, opaque_type, context, **kwds):\n super(TypeWrapper, self).__init__(**kwds)\n self.opaque_type = opaque_type\n self.context = context\n\n def __repr__(self):\n return self.context.declare_type(self)\n\n def __deepcopy__(self, memo):\n return self\n\n\nclass NamedType(Type):\n name = None\n\n def __eq__(self, other):\n return isinstance(other, NamedType) and self.name == other.name\n\n def __repr__(self):\n if self.qualifiers:\n return '%s %s' % (self.name, ' '.join(self.qualifiers))\n return self.name\n\n\nclass BoolType(NamedType):\n is_bool = True\n name = 'bool'\n\n def __repr__(self):\n return 'int %s' % ' '.join(self.qualifiers)\n\n def to_llvm(self, context):\n return int8.to_llvm(context)\n\n\nclass NumericType(NamedType):\n \"\"\"\n Base class for numeric types.\n\n .. attribute:: name\n\n name of the type\n\n .. attribute:: itemsize\n\n sizeof(type)\n\n .. attribute:: rank\n\n ordering of numeric types\n \"\"\"\n is_numeric = True\n\n\nclass IntType(NumericType):\n is_int = True\n is_int_like = True\n name = 'int'\n signed = True\n rank = 4\n itemsize = 4\n kind = INT_KIND\n\n def to_llvm(self, context):\n if self.itemsize == 1:\n return lc.Type.int(8)\n elif self.itemsize == 2:\n return lc.Type.int(16)\n elif self.itemsize == 4:\n return lc.Type.int(32)\n else:\n assert self.itemsize == 8, self\n return lc.Type.int(64)\n\n\nclass FloatType(NumericType):\n is_float = True\n kind = FLOAT_KIND\n\n @property\n def comparison_type_list(self):\n return self.subtype_list + [self.itemsize]\n\n def to_llvm(self, context):\n if self.itemsize == 4:\n return lc.Type.float()\n elif self.itemsize == 8:\n return lc.Type.double()\n else:\n assert self.itemsize == 16\n return lc.Type.fp128()\n\n\nclass ComplexType(NumericType):\n is_complex = True\n subtypes = ['base_type']\n kind = COMPLEX_KIND\n\n\nclass Py_ssize_t_Type(IntType):\n is_py_ssize_t = True\n name = 'Py_ssize_t'\n rank = 9\n signed = True\n\n def __init__(self, **kwds):\n super(Py_ssize_t_Type, self).__init__(**kwds)\n self.itemsize = _plat_bits / 8\n\n\nclass NPyIntp(IntType):\n is_numpy_intp = True\n name = 'npy_intp'\n\n def __init__(self, **kwds):\n super(NPyIntp, self).__init__(**kwds)\n import numpy as np\n ctypes_array = np.empty(0).ctypes.strides\n self.itemsize = ctypes.sizeof(ctypes_array._type_)\n\n\nclass CharType(IntType):\n is_char = True\n name = 'char'\n rank = 1\n signed = True\n\n def to_llvm(self, context):\n return lc.Type.int(8)\n\n\nclass CStringType(Type):\n is_c_string = True\n\n def __repr__(self):\n return 'const char *'\n\n def to_llvm(self, context):\n return char.pointer().to_llvm(context)\n\n\nclass VoidType(NamedType):\n is_void = True\n name = 'void'\n\n def to_llvm(self, context):\n return lc.Type.void()\n\n\nclass ObjectType(Type):\n is_object = True\n\n def __repr__(self):\n return 'PyObject *'\n\n\nclass FunctionType(Type):\n subtypes = ['return_type', 'args']\n is_function = True\n is_vararg = False\n\n def to_llvm(self, context):\n return lc.Type.function(self.return_type.to_llvm(context), [\n arg_type.to_llvm(context) for arg_type in self.args], self.\n is_vararg)\n\n def __str__(self):\n args = map(str, self.args)\n if self.is_vararg:\n args.append('...')\n return '%s (*)(%s)' % (self.return_type, ', '.join(args))\n\n\nclass VectorType(Type):\n subtypes = ['element_type']\n is_vector = True\n vector_size = None\n\n def __init__(self, element_type, vector_size, **kwds):\n super(VectorType, self).__init__(**kwds)\n assert (element_type.is_int or element_type.is_float\n ) and element_type.itemsize in (4, 8), element_type\n self.element_type = element_type\n self.vector_size = vector_size\n\n def to_llvm(self, context):\n return lc.Type.vector(self.element_type.to_llvm(context), self.\n vector_size)\n\n @property\n def comparison_type_list(self):\n return self.subtype_list + [self.vector_size]\n\n def __str__(self):\n itemsize = self.element_type.itemsize\n if self.element_type.is_float:\n if itemsize == 4:\n return '__m128'\n else:\n return '__m128d'\n elif itemsize == 4:\n return '__m128i'\n else:\n raise NotImplementedError\n\n\n<assignment token>\n<code token>\n",
"<docstring token>\n<assignment token>\n<import token>\n<code token>\n<import token>\n<code token>\n<class token>\n<function token>\n<assignment token>\n\n\nclass Type(miniutils.ComparableObjectMixin):\n <docstring token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n\n def __init__(self, **kwds):\n vars(self).update(kwds)\n self.qualifiers = kwds.get('qualifiers', frozenset())\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n\nclass ArrayType(Type):\n is_array = True\n subtypes = ['dtype']\n\n def __init__(self, dtype, ndim, is_c_contig=False, is_f_contig=False,\n inner_contig=False, broadcasting=None):\n super(ArrayType, self).__init__()\n self.dtype = dtype\n self.ndim = ndim\n self.is_c_contig = is_c_contig\n self.is_f_contig = is_f_contig\n self.inner_contig = inner_contig or is_c_contig or is_f_contig\n self.broadcasting = broadcasting or (True,) * ndim\n\n @property\n def comparison_type_list(self):\n return [self.dtype, self.is_c_contig, self.is_f_contig, self.\n inner_contig]\n\n def pointer(self):\n raise Exception('You probably want a pointer type to the dtype')\n\n def to_llvm(self, context):\n return context.to_llvm(self)\n\n def __repr__(self):\n axes = [':'] * self.ndim\n if self.is_c_contig:\n axes[-1] = '::1'\n elif self.is_f_contig:\n axes[0] = '::1'\n return '%s[%s]' % (self.dtype, ', '.join(axes))\n\n\nclass PointerType(Type):\n is_pointer = True\n subtypes = ['base_type']\n\n def __init__(self, base_type, **kwds):\n super(PointerType, self).__init__(**kwds)\n self.base_type = base_type\n\n def __repr__(self):\n return '%s *%s' % (self.base_type, ' '.join(self.qualifiers))\n\n def to_llvm(self, context):\n return llvm.core.Type.pointer(self.base_type.to_llvm(context))\n\n\nclass CArrayType(Type):\n is_carray = True\n subtypes = ['base_type']\n\n def __init__(self, base_type, size, **kwds):\n super(CArrayType, self).__init__(**kwds)\n self.base_type = base_type\n self.size = size\n\n def __repr__(self):\n return '%s[%d]' % (self.base_type, self.length)\n\n def to_llvm(self, context):\n return llvm.core.Type.array(self.base_type.to_llvm(context), self.size)\n\n\nclass TypeWrapper(Type):\n is_typewrapper = True\n subtypes = ['opaque_type']\n\n def __init__(self, opaque_type, context, **kwds):\n super(TypeWrapper, self).__init__(**kwds)\n self.opaque_type = opaque_type\n self.context = context\n\n def __repr__(self):\n return self.context.declare_type(self)\n\n def __deepcopy__(self, memo):\n return self\n\n\nclass NamedType(Type):\n name = None\n\n def __eq__(self, other):\n return isinstance(other, NamedType) and self.name == other.name\n\n def __repr__(self):\n if self.qualifiers:\n return '%s %s' % (self.name, ' '.join(self.qualifiers))\n return self.name\n\n\nclass BoolType(NamedType):\n is_bool = True\n name = 'bool'\n\n def __repr__(self):\n return 'int %s' % ' '.join(self.qualifiers)\n\n def to_llvm(self, context):\n return int8.to_llvm(context)\n\n\nclass NumericType(NamedType):\n \"\"\"\n Base class for numeric types.\n\n .. attribute:: name\n\n name of the type\n\n .. attribute:: itemsize\n\n sizeof(type)\n\n .. attribute:: rank\n\n ordering of numeric types\n \"\"\"\n is_numeric = True\n\n\nclass IntType(NumericType):\n is_int = True\n is_int_like = True\n name = 'int'\n signed = True\n rank = 4\n itemsize = 4\n kind = INT_KIND\n\n def to_llvm(self, context):\n if self.itemsize == 1:\n return lc.Type.int(8)\n elif self.itemsize == 2:\n return lc.Type.int(16)\n elif self.itemsize == 4:\n return lc.Type.int(32)\n else:\n assert self.itemsize == 8, self\n return lc.Type.int(64)\n\n\nclass FloatType(NumericType):\n is_float = True\n kind = FLOAT_KIND\n\n @property\n def comparison_type_list(self):\n return self.subtype_list + [self.itemsize]\n\n def to_llvm(self, context):\n if self.itemsize == 4:\n return lc.Type.float()\n elif self.itemsize == 8:\n return lc.Type.double()\n else:\n assert self.itemsize == 16\n return lc.Type.fp128()\n\n\nclass ComplexType(NumericType):\n is_complex = True\n subtypes = ['base_type']\n kind = COMPLEX_KIND\n\n\nclass Py_ssize_t_Type(IntType):\n is_py_ssize_t = True\n name = 'Py_ssize_t'\n rank = 9\n signed = True\n\n def __init__(self, **kwds):\n super(Py_ssize_t_Type, self).__init__(**kwds)\n self.itemsize = _plat_bits / 8\n\n\nclass NPyIntp(IntType):\n is_numpy_intp = True\n name = 'npy_intp'\n\n def __init__(self, **kwds):\n super(NPyIntp, self).__init__(**kwds)\n import numpy as np\n ctypes_array = np.empty(0).ctypes.strides\n self.itemsize = ctypes.sizeof(ctypes_array._type_)\n\n\nclass CharType(IntType):\n is_char = True\n name = 'char'\n rank = 1\n signed = True\n\n def to_llvm(self, context):\n return lc.Type.int(8)\n\n\nclass CStringType(Type):\n is_c_string = True\n\n def __repr__(self):\n return 'const char *'\n\n def to_llvm(self, context):\n return char.pointer().to_llvm(context)\n\n\nclass VoidType(NamedType):\n is_void = True\n name = 'void'\n\n def to_llvm(self, context):\n return lc.Type.void()\n\n\nclass ObjectType(Type):\n is_object = True\n\n def __repr__(self):\n return 'PyObject *'\n\n\nclass FunctionType(Type):\n subtypes = ['return_type', 'args']\n is_function = True\n is_vararg = False\n\n def to_llvm(self, context):\n return lc.Type.function(self.return_type.to_llvm(context), [\n arg_type.to_llvm(context) for arg_type in self.args], self.\n is_vararg)\n\n def __str__(self):\n args = map(str, self.args)\n if self.is_vararg:\n args.append('...')\n return '%s (*)(%s)' % (self.return_type, ', '.join(args))\n\n\nclass VectorType(Type):\n subtypes = ['element_type']\n is_vector = True\n vector_size = None\n\n def __init__(self, element_type, vector_size, **kwds):\n super(VectorType, self).__init__(**kwds)\n assert (element_type.is_int or element_type.is_float\n ) and element_type.itemsize in (4, 8), element_type\n self.element_type = element_type\n self.vector_size = vector_size\n\n def to_llvm(self, context):\n return lc.Type.vector(self.element_type.to_llvm(context), self.\n vector_size)\n\n @property\n def comparison_type_list(self):\n return self.subtype_list + [self.vector_size]\n\n def __str__(self):\n itemsize = self.element_type.itemsize\n if self.element_type.is_float:\n if itemsize == 4:\n return '__m128'\n else:\n return '__m128d'\n elif itemsize == 4:\n return '__m128i'\n else:\n raise NotImplementedError\n\n\n<assignment token>\n<code token>\n",
"<docstring token>\n<assignment token>\n<import token>\n<code token>\n<import token>\n<code token>\n<class token>\n<function token>\n<assignment token>\n\n\nclass Type(miniutils.ComparableObjectMixin):\n <docstring token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n\nclass ArrayType(Type):\n is_array = True\n subtypes = ['dtype']\n\n def __init__(self, dtype, ndim, is_c_contig=False, is_f_contig=False,\n inner_contig=False, broadcasting=None):\n super(ArrayType, self).__init__()\n self.dtype = dtype\n self.ndim = ndim\n self.is_c_contig = is_c_contig\n self.is_f_contig = is_f_contig\n self.inner_contig = inner_contig or is_c_contig or is_f_contig\n self.broadcasting = broadcasting or (True,) * ndim\n\n @property\n def comparison_type_list(self):\n return [self.dtype, self.is_c_contig, self.is_f_contig, self.\n inner_contig]\n\n def pointer(self):\n raise Exception('You probably want a pointer type to the dtype')\n\n def to_llvm(self, context):\n return context.to_llvm(self)\n\n def __repr__(self):\n axes = [':'] * self.ndim\n if self.is_c_contig:\n axes[-1] = '::1'\n elif self.is_f_contig:\n axes[0] = '::1'\n return '%s[%s]' % (self.dtype, ', '.join(axes))\n\n\nclass PointerType(Type):\n is_pointer = True\n subtypes = ['base_type']\n\n def __init__(self, base_type, **kwds):\n super(PointerType, self).__init__(**kwds)\n self.base_type = base_type\n\n def __repr__(self):\n return '%s *%s' % (self.base_type, ' '.join(self.qualifiers))\n\n def to_llvm(self, context):\n return llvm.core.Type.pointer(self.base_type.to_llvm(context))\n\n\nclass CArrayType(Type):\n is_carray = True\n subtypes = ['base_type']\n\n def __init__(self, base_type, size, **kwds):\n super(CArrayType, self).__init__(**kwds)\n self.base_type = base_type\n self.size = size\n\n def __repr__(self):\n return '%s[%d]' % (self.base_type, self.length)\n\n def to_llvm(self, context):\n return llvm.core.Type.array(self.base_type.to_llvm(context), self.size)\n\n\nclass TypeWrapper(Type):\n is_typewrapper = True\n subtypes = ['opaque_type']\n\n def __init__(self, opaque_type, context, **kwds):\n super(TypeWrapper, self).__init__(**kwds)\n self.opaque_type = opaque_type\n self.context = context\n\n def __repr__(self):\n return self.context.declare_type(self)\n\n def __deepcopy__(self, memo):\n return self\n\n\nclass NamedType(Type):\n name = None\n\n def __eq__(self, other):\n return isinstance(other, NamedType) and self.name == other.name\n\n def __repr__(self):\n if self.qualifiers:\n return '%s %s' % (self.name, ' '.join(self.qualifiers))\n return self.name\n\n\nclass BoolType(NamedType):\n is_bool = True\n name = 'bool'\n\n def __repr__(self):\n return 'int %s' % ' '.join(self.qualifiers)\n\n def to_llvm(self, context):\n return int8.to_llvm(context)\n\n\nclass NumericType(NamedType):\n \"\"\"\n Base class for numeric types.\n\n .. attribute:: name\n\n name of the type\n\n .. attribute:: itemsize\n\n sizeof(type)\n\n .. attribute:: rank\n\n ordering of numeric types\n \"\"\"\n is_numeric = True\n\n\nclass IntType(NumericType):\n is_int = True\n is_int_like = True\n name = 'int'\n signed = True\n rank = 4\n itemsize = 4\n kind = INT_KIND\n\n def to_llvm(self, context):\n if self.itemsize == 1:\n return lc.Type.int(8)\n elif self.itemsize == 2:\n return lc.Type.int(16)\n elif self.itemsize == 4:\n return lc.Type.int(32)\n else:\n assert self.itemsize == 8, self\n return lc.Type.int(64)\n\n\nclass FloatType(NumericType):\n is_float = True\n kind = FLOAT_KIND\n\n @property\n def comparison_type_list(self):\n return self.subtype_list + [self.itemsize]\n\n def to_llvm(self, context):\n if self.itemsize == 4:\n return lc.Type.float()\n elif self.itemsize == 8:\n return lc.Type.double()\n else:\n assert self.itemsize == 16\n return lc.Type.fp128()\n\n\nclass ComplexType(NumericType):\n is_complex = True\n subtypes = ['base_type']\n kind = COMPLEX_KIND\n\n\nclass Py_ssize_t_Type(IntType):\n is_py_ssize_t = True\n name = 'Py_ssize_t'\n rank = 9\n signed = True\n\n def __init__(self, **kwds):\n super(Py_ssize_t_Type, self).__init__(**kwds)\n self.itemsize = _plat_bits / 8\n\n\nclass NPyIntp(IntType):\n is_numpy_intp = True\n name = 'npy_intp'\n\n def __init__(self, **kwds):\n super(NPyIntp, self).__init__(**kwds)\n import numpy as np\n ctypes_array = np.empty(0).ctypes.strides\n self.itemsize = ctypes.sizeof(ctypes_array._type_)\n\n\nclass CharType(IntType):\n is_char = True\n name = 'char'\n rank = 1\n signed = True\n\n def to_llvm(self, context):\n return lc.Type.int(8)\n\n\nclass CStringType(Type):\n is_c_string = True\n\n def __repr__(self):\n return 'const char *'\n\n def to_llvm(self, context):\n return char.pointer().to_llvm(context)\n\n\nclass VoidType(NamedType):\n is_void = True\n name = 'void'\n\n def to_llvm(self, context):\n return lc.Type.void()\n\n\nclass ObjectType(Type):\n is_object = True\n\n def __repr__(self):\n return 'PyObject *'\n\n\nclass FunctionType(Type):\n subtypes = ['return_type', 'args']\n is_function = True\n is_vararg = False\n\n def to_llvm(self, context):\n return lc.Type.function(self.return_type.to_llvm(context), [\n arg_type.to_llvm(context) for arg_type in self.args], self.\n is_vararg)\n\n def __str__(self):\n args = map(str, self.args)\n if self.is_vararg:\n args.append('...')\n return '%s (*)(%s)' % (self.return_type, ', '.join(args))\n\n\nclass VectorType(Type):\n subtypes = ['element_type']\n is_vector = True\n vector_size = None\n\n def __init__(self, element_type, vector_size, **kwds):\n super(VectorType, self).__init__(**kwds)\n assert (element_type.is_int or element_type.is_float\n ) and element_type.itemsize in (4, 8), element_type\n self.element_type = element_type\n self.vector_size = vector_size\n\n def to_llvm(self, context):\n return lc.Type.vector(self.element_type.to_llvm(context), self.\n vector_size)\n\n @property\n def comparison_type_list(self):\n return self.subtype_list + [self.vector_size]\n\n def __str__(self):\n itemsize = self.element_type.itemsize\n if self.element_type.is_float:\n if itemsize == 4:\n return '__m128'\n else:\n return '__m128d'\n elif itemsize == 4:\n return '__m128i'\n else:\n raise NotImplementedError\n\n\n<assignment token>\n<code token>\n",
"<docstring token>\n<assignment token>\n<import token>\n<code token>\n<import token>\n<code token>\n<class token>\n<function token>\n<assignment token>\n<class token>\n\n\nclass ArrayType(Type):\n is_array = True\n subtypes = ['dtype']\n\n def __init__(self, dtype, ndim, is_c_contig=False, is_f_contig=False,\n inner_contig=False, broadcasting=None):\n super(ArrayType, self).__init__()\n self.dtype = dtype\n self.ndim = ndim\n self.is_c_contig = is_c_contig\n self.is_f_contig = is_f_contig\n self.inner_contig = inner_contig or is_c_contig or is_f_contig\n self.broadcasting = broadcasting or (True,) * ndim\n\n @property\n def comparison_type_list(self):\n return [self.dtype, self.is_c_contig, self.is_f_contig, self.\n inner_contig]\n\n def pointer(self):\n raise Exception('You probably want a pointer type to the dtype')\n\n def to_llvm(self, context):\n return context.to_llvm(self)\n\n def __repr__(self):\n axes = [':'] * self.ndim\n if self.is_c_contig:\n axes[-1] = '::1'\n elif self.is_f_contig:\n axes[0] = '::1'\n return '%s[%s]' % (self.dtype, ', '.join(axes))\n\n\nclass PointerType(Type):\n is_pointer = True\n subtypes = ['base_type']\n\n def __init__(self, base_type, **kwds):\n super(PointerType, self).__init__(**kwds)\n self.base_type = base_type\n\n def __repr__(self):\n return '%s *%s' % (self.base_type, ' '.join(self.qualifiers))\n\n def to_llvm(self, context):\n return llvm.core.Type.pointer(self.base_type.to_llvm(context))\n\n\nclass CArrayType(Type):\n is_carray = True\n subtypes = ['base_type']\n\n def __init__(self, base_type, size, **kwds):\n super(CArrayType, self).__init__(**kwds)\n self.base_type = base_type\n self.size = size\n\n def __repr__(self):\n return '%s[%d]' % (self.base_type, self.length)\n\n def to_llvm(self, context):\n return llvm.core.Type.array(self.base_type.to_llvm(context), self.size)\n\n\nclass TypeWrapper(Type):\n is_typewrapper = True\n subtypes = ['opaque_type']\n\n def __init__(self, opaque_type, context, **kwds):\n super(TypeWrapper, self).__init__(**kwds)\n self.opaque_type = opaque_type\n self.context = context\n\n def __repr__(self):\n return self.context.declare_type(self)\n\n def __deepcopy__(self, memo):\n return self\n\n\nclass NamedType(Type):\n name = None\n\n def __eq__(self, other):\n return isinstance(other, NamedType) and self.name == other.name\n\n def __repr__(self):\n if self.qualifiers:\n return '%s %s' % (self.name, ' '.join(self.qualifiers))\n return self.name\n\n\nclass BoolType(NamedType):\n is_bool = True\n name = 'bool'\n\n def __repr__(self):\n return 'int %s' % ' '.join(self.qualifiers)\n\n def to_llvm(self, context):\n return int8.to_llvm(context)\n\n\nclass NumericType(NamedType):\n \"\"\"\n Base class for numeric types.\n\n .. attribute:: name\n\n name of the type\n\n .. attribute:: itemsize\n\n sizeof(type)\n\n .. attribute:: rank\n\n ordering of numeric types\n \"\"\"\n is_numeric = True\n\n\nclass IntType(NumericType):\n is_int = True\n is_int_like = True\n name = 'int'\n signed = True\n rank = 4\n itemsize = 4\n kind = INT_KIND\n\n def to_llvm(self, context):\n if self.itemsize == 1:\n return lc.Type.int(8)\n elif self.itemsize == 2:\n return lc.Type.int(16)\n elif self.itemsize == 4:\n return lc.Type.int(32)\n else:\n assert self.itemsize == 8, self\n return lc.Type.int(64)\n\n\nclass FloatType(NumericType):\n is_float = True\n kind = FLOAT_KIND\n\n @property\n def comparison_type_list(self):\n return self.subtype_list + [self.itemsize]\n\n def to_llvm(self, context):\n if self.itemsize == 4:\n return lc.Type.float()\n elif self.itemsize == 8:\n return lc.Type.double()\n else:\n assert self.itemsize == 16\n return lc.Type.fp128()\n\n\nclass ComplexType(NumericType):\n is_complex = True\n subtypes = ['base_type']\n kind = COMPLEX_KIND\n\n\nclass Py_ssize_t_Type(IntType):\n is_py_ssize_t = True\n name = 'Py_ssize_t'\n rank = 9\n signed = True\n\n def __init__(self, **kwds):\n super(Py_ssize_t_Type, self).__init__(**kwds)\n self.itemsize = _plat_bits / 8\n\n\nclass NPyIntp(IntType):\n is_numpy_intp = True\n name = 'npy_intp'\n\n def __init__(self, **kwds):\n super(NPyIntp, self).__init__(**kwds)\n import numpy as np\n ctypes_array = np.empty(0).ctypes.strides\n self.itemsize = ctypes.sizeof(ctypes_array._type_)\n\n\nclass CharType(IntType):\n is_char = True\n name = 'char'\n rank = 1\n signed = True\n\n def to_llvm(self, context):\n return lc.Type.int(8)\n\n\nclass CStringType(Type):\n is_c_string = True\n\n def __repr__(self):\n return 'const char *'\n\n def to_llvm(self, context):\n return char.pointer().to_llvm(context)\n\n\nclass VoidType(NamedType):\n is_void = True\n name = 'void'\n\n def to_llvm(self, context):\n return lc.Type.void()\n\n\nclass ObjectType(Type):\n is_object = True\n\n def __repr__(self):\n return 'PyObject *'\n\n\nclass FunctionType(Type):\n subtypes = ['return_type', 'args']\n is_function = True\n is_vararg = False\n\n def to_llvm(self, context):\n return lc.Type.function(self.return_type.to_llvm(context), [\n arg_type.to_llvm(context) for arg_type in self.args], self.\n is_vararg)\n\n def __str__(self):\n args = map(str, self.args)\n if self.is_vararg:\n args.append('...')\n return '%s (*)(%s)' % (self.return_type, ', '.join(args))\n\n\nclass VectorType(Type):\n subtypes = ['element_type']\n is_vector = True\n vector_size = None\n\n def __init__(self, element_type, vector_size, **kwds):\n super(VectorType, self).__init__(**kwds)\n assert (element_type.is_int or element_type.is_float\n ) and element_type.itemsize in (4, 8), element_type\n self.element_type = element_type\n self.vector_size = vector_size\n\n def to_llvm(self, context):\n return lc.Type.vector(self.element_type.to_llvm(context), self.\n vector_size)\n\n @property\n def comparison_type_list(self):\n return self.subtype_list + [self.vector_size]\n\n def __str__(self):\n itemsize = self.element_type.itemsize\n if self.element_type.is_float:\n if itemsize == 4:\n return '__m128'\n else:\n return '__m128d'\n elif itemsize == 4:\n return '__m128i'\n else:\n raise NotImplementedError\n\n\n<assignment token>\n<code token>\n",
"<docstring token>\n<assignment token>\n<import token>\n<code token>\n<import token>\n<code token>\n<class token>\n<function token>\n<assignment token>\n<class token>\n\n\nclass ArrayType(Type):\n <assignment token>\n <assignment token>\n\n def __init__(self, dtype, ndim, is_c_contig=False, is_f_contig=False,\n inner_contig=False, broadcasting=None):\n super(ArrayType, self).__init__()\n self.dtype = dtype\n self.ndim = ndim\n self.is_c_contig = is_c_contig\n self.is_f_contig = is_f_contig\n self.inner_contig = inner_contig or is_c_contig or is_f_contig\n self.broadcasting = broadcasting or (True,) * ndim\n\n @property\n def comparison_type_list(self):\n return [self.dtype, self.is_c_contig, self.is_f_contig, self.\n inner_contig]\n\n def pointer(self):\n raise Exception('You probably want a pointer type to the dtype')\n\n def to_llvm(self, context):\n return context.to_llvm(self)\n\n def __repr__(self):\n axes = [':'] * self.ndim\n if self.is_c_contig:\n axes[-1] = '::1'\n elif self.is_f_contig:\n axes[0] = '::1'\n return '%s[%s]' % (self.dtype, ', '.join(axes))\n\n\nclass PointerType(Type):\n is_pointer = True\n subtypes = ['base_type']\n\n def __init__(self, base_type, **kwds):\n super(PointerType, self).__init__(**kwds)\n self.base_type = base_type\n\n def __repr__(self):\n return '%s *%s' % (self.base_type, ' '.join(self.qualifiers))\n\n def to_llvm(self, context):\n return llvm.core.Type.pointer(self.base_type.to_llvm(context))\n\n\nclass CArrayType(Type):\n is_carray = True\n subtypes = ['base_type']\n\n def __init__(self, base_type, size, **kwds):\n super(CArrayType, self).__init__(**kwds)\n self.base_type = base_type\n self.size = size\n\n def __repr__(self):\n return '%s[%d]' % (self.base_type, self.length)\n\n def to_llvm(self, context):\n return llvm.core.Type.array(self.base_type.to_llvm(context), self.size)\n\n\nclass TypeWrapper(Type):\n is_typewrapper = True\n subtypes = ['opaque_type']\n\n def __init__(self, opaque_type, context, **kwds):\n super(TypeWrapper, self).__init__(**kwds)\n self.opaque_type = opaque_type\n self.context = context\n\n def __repr__(self):\n return self.context.declare_type(self)\n\n def __deepcopy__(self, memo):\n return self\n\n\nclass NamedType(Type):\n name = None\n\n def __eq__(self, other):\n return isinstance(other, NamedType) and self.name == other.name\n\n def __repr__(self):\n if self.qualifiers:\n return '%s %s' % (self.name, ' '.join(self.qualifiers))\n return self.name\n\n\nclass BoolType(NamedType):\n is_bool = True\n name = 'bool'\n\n def __repr__(self):\n return 'int %s' % ' '.join(self.qualifiers)\n\n def to_llvm(self, context):\n return int8.to_llvm(context)\n\n\nclass NumericType(NamedType):\n \"\"\"\n Base class for numeric types.\n\n .. attribute:: name\n\n name of the type\n\n .. attribute:: itemsize\n\n sizeof(type)\n\n .. attribute:: rank\n\n ordering of numeric types\n \"\"\"\n is_numeric = True\n\n\nclass IntType(NumericType):\n is_int = True\n is_int_like = True\n name = 'int'\n signed = True\n rank = 4\n itemsize = 4\n kind = INT_KIND\n\n def to_llvm(self, context):\n if self.itemsize == 1:\n return lc.Type.int(8)\n elif self.itemsize == 2:\n return lc.Type.int(16)\n elif self.itemsize == 4:\n return lc.Type.int(32)\n else:\n assert self.itemsize == 8, self\n return lc.Type.int(64)\n\n\nclass FloatType(NumericType):\n is_float = True\n kind = FLOAT_KIND\n\n @property\n def comparison_type_list(self):\n return self.subtype_list + [self.itemsize]\n\n def to_llvm(self, context):\n if self.itemsize == 4:\n return lc.Type.float()\n elif self.itemsize == 8:\n return lc.Type.double()\n else:\n assert self.itemsize == 16\n return lc.Type.fp128()\n\n\nclass ComplexType(NumericType):\n is_complex = True\n subtypes = ['base_type']\n kind = COMPLEX_KIND\n\n\nclass Py_ssize_t_Type(IntType):\n is_py_ssize_t = True\n name = 'Py_ssize_t'\n rank = 9\n signed = True\n\n def __init__(self, **kwds):\n super(Py_ssize_t_Type, self).__init__(**kwds)\n self.itemsize = _plat_bits / 8\n\n\nclass NPyIntp(IntType):\n is_numpy_intp = True\n name = 'npy_intp'\n\n def __init__(self, **kwds):\n super(NPyIntp, self).__init__(**kwds)\n import numpy as np\n ctypes_array = np.empty(0).ctypes.strides\n self.itemsize = ctypes.sizeof(ctypes_array._type_)\n\n\nclass CharType(IntType):\n is_char = True\n name = 'char'\n rank = 1\n signed = True\n\n def to_llvm(self, context):\n return lc.Type.int(8)\n\n\nclass CStringType(Type):\n is_c_string = True\n\n def __repr__(self):\n return 'const char *'\n\n def to_llvm(self, context):\n return char.pointer().to_llvm(context)\n\n\nclass VoidType(NamedType):\n is_void = True\n name = 'void'\n\n def to_llvm(self, context):\n return lc.Type.void()\n\n\nclass ObjectType(Type):\n is_object = True\n\n def __repr__(self):\n return 'PyObject *'\n\n\nclass FunctionType(Type):\n subtypes = ['return_type', 'args']\n is_function = True\n is_vararg = False\n\n def to_llvm(self, context):\n return lc.Type.function(self.return_type.to_llvm(context), [\n arg_type.to_llvm(context) for arg_type in self.args], self.\n is_vararg)\n\n def __str__(self):\n args = map(str, self.args)\n if self.is_vararg:\n args.append('...')\n return '%s (*)(%s)' % (self.return_type, ', '.join(args))\n\n\nclass VectorType(Type):\n subtypes = ['element_type']\n is_vector = True\n vector_size = None\n\n def __init__(self, element_type, vector_size, **kwds):\n super(VectorType, self).__init__(**kwds)\n assert (element_type.is_int or element_type.is_float\n ) and element_type.itemsize in (4, 8), element_type\n self.element_type = element_type\n self.vector_size = vector_size\n\n def to_llvm(self, context):\n return lc.Type.vector(self.element_type.to_llvm(context), self.\n vector_size)\n\n @property\n def comparison_type_list(self):\n return self.subtype_list + [self.vector_size]\n\n def __str__(self):\n itemsize = self.element_type.itemsize\n if self.element_type.is_float:\n if itemsize == 4:\n return '__m128'\n else:\n return '__m128d'\n elif itemsize == 4:\n return '__m128i'\n else:\n raise NotImplementedError\n\n\n<assignment token>\n<code token>\n",
"<docstring token>\n<assignment token>\n<import token>\n<code token>\n<import token>\n<code token>\n<class token>\n<function token>\n<assignment token>\n<class token>\n\n\nclass ArrayType(Type):\n <assignment token>\n <assignment token>\n <function token>\n\n @property\n def comparison_type_list(self):\n return [self.dtype, self.is_c_contig, self.is_f_contig, self.\n inner_contig]\n\n def pointer(self):\n raise Exception('You probably want a pointer type to the dtype')\n\n def to_llvm(self, context):\n return context.to_llvm(self)\n\n def __repr__(self):\n axes = [':'] * self.ndim\n if self.is_c_contig:\n axes[-1] = '::1'\n elif self.is_f_contig:\n axes[0] = '::1'\n return '%s[%s]' % (self.dtype, ', '.join(axes))\n\n\nclass PointerType(Type):\n is_pointer = True\n subtypes = ['base_type']\n\n def __init__(self, base_type, **kwds):\n super(PointerType, self).__init__(**kwds)\n self.base_type = base_type\n\n def __repr__(self):\n return '%s *%s' % (self.base_type, ' '.join(self.qualifiers))\n\n def to_llvm(self, context):\n return llvm.core.Type.pointer(self.base_type.to_llvm(context))\n\n\nclass CArrayType(Type):\n is_carray = True\n subtypes = ['base_type']\n\n def __init__(self, base_type, size, **kwds):\n super(CArrayType, self).__init__(**kwds)\n self.base_type = base_type\n self.size = size\n\n def __repr__(self):\n return '%s[%d]' % (self.base_type, self.length)\n\n def to_llvm(self, context):\n return llvm.core.Type.array(self.base_type.to_llvm(context), self.size)\n\n\nclass TypeWrapper(Type):\n is_typewrapper = True\n subtypes = ['opaque_type']\n\n def __init__(self, opaque_type, context, **kwds):\n super(TypeWrapper, self).__init__(**kwds)\n self.opaque_type = opaque_type\n self.context = context\n\n def __repr__(self):\n return self.context.declare_type(self)\n\n def __deepcopy__(self, memo):\n return self\n\n\nclass NamedType(Type):\n name = None\n\n def __eq__(self, other):\n return isinstance(other, NamedType) and self.name == other.name\n\n def __repr__(self):\n if self.qualifiers:\n return '%s %s' % (self.name, ' '.join(self.qualifiers))\n return self.name\n\n\nclass BoolType(NamedType):\n is_bool = True\n name = 'bool'\n\n def __repr__(self):\n return 'int %s' % ' '.join(self.qualifiers)\n\n def to_llvm(self, context):\n return int8.to_llvm(context)\n\n\nclass NumericType(NamedType):\n \"\"\"\n Base class for numeric types.\n\n .. attribute:: name\n\n name of the type\n\n .. attribute:: itemsize\n\n sizeof(type)\n\n .. attribute:: rank\n\n ordering of numeric types\n \"\"\"\n is_numeric = True\n\n\nclass IntType(NumericType):\n is_int = True\n is_int_like = True\n name = 'int'\n signed = True\n rank = 4\n itemsize = 4\n kind = INT_KIND\n\n def to_llvm(self, context):\n if self.itemsize == 1:\n return lc.Type.int(8)\n elif self.itemsize == 2:\n return lc.Type.int(16)\n elif self.itemsize == 4:\n return lc.Type.int(32)\n else:\n assert self.itemsize == 8, self\n return lc.Type.int(64)\n\n\nclass FloatType(NumericType):\n is_float = True\n kind = FLOAT_KIND\n\n @property\n def comparison_type_list(self):\n return self.subtype_list + [self.itemsize]\n\n def to_llvm(self, context):\n if self.itemsize == 4:\n return lc.Type.float()\n elif self.itemsize == 8:\n return lc.Type.double()\n else:\n assert self.itemsize == 16\n return lc.Type.fp128()\n\n\nclass ComplexType(NumericType):\n is_complex = True\n subtypes = ['base_type']\n kind = COMPLEX_KIND\n\n\nclass Py_ssize_t_Type(IntType):\n is_py_ssize_t = True\n name = 'Py_ssize_t'\n rank = 9\n signed = True\n\n def __init__(self, **kwds):\n super(Py_ssize_t_Type, self).__init__(**kwds)\n self.itemsize = _plat_bits / 8\n\n\nclass NPyIntp(IntType):\n is_numpy_intp = True\n name = 'npy_intp'\n\n def __init__(self, **kwds):\n super(NPyIntp, self).__init__(**kwds)\n import numpy as np\n ctypes_array = np.empty(0).ctypes.strides\n self.itemsize = ctypes.sizeof(ctypes_array._type_)\n\n\nclass CharType(IntType):\n is_char = True\n name = 'char'\n rank = 1\n signed = True\n\n def to_llvm(self, context):\n return lc.Type.int(8)\n\n\nclass CStringType(Type):\n is_c_string = True\n\n def __repr__(self):\n return 'const char *'\n\n def to_llvm(self, context):\n return char.pointer().to_llvm(context)\n\n\nclass VoidType(NamedType):\n is_void = True\n name = 'void'\n\n def to_llvm(self, context):\n return lc.Type.void()\n\n\nclass ObjectType(Type):\n is_object = True\n\n def __repr__(self):\n return 'PyObject *'\n\n\nclass FunctionType(Type):\n subtypes = ['return_type', 'args']\n is_function = True\n is_vararg = False\n\n def to_llvm(self, context):\n return lc.Type.function(self.return_type.to_llvm(context), [\n arg_type.to_llvm(context) for arg_type in self.args], self.\n is_vararg)\n\n def __str__(self):\n args = map(str, self.args)\n if self.is_vararg:\n args.append('...')\n return '%s (*)(%s)' % (self.return_type, ', '.join(args))\n\n\nclass VectorType(Type):\n subtypes = ['element_type']\n is_vector = True\n vector_size = None\n\n def __init__(self, element_type, vector_size, **kwds):\n super(VectorType, self).__init__(**kwds)\n assert (element_type.is_int or element_type.is_float\n ) and element_type.itemsize in (4, 8), element_type\n self.element_type = element_type\n self.vector_size = vector_size\n\n def to_llvm(self, context):\n return lc.Type.vector(self.element_type.to_llvm(context), self.\n vector_size)\n\n @property\n def comparison_type_list(self):\n return self.subtype_list + [self.vector_size]\n\n def __str__(self):\n itemsize = self.element_type.itemsize\n if self.element_type.is_float:\n if itemsize == 4:\n return '__m128'\n else:\n return '__m128d'\n elif itemsize == 4:\n return '__m128i'\n else:\n raise NotImplementedError\n\n\n<assignment token>\n<code token>\n",
"<docstring token>\n<assignment token>\n<import token>\n<code token>\n<import token>\n<code token>\n<class token>\n<function token>\n<assignment token>\n<class token>\n\n\nclass ArrayType(Type):\n <assignment token>\n <assignment token>\n <function token>\n\n @property\n def comparison_type_list(self):\n return [self.dtype, self.is_c_contig, self.is_f_contig, self.\n inner_contig]\n\n def pointer(self):\n raise Exception('You probably want a pointer type to the dtype')\n\n def to_llvm(self, context):\n return context.to_llvm(self)\n <function token>\n\n\nclass PointerType(Type):\n is_pointer = True\n subtypes = ['base_type']\n\n def __init__(self, base_type, **kwds):\n super(PointerType, self).__init__(**kwds)\n self.base_type = base_type\n\n def __repr__(self):\n return '%s *%s' % (self.base_type, ' '.join(self.qualifiers))\n\n def to_llvm(self, context):\n return llvm.core.Type.pointer(self.base_type.to_llvm(context))\n\n\nclass CArrayType(Type):\n is_carray = True\n subtypes = ['base_type']\n\n def __init__(self, base_type, size, **kwds):\n super(CArrayType, self).__init__(**kwds)\n self.base_type = base_type\n self.size = size\n\n def __repr__(self):\n return '%s[%d]' % (self.base_type, self.length)\n\n def to_llvm(self, context):\n return llvm.core.Type.array(self.base_type.to_llvm(context), self.size)\n\n\nclass TypeWrapper(Type):\n is_typewrapper = True\n subtypes = ['opaque_type']\n\n def __init__(self, opaque_type, context, **kwds):\n super(TypeWrapper, self).__init__(**kwds)\n self.opaque_type = opaque_type\n self.context = context\n\n def __repr__(self):\n return self.context.declare_type(self)\n\n def __deepcopy__(self, memo):\n return self\n\n\nclass NamedType(Type):\n name = None\n\n def __eq__(self, other):\n return isinstance(other, NamedType) and self.name == other.name\n\n def __repr__(self):\n if self.qualifiers:\n return '%s %s' % (self.name, ' '.join(self.qualifiers))\n return self.name\n\n\nclass BoolType(NamedType):\n is_bool = True\n name = 'bool'\n\n def __repr__(self):\n return 'int %s' % ' '.join(self.qualifiers)\n\n def to_llvm(self, context):\n return int8.to_llvm(context)\n\n\nclass NumericType(NamedType):\n \"\"\"\n Base class for numeric types.\n\n .. attribute:: name\n\n name of the type\n\n .. attribute:: itemsize\n\n sizeof(type)\n\n .. attribute:: rank\n\n ordering of numeric types\n \"\"\"\n is_numeric = True\n\n\nclass IntType(NumericType):\n is_int = True\n is_int_like = True\n name = 'int'\n signed = True\n rank = 4\n itemsize = 4\n kind = INT_KIND\n\n def to_llvm(self, context):\n if self.itemsize == 1:\n return lc.Type.int(8)\n elif self.itemsize == 2:\n return lc.Type.int(16)\n elif self.itemsize == 4:\n return lc.Type.int(32)\n else:\n assert self.itemsize == 8, self\n return lc.Type.int(64)\n\n\nclass FloatType(NumericType):\n is_float = True\n kind = FLOAT_KIND\n\n @property\n def comparison_type_list(self):\n return self.subtype_list + [self.itemsize]\n\n def to_llvm(self, context):\n if self.itemsize == 4:\n return lc.Type.float()\n elif self.itemsize == 8:\n return lc.Type.double()\n else:\n assert self.itemsize == 16\n return lc.Type.fp128()\n\n\nclass ComplexType(NumericType):\n is_complex = True\n subtypes = ['base_type']\n kind = COMPLEX_KIND\n\n\nclass Py_ssize_t_Type(IntType):\n is_py_ssize_t = True\n name = 'Py_ssize_t'\n rank = 9\n signed = True\n\n def __init__(self, **kwds):\n super(Py_ssize_t_Type, self).__init__(**kwds)\n self.itemsize = _plat_bits / 8\n\n\nclass NPyIntp(IntType):\n is_numpy_intp = True\n name = 'npy_intp'\n\n def __init__(self, **kwds):\n super(NPyIntp, self).__init__(**kwds)\n import numpy as np\n ctypes_array = np.empty(0).ctypes.strides\n self.itemsize = ctypes.sizeof(ctypes_array._type_)\n\n\nclass CharType(IntType):\n is_char = True\n name = 'char'\n rank = 1\n signed = True\n\n def to_llvm(self, context):\n return lc.Type.int(8)\n\n\nclass CStringType(Type):\n is_c_string = True\n\n def __repr__(self):\n return 'const char *'\n\n def to_llvm(self, context):\n return char.pointer().to_llvm(context)\n\n\nclass VoidType(NamedType):\n is_void = True\n name = 'void'\n\n def to_llvm(self, context):\n return lc.Type.void()\n\n\nclass ObjectType(Type):\n is_object = True\n\n def __repr__(self):\n return 'PyObject *'\n\n\nclass FunctionType(Type):\n subtypes = ['return_type', 'args']\n is_function = True\n is_vararg = False\n\n def to_llvm(self, context):\n return lc.Type.function(self.return_type.to_llvm(context), [\n arg_type.to_llvm(context) for arg_type in self.args], self.\n is_vararg)\n\n def __str__(self):\n args = map(str, self.args)\n if self.is_vararg:\n args.append('...')\n return '%s (*)(%s)' % (self.return_type, ', '.join(args))\n\n\nclass VectorType(Type):\n subtypes = ['element_type']\n is_vector = True\n vector_size = None\n\n def __init__(self, element_type, vector_size, **kwds):\n super(VectorType, self).__init__(**kwds)\n assert (element_type.is_int or element_type.is_float\n ) and element_type.itemsize in (4, 8), element_type\n self.element_type = element_type\n self.vector_size = vector_size\n\n def to_llvm(self, context):\n return lc.Type.vector(self.element_type.to_llvm(context), self.\n vector_size)\n\n @property\n def comparison_type_list(self):\n return self.subtype_list + [self.vector_size]\n\n def __str__(self):\n itemsize = self.element_type.itemsize\n if self.element_type.is_float:\n if itemsize == 4:\n return '__m128'\n else:\n return '__m128d'\n elif itemsize == 4:\n return '__m128i'\n else:\n raise NotImplementedError\n\n\n<assignment token>\n<code token>\n",
"<docstring token>\n<assignment token>\n<import token>\n<code token>\n<import token>\n<code token>\n<class token>\n<function token>\n<assignment token>\n<class token>\n\n\nclass ArrayType(Type):\n <assignment token>\n <assignment token>\n <function token>\n\n @property\n def comparison_type_list(self):\n return [self.dtype, self.is_c_contig, self.is_f_contig, self.\n inner_contig]\n <function token>\n\n def to_llvm(self, context):\n return context.to_llvm(self)\n <function token>\n\n\nclass PointerType(Type):\n is_pointer = True\n subtypes = ['base_type']\n\n def __init__(self, base_type, **kwds):\n super(PointerType, self).__init__(**kwds)\n self.base_type = base_type\n\n def __repr__(self):\n return '%s *%s' % (self.base_type, ' '.join(self.qualifiers))\n\n def to_llvm(self, context):\n return llvm.core.Type.pointer(self.base_type.to_llvm(context))\n\n\nclass CArrayType(Type):\n is_carray = True\n subtypes = ['base_type']\n\n def __init__(self, base_type, size, **kwds):\n super(CArrayType, self).__init__(**kwds)\n self.base_type = base_type\n self.size = size\n\n def __repr__(self):\n return '%s[%d]' % (self.base_type, self.length)\n\n def to_llvm(self, context):\n return llvm.core.Type.array(self.base_type.to_llvm(context), self.size)\n\n\nclass TypeWrapper(Type):\n is_typewrapper = True\n subtypes = ['opaque_type']\n\n def __init__(self, opaque_type, context, **kwds):\n super(TypeWrapper, self).__init__(**kwds)\n self.opaque_type = opaque_type\n self.context = context\n\n def __repr__(self):\n return self.context.declare_type(self)\n\n def __deepcopy__(self, memo):\n return self\n\n\nclass NamedType(Type):\n name = None\n\n def __eq__(self, other):\n return isinstance(other, NamedType) and self.name == other.name\n\n def __repr__(self):\n if self.qualifiers:\n return '%s %s' % (self.name, ' '.join(self.qualifiers))\n return self.name\n\n\nclass BoolType(NamedType):\n is_bool = True\n name = 'bool'\n\n def __repr__(self):\n return 'int %s' % ' '.join(self.qualifiers)\n\n def to_llvm(self, context):\n return int8.to_llvm(context)\n\n\nclass NumericType(NamedType):\n \"\"\"\n Base class for numeric types.\n\n .. attribute:: name\n\n name of the type\n\n .. attribute:: itemsize\n\n sizeof(type)\n\n .. attribute:: rank\n\n ordering of numeric types\n \"\"\"\n is_numeric = True\n\n\nclass IntType(NumericType):\n is_int = True\n is_int_like = True\n name = 'int'\n signed = True\n rank = 4\n itemsize = 4\n kind = INT_KIND\n\n def to_llvm(self, context):\n if self.itemsize == 1:\n return lc.Type.int(8)\n elif self.itemsize == 2:\n return lc.Type.int(16)\n elif self.itemsize == 4:\n return lc.Type.int(32)\n else:\n assert self.itemsize == 8, self\n return lc.Type.int(64)\n\n\nclass FloatType(NumericType):\n is_float = True\n kind = FLOAT_KIND\n\n @property\n def comparison_type_list(self):\n return self.subtype_list + [self.itemsize]\n\n def to_llvm(self, context):\n if self.itemsize == 4:\n return lc.Type.float()\n elif self.itemsize == 8:\n return lc.Type.double()\n else:\n assert self.itemsize == 16\n return lc.Type.fp128()\n\n\nclass ComplexType(NumericType):\n is_complex = True\n subtypes = ['base_type']\n kind = COMPLEX_KIND\n\n\nclass Py_ssize_t_Type(IntType):\n is_py_ssize_t = True\n name = 'Py_ssize_t'\n rank = 9\n signed = True\n\n def __init__(self, **kwds):\n super(Py_ssize_t_Type, self).__init__(**kwds)\n self.itemsize = _plat_bits / 8\n\n\nclass NPyIntp(IntType):\n is_numpy_intp = True\n name = 'npy_intp'\n\n def __init__(self, **kwds):\n super(NPyIntp, self).__init__(**kwds)\n import numpy as np\n ctypes_array = np.empty(0).ctypes.strides\n self.itemsize = ctypes.sizeof(ctypes_array._type_)\n\n\nclass CharType(IntType):\n is_char = True\n name = 'char'\n rank = 1\n signed = True\n\n def to_llvm(self, context):\n return lc.Type.int(8)\n\n\nclass CStringType(Type):\n is_c_string = True\n\n def __repr__(self):\n return 'const char *'\n\n def to_llvm(self, context):\n return char.pointer().to_llvm(context)\n\n\nclass VoidType(NamedType):\n is_void = True\n name = 'void'\n\n def to_llvm(self, context):\n return lc.Type.void()\n\n\nclass ObjectType(Type):\n is_object = True\n\n def __repr__(self):\n return 'PyObject *'\n\n\nclass FunctionType(Type):\n subtypes = ['return_type', 'args']\n is_function = True\n is_vararg = False\n\n def to_llvm(self, context):\n return lc.Type.function(self.return_type.to_llvm(context), [\n arg_type.to_llvm(context) for arg_type in self.args], self.\n is_vararg)\n\n def __str__(self):\n args = map(str, self.args)\n if self.is_vararg:\n args.append('...')\n return '%s (*)(%s)' % (self.return_type, ', '.join(args))\n\n\nclass VectorType(Type):\n subtypes = ['element_type']\n is_vector = True\n vector_size = None\n\n def __init__(self, element_type, vector_size, **kwds):\n super(VectorType, self).__init__(**kwds)\n assert (element_type.is_int or element_type.is_float\n ) and element_type.itemsize in (4, 8), element_type\n self.element_type = element_type\n self.vector_size = vector_size\n\n def to_llvm(self, context):\n return lc.Type.vector(self.element_type.to_llvm(context), self.\n vector_size)\n\n @property\n def comparison_type_list(self):\n return self.subtype_list + [self.vector_size]\n\n def __str__(self):\n itemsize = self.element_type.itemsize\n if self.element_type.is_float:\n if itemsize == 4:\n return '__m128'\n else:\n return '__m128d'\n elif itemsize == 4:\n return '__m128i'\n else:\n raise NotImplementedError\n\n\n<assignment token>\n<code token>\n",
"<docstring token>\n<assignment token>\n<import token>\n<code token>\n<import token>\n<code token>\n<class token>\n<function token>\n<assignment token>\n<class token>\n\n\nclass ArrayType(Type):\n <assignment token>\n <assignment token>\n <function token>\n\n @property\n def comparison_type_list(self):\n return [self.dtype, self.is_c_contig, self.is_f_contig, self.\n inner_contig]\n <function token>\n <function token>\n <function token>\n\n\nclass PointerType(Type):\n is_pointer = True\n subtypes = ['base_type']\n\n def __init__(self, base_type, **kwds):\n super(PointerType, self).__init__(**kwds)\n self.base_type = base_type\n\n def __repr__(self):\n return '%s *%s' % (self.base_type, ' '.join(self.qualifiers))\n\n def to_llvm(self, context):\n return llvm.core.Type.pointer(self.base_type.to_llvm(context))\n\n\nclass CArrayType(Type):\n is_carray = True\n subtypes = ['base_type']\n\n def __init__(self, base_type, size, **kwds):\n super(CArrayType, self).__init__(**kwds)\n self.base_type = base_type\n self.size = size\n\n def __repr__(self):\n return '%s[%d]' % (self.base_type, self.length)\n\n def to_llvm(self, context):\n return llvm.core.Type.array(self.base_type.to_llvm(context), self.size)\n\n\nclass TypeWrapper(Type):\n is_typewrapper = True\n subtypes = ['opaque_type']\n\n def __init__(self, opaque_type, context, **kwds):\n super(TypeWrapper, self).__init__(**kwds)\n self.opaque_type = opaque_type\n self.context = context\n\n def __repr__(self):\n return self.context.declare_type(self)\n\n def __deepcopy__(self, memo):\n return self\n\n\nclass NamedType(Type):\n name = None\n\n def __eq__(self, other):\n return isinstance(other, NamedType) and self.name == other.name\n\n def __repr__(self):\n if self.qualifiers:\n return '%s %s' % (self.name, ' '.join(self.qualifiers))\n return self.name\n\n\nclass BoolType(NamedType):\n is_bool = True\n name = 'bool'\n\n def __repr__(self):\n return 'int %s' % ' '.join(self.qualifiers)\n\n def to_llvm(self, context):\n return int8.to_llvm(context)\n\n\nclass NumericType(NamedType):\n \"\"\"\n Base class for numeric types.\n\n .. attribute:: name\n\n name of the type\n\n .. attribute:: itemsize\n\n sizeof(type)\n\n .. attribute:: rank\n\n ordering of numeric types\n \"\"\"\n is_numeric = True\n\n\nclass IntType(NumericType):\n is_int = True\n is_int_like = True\n name = 'int'\n signed = True\n rank = 4\n itemsize = 4\n kind = INT_KIND\n\n def to_llvm(self, context):\n if self.itemsize == 1:\n return lc.Type.int(8)\n elif self.itemsize == 2:\n return lc.Type.int(16)\n elif self.itemsize == 4:\n return lc.Type.int(32)\n else:\n assert self.itemsize == 8, self\n return lc.Type.int(64)\n\n\nclass FloatType(NumericType):\n is_float = True\n kind = FLOAT_KIND\n\n @property\n def comparison_type_list(self):\n return self.subtype_list + [self.itemsize]\n\n def to_llvm(self, context):\n if self.itemsize == 4:\n return lc.Type.float()\n elif self.itemsize == 8:\n return lc.Type.double()\n else:\n assert self.itemsize == 16\n return lc.Type.fp128()\n\n\nclass ComplexType(NumericType):\n is_complex = True\n subtypes = ['base_type']\n kind = COMPLEX_KIND\n\n\nclass Py_ssize_t_Type(IntType):\n is_py_ssize_t = True\n name = 'Py_ssize_t'\n rank = 9\n signed = True\n\n def __init__(self, **kwds):\n super(Py_ssize_t_Type, self).__init__(**kwds)\n self.itemsize = _plat_bits / 8\n\n\nclass NPyIntp(IntType):\n is_numpy_intp = True\n name = 'npy_intp'\n\n def __init__(self, **kwds):\n super(NPyIntp, self).__init__(**kwds)\n import numpy as np\n ctypes_array = np.empty(0).ctypes.strides\n self.itemsize = ctypes.sizeof(ctypes_array._type_)\n\n\nclass CharType(IntType):\n is_char = True\n name = 'char'\n rank = 1\n signed = True\n\n def to_llvm(self, context):\n return lc.Type.int(8)\n\n\nclass CStringType(Type):\n is_c_string = True\n\n def __repr__(self):\n return 'const char *'\n\n def to_llvm(self, context):\n return char.pointer().to_llvm(context)\n\n\nclass VoidType(NamedType):\n is_void = True\n name = 'void'\n\n def to_llvm(self, context):\n return lc.Type.void()\n\n\nclass ObjectType(Type):\n is_object = True\n\n def __repr__(self):\n return 'PyObject *'\n\n\nclass FunctionType(Type):\n subtypes = ['return_type', 'args']\n is_function = True\n is_vararg = False\n\n def to_llvm(self, context):\n return lc.Type.function(self.return_type.to_llvm(context), [\n arg_type.to_llvm(context) for arg_type in self.args], self.\n is_vararg)\n\n def __str__(self):\n args = map(str, self.args)\n if self.is_vararg:\n args.append('...')\n return '%s (*)(%s)' % (self.return_type, ', '.join(args))\n\n\nclass VectorType(Type):\n subtypes = ['element_type']\n is_vector = True\n vector_size = None\n\n def __init__(self, element_type, vector_size, **kwds):\n super(VectorType, self).__init__(**kwds)\n assert (element_type.is_int or element_type.is_float\n ) and element_type.itemsize in (4, 8), element_type\n self.element_type = element_type\n self.vector_size = vector_size\n\n def to_llvm(self, context):\n return lc.Type.vector(self.element_type.to_llvm(context), self.\n vector_size)\n\n @property\n def comparison_type_list(self):\n return self.subtype_list + [self.vector_size]\n\n def __str__(self):\n itemsize = self.element_type.itemsize\n if self.element_type.is_float:\n if itemsize == 4:\n return '__m128'\n else:\n return '__m128d'\n elif itemsize == 4:\n return '__m128i'\n else:\n raise NotImplementedError\n\n\n<assignment token>\n<code token>\n",
"<docstring token>\n<assignment token>\n<import token>\n<code token>\n<import token>\n<code token>\n<class token>\n<function token>\n<assignment token>\n<class token>\n\n\nclass ArrayType(Type):\n <assignment token>\n <assignment token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n\nclass PointerType(Type):\n is_pointer = True\n subtypes = ['base_type']\n\n def __init__(self, base_type, **kwds):\n super(PointerType, self).__init__(**kwds)\n self.base_type = base_type\n\n def __repr__(self):\n return '%s *%s' % (self.base_type, ' '.join(self.qualifiers))\n\n def to_llvm(self, context):\n return llvm.core.Type.pointer(self.base_type.to_llvm(context))\n\n\nclass CArrayType(Type):\n is_carray = True\n subtypes = ['base_type']\n\n def __init__(self, base_type, size, **kwds):\n super(CArrayType, self).__init__(**kwds)\n self.base_type = base_type\n self.size = size\n\n def __repr__(self):\n return '%s[%d]' % (self.base_type, self.length)\n\n def to_llvm(self, context):\n return llvm.core.Type.array(self.base_type.to_llvm(context), self.size)\n\n\nclass TypeWrapper(Type):\n is_typewrapper = True\n subtypes = ['opaque_type']\n\n def __init__(self, opaque_type, context, **kwds):\n super(TypeWrapper, self).__init__(**kwds)\n self.opaque_type = opaque_type\n self.context = context\n\n def __repr__(self):\n return self.context.declare_type(self)\n\n def __deepcopy__(self, memo):\n return self\n\n\nclass NamedType(Type):\n name = None\n\n def __eq__(self, other):\n return isinstance(other, NamedType) and self.name == other.name\n\n def __repr__(self):\n if self.qualifiers:\n return '%s %s' % (self.name, ' '.join(self.qualifiers))\n return self.name\n\n\nclass BoolType(NamedType):\n is_bool = True\n name = 'bool'\n\n def __repr__(self):\n return 'int %s' % ' '.join(self.qualifiers)\n\n def to_llvm(self, context):\n return int8.to_llvm(context)\n\n\nclass NumericType(NamedType):\n \"\"\"\n Base class for numeric types.\n\n .. attribute:: name\n\n name of the type\n\n .. attribute:: itemsize\n\n sizeof(type)\n\n .. attribute:: rank\n\n ordering of numeric types\n \"\"\"\n is_numeric = True\n\n\nclass IntType(NumericType):\n is_int = True\n is_int_like = True\n name = 'int'\n signed = True\n rank = 4\n itemsize = 4\n kind = INT_KIND\n\n def to_llvm(self, context):\n if self.itemsize == 1:\n return lc.Type.int(8)\n elif self.itemsize == 2:\n return lc.Type.int(16)\n elif self.itemsize == 4:\n return lc.Type.int(32)\n else:\n assert self.itemsize == 8, self\n return lc.Type.int(64)\n\n\nclass FloatType(NumericType):\n is_float = True\n kind = FLOAT_KIND\n\n @property\n def comparison_type_list(self):\n return self.subtype_list + [self.itemsize]\n\n def to_llvm(self, context):\n if self.itemsize == 4:\n return lc.Type.float()\n elif self.itemsize == 8:\n return lc.Type.double()\n else:\n assert self.itemsize == 16\n return lc.Type.fp128()\n\n\nclass ComplexType(NumericType):\n is_complex = True\n subtypes = ['base_type']\n kind = COMPLEX_KIND\n\n\nclass Py_ssize_t_Type(IntType):\n is_py_ssize_t = True\n name = 'Py_ssize_t'\n rank = 9\n signed = True\n\n def __init__(self, **kwds):\n super(Py_ssize_t_Type, self).__init__(**kwds)\n self.itemsize = _plat_bits / 8\n\n\nclass NPyIntp(IntType):\n is_numpy_intp = True\n name = 'npy_intp'\n\n def __init__(self, **kwds):\n super(NPyIntp, self).__init__(**kwds)\n import numpy as np\n ctypes_array = np.empty(0).ctypes.strides\n self.itemsize = ctypes.sizeof(ctypes_array._type_)\n\n\nclass CharType(IntType):\n is_char = True\n name = 'char'\n rank = 1\n signed = True\n\n def to_llvm(self, context):\n return lc.Type.int(8)\n\n\nclass CStringType(Type):\n is_c_string = True\n\n def __repr__(self):\n return 'const char *'\n\n def to_llvm(self, context):\n return char.pointer().to_llvm(context)\n\n\nclass VoidType(NamedType):\n is_void = True\n name = 'void'\n\n def to_llvm(self, context):\n return lc.Type.void()\n\n\nclass ObjectType(Type):\n is_object = True\n\n def __repr__(self):\n return 'PyObject *'\n\n\nclass FunctionType(Type):\n subtypes = ['return_type', 'args']\n is_function = True\n is_vararg = False\n\n def to_llvm(self, context):\n return lc.Type.function(self.return_type.to_llvm(context), [\n arg_type.to_llvm(context) for arg_type in self.args], self.\n is_vararg)\n\n def __str__(self):\n args = map(str, self.args)\n if self.is_vararg:\n args.append('...')\n return '%s (*)(%s)' % (self.return_type, ', '.join(args))\n\n\nclass VectorType(Type):\n subtypes = ['element_type']\n is_vector = True\n vector_size = None\n\n def __init__(self, element_type, vector_size, **kwds):\n super(VectorType, self).__init__(**kwds)\n assert (element_type.is_int or element_type.is_float\n ) and element_type.itemsize in (4, 8), element_type\n self.element_type = element_type\n self.vector_size = vector_size\n\n def to_llvm(self, context):\n return lc.Type.vector(self.element_type.to_llvm(context), self.\n vector_size)\n\n @property\n def comparison_type_list(self):\n return self.subtype_list + [self.vector_size]\n\n def __str__(self):\n itemsize = self.element_type.itemsize\n if self.element_type.is_float:\n if itemsize == 4:\n return '__m128'\n else:\n return '__m128d'\n elif itemsize == 4:\n return '__m128i'\n else:\n raise NotImplementedError\n\n\n<assignment token>\n<code token>\n",
"<docstring token>\n<assignment token>\n<import token>\n<code token>\n<import token>\n<code token>\n<class token>\n<function token>\n<assignment token>\n<class token>\n<class token>\n\n\nclass PointerType(Type):\n is_pointer = True\n subtypes = ['base_type']\n\n def __init__(self, base_type, **kwds):\n super(PointerType, self).__init__(**kwds)\n self.base_type = base_type\n\n def __repr__(self):\n return '%s *%s' % (self.base_type, ' '.join(self.qualifiers))\n\n def to_llvm(self, context):\n return llvm.core.Type.pointer(self.base_type.to_llvm(context))\n\n\nclass CArrayType(Type):\n is_carray = True\n subtypes = ['base_type']\n\n def __init__(self, base_type, size, **kwds):\n super(CArrayType, self).__init__(**kwds)\n self.base_type = base_type\n self.size = size\n\n def __repr__(self):\n return '%s[%d]' % (self.base_type, self.length)\n\n def to_llvm(self, context):\n return llvm.core.Type.array(self.base_type.to_llvm(context), self.size)\n\n\nclass TypeWrapper(Type):\n is_typewrapper = True\n subtypes = ['opaque_type']\n\n def __init__(self, opaque_type, context, **kwds):\n super(TypeWrapper, self).__init__(**kwds)\n self.opaque_type = opaque_type\n self.context = context\n\n def __repr__(self):\n return self.context.declare_type(self)\n\n def __deepcopy__(self, memo):\n return self\n\n\nclass NamedType(Type):\n name = None\n\n def __eq__(self, other):\n return isinstance(other, NamedType) and self.name == other.name\n\n def __repr__(self):\n if self.qualifiers:\n return '%s %s' % (self.name, ' '.join(self.qualifiers))\n return self.name\n\n\nclass BoolType(NamedType):\n is_bool = True\n name = 'bool'\n\n def __repr__(self):\n return 'int %s' % ' '.join(self.qualifiers)\n\n def to_llvm(self, context):\n return int8.to_llvm(context)\n\n\nclass NumericType(NamedType):\n \"\"\"\n Base class for numeric types.\n\n .. attribute:: name\n\n name of the type\n\n .. attribute:: itemsize\n\n sizeof(type)\n\n .. attribute:: rank\n\n ordering of numeric types\n \"\"\"\n is_numeric = True\n\n\nclass IntType(NumericType):\n is_int = True\n is_int_like = True\n name = 'int'\n signed = True\n rank = 4\n itemsize = 4\n kind = INT_KIND\n\n def to_llvm(self, context):\n if self.itemsize == 1:\n return lc.Type.int(8)\n elif self.itemsize == 2:\n return lc.Type.int(16)\n elif self.itemsize == 4:\n return lc.Type.int(32)\n else:\n assert self.itemsize == 8, self\n return lc.Type.int(64)\n\n\nclass FloatType(NumericType):\n is_float = True\n kind = FLOAT_KIND\n\n @property\n def comparison_type_list(self):\n return self.subtype_list + [self.itemsize]\n\n def to_llvm(self, context):\n if self.itemsize == 4:\n return lc.Type.float()\n elif self.itemsize == 8:\n return lc.Type.double()\n else:\n assert self.itemsize == 16\n return lc.Type.fp128()\n\n\nclass ComplexType(NumericType):\n is_complex = True\n subtypes = ['base_type']\n kind = COMPLEX_KIND\n\n\nclass Py_ssize_t_Type(IntType):\n is_py_ssize_t = True\n name = 'Py_ssize_t'\n rank = 9\n signed = True\n\n def __init__(self, **kwds):\n super(Py_ssize_t_Type, self).__init__(**kwds)\n self.itemsize = _plat_bits / 8\n\n\nclass NPyIntp(IntType):\n is_numpy_intp = True\n name = 'npy_intp'\n\n def __init__(self, **kwds):\n super(NPyIntp, self).__init__(**kwds)\n import numpy as np\n ctypes_array = np.empty(0).ctypes.strides\n self.itemsize = ctypes.sizeof(ctypes_array._type_)\n\n\nclass CharType(IntType):\n is_char = True\n name = 'char'\n rank = 1\n signed = True\n\n def to_llvm(self, context):\n return lc.Type.int(8)\n\n\nclass CStringType(Type):\n is_c_string = True\n\n def __repr__(self):\n return 'const char *'\n\n def to_llvm(self, context):\n return char.pointer().to_llvm(context)\n\n\nclass VoidType(NamedType):\n is_void = True\n name = 'void'\n\n def to_llvm(self, context):\n return lc.Type.void()\n\n\nclass ObjectType(Type):\n is_object = True\n\n def __repr__(self):\n return 'PyObject *'\n\n\nclass FunctionType(Type):\n subtypes = ['return_type', 'args']\n is_function = True\n is_vararg = False\n\n def to_llvm(self, context):\n return lc.Type.function(self.return_type.to_llvm(context), [\n arg_type.to_llvm(context) for arg_type in self.args], self.\n is_vararg)\n\n def __str__(self):\n args = map(str, self.args)\n if self.is_vararg:\n args.append('...')\n return '%s (*)(%s)' % (self.return_type, ', '.join(args))\n\n\nclass VectorType(Type):\n subtypes = ['element_type']\n is_vector = True\n vector_size = None\n\n def __init__(self, element_type, vector_size, **kwds):\n super(VectorType, self).__init__(**kwds)\n assert (element_type.is_int or element_type.is_float\n ) and element_type.itemsize in (4, 8), element_type\n self.element_type = element_type\n self.vector_size = vector_size\n\n def to_llvm(self, context):\n return lc.Type.vector(self.element_type.to_llvm(context), self.\n vector_size)\n\n @property\n def comparison_type_list(self):\n return self.subtype_list + [self.vector_size]\n\n def __str__(self):\n itemsize = self.element_type.itemsize\n if self.element_type.is_float:\n if itemsize == 4:\n return '__m128'\n else:\n return '__m128d'\n elif itemsize == 4:\n return '__m128i'\n else:\n raise NotImplementedError\n\n\n<assignment token>\n<code token>\n",
"<docstring token>\n<assignment token>\n<import token>\n<code token>\n<import token>\n<code token>\n<class token>\n<function token>\n<assignment token>\n<class token>\n<class token>\n\n\nclass PointerType(Type):\n <assignment token>\n <assignment token>\n\n def __init__(self, base_type, **kwds):\n super(PointerType, self).__init__(**kwds)\n self.base_type = base_type\n\n def __repr__(self):\n return '%s *%s' % (self.base_type, ' '.join(self.qualifiers))\n\n def to_llvm(self, context):\n return llvm.core.Type.pointer(self.base_type.to_llvm(context))\n\n\nclass CArrayType(Type):\n is_carray = True\n subtypes = ['base_type']\n\n def __init__(self, base_type, size, **kwds):\n super(CArrayType, self).__init__(**kwds)\n self.base_type = base_type\n self.size = size\n\n def __repr__(self):\n return '%s[%d]' % (self.base_type, self.length)\n\n def to_llvm(self, context):\n return llvm.core.Type.array(self.base_type.to_llvm(context), self.size)\n\n\nclass TypeWrapper(Type):\n is_typewrapper = True\n subtypes = ['opaque_type']\n\n def __init__(self, opaque_type, context, **kwds):\n super(TypeWrapper, self).__init__(**kwds)\n self.opaque_type = opaque_type\n self.context = context\n\n def __repr__(self):\n return self.context.declare_type(self)\n\n def __deepcopy__(self, memo):\n return self\n\n\nclass NamedType(Type):\n name = None\n\n def __eq__(self, other):\n return isinstance(other, NamedType) and self.name == other.name\n\n def __repr__(self):\n if self.qualifiers:\n return '%s %s' % (self.name, ' '.join(self.qualifiers))\n return self.name\n\n\nclass BoolType(NamedType):\n is_bool = True\n name = 'bool'\n\n def __repr__(self):\n return 'int %s' % ' '.join(self.qualifiers)\n\n def to_llvm(self, context):\n return int8.to_llvm(context)\n\n\nclass NumericType(NamedType):\n \"\"\"\n Base class for numeric types.\n\n .. attribute:: name\n\n name of the type\n\n .. attribute:: itemsize\n\n sizeof(type)\n\n .. attribute:: rank\n\n ordering of numeric types\n \"\"\"\n is_numeric = True\n\n\nclass IntType(NumericType):\n is_int = True\n is_int_like = True\n name = 'int'\n signed = True\n rank = 4\n itemsize = 4\n kind = INT_KIND\n\n def to_llvm(self, context):\n if self.itemsize == 1:\n return lc.Type.int(8)\n elif self.itemsize == 2:\n return lc.Type.int(16)\n elif self.itemsize == 4:\n return lc.Type.int(32)\n else:\n assert self.itemsize == 8, self\n return lc.Type.int(64)\n\n\nclass FloatType(NumericType):\n is_float = True\n kind = FLOAT_KIND\n\n @property\n def comparison_type_list(self):\n return self.subtype_list + [self.itemsize]\n\n def to_llvm(self, context):\n if self.itemsize == 4:\n return lc.Type.float()\n elif self.itemsize == 8:\n return lc.Type.double()\n else:\n assert self.itemsize == 16\n return lc.Type.fp128()\n\n\nclass ComplexType(NumericType):\n is_complex = True\n subtypes = ['base_type']\n kind = COMPLEX_KIND\n\n\nclass Py_ssize_t_Type(IntType):\n is_py_ssize_t = True\n name = 'Py_ssize_t'\n rank = 9\n signed = True\n\n def __init__(self, **kwds):\n super(Py_ssize_t_Type, self).__init__(**kwds)\n self.itemsize = _plat_bits / 8\n\n\nclass NPyIntp(IntType):\n is_numpy_intp = True\n name = 'npy_intp'\n\n def __init__(self, **kwds):\n super(NPyIntp, self).__init__(**kwds)\n import numpy as np\n ctypes_array = np.empty(0).ctypes.strides\n self.itemsize = ctypes.sizeof(ctypes_array._type_)\n\n\nclass CharType(IntType):\n is_char = True\n name = 'char'\n rank = 1\n signed = True\n\n def to_llvm(self, context):\n return lc.Type.int(8)\n\n\nclass CStringType(Type):\n is_c_string = True\n\n def __repr__(self):\n return 'const char *'\n\n def to_llvm(self, context):\n return char.pointer().to_llvm(context)\n\n\nclass VoidType(NamedType):\n is_void = True\n name = 'void'\n\n def to_llvm(self, context):\n return lc.Type.void()\n\n\nclass ObjectType(Type):\n is_object = True\n\n def __repr__(self):\n return 'PyObject *'\n\n\nclass FunctionType(Type):\n subtypes = ['return_type', 'args']\n is_function = True\n is_vararg = False\n\n def to_llvm(self, context):\n return lc.Type.function(self.return_type.to_llvm(context), [\n arg_type.to_llvm(context) for arg_type in self.args], self.\n is_vararg)\n\n def __str__(self):\n args = map(str, self.args)\n if self.is_vararg:\n args.append('...')\n return '%s (*)(%s)' % (self.return_type, ', '.join(args))\n\n\nclass VectorType(Type):\n subtypes = ['element_type']\n is_vector = True\n vector_size = None\n\n def __init__(self, element_type, vector_size, **kwds):\n super(VectorType, self).__init__(**kwds)\n assert (element_type.is_int or element_type.is_float\n ) and element_type.itemsize in (4, 8), element_type\n self.element_type = element_type\n self.vector_size = vector_size\n\n def to_llvm(self, context):\n return lc.Type.vector(self.element_type.to_llvm(context), self.\n vector_size)\n\n @property\n def comparison_type_list(self):\n return self.subtype_list + [self.vector_size]\n\n def __str__(self):\n itemsize = self.element_type.itemsize\n if self.element_type.is_float:\n if itemsize == 4:\n return '__m128'\n else:\n return '__m128d'\n elif itemsize == 4:\n return '__m128i'\n else:\n raise NotImplementedError\n\n\n<assignment token>\n<code token>\n",
"<docstring token>\n<assignment token>\n<import token>\n<code token>\n<import token>\n<code token>\n<class token>\n<function token>\n<assignment token>\n<class token>\n<class token>\n\n\nclass PointerType(Type):\n <assignment token>\n <assignment token>\n <function token>\n\n def __repr__(self):\n return '%s *%s' % (self.base_type, ' '.join(self.qualifiers))\n\n def to_llvm(self, context):\n return llvm.core.Type.pointer(self.base_type.to_llvm(context))\n\n\nclass CArrayType(Type):\n is_carray = True\n subtypes = ['base_type']\n\n def __init__(self, base_type, size, **kwds):\n super(CArrayType, self).__init__(**kwds)\n self.base_type = base_type\n self.size = size\n\n def __repr__(self):\n return '%s[%d]' % (self.base_type, self.length)\n\n def to_llvm(self, context):\n return llvm.core.Type.array(self.base_type.to_llvm(context), self.size)\n\n\nclass TypeWrapper(Type):\n is_typewrapper = True\n subtypes = ['opaque_type']\n\n def __init__(self, opaque_type, context, **kwds):\n super(TypeWrapper, self).__init__(**kwds)\n self.opaque_type = opaque_type\n self.context = context\n\n def __repr__(self):\n return self.context.declare_type(self)\n\n def __deepcopy__(self, memo):\n return self\n\n\nclass NamedType(Type):\n name = None\n\n def __eq__(self, other):\n return isinstance(other, NamedType) and self.name == other.name\n\n def __repr__(self):\n if self.qualifiers:\n return '%s %s' % (self.name, ' '.join(self.qualifiers))\n return self.name\n\n\nclass BoolType(NamedType):\n is_bool = True\n name = 'bool'\n\n def __repr__(self):\n return 'int %s' % ' '.join(self.qualifiers)\n\n def to_llvm(self, context):\n return int8.to_llvm(context)\n\n\nclass NumericType(NamedType):\n \"\"\"\n Base class for numeric types.\n\n .. attribute:: name\n\n name of the type\n\n .. attribute:: itemsize\n\n sizeof(type)\n\n .. attribute:: rank\n\n ordering of numeric types\n \"\"\"\n is_numeric = True\n\n\nclass IntType(NumericType):\n is_int = True\n is_int_like = True\n name = 'int'\n signed = True\n rank = 4\n itemsize = 4\n kind = INT_KIND\n\n def to_llvm(self, context):\n if self.itemsize == 1:\n return lc.Type.int(8)\n elif self.itemsize == 2:\n return lc.Type.int(16)\n elif self.itemsize == 4:\n return lc.Type.int(32)\n else:\n assert self.itemsize == 8, self\n return lc.Type.int(64)\n\n\nclass FloatType(NumericType):\n is_float = True\n kind = FLOAT_KIND\n\n @property\n def comparison_type_list(self):\n return self.subtype_list + [self.itemsize]\n\n def to_llvm(self, context):\n if self.itemsize == 4:\n return lc.Type.float()\n elif self.itemsize == 8:\n return lc.Type.double()\n else:\n assert self.itemsize == 16\n return lc.Type.fp128()\n\n\nclass ComplexType(NumericType):\n is_complex = True\n subtypes = ['base_type']\n kind = COMPLEX_KIND\n\n\nclass Py_ssize_t_Type(IntType):\n is_py_ssize_t = True\n name = 'Py_ssize_t'\n rank = 9\n signed = True\n\n def __init__(self, **kwds):\n super(Py_ssize_t_Type, self).__init__(**kwds)\n self.itemsize = _plat_bits / 8\n\n\nclass NPyIntp(IntType):\n is_numpy_intp = True\n name = 'npy_intp'\n\n def __init__(self, **kwds):\n super(NPyIntp, self).__init__(**kwds)\n import numpy as np\n ctypes_array = np.empty(0).ctypes.strides\n self.itemsize = ctypes.sizeof(ctypes_array._type_)\n\n\nclass CharType(IntType):\n is_char = True\n name = 'char'\n rank = 1\n signed = True\n\n def to_llvm(self, context):\n return lc.Type.int(8)\n\n\nclass CStringType(Type):\n is_c_string = True\n\n def __repr__(self):\n return 'const char *'\n\n def to_llvm(self, context):\n return char.pointer().to_llvm(context)\n\n\nclass VoidType(NamedType):\n is_void = True\n name = 'void'\n\n def to_llvm(self, context):\n return lc.Type.void()\n\n\nclass ObjectType(Type):\n is_object = True\n\n def __repr__(self):\n return 'PyObject *'\n\n\nclass FunctionType(Type):\n subtypes = ['return_type', 'args']\n is_function = True\n is_vararg = False\n\n def to_llvm(self, context):\n return lc.Type.function(self.return_type.to_llvm(context), [\n arg_type.to_llvm(context) for arg_type in self.args], self.\n is_vararg)\n\n def __str__(self):\n args = map(str, self.args)\n if self.is_vararg:\n args.append('...')\n return '%s (*)(%s)' % (self.return_type, ', '.join(args))\n\n\nclass VectorType(Type):\n subtypes = ['element_type']\n is_vector = True\n vector_size = None\n\n def __init__(self, element_type, vector_size, **kwds):\n super(VectorType, self).__init__(**kwds)\n assert (element_type.is_int or element_type.is_float\n ) and element_type.itemsize in (4, 8), element_type\n self.element_type = element_type\n self.vector_size = vector_size\n\n def to_llvm(self, context):\n return lc.Type.vector(self.element_type.to_llvm(context), self.\n vector_size)\n\n @property\n def comparison_type_list(self):\n return self.subtype_list + [self.vector_size]\n\n def __str__(self):\n itemsize = self.element_type.itemsize\n if self.element_type.is_float:\n if itemsize == 4:\n return '__m128'\n else:\n return '__m128d'\n elif itemsize == 4:\n return '__m128i'\n else:\n raise NotImplementedError\n\n\n<assignment token>\n<code token>\n",
"<docstring token>\n<assignment token>\n<import token>\n<code token>\n<import token>\n<code token>\n<class token>\n<function token>\n<assignment token>\n<class token>\n<class token>\n\n\nclass PointerType(Type):\n <assignment token>\n <assignment token>\n <function token>\n\n def __repr__(self):\n return '%s *%s' % (self.base_type, ' '.join(self.qualifiers))\n <function token>\n\n\nclass CArrayType(Type):\n is_carray = True\n subtypes = ['base_type']\n\n def __init__(self, base_type, size, **kwds):\n super(CArrayType, self).__init__(**kwds)\n self.base_type = base_type\n self.size = size\n\n def __repr__(self):\n return '%s[%d]' % (self.base_type, self.length)\n\n def to_llvm(self, context):\n return llvm.core.Type.array(self.base_type.to_llvm(context), self.size)\n\n\nclass TypeWrapper(Type):\n is_typewrapper = True\n subtypes = ['opaque_type']\n\n def __init__(self, opaque_type, context, **kwds):\n super(TypeWrapper, self).__init__(**kwds)\n self.opaque_type = opaque_type\n self.context = context\n\n def __repr__(self):\n return self.context.declare_type(self)\n\n def __deepcopy__(self, memo):\n return self\n\n\nclass NamedType(Type):\n name = None\n\n def __eq__(self, other):\n return isinstance(other, NamedType) and self.name == other.name\n\n def __repr__(self):\n if self.qualifiers:\n return '%s %s' % (self.name, ' '.join(self.qualifiers))\n return self.name\n\n\nclass BoolType(NamedType):\n is_bool = True\n name = 'bool'\n\n def __repr__(self):\n return 'int %s' % ' '.join(self.qualifiers)\n\n def to_llvm(self, context):\n return int8.to_llvm(context)\n\n\nclass NumericType(NamedType):\n \"\"\"\n Base class for numeric types.\n\n .. attribute:: name\n\n name of the type\n\n .. attribute:: itemsize\n\n sizeof(type)\n\n .. attribute:: rank\n\n ordering of numeric types\n \"\"\"\n is_numeric = True\n\n\nclass IntType(NumericType):\n is_int = True\n is_int_like = True\n name = 'int'\n signed = True\n rank = 4\n itemsize = 4\n kind = INT_KIND\n\n def to_llvm(self, context):\n if self.itemsize == 1:\n return lc.Type.int(8)\n elif self.itemsize == 2:\n return lc.Type.int(16)\n elif self.itemsize == 4:\n return lc.Type.int(32)\n else:\n assert self.itemsize == 8, self\n return lc.Type.int(64)\n\n\nclass FloatType(NumericType):\n is_float = True\n kind = FLOAT_KIND\n\n @property\n def comparison_type_list(self):\n return self.subtype_list + [self.itemsize]\n\n def to_llvm(self, context):\n if self.itemsize == 4:\n return lc.Type.float()\n elif self.itemsize == 8:\n return lc.Type.double()\n else:\n assert self.itemsize == 16\n return lc.Type.fp128()\n\n\nclass ComplexType(NumericType):\n is_complex = True\n subtypes = ['base_type']\n kind = COMPLEX_KIND\n\n\nclass Py_ssize_t_Type(IntType):\n is_py_ssize_t = True\n name = 'Py_ssize_t'\n rank = 9\n signed = True\n\n def __init__(self, **kwds):\n super(Py_ssize_t_Type, self).__init__(**kwds)\n self.itemsize = _plat_bits / 8\n\n\nclass NPyIntp(IntType):\n is_numpy_intp = True\n name = 'npy_intp'\n\n def __init__(self, **kwds):\n super(NPyIntp, self).__init__(**kwds)\n import numpy as np\n ctypes_array = np.empty(0).ctypes.strides\n self.itemsize = ctypes.sizeof(ctypes_array._type_)\n\n\nclass CharType(IntType):\n is_char = True\n name = 'char'\n rank = 1\n signed = True\n\n def to_llvm(self, context):\n return lc.Type.int(8)\n\n\nclass CStringType(Type):\n is_c_string = True\n\n def __repr__(self):\n return 'const char *'\n\n def to_llvm(self, context):\n return char.pointer().to_llvm(context)\n\n\nclass VoidType(NamedType):\n is_void = True\n name = 'void'\n\n def to_llvm(self, context):\n return lc.Type.void()\n\n\nclass ObjectType(Type):\n is_object = True\n\n def __repr__(self):\n return 'PyObject *'\n\n\nclass FunctionType(Type):\n subtypes = ['return_type', 'args']\n is_function = True\n is_vararg = False\n\n def to_llvm(self, context):\n return lc.Type.function(self.return_type.to_llvm(context), [\n arg_type.to_llvm(context) for arg_type in self.args], self.\n is_vararg)\n\n def __str__(self):\n args = map(str, self.args)\n if self.is_vararg:\n args.append('...')\n return '%s (*)(%s)' % (self.return_type, ', '.join(args))\n\n\nclass VectorType(Type):\n subtypes = ['element_type']\n is_vector = True\n vector_size = None\n\n def __init__(self, element_type, vector_size, **kwds):\n super(VectorType, self).__init__(**kwds)\n assert (element_type.is_int or element_type.is_float\n ) and element_type.itemsize in (4, 8), element_type\n self.element_type = element_type\n self.vector_size = vector_size\n\n def to_llvm(self, context):\n return lc.Type.vector(self.element_type.to_llvm(context), self.\n vector_size)\n\n @property\n def comparison_type_list(self):\n return self.subtype_list + [self.vector_size]\n\n def __str__(self):\n itemsize = self.element_type.itemsize\n if self.element_type.is_float:\n if itemsize == 4:\n return '__m128'\n else:\n return '__m128d'\n elif itemsize == 4:\n return '__m128i'\n else:\n raise NotImplementedError\n\n\n<assignment token>\n<code token>\n",
"<docstring token>\n<assignment token>\n<import token>\n<code token>\n<import token>\n<code token>\n<class token>\n<function token>\n<assignment token>\n<class token>\n<class token>\n\n\nclass PointerType(Type):\n <assignment token>\n <assignment token>\n <function token>\n <function token>\n <function token>\n\n\nclass CArrayType(Type):\n is_carray = True\n subtypes = ['base_type']\n\n def __init__(self, base_type, size, **kwds):\n super(CArrayType, self).__init__(**kwds)\n self.base_type = base_type\n self.size = size\n\n def __repr__(self):\n return '%s[%d]' % (self.base_type, self.length)\n\n def to_llvm(self, context):\n return llvm.core.Type.array(self.base_type.to_llvm(context), self.size)\n\n\nclass TypeWrapper(Type):\n is_typewrapper = True\n subtypes = ['opaque_type']\n\n def __init__(self, opaque_type, context, **kwds):\n super(TypeWrapper, self).__init__(**kwds)\n self.opaque_type = opaque_type\n self.context = context\n\n def __repr__(self):\n return self.context.declare_type(self)\n\n def __deepcopy__(self, memo):\n return self\n\n\nclass NamedType(Type):\n name = None\n\n def __eq__(self, other):\n return isinstance(other, NamedType) and self.name == other.name\n\n def __repr__(self):\n if self.qualifiers:\n return '%s %s' % (self.name, ' '.join(self.qualifiers))\n return self.name\n\n\nclass BoolType(NamedType):\n is_bool = True\n name = 'bool'\n\n def __repr__(self):\n return 'int %s' % ' '.join(self.qualifiers)\n\n def to_llvm(self, context):\n return int8.to_llvm(context)\n\n\nclass NumericType(NamedType):\n \"\"\"\n Base class for numeric types.\n\n .. attribute:: name\n\n name of the type\n\n .. attribute:: itemsize\n\n sizeof(type)\n\n .. attribute:: rank\n\n ordering of numeric types\n \"\"\"\n is_numeric = True\n\n\nclass IntType(NumericType):\n is_int = True\n is_int_like = True\n name = 'int'\n signed = True\n rank = 4\n itemsize = 4\n kind = INT_KIND\n\n def to_llvm(self, context):\n if self.itemsize == 1:\n return lc.Type.int(8)\n elif self.itemsize == 2:\n return lc.Type.int(16)\n elif self.itemsize == 4:\n return lc.Type.int(32)\n else:\n assert self.itemsize == 8, self\n return lc.Type.int(64)\n\n\nclass FloatType(NumericType):\n is_float = True\n kind = FLOAT_KIND\n\n @property\n def comparison_type_list(self):\n return self.subtype_list + [self.itemsize]\n\n def to_llvm(self, context):\n if self.itemsize == 4:\n return lc.Type.float()\n elif self.itemsize == 8:\n return lc.Type.double()\n else:\n assert self.itemsize == 16\n return lc.Type.fp128()\n\n\nclass ComplexType(NumericType):\n is_complex = True\n subtypes = ['base_type']\n kind = COMPLEX_KIND\n\n\nclass Py_ssize_t_Type(IntType):\n is_py_ssize_t = True\n name = 'Py_ssize_t'\n rank = 9\n signed = True\n\n def __init__(self, **kwds):\n super(Py_ssize_t_Type, self).__init__(**kwds)\n self.itemsize = _plat_bits / 8\n\n\nclass NPyIntp(IntType):\n is_numpy_intp = True\n name = 'npy_intp'\n\n def __init__(self, **kwds):\n super(NPyIntp, self).__init__(**kwds)\n import numpy as np\n ctypes_array = np.empty(0).ctypes.strides\n self.itemsize = ctypes.sizeof(ctypes_array._type_)\n\n\nclass CharType(IntType):\n is_char = True\n name = 'char'\n rank = 1\n signed = True\n\n def to_llvm(self, context):\n return lc.Type.int(8)\n\n\nclass CStringType(Type):\n is_c_string = True\n\n def __repr__(self):\n return 'const char *'\n\n def to_llvm(self, context):\n return char.pointer().to_llvm(context)\n\n\nclass VoidType(NamedType):\n is_void = True\n name = 'void'\n\n def to_llvm(self, context):\n return lc.Type.void()\n\n\nclass ObjectType(Type):\n is_object = True\n\n def __repr__(self):\n return 'PyObject *'\n\n\nclass FunctionType(Type):\n subtypes = ['return_type', 'args']\n is_function = True\n is_vararg = False\n\n def to_llvm(self, context):\n return lc.Type.function(self.return_type.to_llvm(context), [\n arg_type.to_llvm(context) for arg_type in self.args], self.\n is_vararg)\n\n def __str__(self):\n args = map(str, self.args)\n if self.is_vararg:\n args.append('...')\n return '%s (*)(%s)' % (self.return_type, ', '.join(args))\n\n\nclass VectorType(Type):\n subtypes = ['element_type']\n is_vector = True\n vector_size = None\n\n def __init__(self, element_type, vector_size, **kwds):\n super(VectorType, self).__init__(**kwds)\n assert (element_type.is_int or element_type.is_float\n ) and element_type.itemsize in (4, 8), element_type\n self.element_type = element_type\n self.vector_size = vector_size\n\n def to_llvm(self, context):\n return lc.Type.vector(self.element_type.to_llvm(context), self.\n vector_size)\n\n @property\n def comparison_type_list(self):\n return self.subtype_list + [self.vector_size]\n\n def __str__(self):\n itemsize = self.element_type.itemsize\n if self.element_type.is_float:\n if itemsize == 4:\n return '__m128'\n else:\n return '__m128d'\n elif itemsize == 4:\n return '__m128i'\n else:\n raise NotImplementedError\n\n\n<assignment token>\n<code token>\n",
"<docstring token>\n<assignment token>\n<import token>\n<code token>\n<import token>\n<code token>\n<class token>\n<function token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n\n\nclass CArrayType(Type):\n is_carray = True\n subtypes = ['base_type']\n\n def __init__(self, base_type, size, **kwds):\n super(CArrayType, self).__init__(**kwds)\n self.base_type = base_type\n self.size = size\n\n def __repr__(self):\n return '%s[%d]' % (self.base_type, self.length)\n\n def to_llvm(self, context):\n return llvm.core.Type.array(self.base_type.to_llvm(context), self.size)\n\n\nclass TypeWrapper(Type):\n is_typewrapper = True\n subtypes = ['opaque_type']\n\n def __init__(self, opaque_type, context, **kwds):\n super(TypeWrapper, self).__init__(**kwds)\n self.opaque_type = opaque_type\n self.context = context\n\n def __repr__(self):\n return self.context.declare_type(self)\n\n def __deepcopy__(self, memo):\n return self\n\n\nclass NamedType(Type):\n name = None\n\n def __eq__(self, other):\n return isinstance(other, NamedType) and self.name == other.name\n\n def __repr__(self):\n if self.qualifiers:\n return '%s %s' % (self.name, ' '.join(self.qualifiers))\n return self.name\n\n\nclass BoolType(NamedType):\n is_bool = True\n name = 'bool'\n\n def __repr__(self):\n return 'int %s' % ' '.join(self.qualifiers)\n\n def to_llvm(self, context):\n return int8.to_llvm(context)\n\n\nclass NumericType(NamedType):\n \"\"\"\n Base class for numeric types.\n\n .. attribute:: name\n\n name of the type\n\n .. attribute:: itemsize\n\n sizeof(type)\n\n .. attribute:: rank\n\n ordering of numeric types\n \"\"\"\n is_numeric = True\n\n\nclass IntType(NumericType):\n is_int = True\n is_int_like = True\n name = 'int'\n signed = True\n rank = 4\n itemsize = 4\n kind = INT_KIND\n\n def to_llvm(self, context):\n if self.itemsize == 1:\n return lc.Type.int(8)\n elif self.itemsize == 2:\n return lc.Type.int(16)\n elif self.itemsize == 4:\n return lc.Type.int(32)\n else:\n assert self.itemsize == 8, self\n return lc.Type.int(64)\n\n\nclass FloatType(NumericType):\n is_float = True\n kind = FLOAT_KIND\n\n @property\n def comparison_type_list(self):\n return self.subtype_list + [self.itemsize]\n\n def to_llvm(self, context):\n if self.itemsize == 4:\n return lc.Type.float()\n elif self.itemsize == 8:\n return lc.Type.double()\n else:\n assert self.itemsize == 16\n return lc.Type.fp128()\n\n\nclass ComplexType(NumericType):\n is_complex = True\n subtypes = ['base_type']\n kind = COMPLEX_KIND\n\n\nclass Py_ssize_t_Type(IntType):\n is_py_ssize_t = True\n name = 'Py_ssize_t'\n rank = 9\n signed = True\n\n def __init__(self, **kwds):\n super(Py_ssize_t_Type, self).__init__(**kwds)\n self.itemsize = _plat_bits / 8\n\n\nclass NPyIntp(IntType):\n is_numpy_intp = True\n name = 'npy_intp'\n\n def __init__(self, **kwds):\n super(NPyIntp, self).__init__(**kwds)\n import numpy as np\n ctypes_array = np.empty(0).ctypes.strides\n self.itemsize = ctypes.sizeof(ctypes_array._type_)\n\n\nclass CharType(IntType):\n is_char = True\n name = 'char'\n rank = 1\n signed = True\n\n def to_llvm(self, context):\n return lc.Type.int(8)\n\n\nclass CStringType(Type):\n is_c_string = True\n\n def __repr__(self):\n return 'const char *'\n\n def to_llvm(self, context):\n return char.pointer().to_llvm(context)\n\n\nclass VoidType(NamedType):\n is_void = True\n name = 'void'\n\n def to_llvm(self, context):\n return lc.Type.void()\n\n\nclass ObjectType(Type):\n is_object = True\n\n def __repr__(self):\n return 'PyObject *'\n\n\nclass FunctionType(Type):\n subtypes = ['return_type', 'args']\n is_function = True\n is_vararg = False\n\n def to_llvm(self, context):\n return lc.Type.function(self.return_type.to_llvm(context), [\n arg_type.to_llvm(context) for arg_type in self.args], self.\n is_vararg)\n\n def __str__(self):\n args = map(str, self.args)\n if self.is_vararg:\n args.append('...')\n return '%s (*)(%s)' % (self.return_type, ', '.join(args))\n\n\nclass VectorType(Type):\n subtypes = ['element_type']\n is_vector = True\n vector_size = None\n\n def __init__(self, element_type, vector_size, **kwds):\n super(VectorType, self).__init__(**kwds)\n assert (element_type.is_int or element_type.is_float\n ) and element_type.itemsize in (4, 8), element_type\n self.element_type = element_type\n self.vector_size = vector_size\n\n def to_llvm(self, context):\n return lc.Type.vector(self.element_type.to_llvm(context), self.\n vector_size)\n\n @property\n def comparison_type_list(self):\n return self.subtype_list + [self.vector_size]\n\n def __str__(self):\n itemsize = self.element_type.itemsize\n if self.element_type.is_float:\n if itemsize == 4:\n return '__m128'\n else:\n return '__m128d'\n elif itemsize == 4:\n return '__m128i'\n else:\n raise NotImplementedError\n\n\n<assignment token>\n<code token>\n",
"<docstring token>\n<assignment token>\n<import token>\n<code token>\n<import token>\n<code token>\n<class token>\n<function token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n\n\nclass CArrayType(Type):\n <assignment token>\n <assignment token>\n\n def __init__(self, base_type, size, **kwds):\n super(CArrayType, self).__init__(**kwds)\n self.base_type = base_type\n self.size = size\n\n def __repr__(self):\n return '%s[%d]' % (self.base_type, self.length)\n\n def to_llvm(self, context):\n return llvm.core.Type.array(self.base_type.to_llvm(context), self.size)\n\n\nclass TypeWrapper(Type):\n is_typewrapper = True\n subtypes = ['opaque_type']\n\n def __init__(self, opaque_type, context, **kwds):\n super(TypeWrapper, self).__init__(**kwds)\n self.opaque_type = opaque_type\n self.context = context\n\n def __repr__(self):\n return self.context.declare_type(self)\n\n def __deepcopy__(self, memo):\n return self\n\n\nclass NamedType(Type):\n name = None\n\n def __eq__(self, other):\n return isinstance(other, NamedType) and self.name == other.name\n\n def __repr__(self):\n if self.qualifiers:\n return '%s %s' % (self.name, ' '.join(self.qualifiers))\n return self.name\n\n\nclass BoolType(NamedType):\n is_bool = True\n name = 'bool'\n\n def __repr__(self):\n return 'int %s' % ' '.join(self.qualifiers)\n\n def to_llvm(self, context):\n return int8.to_llvm(context)\n\n\nclass NumericType(NamedType):\n \"\"\"\n Base class for numeric types.\n\n .. attribute:: name\n\n name of the type\n\n .. attribute:: itemsize\n\n sizeof(type)\n\n .. attribute:: rank\n\n ordering of numeric types\n \"\"\"\n is_numeric = True\n\n\nclass IntType(NumericType):\n is_int = True\n is_int_like = True\n name = 'int'\n signed = True\n rank = 4\n itemsize = 4\n kind = INT_KIND\n\n def to_llvm(self, context):\n if self.itemsize == 1:\n return lc.Type.int(8)\n elif self.itemsize == 2:\n return lc.Type.int(16)\n elif self.itemsize == 4:\n return lc.Type.int(32)\n else:\n assert self.itemsize == 8, self\n return lc.Type.int(64)\n\n\nclass FloatType(NumericType):\n is_float = True\n kind = FLOAT_KIND\n\n @property\n def comparison_type_list(self):\n return self.subtype_list + [self.itemsize]\n\n def to_llvm(self, context):\n if self.itemsize == 4:\n return lc.Type.float()\n elif self.itemsize == 8:\n return lc.Type.double()\n else:\n assert self.itemsize == 16\n return lc.Type.fp128()\n\n\nclass ComplexType(NumericType):\n is_complex = True\n subtypes = ['base_type']\n kind = COMPLEX_KIND\n\n\nclass Py_ssize_t_Type(IntType):\n is_py_ssize_t = True\n name = 'Py_ssize_t'\n rank = 9\n signed = True\n\n def __init__(self, **kwds):\n super(Py_ssize_t_Type, self).__init__(**kwds)\n self.itemsize = _plat_bits / 8\n\n\nclass NPyIntp(IntType):\n is_numpy_intp = True\n name = 'npy_intp'\n\n def __init__(self, **kwds):\n super(NPyIntp, self).__init__(**kwds)\n import numpy as np\n ctypes_array = np.empty(0).ctypes.strides\n self.itemsize = ctypes.sizeof(ctypes_array._type_)\n\n\nclass CharType(IntType):\n is_char = True\n name = 'char'\n rank = 1\n signed = True\n\n def to_llvm(self, context):\n return lc.Type.int(8)\n\n\nclass CStringType(Type):\n is_c_string = True\n\n def __repr__(self):\n return 'const char *'\n\n def to_llvm(self, context):\n return char.pointer().to_llvm(context)\n\n\nclass VoidType(NamedType):\n is_void = True\n name = 'void'\n\n def to_llvm(self, context):\n return lc.Type.void()\n\n\nclass ObjectType(Type):\n is_object = True\n\n def __repr__(self):\n return 'PyObject *'\n\n\nclass FunctionType(Type):\n subtypes = ['return_type', 'args']\n is_function = True\n is_vararg = False\n\n def to_llvm(self, context):\n return lc.Type.function(self.return_type.to_llvm(context), [\n arg_type.to_llvm(context) for arg_type in self.args], self.\n is_vararg)\n\n def __str__(self):\n args = map(str, self.args)\n if self.is_vararg:\n args.append('...')\n return '%s (*)(%s)' % (self.return_type, ', '.join(args))\n\n\nclass VectorType(Type):\n subtypes = ['element_type']\n is_vector = True\n vector_size = None\n\n def __init__(self, element_type, vector_size, **kwds):\n super(VectorType, self).__init__(**kwds)\n assert (element_type.is_int or element_type.is_float\n ) and element_type.itemsize in (4, 8), element_type\n self.element_type = element_type\n self.vector_size = vector_size\n\n def to_llvm(self, context):\n return lc.Type.vector(self.element_type.to_llvm(context), self.\n vector_size)\n\n @property\n def comparison_type_list(self):\n return self.subtype_list + [self.vector_size]\n\n def __str__(self):\n itemsize = self.element_type.itemsize\n if self.element_type.is_float:\n if itemsize == 4:\n return '__m128'\n else:\n return '__m128d'\n elif itemsize == 4:\n return '__m128i'\n else:\n raise NotImplementedError\n\n\n<assignment token>\n<code token>\n",
"<docstring token>\n<assignment token>\n<import token>\n<code token>\n<import token>\n<code token>\n<class token>\n<function token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n\n\nclass CArrayType(Type):\n <assignment token>\n <assignment token>\n\n def __init__(self, base_type, size, **kwds):\n super(CArrayType, self).__init__(**kwds)\n self.base_type = base_type\n self.size = size\n\n def __repr__(self):\n return '%s[%d]' % (self.base_type, self.length)\n <function token>\n\n\nclass TypeWrapper(Type):\n is_typewrapper = True\n subtypes = ['opaque_type']\n\n def __init__(self, opaque_type, context, **kwds):\n super(TypeWrapper, self).__init__(**kwds)\n self.opaque_type = opaque_type\n self.context = context\n\n def __repr__(self):\n return self.context.declare_type(self)\n\n def __deepcopy__(self, memo):\n return self\n\n\nclass NamedType(Type):\n name = None\n\n def __eq__(self, other):\n return isinstance(other, NamedType) and self.name == other.name\n\n def __repr__(self):\n if self.qualifiers:\n return '%s %s' % (self.name, ' '.join(self.qualifiers))\n return self.name\n\n\nclass BoolType(NamedType):\n is_bool = True\n name = 'bool'\n\n def __repr__(self):\n return 'int %s' % ' '.join(self.qualifiers)\n\n def to_llvm(self, context):\n return int8.to_llvm(context)\n\n\nclass NumericType(NamedType):\n \"\"\"\n Base class for numeric types.\n\n .. attribute:: name\n\n name of the type\n\n .. attribute:: itemsize\n\n sizeof(type)\n\n .. attribute:: rank\n\n ordering of numeric types\n \"\"\"\n is_numeric = True\n\n\nclass IntType(NumericType):\n is_int = True\n is_int_like = True\n name = 'int'\n signed = True\n rank = 4\n itemsize = 4\n kind = INT_KIND\n\n def to_llvm(self, context):\n if self.itemsize == 1:\n return lc.Type.int(8)\n elif self.itemsize == 2:\n return lc.Type.int(16)\n elif self.itemsize == 4:\n return lc.Type.int(32)\n else:\n assert self.itemsize == 8, self\n return lc.Type.int(64)\n\n\nclass FloatType(NumericType):\n is_float = True\n kind = FLOAT_KIND\n\n @property\n def comparison_type_list(self):\n return self.subtype_list + [self.itemsize]\n\n def to_llvm(self, context):\n if self.itemsize == 4:\n return lc.Type.float()\n elif self.itemsize == 8:\n return lc.Type.double()\n else:\n assert self.itemsize == 16\n return lc.Type.fp128()\n\n\nclass ComplexType(NumericType):\n is_complex = True\n subtypes = ['base_type']\n kind = COMPLEX_KIND\n\n\nclass Py_ssize_t_Type(IntType):\n is_py_ssize_t = True\n name = 'Py_ssize_t'\n rank = 9\n signed = True\n\n def __init__(self, **kwds):\n super(Py_ssize_t_Type, self).__init__(**kwds)\n self.itemsize = _plat_bits / 8\n\n\nclass NPyIntp(IntType):\n is_numpy_intp = True\n name = 'npy_intp'\n\n def __init__(self, **kwds):\n super(NPyIntp, self).__init__(**kwds)\n import numpy as np\n ctypes_array = np.empty(0).ctypes.strides\n self.itemsize = ctypes.sizeof(ctypes_array._type_)\n\n\nclass CharType(IntType):\n is_char = True\n name = 'char'\n rank = 1\n signed = True\n\n def to_llvm(self, context):\n return lc.Type.int(8)\n\n\nclass CStringType(Type):\n is_c_string = True\n\n def __repr__(self):\n return 'const char *'\n\n def to_llvm(self, context):\n return char.pointer().to_llvm(context)\n\n\nclass VoidType(NamedType):\n is_void = True\n name = 'void'\n\n def to_llvm(self, context):\n return lc.Type.void()\n\n\nclass ObjectType(Type):\n is_object = True\n\n def __repr__(self):\n return 'PyObject *'\n\n\nclass FunctionType(Type):\n subtypes = ['return_type', 'args']\n is_function = True\n is_vararg = False\n\n def to_llvm(self, context):\n return lc.Type.function(self.return_type.to_llvm(context), [\n arg_type.to_llvm(context) for arg_type in self.args], self.\n is_vararg)\n\n def __str__(self):\n args = map(str, self.args)\n if self.is_vararg:\n args.append('...')\n return '%s (*)(%s)' % (self.return_type, ', '.join(args))\n\n\nclass VectorType(Type):\n subtypes = ['element_type']\n is_vector = True\n vector_size = None\n\n def __init__(self, element_type, vector_size, **kwds):\n super(VectorType, self).__init__(**kwds)\n assert (element_type.is_int or element_type.is_float\n ) and element_type.itemsize in (4, 8), element_type\n self.element_type = element_type\n self.vector_size = vector_size\n\n def to_llvm(self, context):\n return lc.Type.vector(self.element_type.to_llvm(context), self.\n vector_size)\n\n @property\n def comparison_type_list(self):\n return self.subtype_list + [self.vector_size]\n\n def __str__(self):\n itemsize = self.element_type.itemsize\n if self.element_type.is_float:\n if itemsize == 4:\n return '__m128'\n else:\n return '__m128d'\n elif itemsize == 4:\n return '__m128i'\n else:\n raise NotImplementedError\n\n\n<assignment token>\n<code token>\n",
"<docstring token>\n<assignment token>\n<import token>\n<code token>\n<import token>\n<code token>\n<class token>\n<function token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n\n\nclass CArrayType(Type):\n <assignment token>\n <assignment token>\n <function token>\n\n def __repr__(self):\n return '%s[%d]' % (self.base_type, self.length)\n <function token>\n\n\nclass TypeWrapper(Type):\n is_typewrapper = True\n subtypes = ['opaque_type']\n\n def __init__(self, opaque_type, context, **kwds):\n super(TypeWrapper, self).__init__(**kwds)\n self.opaque_type = opaque_type\n self.context = context\n\n def __repr__(self):\n return self.context.declare_type(self)\n\n def __deepcopy__(self, memo):\n return self\n\n\nclass NamedType(Type):\n name = None\n\n def __eq__(self, other):\n return isinstance(other, NamedType) and self.name == other.name\n\n def __repr__(self):\n if self.qualifiers:\n return '%s %s' % (self.name, ' '.join(self.qualifiers))\n return self.name\n\n\nclass BoolType(NamedType):\n is_bool = True\n name = 'bool'\n\n def __repr__(self):\n return 'int %s' % ' '.join(self.qualifiers)\n\n def to_llvm(self, context):\n return int8.to_llvm(context)\n\n\nclass NumericType(NamedType):\n \"\"\"\n Base class for numeric types.\n\n .. attribute:: name\n\n name of the type\n\n .. attribute:: itemsize\n\n sizeof(type)\n\n .. attribute:: rank\n\n ordering of numeric types\n \"\"\"\n is_numeric = True\n\n\nclass IntType(NumericType):\n is_int = True\n is_int_like = True\n name = 'int'\n signed = True\n rank = 4\n itemsize = 4\n kind = INT_KIND\n\n def to_llvm(self, context):\n if self.itemsize == 1:\n return lc.Type.int(8)\n elif self.itemsize == 2:\n return lc.Type.int(16)\n elif self.itemsize == 4:\n return lc.Type.int(32)\n else:\n assert self.itemsize == 8, self\n return lc.Type.int(64)\n\n\nclass FloatType(NumericType):\n is_float = True\n kind = FLOAT_KIND\n\n @property\n def comparison_type_list(self):\n return self.subtype_list + [self.itemsize]\n\n def to_llvm(self, context):\n if self.itemsize == 4:\n return lc.Type.float()\n elif self.itemsize == 8:\n return lc.Type.double()\n else:\n assert self.itemsize == 16\n return lc.Type.fp128()\n\n\nclass ComplexType(NumericType):\n is_complex = True\n subtypes = ['base_type']\n kind = COMPLEX_KIND\n\n\nclass Py_ssize_t_Type(IntType):\n is_py_ssize_t = True\n name = 'Py_ssize_t'\n rank = 9\n signed = True\n\n def __init__(self, **kwds):\n super(Py_ssize_t_Type, self).__init__(**kwds)\n self.itemsize = _plat_bits / 8\n\n\nclass NPyIntp(IntType):\n is_numpy_intp = True\n name = 'npy_intp'\n\n def __init__(self, **kwds):\n super(NPyIntp, self).__init__(**kwds)\n import numpy as np\n ctypes_array = np.empty(0).ctypes.strides\n self.itemsize = ctypes.sizeof(ctypes_array._type_)\n\n\nclass CharType(IntType):\n is_char = True\n name = 'char'\n rank = 1\n signed = True\n\n def to_llvm(self, context):\n return lc.Type.int(8)\n\n\nclass CStringType(Type):\n is_c_string = True\n\n def __repr__(self):\n return 'const char *'\n\n def to_llvm(self, context):\n return char.pointer().to_llvm(context)\n\n\nclass VoidType(NamedType):\n is_void = True\n name = 'void'\n\n def to_llvm(self, context):\n return lc.Type.void()\n\n\nclass ObjectType(Type):\n is_object = True\n\n def __repr__(self):\n return 'PyObject *'\n\n\nclass FunctionType(Type):\n subtypes = ['return_type', 'args']\n is_function = True\n is_vararg = False\n\n def to_llvm(self, context):\n return lc.Type.function(self.return_type.to_llvm(context), [\n arg_type.to_llvm(context) for arg_type in self.args], self.\n is_vararg)\n\n def __str__(self):\n args = map(str, self.args)\n if self.is_vararg:\n args.append('...')\n return '%s (*)(%s)' % (self.return_type, ', '.join(args))\n\n\nclass VectorType(Type):\n subtypes = ['element_type']\n is_vector = True\n vector_size = None\n\n def __init__(self, element_type, vector_size, **kwds):\n super(VectorType, self).__init__(**kwds)\n assert (element_type.is_int or element_type.is_float\n ) and element_type.itemsize in (4, 8), element_type\n self.element_type = element_type\n self.vector_size = vector_size\n\n def to_llvm(self, context):\n return lc.Type.vector(self.element_type.to_llvm(context), self.\n vector_size)\n\n @property\n def comparison_type_list(self):\n return self.subtype_list + [self.vector_size]\n\n def __str__(self):\n itemsize = self.element_type.itemsize\n if self.element_type.is_float:\n if itemsize == 4:\n return '__m128'\n else:\n return '__m128d'\n elif itemsize == 4:\n return '__m128i'\n else:\n raise NotImplementedError\n\n\n<assignment token>\n<code token>\n",
"<docstring token>\n<assignment token>\n<import token>\n<code token>\n<import token>\n<code token>\n<class token>\n<function token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n\n\nclass CArrayType(Type):\n <assignment token>\n <assignment token>\n <function token>\n <function token>\n <function token>\n\n\nclass TypeWrapper(Type):\n is_typewrapper = True\n subtypes = ['opaque_type']\n\n def __init__(self, opaque_type, context, **kwds):\n super(TypeWrapper, self).__init__(**kwds)\n self.opaque_type = opaque_type\n self.context = context\n\n def __repr__(self):\n return self.context.declare_type(self)\n\n def __deepcopy__(self, memo):\n return self\n\n\nclass NamedType(Type):\n name = None\n\n def __eq__(self, other):\n return isinstance(other, NamedType) and self.name == other.name\n\n def __repr__(self):\n if self.qualifiers:\n return '%s %s' % (self.name, ' '.join(self.qualifiers))\n return self.name\n\n\nclass BoolType(NamedType):\n is_bool = True\n name = 'bool'\n\n def __repr__(self):\n return 'int %s' % ' '.join(self.qualifiers)\n\n def to_llvm(self, context):\n return int8.to_llvm(context)\n\n\nclass NumericType(NamedType):\n \"\"\"\n Base class for numeric types.\n\n .. attribute:: name\n\n name of the type\n\n .. attribute:: itemsize\n\n sizeof(type)\n\n .. attribute:: rank\n\n ordering of numeric types\n \"\"\"\n is_numeric = True\n\n\nclass IntType(NumericType):\n is_int = True\n is_int_like = True\n name = 'int'\n signed = True\n rank = 4\n itemsize = 4\n kind = INT_KIND\n\n def to_llvm(self, context):\n if self.itemsize == 1:\n return lc.Type.int(8)\n elif self.itemsize == 2:\n return lc.Type.int(16)\n elif self.itemsize == 4:\n return lc.Type.int(32)\n else:\n assert self.itemsize == 8, self\n return lc.Type.int(64)\n\n\nclass FloatType(NumericType):\n is_float = True\n kind = FLOAT_KIND\n\n @property\n def comparison_type_list(self):\n return self.subtype_list + [self.itemsize]\n\n def to_llvm(self, context):\n if self.itemsize == 4:\n return lc.Type.float()\n elif self.itemsize == 8:\n return lc.Type.double()\n else:\n assert self.itemsize == 16\n return lc.Type.fp128()\n\n\nclass ComplexType(NumericType):\n is_complex = True\n subtypes = ['base_type']\n kind = COMPLEX_KIND\n\n\nclass Py_ssize_t_Type(IntType):\n is_py_ssize_t = True\n name = 'Py_ssize_t'\n rank = 9\n signed = True\n\n def __init__(self, **kwds):\n super(Py_ssize_t_Type, self).__init__(**kwds)\n self.itemsize = _plat_bits / 8\n\n\nclass NPyIntp(IntType):\n is_numpy_intp = True\n name = 'npy_intp'\n\n def __init__(self, **kwds):\n super(NPyIntp, self).__init__(**kwds)\n import numpy as np\n ctypes_array = np.empty(0).ctypes.strides\n self.itemsize = ctypes.sizeof(ctypes_array._type_)\n\n\nclass CharType(IntType):\n is_char = True\n name = 'char'\n rank = 1\n signed = True\n\n def to_llvm(self, context):\n return lc.Type.int(8)\n\n\nclass CStringType(Type):\n is_c_string = True\n\n def __repr__(self):\n return 'const char *'\n\n def to_llvm(self, context):\n return char.pointer().to_llvm(context)\n\n\nclass VoidType(NamedType):\n is_void = True\n name = 'void'\n\n def to_llvm(self, context):\n return lc.Type.void()\n\n\nclass ObjectType(Type):\n is_object = True\n\n def __repr__(self):\n return 'PyObject *'\n\n\nclass FunctionType(Type):\n subtypes = ['return_type', 'args']\n is_function = True\n is_vararg = False\n\n def to_llvm(self, context):\n return lc.Type.function(self.return_type.to_llvm(context), [\n arg_type.to_llvm(context) for arg_type in self.args], self.\n is_vararg)\n\n def __str__(self):\n args = map(str, self.args)\n if self.is_vararg:\n args.append('...')\n return '%s (*)(%s)' % (self.return_type, ', '.join(args))\n\n\nclass VectorType(Type):\n subtypes = ['element_type']\n is_vector = True\n vector_size = None\n\n def __init__(self, element_type, vector_size, **kwds):\n super(VectorType, self).__init__(**kwds)\n assert (element_type.is_int or element_type.is_float\n ) and element_type.itemsize in (4, 8), element_type\n self.element_type = element_type\n self.vector_size = vector_size\n\n def to_llvm(self, context):\n return lc.Type.vector(self.element_type.to_llvm(context), self.\n vector_size)\n\n @property\n def comparison_type_list(self):\n return self.subtype_list + [self.vector_size]\n\n def __str__(self):\n itemsize = self.element_type.itemsize\n if self.element_type.is_float:\n if itemsize == 4:\n return '__m128'\n else:\n return '__m128d'\n elif itemsize == 4:\n return '__m128i'\n else:\n raise NotImplementedError\n\n\n<assignment token>\n<code token>\n",
"<docstring token>\n<assignment token>\n<import token>\n<code token>\n<import token>\n<code token>\n<class token>\n<function token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass TypeWrapper(Type):\n is_typewrapper = True\n subtypes = ['opaque_type']\n\n def __init__(self, opaque_type, context, **kwds):\n super(TypeWrapper, self).__init__(**kwds)\n self.opaque_type = opaque_type\n self.context = context\n\n def __repr__(self):\n return self.context.declare_type(self)\n\n def __deepcopy__(self, memo):\n return self\n\n\nclass NamedType(Type):\n name = None\n\n def __eq__(self, other):\n return isinstance(other, NamedType) and self.name == other.name\n\n def __repr__(self):\n if self.qualifiers:\n return '%s %s' % (self.name, ' '.join(self.qualifiers))\n return self.name\n\n\nclass BoolType(NamedType):\n is_bool = True\n name = 'bool'\n\n def __repr__(self):\n return 'int %s' % ' '.join(self.qualifiers)\n\n def to_llvm(self, context):\n return int8.to_llvm(context)\n\n\nclass NumericType(NamedType):\n \"\"\"\n Base class for numeric types.\n\n .. attribute:: name\n\n name of the type\n\n .. attribute:: itemsize\n\n sizeof(type)\n\n .. attribute:: rank\n\n ordering of numeric types\n \"\"\"\n is_numeric = True\n\n\nclass IntType(NumericType):\n is_int = True\n is_int_like = True\n name = 'int'\n signed = True\n rank = 4\n itemsize = 4\n kind = INT_KIND\n\n def to_llvm(self, context):\n if self.itemsize == 1:\n return lc.Type.int(8)\n elif self.itemsize == 2:\n return lc.Type.int(16)\n elif self.itemsize == 4:\n return lc.Type.int(32)\n else:\n assert self.itemsize == 8, self\n return lc.Type.int(64)\n\n\nclass FloatType(NumericType):\n is_float = True\n kind = FLOAT_KIND\n\n @property\n def comparison_type_list(self):\n return self.subtype_list + [self.itemsize]\n\n def to_llvm(self, context):\n if self.itemsize == 4:\n return lc.Type.float()\n elif self.itemsize == 8:\n return lc.Type.double()\n else:\n assert self.itemsize == 16\n return lc.Type.fp128()\n\n\nclass ComplexType(NumericType):\n is_complex = True\n subtypes = ['base_type']\n kind = COMPLEX_KIND\n\n\nclass Py_ssize_t_Type(IntType):\n is_py_ssize_t = True\n name = 'Py_ssize_t'\n rank = 9\n signed = True\n\n def __init__(self, **kwds):\n super(Py_ssize_t_Type, self).__init__(**kwds)\n self.itemsize = _plat_bits / 8\n\n\nclass NPyIntp(IntType):\n is_numpy_intp = True\n name = 'npy_intp'\n\n def __init__(self, **kwds):\n super(NPyIntp, self).__init__(**kwds)\n import numpy as np\n ctypes_array = np.empty(0).ctypes.strides\n self.itemsize = ctypes.sizeof(ctypes_array._type_)\n\n\nclass CharType(IntType):\n is_char = True\n name = 'char'\n rank = 1\n signed = True\n\n def to_llvm(self, context):\n return lc.Type.int(8)\n\n\nclass CStringType(Type):\n is_c_string = True\n\n def __repr__(self):\n return 'const char *'\n\n def to_llvm(self, context):\n return char.pointer().to_llvm(context)\n\n\nclass VoidType(NamedType):\n is_void = True\n name = 'void'\n\n def to_llvm(self, context):\n return lc.Type.void()\n\n\nclass ObjectType(Type):\n is_object = True\n\n def __repr__(self):\n return 'PyObject *'\n\n\nclass FunctionType(Type):\n subtypes = ['return_type', 'args']\n is_function = True\n is_vararg = False\n\n def to_llvm(self, context):\n return lc.Type.function(self.return_type.to_llvm(context), [\n arg_type.to_llvm(context) for arg_type in self.args], self.\n is_vararg)\n\n def __str__(self):\n args = map(str, self.args)\n if self.is_vararg:\n args.append('...')\n return '%s (*)(%s)' % (self.return_type, ', '.join(args))\n\n\nclass VectorType(Type):\n subtypes = ['element_type']\n is_vector = True\n vector_size = None\n\n def __init__(self, element_type, vector_size, **kwds):\n super(VectorType, self).__init__(**kwds)\n assert (element_type.is_int or element_type.is_float\n ) and element_type.itemsize in (4, 8), element_type\n self.element_type = element_type\n self.vector_size = vector_size\n\n def to_llvm(self, context):\n return lc.Type.vector(self.element_type.to_llvm(context), self.\n vector_size)\n\n @property\n def comparison_type_list(self):\n return self.subtype_list + [self.vector_size]\n\n def __str__(self):\n itemsize = self.element_type.itemsize\n if self.element_type.is_float:\n if itemsize == 4:\n return '__m128'\n else:\n return '__m128d'\n elif itemsize == 4:\n return '__m128i'\n else:\n raise NotImplementedError\n\n\n<assignment token>\n<code token>\n",
"<docstring token>\n<assignment token>\n<import token>\n<code token>\n<import token>\n<code token>\n<class token>\n<function token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass TypeWrapper(Type):\n <assignment token>\n <assignment token>\n\n def __init__(self, opaque_type, context, **kwds):\n super(TypeWrapper, self).__init__(**kwds)\n self.opaque_type = opaque_type\n self.context = context\n\n def __repr__(self):\n return self.context.declare_type(self)\n\n def __deepcopy__(self, memo):\n return self\n\n\nclass NamedType(Type):\n name = None\n\n def __eq__(self, other):\n return isinstance(other, NamedType) and self.name == other.name\n\n def __repr__(self):\n if self.qualifiers:\n return '%s %s' % (self.name, ' '.join(self.qualifiers))\n return self.name\n\n\nclass BoolType(NamedType):\n is_bool = True\n name = 'bool'\n\n def __repr__(self):\n return 'int %s' % ' '.join(self.qualifiers)\n\n def to_llvm(self, context):\n return int8.to_llvm(context)\n\n\nclass NumericType(NamedType):\n \"\"\"\n Base class for numeric types.\n\n .. attribute:: name\n\n name of the type\n\n .. attribute:: itemsize\n\n sizeof(type)\n\n .. attribute:: rank\n\n ordering of numeric types\n \"\"\"\n is_numeric = True\n\n\nclass IntType(NumericType):\n is_int = True\n is_int_like = True\n name = 'int'\n signed = True\n rank = 4\n itemsize = 4\n kind = INT_KIND\n\n def to_llvm(self, context):\n if self.itemsize == 1:\n return lc.Type.int(8)\n elif self.itemsize == 2:\n return lc.Type.int(16)\n elif self.itemsize == 4:\n return lc.Type.int(32)\n else:\n assert self.itemsize == 8, self\n return lc.Type.int(64)\n\n\nclass FloatType(NumericType):\n is_float = True\n kind = FLOAT_KIND\n\n @property\n def comparison_type_list(self):\n return self.subtype_list + [self.itemsize]\n\n def to_llvm(self, context):\n if self.itemsize == 4:\n return lc.Type.float()\n elif self.itemsize == 8:\n return lc.Type.double()\n else:\n assert self.itemsize == 16\n return lc.Type.fp128()\n\n\nclass ComplexType(NumericType):\n is_complex = True\n subtypes = ['base_type']\n kind = COMPLEX_KIND\n\n\nclass Py_ssize_t_Type(IntType):\n is_py_ssize_t = True\n name = 'Py_ssize_t'\n rank = 9\n signed = True\n\n def __init__(self, **kwds):\n super(Py_ssize_t_Type, self).__init__(**kwds)\n self.itemsize = _plat_bits / 8\n\n\nclass NPyIntp(IntType):\n is_numpy_intp = True\n name = 'npy_intp'\n\n def __init__(self, **kwds):\n super(NPyIntp, self).__init__(**kwds)\n import numpy as np\n ctypes_array = np.empty(0).ctypes.strides\n self.itemsize = ctypes.sizeof(ctypes_array._type_)\n\n\nclass CharType(IntType):\n is_char = True\n name = 'char'\n rank = 1\n signed = True\n\n def to_llvm(self, context):\n return lc.Type.int(8)\n\n\nclass CStringType(Type):\n is_c_string = True\n\n def __repr__(self):\n return 'const char *'\n\n def to_llvm(self, context):\n return char.pointer().to_llvm(context)\n\n\nclass VoidType(NamedType):\n is_void = True\n name = 'void'\n\n def to_llvm(self, context):\n return lc.Type.void()\n\n\nclass ObjectType(Type):\n is_object = True\n\n def __repr__(self):\n return 'PyObject *'\n\n\nclass FunctionType(Type):\n subtypes = ['return_type', 'args']\n is_function = True\n is_vararg = False\n\n def to_llvm(self, context):\n return lc.Type.function(self.return_type.to_llvm(context), [\n arg_type.to_llvm(context) for arg_type in self.args], self.\n is_vararg)\n\n def __str__(self):\n args = map(str, self.args)\n if self.is_vararg:\n args.append('...')\n return '%s (*)(%s)' % (self.return_type, ', '.join(args))\n\n\nclass VectorType(Type):\n subtypes = ['element_type']\n is_vector = True\n vector_size = None\n\n def __init__(self, element_type, vector_size, **kwds):\n super(VectorType, self).__init__(**kwds)\n assert (element_type.is_int or element_type.is_float\n ) and element_type.itemsize in (4, 8), element_type\n self.element_type = element_type\n self.vector_size = vector_size\n\n def to_llvm(self, context):\n return lc.Type.vector(self.element_type.to_llvm(context), self.\n vector_size)\n\n @property\n def comparison_type_list(self):\n return self.subtype_list + [self.vector_size]\n\n def __str__(self):\n itemsize = self.element_type.itemsize\n if self.element_type.is_float:\n if itemsize == 4:\n return '__m128'\n else:\n return '__m128d'\n elif itemsize == 4:\n return '__m128i'\n else:\n raise NotImplementedError\n\n\n<assignment token>\n<code token>\n",
"<docstring token>\n<assignment token>\n<import token>\n<code token>\n<import token>\n<code token>\n<class token>\n<function token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass TypeWrapper(Type):\n <assignment token>\n <assignment token>\n\n def __init__(self, opaque_type, context, **kwds):\n super(TypeWrapper, self).__init__(**kwds)\n self.opaque_type = opaque_type\n self.context = context\n <function token>\n\n def __deepcopy__(self, memo):\n return self\n\n\nclass NamedType(Type):\n name = None\n\n def __eq__(self, other):\n return isinstance(other, NamedType) and self.name == other.name\n\n def __repr__(self):\n if self.qualifiers:\n return '%s %s' % (self.name, ' '.join(self.qualifiers))\n return self.name\n\n\nclass BoolType(NamedType):\n is_bool = True\n name = 'bool'\n\n def __repr__(self):\n return 'int %s' % ' '.join(self.qualifiers)\n\n def to_llvm(self, context):\n return int8.to_llvm(context)\n\n\nclass NumericType(NamedType):\n \"\"\"\n Base class for numeric types.\n\n .. attribute:: name\n\n name of the type\n\n .. attribute:: itemsize\n\n sizeof(type)\n\n .. attribute:: rank\n\n ordering of numeric types\n \"\"\"\n is_numeric = True\n\n\nclass IntType(NumericType):\n is_int = True\n is_int_like = True\n name = 'int'\n signed = True\n rank = 4\n itemsize = 4\n kind = INT_KIND\n\n def to_llvm(self, context):\n if self.itemsize == 1:\n return lc.Type.int(8)\n elif self.itemsize == 2:\n return lc.Type.int(16)\n elif self.itemsize == 4:\n return lc.Type.int(32)\n else:\n assert self.itemsize == 8, self\n return lc.Type.int(64)\n\n\nclass FloatType(NumericType):\n is_float = True\n kind = FLOAT_KIND\n\n @property\n def comparison_type_list(self):\n return self.subtype_list + [self.itemsize]\n\n def to_llvm(self, context):\n if self.itemsize == 4:\n return lc.Type.float()\n elif self.itemsize == 8:\n return lc.Type.double()\n else:\n assert self.itemsize == 16\n return lc.Type.fp128()\n\n\nclass ComplexType(NumericType):\n is_complex = True\n subtypes = ['base_type']\n kind = COMPLEX_KIND\n\n\nclass Py_ssize_t_Type(IntType):\n is_py_ssize_t = True\n name = 'Py_ssize_t'\n rank = 9\n signed = True\n\n def __init__(self, **kwds):\n super(Py_ssize_t_Type, self).__init__(**kwds)\n self.itemsize = _plat_bits / 8\n\n\nclass NPyIntp(IntType):\n is_numpy_intp = True\n name = 'npy_intp'\n\n def __init__(self, **kwds):\n super(NPyIntp, self).__init__(**kwds)\n import numpy as np\n ctypes_array = np.empty(0).ctypes.strides\n self.itemsize = ctypes.sizeof(ctypes_array._type_)\n\n\nclass CharType(IntType):\n is_char = True\n name = 'char'\n rank = 1\n signed = True\n\n def to_llvm(self, context):\n return lc.Type.int(8)\n\n\nclass CStringType(Type):\n is_c_string = True\n\n def __repr__(self):\n return 'const char *'\n\n def to_llvm(self, context):\n return char.pointer().to_llvm(context)\n\n\nclass VoidType(NamedType):\n is_void = True\n name = 'void'\n\n def to_llvm(self, context):\n return lc.Type.void()\n\n\nclass ObjectType(Type):\n is_object = True\n\n def __repr__(self):\n return 'PyObject *'\n\n\nclass FunctionType(Type):\n subtypes = ['return_type', 'args']\n is_function = True\n is_vararg = False\n\n def to_llvm(self, context):\n return lc.Type.function(self.return_type.to_llvm(context), [\n arg_type.to_llvm(context) for arg_type in self.args], self.\n is_vararg)\n\n def __str__(self):\n args = map(str, self.args)\n if self.is_vararg:\n args.append('...')\n return '%s (*)(%s)' % (self.return_type, ', '.join(args))\n\n\nclass VectorType(Type):\n subtypes = ['element_type']\n is_vector = True\n vector_size = None\n\n def __init__(self, element_type, vector_size, **kwds):\n super(VectorType, self).__init__(**kwds)\n assert (element_type.is_int or element_type.is_float\n ) and element_type.itemsize in (4, 8), element_type\n self.element_type = element_type\n self.vector_size = vector_size\n\n def to_llvm(self, context):\n return lc.Type.vector(self.element_type.to_llvm(context), self.\n vector_size)\n\n @property\n def comparison_type_list(self):\n return self.subtype_list + [self.vector_size]\n\n def __str__(self):\n itemsize = self.element_type.itemsize\n if self.element_type.is_float:\n if itemsize == 4:\n return '__m128'\n else:\n return '__m128d'\n elif itemsize == 4:\n return '__m128i'\n else:\n raise NotImplementedError\n\n\n<assignment token>\n<code token>\n",
"<docstring token>\n<assignment token>\n<import token>\n<code token>\n<import token>\n<code token>\n<class token>\n<function token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass TypeWrapper(Type):\n <assignment token>\n <assignment token>\n\n def __init__(self, opaque_type, context, **kwds):\n super(TypeWrapper, self).__init__(**kwds)\n self.opaque_type = opaque_type\n self.context = context\n <function token>\n <function token>\n\n\nclass NamedType(Type):\n name = None\n\n def __eq__(self, other):\n return isinstance(other, NamedType) and self.name == other.name\n\n def __repr__(self):\n if self.qualifiers:\n return '%s %s' % (self.name, ' '.join(self.qualifiers))\n return self.name\n\n\nclass BoolType(NamedType):\n is_bool = True\n name = 'bool'\n\n def __repr__(self):\n return 'int %s' % ' '.join(self.qualifiers)\n\n def to_llvm(self, context):\n return int8.to_llvm(context)\n\n\nclass NumericType(NamedType):\n \"\"\"\n Base class for numeric types.\n\n .. attribute:: name\n\n name of the type\n\n .. attribute:: itemsize\n\n sizeof(type)\n\n .. attribute:: rank\n\n ordering of numeric types\n \"\"\"\n is_numeric = True\n\n\nclass IntType(NumericType):\n is_int = True\n is_int_like = True\n name = 'int'\n signed = True\n rank = 4\n itemsize = 4\n kind = INT_KIND\n\n def to_llvm(self, context):\n if self.itemsize == 1:\n return lc.Type.int(8)\n elif self.itemsize == 2:\n return lc.Type.int(16)\n elif self.itemsize == 4:\n return lc.Type.int(32)\n else:\n assert self.itemsize == 8, self\n return lc.Type.int(64)\n\n\nclass FloatType(NumericType):\n is_float = True\n kind = FLOAT_KIND\n\n @property\n def comparison_type_list(self):\n return self.subtype_list + [self.itemsize]\n\n def to_llvm(self, context):\n if self.itemsize == 4:\n return lc.Type.float()\n elif self.itemsize == 8:\n return lc.Type.double()\n else:\n assert self.itemsize == 16\n return lc.Type.fp128()\n\n\nclass ComplexType(NumericType):\n is_complex = True\n subtypes = ['base_type']\n kind = COMPLEX_KIND\n\n\nclass Py_ssize_t_Type(IntType):\n is_py_ssize_t = True\n name = 'Py_ssize_t'\n rank = 9\n signed = True\n\n def __init__(self, **kwds):\n super(Py_ssize_t_Type, self).__init__(**kwds)\n self.itemsize = _plat_bits / 8\n\n\nclass NPyIntp(IntType):\n is_numpy_intp = True\n name = 'npy_intp'\n\n def __init__(self, **kwds):\n super(NPyIntp, self).__init__(**kwds)\n import numpy as np\n ctypes_array = np.empty(0).ctypes.strides\n self.itemsize = ctypes.sizeof(ctypes_array._type_)\n\n\nclass CharType(IntType):\n is_char = True\n name = 'char'\n rank = 1\n signed = True\n\n def to_llvm(self, context):\n return lc.Type.int(8)\n\n\nclass CStringType(Type):\n is_c_string = True\n\n def __repr__(self):\n return 'const char *'\n\n def to_llvm(self, context):\n return char.pointer().to_llvm(context)\n\n\nclass VoidType(NamedType):\n is_void = True\n name = 'void'\n\n def to_llvm(self, context):\n return lc.Type.void()\n\n\nclass ObjectType(Type):\n is_object = True\n\n def __repr__(self):\n return 'PyObject *'\n\n\nclass FunctionType(Type):\n subtypes = ['return_type', 'args']\n is_function = True\n is_vararg = False\n\n def to_llvm(self, context):\n return lc.Type.function(self.return_type.to_llvm(context), [\n arg_type.to_llvm(context) for arg_type in self.args], self.\n is_vararg)\n\n def __str__(self):\n args = map(str, self.args)\n if self.is_vararg:\n args.append('...')\n return '%s (*)(%s)' % (self.return_type, ', '.join(args))\n\n\nclass VectorType(Type):\n subtypes = ['element_type']\n is_vector = True\n vector_size = None\n\n def __init__(self, element_type, vector_size, **kwds):\n super(VectorType, self).__init__(**kwds)\n assert (element_type.is_int or element_type.is_float\n ) and element_type.itemsize in (4, 8), element_type\n self.element_type = element_type\n self.vector_size = vector_size\n\n def to_llvm(self, context):\n return lc.Type.vector(self.element_type.to_llvm(context), self.\n vector_size)\n\n @property\n def comparison_type_list(self):\n return self.subtype_list + [self.vector_size]\n\n def __str__(self):\n itemsize = self.element_type.itemsize\n if self.element_type.is_float:\n if itemsize == 4:\n return '__m128'\n else:\n return '__m128d'\n elif itemsize == 4:\n return '__m128i'\n else:\n raise NotImplementedError\n\n\n<assignment token>\n<code token>\n",
"<docstring token>\n<assignment token>\n<import token>\n<code token>\n<import token>\n<code token>\n<class token>\n<function token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass TypeWrapper(Type):\n <assignment token>\n <assignment token>\n <function token>\n <function token>\n <function token>\n\n\nclass NamedType(Type):\n name = None\n\n def __eq__(self, other):\n return isinstance(other, NamedType) and self.name == other.name\n\n def __repr__(self):\n if self.qualifiers:\n return '%s %s' % (self.name, ' '.join(self.qualifiers))\n return self.name\n\n\nclass BoolType(NamedType):\n is_bool = True\n name = 'bool'\n\n def __repr__(self):\n return 'int %s' % ' '.join(self.qualifiers)\n\n def to_llvm(self, context):\n return int8.to_llvm(context)\n\n\nclass NumericType(NamedType):\n \"\"\"\n Base class for numeric types.\n\n .. attribute:: name\n\n name of the type\n\n .. attribute:: itemsize\n\n sizeof(type)\n\n .. attribute:: rank\n\n ordering of numeric types\n \"\"\"\n is_numeric = True\n\n\nclass IntType(NumericType):\n is_int = True\n is_int_like = True\n name = 'int'\n signed = True\n rank = 4\n itemsize = 4\n kind = INT_KIND\n\n def to_llvm(self, context):\n if self.itemsize == 1:\n return lc.Type.int(8)\n elif self.itemsize == 2:\n return lc.Type.int(16)\n elif self.itemsize == 4:\n return lc.Type.int(32)\n else:\n assert self.itemsize == 8, self\n return lc.Type.int(64)\n\n\nclass FloatType(NumericType):\n is_float = True\n kind = FLOAT_KIND\n\n @property\n def comparison_type_list(self):\n return self.subtype_list + [self.itemsize]\n\n def to_llvm(self, context):\n if self.itemsize == 4:\n return lc.Type.float()\n elif self.itemsize == 8:\n return lc.Type.double()\n else:\n assert self.itemsize == 16\n return lc.Type.fp128()\n\n\nclass ComplexType(NumericType):\n is_complex = True\n subtypes = ['base_type']\n kind = COMPLEX_KIND\n\n\nclass Py_ssize_t_Type(IntType):\n is_py_ssize_t = True\n name = 'Py_ssize_t'\n rank = 9\n signed = True\n\n def __init__(self, **kwds):\n super(Py_ssize_t_Type, self).__init__(**kwds)\n self.itemsize = _plat_bits / 8\n\n\nclass NPyIntp(IntType):\n is_numpy_intp = True\n name = 'npy_intp'\n\n def __init__(self, **kwds):\n super(NPyIntp, self).__init__(**kwds)\n import numpy as np\n ctypes_array = np.empty(0).ctypes.strides\n self.itemsize = ctypes.sizeof(ctypes_array._type_)\n\n\nclass CharType(IntType):\n is_char = True\n name = 'char'\n rank = 1\n signed = True\n\n def to_llvm(self, context):\n return lc.Type.int(8)\n\n\nclass CStringType(Type):\n is_c_string = True\n\n def __repr__(self):\n return 'const char *'\n\n def to_llvm(self, context):\n return char.pointer().to_llvm(context)\n\n\nclass VoidType(NamedType):\n is_void = True\n name = 'void'\n\n def to_llvm(self, context):\n return lc.Type.void()\n\n\nclass ObjectType(Type):\n is_object = True\n\n def __repr__(self):\n return 'PyObject *'\n\n\nclass FunctionType(Type):\n subtypes = ['return_type', 'args']\n is_function = True\n is_vararg = False\n\n def to_llvm(self, context):\n return lc.Type.function(self.return_type.to_llvm(context), [\n arg_type.to_llvm(context) for arg_type in self.args], self.\n is_vararg)\n\n def __str__(self):\n args = map(str, self.args)\n if self.is_vararg:\n args.append('...')\n return '%s (*)(%s)' % (self.return_type, ', '.join(args))\n\n\nclass VectorType(Type):\n subtypes = ['element_type']\n is_vector = True\n vector_size = None\n\n def __init__(self, element_type, vector_size, **kwds):\n super(VectorType, self).__init__(**kwds)\n assert (element_type.is_int or element_type.is_float\n ) and element_type.itemsize in (4, 8), element_type\n self.element_type = element_type\n self.vector_size = vector_size\n\n def to_llvm(self, context):\n return lc.Type.vector(self.element_type.to_llvm(context), self.\n vector_size)\n\n @property\n def comparison_type_list(self):\n return self.subtype_list + [self.vector_size]\n\n def __str__(self):\n itemsize = self.element_type.itemsize\n if self.element_type.is_float:\n if itemsize == 4:\n return '__m128'\n else:\n return '__m128d'\n elif itemsize == 4:\n return '__m128i'\n else:\n raise NotImplementedError\n\n\n<assignment token>\n<code token>\n",
"<docstring token>\n<assignment token>\n<import token>\n<code token>\n<import token>\n<code token>\n<class token>\n<function token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass NamedType(Type):\n name = None\n\n def __eq__(self, other):\n return isinstance(other, NamedType) and self.name == other.name\n\n def __repr__(self):\n if self.qualifiers:\n return '%s %s' % (self.name, ' '.join(self.qualifiers))\n return self.name\n\n\nclass BoolType(NamedType):\n is_bool = True\n name = 'bool'\n\n def __repr__(self):\n return 'int %s' % ' '.join(self.qualifiers)\n\n def to_llvm(self, context):\n return int8.to_llvm(context)\n\n\nclass NumericType(NamedType):\n \"\"\"\n Base class for numeric types.\n\n .. attribute:: name\n\n name of the type\n\n .. attribute:: itemsize\n\n sizeof(type)\n\n .. attribute:: rank\n\n ordering of numeric types\n \"\"\"\n is_numeric = True\n\n\nclass IntType(NumericType):\n is_int = True\n is_int_like = True\n name = 'int'\n signed = True\n rank = 4\n itemsize = 4\n kind = INT_KIND\n\n def to_llvm(self, context):\n if self.itemsize == 1:\n return lc.Type.int(8)\n elif self.itemsize == 2:\n return lc.Type.int(16)\n elif self.itemsize == 4:\n return lc.Type.int(32)\n else:\n assert self.itemsize == 8, self\n return lc.Type.int(64)\n\n\nclass FloatType(NumericType):\n is_float = True\n kind = FLOAT_KIND\n\n @property\n def comparison_type_list(self):\n return self.subtype_list + [self.itemsize]\n\n def to_llvm(self, context):\n if self.itemsize == 4:\n return lc.Type.float()\n elif self.itemsize == 8:\n return lc.Type.double()\n else:\n assert self.itemsize == 16\n return lc.Type.fp128()\n\n\nclass ComplexType(NumericType):\n is_complex = True\n subtypes = ['base_type']\n kind = COMPLEX_KIND\n\n\nclass Py_ssize_t_Type(IntType):\n is_py_ssize_t = True\n name = 'Py_ssize_t'\n rank = 9\n signed = True\n\n def __init__(self, **kwds):\n super(Py_ssize_t_Type, self).__init__(**kwds)\n self.itemsize = _plat_bits / 8\n\n\nclass NPyIntp(IntType):\n is_numpy_intp = True\n name = 'npy_intp'\n\n def __init__(self, **kwds):\n super(NPyIntp, self).__init__(**kwds)\n import numpy as np\n ctypes_array = np.empty(0).ctypes.strides\n self.itemsize = ctypes.sizeof(ctypes_array._type_)\n\n\nclass CharType(IntType):\n is_char = True\n name = 'char'\n rank = 1\n signed = True\n\n def to_llvm(self, context):\n return lc.Type.int(8)\n\n\nclass CStringType(Type):\n is_c_string = True\n\n def __repr__(self):\n return 'const char *'\n\n def to_llvm(self, context):\n return char.pointer().to_llvm(context)\n\n\nclass VoidType(NamedType):\n is_void = True\n name = 'void'\n\n def to_llvm(self, context):\n return lc.Type.void()\n\n\nclass ObjectType(Type):\n is_object = True\n\n def __repr__(self):\n return 'PyObject *'\n\n\nclass FunctionType(Type):\n subtypes = ['return_type', 'args']\n is_function = True\n is_vararg = False\n\n def to_llvm(self, context):\n return lc.Type.function(self.return_type.to_llvm(context), [\n arg_type.to_llvm(context) for arg_type in self.args], self.\n is_vararg)\n\n def __str__(self):\n args = map(str, self.args)\n if self.is_vararg:\n args.append('...')\n return '%s (*)(%s)' % (self.return_type, ', '.join(args))\n\n\nclass VectorType(Type):\n subtypes = ['element_type']\n is_vector = True\n vector_size = None\n\n def __init__(self, element_type, vector_size, **kwds):\n super(VectorType, self).__init__(**kwds)\n assert (element_type.is_int or element_type.is_float\n ) and element_type.itemsize in (4, 8), element_type\n self.element_type = element_type\n self.vector_size = vector_size\n\n def to_llvm(self, context):\n return lc.Type.vector(self.element_type.to_llvm(context), self.\n vector_size)\n\n @property\n def comparison_type_list(self):\n return self.subtype_list + [self.vector_size]\n\n def __str__(self):\n itemsize = self.element_type.itemsize\n if self.element_type.is_float:\n if itemsize == 4:\n return '__m128'\n else:\n return '__m128d'\n elif itemsize == 4:\n return '__m128i'\n else:\n raise NotImplementedError\n\n\n<assignment token>\n<code token>\n",
"<docstring token>\n<assignment token>\n<import token>\n<code token>\n<import token>\n<code token>\n<class token>\n<function token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass NamedType(Type):\n <assignment token>\n\n def __eq__(self, other):\n return isinstance(other, NamedType) and self.name == other.name\n\n def __repr__(self):\n if self.qualifiers:\n return '%s %s' % (self.name, ' '.join(self.qualifiers))\n return self.name\n\n\nclass BoolType(NamedType):\n is_bool = True\n name = 'bool'\n\n def __repr__(self):\n return 'int %s' % ' '.join(self.qualifiers)\n\n def to_llvm(self, context):\n return int8.to_llvm(context)\n\n\nclass NumericType(NamedType):\n \"\"\"\n Base class for numeric types.\n\n .. attribute:: name\n\n name of the type\n\n .. attribute:: itemsize\n\n sizeof(type)\n\n .. attribute:: rank\n\n ordering of numeric types\n \"\"\"\n is_numeric = True\n\n\nclass IntType(NumericType):\n is_int = True\n is_int_like = True\n name = 'int'\n signed = True\n rank = 4\n itemsize = 4\n kind = INT_KIND\n\n def to_llvm(self, context):\n if self.itemsize == 1:\n return lc.Type.int(8)\n elif self.itemsize == 2:\n return lc.Type.int(16)\n elif self.itemsize == 4:\n return lc.Type.int(32)\n else:\n assert self.itemsize == 8, self\n return lc.Type.int(64)\n\n\nclass FloatType(NumericType):\n is_float = True\n kind = FLOAT_KIND\n\n @property\n def comparison_type_list(self):\n return self.subtype_list + [self.itemsize]\n\n def to_llvm(self, context):\n if self.itemsize == 4:\n return lc.Type.float()\n elif self.itemsize == 8:\n return lc.Type.double()\n else:\n assert self.itemsize == 16\n return lc.Type.fp128()\n\n\nclass ComplexType(NumericType):\n is_complex = True\n subtypes = ['base_type']\n kind = COMPLEX_KIND\n\n\nclass Py_ssize_t_Type(IntType):\n is_py_ssize_t = True\n name = 'Py_ssize_t'\n rank = 9\n signed = True\n\n def __init__(self, **kwds):\n super(Py_ssize_t_Type, self).__init__(**kwds)\n self.itemsize = _plat_bits / 8\n\n\nclass NPyIntp(IntType):\n is_numpy_intp = True\n name = 'npy_intp'\n\n def __init__(self, **kwds):\n super(NPyIntp, self).__init__(**kwds)\n import numpy as np\n ctypes_array = np.empty(0).ctypes.strides\n self.itemsize = ctypes.sizeof(ctypes_array._type_)\n\n\nclass CharType(IntType):\n is_char = True\n name = 'char'\n rank = 1\n signed = True\n\n def to_llvm(self, context):\n return lc.Type.int(8)\n\n\nclass CStringType(Type):\n is_c_string = True\n\n def __repr__(self):\n return 'const char *'\n\n def to_llvm(self, context):\n return char.pointer().to_llvm(context)\n\n\nclass VoidType(NamedType):\n is_void = True\n name = 'void'\n\n def to_llvm(self, context):\n return lc.Type.void()\n\n\nclass ObjectType(Type):\n is_object = True\n\n def __repr__(self):\n return 'PyObject *'\n\n\nclass FunctionType(Type):\n subtypes = ['return_type', 'args']\n is_function = True\n is_vararg = False\n\n def to_llvm(self, context):\n return lc.Type.function(self.return_type.to_llvm(context), [\n arg_type.to_llvm(context) for arg_type in self.args], self.\n is_vararg)\n\n def __str__(self):\n args = map(str, self.args)\n if self.is_vararg:\n args.append('...')\n return '%s (*)(%s)' % (self.return_type, ', '.join(args))\n\n\nclass VectorType(Type):\n subtypes = ['element_type']\n is_vector = True\n vector_size = None\n\n def __init__(self, element_type, vector_size, **kwds):\n super(VectorType, self).__init__(**kwds)\n assert (element_type.is_int or element_type.is_float\n ) and element_type.itemsize in (4, 8), element_type\n self.element_type = element_type\n self.vector_size = vector_size\n\n def to_llvm(self, context):\n return lc.Type.vector(self.element_type.to_llvm(context), self.\n vector_size)\n\n @property\n def comparison_type_list(self):\n return self.subtype_list + [self.vector_size]\n\n def __str__(self):\n itemsize = self.element_type.itemsize\n if self.element_type.is_float:\n if itemsize == 4:\n return '__m128'\n else:\n return '__m128d'\n elif itemsize == 4:\n return '__m128i'\n else:\n raise NotImplementedError\n\n\n<assignment token>\n<code token>\n",
"<docstring token>\n<assignment token>\n<import token>\n<code token>\n<import token>\n<code token>\n<class token>\n<function token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass NamedType(Type):\n <assignment token>\n <function token>\n\n def __repr__(self):\n if self.qualifiers:\n return '%s %s' % (self.name, ' '.join(self.qualifiers))\n return self.name\n\n\nclass BoolType(NamedType):\n is_bool = True\n name = 'bool'\n\n def __repr__(self):\n return 'int %s' % ' '.join(self.qualifiers)\n\n def to_llvm(self, context):\n return int8.to_llvm(context)\n\n\nclass NumericType(NamedType):\n \"\"\"\n Base class for numeric types.\n\n .. attribute:: name\n\n name of the type\n\n .. attribute:: itemsize\n\n sizeof(type)\n\n .. attribute:: rank\n\n ordering of numeric types\n \"\"\"\n is_numeric = True\n\n\nclass IntType(NumericType):\n is_int = True\n is_int_like = True\n name = 'int'\n signed = True\n rank = 4\n itemsize = 4\n kind = INT_KIND\n\n def to_llvm(self, context):\n if self.itemsize == 1:\n return lc.Type.int(8)\n elif self.itemsize == 2:\n return lc.Type.int(16)\n elif self.itemsize == 4:\n return lc.Type.int(32)\n else:\n assert self.itemsize == 8, self\n return lc.Type.int(64)\n\n\nclass FloatType(NumericType):\n is_float = True\n kind = FLOAT_KIND\n\n @property\n def comparison_type_list(self):\n return self.subtype_list + [self.itemsize]\n\n def to_llvm(self, context):\n if self.itemsize == 4:\n return lc.Type.float()\n elif self.itemsize == 8:\n return lc.Type.double()\n else:\n assert self.itemsize == 16\n return lc.Type.fp128()\n\n\nclass ComplexType(NumericType):\n is_complex = True\n subtypes = ['base_type']\n kind = COMPLEX_KIND\n\n\nclass Py_ssize_t_Type(IntType):\n is_py_ssize_t = True\n name = 'Py_ssize_t'\n rank = 9\n signed = True\n\n def __init__(self, **kwds):\n super(Py_ssize_t_Type, self).__init__(**kwds)\n self.itemsize = _plat_bits / 8\n\n\nclass NPyIntp(IntType):\n is_numpy_intp = True\n name = 'npy_intp'\n\n def __init__(self, **kwds):\n super(NPyIntp, self).__init__(**kwds)\n import numpy as np\n ctypes_array = np.empty(0).ctypes.strides\n self.itemsize = ctypes.sizeof(ctypes_array._type_)\n\n\nclass CharType(IntType):\n is_char = True\n name = 'char'\n rank = 1\n signed = True\n\n def to_llvm(self, context):\n return lc.Type.int(8)\n\n\nclass CStringType(Type):\n is_c_string = True\n\n def __repr__(self):\n return 'const char *'\n\n def to_llvm(self, context):\n return char.pointer().to_llvm(context)\n\n\nclass VoidType(NamedType):\n is_void = True\n name = 'void'\n\n def to_llvm(self, context):\n return lc.Type.void()\n\n\nclass ObjectType(Type):\n is_object = True\n\n def __repr__(self):\n return 'PyObject *'\n\n\nclass FunctionType(Type):\n subtypes = ['return_type', 'args']\n is_function = True\n is_vararg = False\n\n def to_llvm(self, context):\n return lc.Type.function(self.return_type.to_llvm(context), [\n arg_type.to_llvm(context) for arg_type in self.args], self.\n is_vararg)\n\n def __str__(self):\n args = map(str, self.args)\n if self.is_vararg:\n args.append('...')\n return '%s (*)(%s)' % (self.return_type, ', '.join(args))\n\n\nclass VectorType(Type):\n subtypes = ['element_type']\n is_vector = True\n vector_size = None\n\n def __init__(self, element_type, vector_size, **kwds):\n super(VectorType, self).__init__(**kwds)\n assert (element_type.is_int or element_type.is_float\n ) and element_type.itemsize in (4, 8), element_type\n self.element_type = element_type\n self.vector_size = vector_size\n\n def to_llvm(self, context):\n return lc.Type.vector(self.element_type.to_llvm(context), self.\n vector_size)\n\n @property\n def comparison_type_list(self):\n return self.subtype_list + [self.vector_size]\n\n def __str__(self):\n itemsize = self.element_type.itemsize\n if self.element_type.is_float:\n if itemsize == 4:\n return '__m128'\n else:\n return '__m128d'\n elif itemsize == 4:\n return '__m128i'\n else:\n raise NotImplementedError\n\n\n<assignment token>\n<code token>\n",
"<docstring token>\n<assignment token>\n<import token>\n<code token>\n<import token>\n<code token>\n<class token>\n<function token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass NamedType(Type):\n <assignment token>\n <function token>\n <function token>\n\n\nclass BoolType(NamedType):\n is_bool = True\n name = 'bool'\n\n def __repr__(self):\n return 'int %s' % ' '.join(self.qualifiers)\n\n def to_llvm(self, context):\n return int8.to_llvm(context)\n\n\nclass NumericType(NamedType):\n \"\"\"\n Base class for numeric types.\n\n .. attribute:: name\n\n name of the type\n\n .. attribute:: itemsize\n\n sizeof(type)\n\n .. attribute:: rank\n\n ordering of numeric types\n \"\"\"\n is_numeric = True\n\n\nclass IntType(NumericType):\n is_int = True\n is_int_like = True\n name = 'int'\n signed = True\n rank = 4\n itemsize = 4\n kind = INT_KIND\n\n def to_llvm(self, context):\n if self.itemsize == 1:\n return lc.Type.int(8)\n elif self.itemsize == 2:\n return lc.Type.int(16)\n elif self.itemsize == 4:\n return lc.Type.int(32)\n else:\n assert self.itemsize == 8, self\n return lc.Type.int(64)\n\n\nclass FloatType(NumericType):\n is_float = True\n kind = FLOAT_KIND\n\n @property\n def comparison_type_list(self):\n return self.subtype_list + [self.itemsize]\n\n def to_llvm(self, context):\n if self.itemsize == 4:\n return lc.Type.float()\n elif self.itemsize == 8:\n return lc.Type.double()\n else:\n assert self.itemsize == 16\n return lc.Type.fp128()\n\n\nclass ComplexType(NumericType):\n is_complex = True\n subtypes = ['base_type']\n kind = COMPLEX_KIND\n\n\nclass Py_ssize_t_Type(IntType):\n is_py_ssize_t = True\n name = 'Py_ssize_t'\n rank = 9\n signed = True\n\n def __init__(self, **kwds):\n super(Py_ssize_t_Type, self).__init__(**kwds)\n self.itemsize = _plat_bits / 8\n\n\nclass NPyIntp(IntType):\n is_numpy_intp = True\n name = 'npy_intp'\n\n def __init__(self, **kwds):\n super(NPyIntp, self).__init__(**kwds)\n import numpy as np\n ctypes_array = np.empty(0).ctypes.strides\n self.itemsize = ctypes.sizeof(ctypes_array._type_)\n\n\nclass CharType(IntType):\n is_char = True\n name = 'char'\n rank = 1\n signed = True\n\n def to_llvm(self, context):\n return lc.Type.int(8)\n\n\nclass CStringType(Type):\n is_c_string = True\n\n def __repr__(self):\n return 'const char *'\n\n def to_llvm(self, context):\n return char.pointer().to_llvm(context)\n\n\nclass VoidType(NamedType):\n is_void = True\n name = 'void'\n\n def to_llvm(self, context):\n return lc.Type.void()\n\n\nclass ObjectType(Type):\n is_object = True\n\n def __repr__(self):\n return 'PyObject *'\n\n\nclass FunctionType(Type):\n subtypes = ['return_type', 'args']\n is_function = True\n is_vararg = False\n\n def to_llvm(self, context):\n return lc.Type.function(self.return_type.to_llvm(context), [\n arg_type.to_llvm(context) for arg_type in self.args], self.\n is_vararg)\n\n def __str__(self):\n args = map(str, self.args)\n if self.is_vararg:\n args.append('...')\n return '%s (*)(%s)' % (self.return_type, ', '.join(args))\n\n\nclass VectorType(Type):\n subtypes = ['element_type']\n is_vector = True\n vector_size = None\n\n def __init__(self, element_type, vector_size, **kwds):\n super(VectorType, self).__init__(**kwds)\n assert (element_type.is_int or element_type.is_float\n ) and element_type.itemsize in (4, 8), element_type\n self.element_type = element_type\n self.vector_size = vector_size\n\n def to_llvm(self, context):\n return lc.Type.vector(self.element_type.to_llvm(context), self.\n vector_size)\n\n @property\n def comparison_type_list(self):\n return self.subtype_list + [self.vector_size]\n\n def __str__(self):\n itemsize = self.element_type.itemsize\n if self.element_type.is_float:\n if itemsize == 4:\n return '__m128'\n else:\n return '__m128d'\n elif itemsize == 4:\n return '__m128i'\n else:\n raise NotImplementedError\n\n\n<assignment token>\n<code token>\n",
"<docstring token>\n<assignment token>\n<import token>\n<code token>\n<import token>\n<code token>\n<class token>\n<function token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass BoolType(NamedType):\n is_bool = True\n name = 'bool'\n\n def __repr__(self):\n return 'int %s' % ' '.join(self.qualifiers)\n\n def to_llvm(self, context):\n return int8.to_llvm(context)\n\n\nclass NumericType(NamedType):\n \"\"\"\n Base class for numeric types.\n\n .. attribute:: name\n\n name of the type\n\n .. attribute:: itemsize\n\n sizeof(type)\n\n .. attribute:: rank\n\n ordering of numeric types\n \"\"\"\n is_numeric = True\n\n\nclass IntType(NumericType):\n is_int = True\n is_int_like = True\n name = 'int'\n signed = True\n rank = 4\n itemsize = 4\n kind = INT_KIND\n\n def to_llvm(self, context):\n if self.itemsize == 1:\n return lc.Type.int(8)\n elif self.itemsize == 2:\n return lc.Type.int(16)\n elif self.itemsize == 4:\n return lc.Type.int(32)\n else:\n assert self.itemsize == 8, self\n return lc.Type.int(64)\n\n\nclass FloatType(NumericType):\n is_float = True\n kind = FLOAT_KIND\n\n @property\n def comparison_type_list(self):\n return self.subtype_list + [self.itemsize]\n\n def to_llvm(self, context):\n if self.itemsize == 4:\n return lc.Type.float()\n elif self.itemsize == 8:\n return lc.Type.double()\n else:\n assert self.itemsize == 16\n return lc.Type.fp128()\n\n\nclass ComplexType(NumericType):\n is_complex = True\n subtypes = ['base_type']\n kind = COMPLEX_KIND\n\n\nclass Py_ssize_t_Type(IntType):\n is_py_ssize_t = True\n name = 'Py_ssize_t'\n rank = 9\n signed = True\n\n def __init__(self, **kwds):\n super(Py_ssize_t_Type, self).__init__(**kwds)\n self.itemsize = _plat_bits / 8\n\n\nclass NPyIntp(IntType):\n is_numpy_intp = True\n name = 'npy_intp'\n\n def __init__(self, **kwds):\n super(NPyIntp, self).__init__(**kwds)\n import numpy as np\n ctypes_array = np.empty(0).ctypes.strides\n self.itemsize = ctypes.sizeof(ctypes_array._type_)\n\n\nclass CharType(IntType):\n is_char = True\n name = 'char'\n rank = 1\n signed = True\n\n def to_llvm(self, context):\n return lc.Type.int(8)\n\n\nclass CStringType(Type):\n is_c_string = True\n\n def __repr__(self):\n return 'const char *'\n\n def to_llvm(self, context):\n return char.pointer().to_llvm(context)\n\n\nclass VoidType(NamedType):\n is_void = True\n name = 'void'\n\n def to_llvm(self, context):\n return lc.Type.void()\n\n\nclass ObjectType(Type):\n is_object = True\n\n def __repr__(self):\n return 'PyObject *'\n\n\nclass FunctionType(Type):\n subtypes = ['return_type', 'args']\n is_function = True\n is_vararg = False\n\n def to_llvm(self, context):\n return lc.Type.function(self.return_type.to_llvm(context), [\n arg_type.to_llvm(context) for arg_type in self.args], self.\n is_vararg)\n\n def __str__(self):\n args = map(str, self.args)\n if self.is_vararg:\n args.append('...')\n return '%s (*)(%s)' % (self.return_type, ', '.join(args))\n\n\nclass VectorType(Type):\n subtypes = ['element_type']\n is_vector = True\n vector_size = None\n\n def __init__(self, element_type, vector_size, **kwds):\n super(VectorType, self).__init__(**kwds)\n assert (element_type.is_int or element_type.is_float\n ) and element_type.itemsize in (4, 8), element_type\n self.element_type = element_type\n self.vector_size = vector_size\n\n def to_llvm(self, context):\n return lc.Type.vector(self.element_type.to_llvm(context), self.\n vector_size)\n\n @property\n def comparison_type_list(self):\n return self.subtype_list + [self.vector_size]\n\n def __str__(self):\n itemsize = self.element_type.itemsize\n if self.element_type.is_float:\n if itemsize == 4:\n return '__m128'\n else:\n return '__m128d'\n elif itemsize == 4:\n return '__m128i'\n else:\n raise NotImplementedError\n\n\n<assignment token>\n<code token>\n",
"<docstring token>\n<assignment token>\n<import token>\n<code token>\n<import token>\n<code token>\n<class token>\n<function token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass BoolType(NamedType):\n <assignment token>\n <assignment token>\n\n def __repr__(self):\n return 'int %s' % ' '.join(self.qualifiers)\n\n def to_llvm(self, context):\n return int8.to_llvm(context)\n\n\nclass NumericType(NamedType):\n \"\"\"\n Base class for numeric types.\n\n .. attribute:: name\n\n name of the type\n\n .. attribute:: itemsize\n\n sizeof(type)\n\n .. attribute:: rank\n\n ordering of numeric types\n \"\"\"\n is_numeric = True\n\n\nclass IntType(NumericType):\n is_int = True\n is_int_like = True\n name = 'int'\n signed = True\n rank = 4\n itemsize = 4\n kind = INT_KIND\n\n def to_llvm(self, context):\n if self.itemsize == 1:\n return lc.Type.int(8)\n elif self.itemsize == 2:\n return lc.Type.int(16)\n elif self.itemsize == 4:\n return lc.Type.int(32)\n else:\n assert self.itemsize == 8, self\n return lc.Type.int(64)\n\n\nclass FloatType(NumericType):\n is_float = True\n kind = FLOAT_KIND\n\n @property\n def comparison_type_list(self):\n return self.subtype_list + [self.itemsize]\n\n def to_llvm(self, context):\n if self.itemsize == 4:\n return lc.Type.float()\n elif self.itemsize == 8:\n return lc.Type.double()\n else:\n assert self.itemsize == 16\n return lc.Type.fp128()\n\n\nclass ComplexType(NumericType):\n is_complex = True\n subtypes = ['base_type']\n kind = COMPLEX_KIND\n\n\nclass Py_ssize_t_Type(IntType):\n is_py_ssize_t = True\n name = 'Py_ssize_t'\n rank = 9\n signed = True\n\n def __init__(self, **kwds):\n super(Py_ssize_t_Type, self).__init__(**kwds)\n self.itemsize = _plat_bits / 8\n\n\nclass NPyIntp(IntType):\n is_numpy_intp = True\n name = 'npy_intp'\n\n def __init__(self, **kwds):\n super(NPyIntp, self).__init__(**kwds)\n import numpy as np\n ctypes_array = np.empty(0).ctypes.strides\n self.itemsize = ctypes.sizeof(ctypes_array._type_)\n\n\nclass CharType(IntType):\n is_char = True\n name = 'char'\n rank = 1\n signed = True\n\n def to_llvm(self, context):\n return lc.Type.int(8)\n\n\nclass CStringType(Type):\n is_c_string = True\n\n def __repr__(self):\n return 'const char *'\n\n def to_llvm(self, context):\n return char.pointer().to_llvm(context)\n\n\nclass VoidType(NamedType):\n is_void = True\n name = 'void'\n\n def to_llvm(self, context):\n return lc.Type.void()\n\n\nclass ObjectType(Type):\n is_object = True\n\n def __repr__(self):\n return 'PyObject *'\n\n\nclass FunctionType(Type):\n subtypes = ['return_type', 'args']\n is_function = True\n is_vararg = False\n\n def to_llvm(self, context):\n return lc.Type.function(self.return_type.to_llvm(context), [\n arg_type.to_llvm(context) for arg_type in self.args], self.\n is_vararg)\n\n def __str__(self):\n args = map(str, self.args)\n if self.is_vararg:\n args.append('...')\n return '%s (*)(%s)' % (self.return_type, ', '.join(args))\n\n\nclass VectorType(Type):\n subtypes = ['element_type']\n is_vector = True\n vector_size = None\n\n def __init__(self, element_type, vector_size, **kwds):\n super(VectorType, self).__init__(**kwds)\n assert (element_type.is_int or element_type.is_float\n ) and element_type.itemsize in (4, 8), element_type\n self.element_type = element_type\n self.vector_size = vector_size\n\n def to_llvm(self, context):\n return lc.Type.vector(self.element_type.to_llvm(context), self.\n vector_size)\n\n @property\n def comparison_type_list(self):\n return self.subtype_list + [self.vector_size]\n\n def __str__(self):\n itemsize = self.element_type.itemsize\n if self.element_type.is_float:\n if itemsize == 4:\n return '__m128'\n else:\n return '__m128d'\n elif itemsize == 4:\n return '__m128i'\n else:\n raise NotImplementedError\n\n\n<assignment token>\n<code token>\n",
"<docstring token>\n<assignment token>\n<import token>\n<code token>\n<import token>\n<code token>\n<class token>\n<function token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass BoolType(NamedType):\n <assignment token>\n <assignment token>\n\n def __repr__(self):\n return 'int %s' % ' '.join(self.qualifiers)\n <function token>\n\n\nclass NumericType(NamedType):\n \"\"\"\n Base class for numeric types.\n\n .. attribute:: name\n\n name of the type\n\n .. attribute:: itemsize\n\n sizeof(type)\n\n .. attribute:: rank\n\n ordering of numeric types\n \"\"\"\n is_numeric = True\n\n\nclass IntType(NumericType):\n is_int = True\n is_int_like = True\n name = 'int'\n signed = True\n rank = 4\n itemsize = 4\n kind = INT_KIND\n\n def to_llvm(self, context):\n if self.itemsize == 1:\n return lc.Type.int(8)\n elif self.itemsize == 2:\n return lc.Type.int(16)\n elif self.itemsize == 4:\n return lc.Type.int(32)\n else:\n assert self.itemsize == 8, self\n return lc.Type.int(64)\n\n\nclass FloatType(NumericType):\n is_float = True\n kind = FLOAT_KIND\n\n @property\n def comparison_type_list(self):\n return self.subtype_list + [self.itemsize]\n\n def to_llvm(self, context):\n if self.itemsize == 4:\n return lc.Type.float()\n elif self.itemsize == 8:\n return lc.Type.double()\n else:\n assert self.itemsize == 16\n return lc.Type.fp128()\n\n\nclass ComplexType(NumericType):\n is_complex = True\n subtypes = ['base_type']\n kind = COMPLEX_KIND\n\n\nclass Py_ssize_t_Type(IntType):\n is_py_ssize_t = True\n name = 'Py_ssize_t'\n rank = 9\n signed = True\n\n def __init__(self, **kwds):\n super(Py_ssize_t_Type, self).__init__(**kwds)\n self.itemsize = _plat_bits / 8\n\n\nclass NPyIntp(IntType):\n is_numpy_intp = True\n name = 'npy_intp'\n\n def __init__(self, **kwds):\n super(NPyIntp, self).__init__(**kwds)\n import numpy as np\n ctypes_array = np.empty(0).ctypes.strides\n self.itemsize = ctypes.sizeof(ctypes_array._type_)\n\n\nclass CharType(IntType):\n is_char = True\n name = 'char'\n rank = 1\n signed = True\n\n def to_llvm(self, context):\n return lc.Type.int(8)\n\n\nclass CStringType(Type):\n is_c_string = True\n\n def __repr__(self):\n return 'const char *'\n\n def to_llvm(self, context):\n return char.pointer().to_llvm(context)\n\n\nclass VoidType(NamedType):\n is_void = True\n name = 'void'\n\n def to_llvm(self, context):\n return lc.Type.void()\n\n\nclass ObjectType(Type):\n is_object = True\n\n def __repr__(self):\n return 'PyObject *'\n\n\nclass FunctionType(Type):\n subtypes = ['return_type', 'args']\n is_function = True\n is_vararg = False\n\n def to_llvm(self, context):\n return lc.Type.function(self.return_type.to_llvm(context), [\n arg_type.to_llvm(context) for arg_type in self.args], self.\n is_vararg)\n\n def __str__(self):\n args = map(str, self.args)\n if self.is_vararg:\n args.append('...')\n return '%s (*)(%s)' % (self.return_type, ', '.join(args))\n\n\nclass VectorType(Type):\n subtypes = ['element_type']\n is_vector = True\n vector_size = None\n\n def __init__(self, element_type, vector_size, **kwds):\n super(VectorType, self).__init__(**kwds)\n assert (element_type.is_int or element_type.is_float\n ) and element_type.itemsize in (4, 8), element_type\n self.element_type = element_type\n self.vector_size = vector_size\n\n def to_llvm(self, context):\n return lc.Type.vector(self.element_type.to_llvm(context), self.\n vector_size)\n\n @property\n def comparison_type_list(self):\n return self.subtype_list + [self.vector_size]\n\n def __str__(self):\n itemsize = self.element_type.itemsize\n if self.element_type.is_float:\n if itemsize == 4:\n return '__m128'\n else:\n return '__m128d'\n elif itemsize == 4:\n return '__m128i'\n else:\n raise NotImplementedError\n\n\n<assignment token>\n<code token>\n",
"<docstring token>\n<assignment token>\n<import token>\n<code token>\n<import token>\n<code token>\n<class token>\n<function token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass BoolType(NamedType):\n <assignment token>\n <assignment token>\n <function token>\n <function token>\n\n\nclass NumericType(NamedType):\n \"\"\"\n Base class for numeric types.\n\n .. attribute:: name\n\n name of the type\n\n .. attribute:: itemsize\n\n sizeof(type)\n\n .. attribute:: rank\n\n ordering of numeric types\n \"\"\"\n is_numeric = True\n\n\nclass IntType(NumericType):\n is_int = True\n is_int_like = True\n name = 'int'\n signed = True\n rank = 4\n itemsize = 4\n kind = INT_KIND\n\n def to_llvm(self, context):\n if self.itemsize == 1:\n return lc.Type.int(8)\n elif self.itemsize == 2:\n return lc.Type.int(16)\n elif self.itemsize == 4:\n return lc.Type.int(32)\n else:\n assert self.itemsize == 8, self\n return lc.Type.int(64)\n\n\nclass FloatType(NumericType):\n is_float = True\n kind = FLOAT_KIND\n\n @property\n def comparison_type_list(self):\n return self.subtype_list + [self.itemsize]\n\n def to_llvm(self, context):\n if self.itemsize == 4:\n return lc.Type.float()\n elif self.itemsize == 8:\n return lc.Type.double()\n else:\n assert self.itemsize == 16\n return lc.Type.fp128()\n\n\nclass ComplexType(NumericType):\n is_complex = True\n subtypes = ['base_type']\n kind = COMPLEX_KIND\n\n\nclass Py_ssize_t_Type(IntType):\n is_py_ssize_t = True\n name = 'Py_ssize_t'\n rank = 9\n signed = True\n\n def __init__(self, **kwds):\n super(Py_ssize_t_Type, self).__init__(**kwds)\n self.itemsize = _plat_bits / 8\n\n\nclass NPyIntp(IntType):\n is_numpy_intp = True\n name = 'npy_intp'\n\n def __init__(self, **kwds):\n super(NPyIntp, self).__init__(**kwds)\n import numpy as np\n ctypes_array = np.empty(0).ctypes.strides\n self.itemsize = ctypes.sizeof(ctypes_array._type_)\n\n\nclass CharType(IntType):\n is_char = True\n name = 'char'\n rank = 1\n signed = True\n\n def to_llvm(self, context):\n return lc.Type.int(8)\n\n\nclass CStringType(Type):\n is_c_string = True\n\n def __repr__(self):\n return 'const char *'\n\n def to_llvm(self, context):\n return char.pointer().to_llvm(context)\n\n\nclass VoidType(NamedType):\n is_void = True\n name = 'void'\n\n def to_llvm(self, context):\n return lc.Type.void()\n\n\nclass ObjectType(Type):\n is_object = True\n\n def __repr__(self):\n return 'PyObject *'\n\n\nclass FunctionType(Type):\n subtypes = ['return_type', 'args']\n is_function = True\n is_vararg = False\n\n def to_llvm(self, context):\n return lc.Type.function(self.return_type.to_llvm(context), [\n arg_type.to_llvm(context) for arg_type in self.args], self.\n is_vararg)\n\n def __str__(self):\n args = map(str, self.args)\n if self.is_vararg:\n args.append('...')\n return '%s (*)(%s)' % (self.return_type, ', '.join(args))\n\n\nclass VectorType(Type):\n subtypes = ['element_type']\n is_vector = True\n vector_size = None\n\n def __init__(self, element_type, vector_size, **kwds):\n super(VectorType, self).__init__(**kwds)\n assert (element_type.is_int or element_type.is_float\n ) and element_type.itemsize in (4, 8), element_type\n self.element_type = element_type\n self.vector_size = vector_size\n\n def to_llvm(self, context):\n return lc.Type.vector(self.element_type.to_llvm(context), self.\n vector_size)\n\n @property\n def comparison_type_list(self):\n return self.subtype_list + [self.vector_size]\n\n def __str__(self):\n itemsize = self.element_type.itemsize\n if self.element_type.is_float:\n if itemsize == 4:\n return '__m128'\n else:\n return '__m128d'\n elif itemsize == 4:\n return '__m128i'\n else:\n raise NotImplementedError\n\n\n<assignment token>\n<code token>\n",
"<docstring token>\n<assignment token>\n<import token>\n<code token>\n<import token>\n<code token>\n<class token>\n<function token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass NumericType(NamedType):\n \"\"\"\n Base class for numeric types.\n\n .. attribute:: name\n\n name of the type\n\n .. attribute:: itemsize\n\n sizeof(type)\n\n .. attribute:: rank\n\n ordering of numeric types\n \"\"\"\n is_numeric = True\n\n\nclass IntType(NumericType):\n is_int = True\n is_int_like = True\n name = 'int'\n signed = True\n rank = 4\n itemsize = 4\n kind = INT_KIND\n\n def to_llvm(self, context):\n if self.itemsize == 1:\n return lc.Type.int(8)\n elif self.itemsize == 2:\n return lc.Type.int(16)\n elif self.itemsize == 4:\n return lc.Type.int(32)\n else:\n assert self.itemsize == 8, self\n return lc.Type.int(64)\n\n\nclass FloatType(NumericType):\n is_float = True\n kind = FLOAT_KIND\n\n @property\n def comparison_type_list(self):\n return self.subtype_list + [self.itemsize]\n\n def to_llvm(self, context):\n if self.itemsize == 4:\n return lc.Type.float()\n elif self.itemsize == 8:\n return lc.Type.double()\n else:\n assert self.itemsize == 16\n return lc.Type.fp128()\n\n\nclass ComplexType(NumericType):\n is_complex = True\n subtypes = ['base_type']\n kind = COMPLEX_KIND\n\n\nclass Py_ssize_t_Type(IntType):\n is_py_ssize_t = True\n name = 'Py_ssize_t'\n rank = 9\n signed = True\n\n def __init__(self, **kwds):\n super(Py_ssize_t_Type, self).__init__(**kwds)\n self.itemsize = _plat_bits / 8\n\n\nclass NPyIntp(IntType):\n is_numpy_intp = True\n name = 'npy_intp'\n\n def __init__(self, **kwds):\n super(NPyIntp, self).__init__(**kwds)\n import numpy as np\n ctypes_array = np.empty(0).ctypes.strides\n self.itemsize = ctypes.sizeof(ctypes_array._type_)\n\n\nclass CharType(IntType):\n is_char = True\n name = 'char'\n rank = 1\n signed = True\n\n def to_llvm(self, context):\n return lc.Type.int(8)\n\n\nclass CStringType(Type):\n is_c_string = True\n\n def __repr__(self):\n return 'const char *'\n\n def to_llvm(self, context):\n return char.pointer().to_llvm(context)\n\n\nclass VoidType(NamedType):\n is_void = True\n name = 'void'\n\n def to_llvm(self, context):\n return lc.Type.void()\n\n\nclass ObjectType(Type):\n is_object = True\n\n def __repr__(self):\n return 'PyObject *'\n\n\nclass FunctionType(Type):\n subtypes = ['return_type', 'args']\n is_function = True\n is_vararg = False\n\n def to_llvm(self, context):\n return lc.Type.function(self.return_type.to_llvm(context), [\n arg_type.to_llvm(context) for arg_type in self.args], self.\n is_vararg)\n\n def __str__(self):\n args = map(str, self.args)\n if self.is_vararg:\n args.append('...')\n return '%s (*)(%s)' % (self.return_type, ', '.join(args))\n\n\nclass VectorType(Type):\n subtypes = ['element_type']\n is_vector = True\n vector_size = None\n\n def __init__(self, element_type, vector_size, **kwds):\n super(VectorType, self).__init__(**kwds)\n assert (element_type.is_int or element_type.is_float\n ) and element_type.itemsize in (4, 8), element_type\n self.element_type = element_type\n self.vector_size = vector_size\n\n def to_llvm(self, context):\n return lc.Type.vector(self.element_type.to_llvm(context), self.\n vector_size)\n\n @property\n def comparison_type_list(self):\n return self.subtype_list + [self.vector_size]\n\n def __str__(self):\n itemsize = self.element_type.itemsize\n if self.element_type.is_float:\n if itemsize == 4:\n return '__m128'\n else:\n return '__m128d'\n elif itemsize == 4:\n return '__m128i'\n else:\n raise NotImplementedError\n\n\n<assignment token>\n<code token>\n",
"<docstring token>\n<assignment token>\n<import token>\n<code token>\n<import token>\n<code token>\n<class token>\n<function token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass NumericType(NamedType):\n <docstring token>\n is_numeric = True\n\n\nclass IntType(NumericType):\n is_int = True\n is_int_like = True\n name = 'int'\n signed = True\n rank = 4\n itemsize = 4\n kind = INT_KIND\n\n def to_llvm(self, context):\n if self.itemsize == 1:\n return lc.Type.int(8)\n elif self.itemsize == 2:\n return lc.Type.int(16)\n elif self.itemsize == 4:\n return lc.Type.int(32)\n else:\n assert self.itemsize == 8, self\n return lc.Type.int(64)\n\n\nclass FloatType(NumericType):\n is_float = True\n kind = FLOAT_KIND\n\n @property\n def comparison_type_list(self):\n return self.subtype_list + [self.itemsize]\n\n def to_llvm(self, context):\n if self.itemsize == 4:\n return lc.Type.float()\n elif self.itemsize == 8:\n return lc.Type.double()\n else:\n assert self.itemsize == 16\n return lc.Type.fp128()\n\n\nclass ComplexType(NumericType):\n is_complex = True\n subtypes = ['base_type']\n kind = COMPLEX_KIND\n\n\nclass Py_ssize_t_Type(IntType):\n is_py_ssize_t = True\n name = 'Py_ssize_t'\n rank = 9\n signed = True\n\n def __init__(self, **kwds):\n super(Py_ssize_t_Type, self).__init__(**kwds)\n self.itemsize = _plat_bits / 8\n\n\nclass NPyIntp(IntType):\n is_numpy_intp = True\n name = 'npy_intp'\n\n def __init__(self, **kwds):\n super(NPyIntp, self).__init__(**kwds)\n import numpy as np\n ctypes_array = np.empty(0).ctypes.strides\n self.itemsize = ctypes.sizeof(ctypes_array._type_)\n\n\nclass CharType(IntType):\n is_char = True\n name = 'char'\n rank = 1\n signed = True\n\n def to_llvm(self, context):\n return lc.Type.int(8)\n\n\nclass CStringType(Type):\n is_c_string = True\n\n def __repr__(self):\n return 'const char *'\n\n def to_llvm(self, context):\n return char.pointer().to_llvm(context)\n\n\nclass VoidType(NamedType):\n is_void = True\n name = 'void'\n\n def to_llvm(self, context):\n return lc.Type.void()\n\n\nclass ObjectType(Type):\n is_object = True\n\n def __repr__(self):\n return 'PyObject *'\n\n\nclass FunctionType(Type):\n subtypes = ['return_type', 'args']\n is_function = True\n is_vararg = False\n\n def to_llvm(self, context):\n return lc.Type.function(self.return_type.to_llvm(context), [\n arg_type.to_llvm(context) for arg_type in self.args], self.\n is_vararg)\n\n def __str__(self):\n args = map(str, self.args)\n if self.is_vararg:\n args.append('...')\n return '%s (*)(%s)' % (self.return_type, ', '.join(args))\n\n\nclass VectorType(Type):\n subtypes = ['element_type']\n is_vector = True\n vector_size = None\n\n def __init__(self, element_type, vector_size, **kwds):\n super(VectorType, self).__init__(**kwds)\n assert (element_type.is_int or element_type.is_float\n ) and element_type.itemsize in (4, 8), element_type\n self.element_type = element_type\n self.vector_size = vector_size\n\n def to_llvm(self, context):\n return lc.Type.vector(self.element_type.to_llvm(context), self.\n vector_size)\n\n @property\n def comparison_type_list(self):\n return self.subtype_list + [self.vector_size]\n\n def __str__(self):\n itemsize = self.element_type.itemsize\n if self.element_type.is_float:\n if itemsize == 4:\n return '__m128'\n else:\n return '__m128d'\n elif itemsize == 4:\n return '__m128i'\n else:\n raise NotImplementedError\n\n\n<assignment token>\n<code token>\n",
"<docstring token>\n<assignment token>\n<import token>\n<code token>\n<import token>\n<code token>\n<class token>\n<function token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass NumericType(NamedType):\n <docstring token>\n <assignment token>\n\n\nclass IntType(NumericType):\n is_int = True\n is_int_like = True\n name = 'int'\n signed = True\n rank = 4\n itemsize = 4\n kind = INT_KIND\n\n def to_llvm(self, context):\n if self.itemsize == 1:\n return lc.Type.int(8)\n elif self.itemsize == 2:\n return lc.Type.int(16)\n elif self.itemsize == 4:\n return lc.Type.int(32)\n else:\n assert self.itemsize == 8, self\n return lc.Type.int(64)\n\n\nclass FloatType(NumericType):\n is_float = True\n kind = FLOAT_KIND\n\n @property\n def comparison_type_list(self):\n return self.subtype_list + [self.itemsize]\n\n def to_llvm(self, context):\n if self.itemsize == 4:\n return lc.Type.float()\n elif self.itemsize == 8:\n return lc.Type.double()\n else:\n assert self.itemsize == 16\n return lc.Type.fp128()\n\n\nclass ComplexType(NumericType):\n is_complex = True\n subtypes = ['base_type']\n kind = COMPLEX_KIND\n\n\nclass Py_ssize_t_Type(IntType):\n is_py_ssize_t = True\n name = 'Py_ssize_t'\n rank = 9\n signed = True\n\n def __init__(self, **kwds):\n super(Py_ssize_t_Type, self).__init__(**kwds)\n self.itemsize = _plat_bits / 8\n\n\nclass NPyIntp(IntType):\n is_numpy_intp = True\n name = 'npy_intp'\n\n def __init__(self, **kwds):\n super(NPyIntp, self).__init__(**kwds)\n import numpy as np\n ctypes_array = np.empty(0).ctypes.strides\n self.itemsize = ctypes.sizeof(ctypes_array._type_)\n\n\nclass CharType(IntType):\n is_char = True\n name = 'char'\n rank = 1\n signed = True\n\n def to_llvm(self, context):\n return lc.Type.int(8)\n\n\nclass CStringType(Type):\n is_c_string = True\n\n def __repr__(self):\n return 'const char *'\n\n def to_llvm(self, context):\n return char.pointer().to_llvm(context)\n\n\nclass VoidType(NamedType):\n is_void = True\n name = 'void'\n\n def to_llvm(self, context):\n return lc.Type.void()\n\n\nclass ObjectType(Type):\n is_object = True\n\n def __repr__(self):\n return 'PyObject *'\n\n\nclass FunctionType(Type):\n subtypes = ['return_type', 'args']\n is_function = True\n is_vararg = False\n\n def to_llvm(self, context):\n return lc.Type.function(self.return_type.to_llvm(context), [\n arg_type.to_llvm(context) for arg_type in self.args], self.\n is_vararg)\n\n def __str__(self):\n args = map(str, self.args)\n if self.is_vararg:\n args.append('...')\n return '%s (*)(%s)' % (self.return_type, ', '.join(args))\n\n\nclass VectorType(Type):\n subtypes = ['element_type']\n is_vector = True\n vector_size = None\n\n def __init__(self, element_type, vector_size, **kwds):\n super(VectorType, self).__init__(**kwds)\n assert (element_type.is_int or element_type.is_float\n ) and element_type.itemsize in (4, 8), element_type\n self.element_type = element_type\n self.vector_size = vector_size\n\n def to_llvm(self, context):\n return lc.Type.vector(self.element_type.to_llvm(context), self.\n vector_size)\n\n @property\n def comparison_type_list(self):\n return self.subtype_list + [self.vector_size]\n\n def __str__(self):\n itemsize = self.element_type.itemsize\n if self.element_type.is_float:\n if itemsize == 4:\n return '__m128'\n else:\n return '__m128d'\n elif itemsize == 4:\n return '__m128i'\n else:\n raise NotImplementedError\n\n\n<assignment token>\n<code token>\n",
"<docstring token>\n<assignment token>\n<import token>\n<code token>\n<import token>\n<code token>\n<class token>\n<function token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass IntType(NumericType):\n is_int = True\n is_int_like = True\n name = 'int'\n signed = True\n rank = 4\n itemsize = 4\n kind = INT_KIND\n\n def to_llvm(self, context):\n if self.itemsize == 1:\n return lc.Type.int(8)\n elif self.itemsize == 2:\n return lc.Type.int(16)\n elif self.itemsize == 4:\n return lc.Type.int(32)\n else:\n assert self.itemsize == 8, self\n return lc.Type.int(64)\n\n\nclass FloatType(NumericType):\n is_float = True\n kind = FLOAT_KIND\n\n @property\n def comparison_type_list(self):\n return self.subtype_list + [self.itemsize]\n\n def to_llvm(self, context):\n if self.itemsize == 4:\n return lc.Type.float()\n elif self.itemsize == 8:\n return lc.Type.double()\n else:\n assert self.itemsize == 16\n return lc.Type.fp128()\n\n\nclass ComplexType(NumericType):\n is_complex = True\n subtypes = ['base_type']\n kind = COMPLEX_KIND\n\n\nclass Py_ssize_t_Type(IntType):\n is_py_ssize_t = True\n name = 'Py_ssize_t'\n rank = 9\n signed = True\n\n def __init__(self, **kwds):\n super(Py_ssize_t_Type, self).__init__(**kwds)\n self.itemsize = _plat_bits / 8\n\n\nclass NPyIntp(IntType):\n is_numpy_intp = True\n name = 'npy_intp'\n\n def __init__(self, **kwds):\n super(NPyIntp, self).__init__(**kwds)\n import numpy as np\n ctypes_array = np.empty(0).ctypes.strides\n self.itemsize = ctypes.sizeof(ctypes_array._type_)\n\n\nclass CharType(IntType):\n is_char = True\n name = 'char'\n rank = 1\n signed = True\n\n def to_llvm(self, context):\n return lc.Type.int(8)\n\n\nclass CStringType(Type):\n is_c_string = True\n\n def __repr__(self):\n return 'const char *'\n\n def to_llvm(self, context):\n return char.pointer().to_llvm(context)\n\n\nclass VoidType(NamedType):\n is_void = True\n name = 'void'\n\n def to_llvm(self, context):\n return lc.Type.void()\n\n\nclass ObjectType(Type):\n is_object = True\n\n def __repr__(self):\n return 'PyObject *'\n\n\nclass FunctionType(Type):\n subtypes = ['return_type', 'args']\n is_function = True\n is_vararg = False\n\n def to_llvm(self, context):\n return lc.Type.function(self.return_type.to_llvm(context), [\n arg_type.to_llvm(context) for arg_type in self.args], self.\n is_vararg)\n\n def __str__(self):\n args = map(str, self.args)\n if self.is_vararg:\n args.append('...')\n return '%s (*)(%s)' % (self.return_type, ', '.join(args))\n\n\nclass VectorType(Type):\n subtypes = ['element_type']\n is_vector = True\n vector_size = None\n\n def __init__(self, element_type, vector_size, **kwds):\n super(VectorType, self).__init__(**kwds)\n assert (element_type.is_int or element_type.is_float\n ) and element_type.itemsize in (4, 8), element_type\n self.element_type = element_type\n self.vector_size = vector_size\n\n def to_llvm(self, context):\n return lc.Type.vector(self.element_type.to_llvm(context), self.\n vector_size)\n\n @property\n def comparison_type_list(self):\n return self.subtype_list + [self.vector_size]\n\n def __str__(self):\n itemsize = self.element_type.itemsize\n if self.element_type.is_float:\n if itemsize == 4:\n return '__m128'\n else:\n return '__m128d'\n elif itemsize == 4:\n return '__m128i'\n else:\n raise NotImplementedError\n\n\n<assignment token>\n<code token>\n",
"<docstring token>\n<assignment token>\n<import token>\n<code token>\n<import token>\n<code token>\n<class token>\n<function token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass IntType(NumericType):\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n\n def to_llvm(self, context):\n if self.itemsize == 1:\n return lc.Type.int(8)\n elif self.itemsize == 2:\n return lc.Type.int(16)\n elif self.itemsize == 4:\n return lc.Type.int(32)\n else:\n assert self.itemsize == 8, self\n return lc.Type.int(64)\n\n\nclass FloatType(NumericType):\n is_float = True\n kind = FLOAT_KIND\n\n @property\n def comparison_type_list(self):\n return self.subtype_list + [self.itemsize]\n\n def to_llvm(self, context):\n if self.itemsize == 4:\n return lc.Type.float()\n elif self.itemsize == 8:\n return lc.Type.double()\n else:\n assert self.itemsize == 16\n return lc.Type.fp128()\n\n\nclass ComplexType(NumericType):\n is_complex = True\n subtypes = ['base_type']\n kind = COMPLEX_KIND\n\n\nclass Py_ssize_t_Type(IntType):\n is_py_ssize_t = True\n name = 'Py_ssize_t'\n rank = 9\n signed = True\n\n def __init__(self, **kwds):\n super(Py_ssize_t_Type, self).__init__(**kwds)\n self.itemsize = _plat_bits / 8\n\n\nclass NPyIntp(IntType):\n is_numpy_intp = True\n name = 'npy_intp'\n\n def __init__(self, **kwds):\n super(NPyIntp, self).__init__(**kwds)\n import numpy as np\n ctypes_array = np.empty(0).ctypes.strides\n self.itemsize = ctypes.sizeof(ctypes_array._type_)\n\n\nclass CharType(IntType):\n is_char = True\n name = 'char'\n rank = 1\n signed = True\n\n def to_llvm(self, context):\n return lc.Type.int(8)\n\n\nclass CStringType(Type):\n is_c_string = True\n\n def __repr__(self):\n return 'const char *'\n\n def to_llvm(self, context):\n return char.pointer().to_llvm(context)\n\n\nclass VoidType(NamedType):\n is_void = True\n name = 'void'\n\n def to_llvm(self, context):\n return lc.Type.void()\n\n\nclass ObjectType(Type):\n is_object = True\n\n def __repr__(self):\n return 'PyObject *'\n\n\nclass FunctionType(Type):\n subtypes = ['return_type', 'args']\n is_function = True\n is_vararg = False\n\n def to_llvm(self, context):\n return lc.Type.function(self.return_type.to_llvm(context), [\n arg_type.to_llvm(context) for arg_type in self.args], self.\n is_vararg)\n\n def __str__(self):\n args = map(str, self.args)\n if self.is_vararg:\n args.append('...')\n return '%s (*)(%s)' % (self.return_type, ', '.join(args))\n\n\nclass VectorType(Type):\n subtypes = ['element_type']\n is_vector = True\n vector_size = None\n\n def __init__(self, element_type, vector_size, **kwds):\n super(VectorType, self).__init__(**kwds)\n assert (element_type.is_int or element_type.is_float\n ) and element_type.itemsize in (4, 8), element_type\n self.element_type = element_type\n self.vector_size = vector_size\n\n def to_llvm(self, context):\n return lc.Type.vector(self.element_type.to_llvm(context), self.\n vector_size)\n\n @property\n def comparison_type_list(self):\n return self.subtype_list + [self.vector_size]\n\n def __str__(self):\n itemsize = self.element_type.itemsize\n if self.element_type.is_float:\n if itemsize == 4:\n return '__m128'\n else:\n return '__m128d'\n elif itemsize == 4:\n return '__m128i'\n else:\n raise NotImplementedError\n\n\n<assignment token>\n<code token>\n",
"<docstring token>\n<assignment token>\n<import token>\n<code token>\n<import token>\n<code token>\n<class token>\n<function token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass IntType(NumericType):\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n\n\nclass FloatType(NumericType):\n is_float = True\n kind = FLOAT_KIND\n\n @property\n def comparison_type_list(self):\n return self.subtype_list + [self.itemsize]\n\n def to_llvm(self, context):\n if self.itemsize == 4:\n return lc.Type.float()\n elif self.itemsize == 8:\n return lc.Type.double()\n else:\n assert self.itemsize == 16\n return lc.Type.fp128()\n\n\nclass ComplexType(NumericType):\n is_complex = True\n subtypes = ['base_type']\n kind = COMPLEX_KIND\n\n\nclass Py_ssize_t_Type(IntType):\n is_py_ssize_t = True\n name = 'Py_ssize_t'\n rank = 9\n signed = True\n\n def __init__(self, **kwds):\n super(Py_ssize_t_Type, self).__init__(**kwds)\n self.itemsize = _plat_bits / 8\n\n\nclass NPyIntp(IntType):\n is_numpy_intp = True\n name = 'npy_intp'\n\n def __init__(self, **kwds):\n super(NPyIntp, self).__init__(**kwds)\n import numpy as np\n ctypes_array = np.empty(0).ctypes.strides\n self.itemsize = ctypes.sizeof(ctypes_array._type_)\n\n\nclass CharType(IntType):\n is_char = True\n name = 'char'\n rank = 1\n signed = True\n\n def to_llvm(self, context):\n return lc.Type.int(8)\n\n\nclass CStringType(Type):\n is_c_string = True\n\n def __repr__(self):\n return 'const char *'\n\n def to_llvm(self, context):\n return char.pointer().to_llvm(context)\n\n\nclass VoidType(NamedType):\n is_void = True\n name = 'void'\n\n def to_llvm(self, context):\n return lc.Type.void()\n\n\nclass ObjectType(Type):\n is_object = True\n\n def __repr__(self):\n return 'PyObject *'\n\n\nclass FunctionType(Type):\n subtypes = ['return_type', 'args']\n is_function = True\n is_vararg = False\n\n def to_llvm(self, context):\n return lc.Type.function(self.return_type.to_llvm(context), [\n arg_type.to_llvm(context) for arg_type in self.args], self.\n is_vararg)\n\n def __str__(self):\n args = map(str, self.args)\n if self.is_vararg:\n args.append('...')\n return '%s (*)(%s)' % (self.return_type, ', '.join(args))\n\n\nclass VectorType(Type):\n subtypes = ['element_type']\n is_vector = True\n vector_size = None\n\n def __init__(self, element_type, vector_size, **kwds):\n super(VectorType, self).__init__(**kwds)\n assert (element_type.is_int or element_type.is_float\n ) and element_type.itemsize in (4, 8), element_type\n self.element_type = element_type\n self.vector_size = vector_size\n\n def to_llvm(self, context):\n return lc.Type.vector(self.element_type.to_llvm(context), self.\n vector_size)\n\n @property\n def comparison_type_list(self):\n return self.subtype_list + [self.vector_size]\n\n def __str__(self):\n itemsize = self.element_type.itemsize\n if self.element_type.is_float:\n if itemsize == 4:\n return '__m128'\n else:\n return '__m128d'\n elif itemsize == 4:\n return '__m128i'\n else:\n raise NotImplementedError\n\n\n<assignment token>\n<code token>\n",
"<docstring token>\n<assignment token>\n<import token>\n<code token>\n<import token>\n<code token>\n<class token>\n<function token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass FloatType(NumericType):\n is_float = True\n kind = FLOAT_KIND\n\n @property\n def comparison_type_list(self):\n return self.subtype_list + [self.itemsize]\n\n def to_llvm(self, context):\n if self.itemsize == 4:\n return lc.Type.float()\n elif self.itemsize == 8:\n return lc.Type.double()\n else:\n assert self.itemsize == 16\n return lc.Type.fp128()\n\n\nclass ComplexType(NumericType):\n is_complex = True\n subtypes = ['base_type']\n kind = COMPLEX_KIND\n\n\nclass Py_ssize_t_Type(IntType):\n is_py_ssize_t = True\n name = 'Py_ssize_t'\n rank = 9\n signed = True\n\n def __init__(self, **kwds):\n super(Py_ssize_t_Type, self).__init__(**kwds)\n self.itemsize = _plat_bits / 8\n\n\nclass NPyIntp(IntType):\n is_numpy_intp = True\n name = 'npy_intp'\n\n def __init__(self, **kwds):\n super(NPyIntp, self).__init__(**kwds)\n import numpy as np\n ctypes_array = np.empty(0).ctypes.strides\n self.itemsize = ctypes.sizeof(ctypes_array._type_)\n\n\nclass CharType(IntType):\n is_char = True\n name = 'char'\n rank = 1\n signed = True\n\n def to_llvm(self, context):\n return lc.Type.int(8)\n\n\nclass CStringType(Type):\n is_c_string = True\n\n def __repr__(self):\n return 'const char *'\n\n def to_llvm(self, context):\n return char.pointer().to_llvm(context)\n\n\nclass VoidType(NamedType):\n is_void = True\n name = 'void'\n\n def to_llvm(self, context):\n return lc.Type.void()\n\n\nclass ObjectType(Type):\n is_object = True\n\n def __repr__(self):\n return 'PyObject *'\n\n\nclass FunctionType(Type):\n subtypes = ['return_type', 'args']\n is_function = True\n is_vararg = False\n\n def to_llvm(self, context):\n return lc.Type.function(self.return_type.to_llvm(context), [\n arg_type.to_llvm(context) for arg_type in self.args], self.\n is_vararg)\n\n def __str__(self):\n args = map(str, self.args)\n if self.is_vararg:\n args.append('...')\n return '%s (*)(%s)' % (self.return_type, ', '.join(args))\n\n\nclass VectorType(Type):\n subtypes = ['element_type']\n is_vector = True\n vector_size = None\n\n def __init__(self, element_type, vector_size, **kwds):\n super(VectorType, self).__init__(**kwds)\n assert (element_type.is_int or element_type.is_float\n ) and element_type.itemsize in (4, 8), element_type\n self.element_type = element_type\n self.vector_size = vector_size\n\n def to_llvm(self, context):\n return lc.Type.vector(self.element_type.to_llvm(context), self.\n vector_size)\n\n @property\n def comparison_type_list(self):\n return self.subtype_list + [self.vector_size]\n\n def __str__(self):\n itemsize = self.element_type.itemsize\n if self.element_type.is_float:\n if itemsize == 4:\n return '__m128'\n else:\n return '__m128d'\n elif itemsize == 4:\n return '__m128i'\n else:\n raise NotImplementedError\n\n\n<assignment token>\n<code token>\n",
"<docstring token>\n<assignment token>\n<import token>\n<code token>\n<import token>\n<code token>\n<class token>\n<function token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass FloatType(NumericType):\n <assignment token>\n <assignment token>\n\n @property\n def comparison_type_list(self):\n return self.subtype_list + [self.itemsize]\n\n def to_llvm(self, context):\n if self.itemsize == 4:\n return lc.Type.float()\n elif self.itemsize == 8:\n return lc.Type.double()\n else:\n assert self.itemsize == 16\n return lc.Type.fp128()\n\n\nclass ComplexType(NumericType):\n is_complex = True\n subtypes = ['base_type']\n kind = COMPLEX_KIND\n\n\nclass Py_ssize_t_Type(IntType):\n is_py_ssize_t = True\n name = 'Py_ssize_t'\n rank = 9\n signed = True\n\n def __init__(self, **kwds):\n super(Py_ssize_t_Type, self).__init__(**kwds)\n self.itemsize = _plat_bits / 8\n\n\nclass NPyIntp(IntType):\n is_numpy_intp = True\n name = 'npy_intp'\n\n def __init__(self, **kwds):\n super(NPyIntp, self).__init__(**kwds)\n import numpy as np\n ctypes_array = np.empty(0).ctypes.strides\n self.itemsize = ctypes.sizeof(ctypes_array._type_)\n\n\nclass CharType(IntType):\n is_char = True\n name = 'char'\n rank = 1\n signed = True\n\n def to_llvm(self, context):\n return lc.Type.int(8)\n\n\nclass CStringType(Type):\n is_c_string = True\n\n def __repr__(self):\n return 'const char *'\n\n def to_llvm(self, context):\n return char.pointer().to_llvm(context)\n\n\nclass VoidType(NamedType):\n is_void = True\n name = 'void'\n\n def to_llvm(self, context):\n return lc.Type.void()\n\n\nclass ObjectType(Type):\n is_object = True\n\n def __repr__(self):\n return 'PyObject *'\n\n\nclass FunctionType(Type):\n subtypes = ['return_type', 'args']\n is_function = True\n is_vararg = False\n\n def to_llvm(self, context):\n return lc.Type.function(self.return_type.to_llvm(context), [\n arg_type.to_llvm(context) for arg_type in self.args], self.\n is_vararg)\n\n def __str__(self):\n args = map(str, self.args)\n if self.is_vararg:\n args.append('...')\n return '%s (*)(%s)' % (self.return_type, ', '.join(args))\n\n\nclass VectorType(Type):\n subtypes = ['element_type']\n is_vector = True\n vector_size = None\n\n def __init__(self, element_type, vector_size, **kwds):\n super(VectorType, self).__init__(**kwds)\n assert (element_type.is_int or element_type.is_float\n ) and element_type.itemsize in (4, 8), element_type\n self.element_type = element_type\n self.vector_size = vector_size\n\n def to_llvm(self, context):\n return lc.Type.vector(self.element_type.to_llvm(context), self.\n vector_size)\n\n @property\n def comparison_type_list(self):\n return self.subtype_list + [self.vector_size]\n\n def __str__(self):\n itemsize = self.element_type.itemsize\n if self.element_type.is_float:\n if itemsize == 4:\n return '__m128'\n else:\n return '__m128d'\n elif itemsize == 4:\n return '__m128i'\n else:\n raise NotImplementedError\n\n\n<assignment token>\n<code token>\n",
"<docstring token>\n<assignment token>\n<import token>\n<code token>\n<import token>\n<code token>\n<class token>\n<function token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass FloatType(NumericType):\n <assignment token>\n <assignment token>\n <function token>\n\n def to_llvm(self, context):\n if self.itemsize == 4:\n return lc.Type.float()\n elif self.itemsize == 8:\n return lc.Type.double()\n else:\n assert self.itemsize == 16\n return lc.Type.fp128()\n\n\nclass ComplexType(NumericType):\n is_complex = True\n subtypes = ['base_type']\n kind = COMPLEX_KIND\n\n\nclass Py_ssize_t_Type(IntType):\n is_py_ssize_t = True\n name = 'Py_ssize_t'\n rank = 9\n signed = True\n\n def __init__(self, **kwds):\n super(Py_ssize_t_Type, self).__init__(**kwds)\n self.itemsize = _plat_bits / 8\n\n\nclass NPyIntp(IntType):\n is_numpy_intp = True\n name = 'npy_intp'\n\n def __init__(self, **kwds):\n super(NPyIntp, self).__init__(**kwds)\n import numpy as np\n ctypes_array = np.empty(0).ctypes.strides\n self.itemsize = ctypes.sizeof(ctypes_array._type_)\n\n\nclass CharType(IntType):\n is_char = True\n name = 'char'\n rank = 1\n signed = True\n\n def to_llvm(self, context):\n return lc.Type.int(8)\n\n\nclass CStringType(Type):\n is_c_string = True\n\n def __repr__(self):\n return 'const char *'\n\n def to_llvm(self, context):\n return char.pointer().to_llvm(context)\n\n\nclass VoidType(NamedType):\n is_void = True\n name = 'void'\n\n def to_llvm(self, context):\n return lc.Type.void()\n\n\nclass ObjectType(Type):\n is_object = True\n\n def __repr__(self):\n return 'PyObject *'\n\n\nclass FunctionType(Type):\n subtypes = ['return_type', 'args']\n is_function = True\n is_vararg = False\n\n def to_llvm(self, context):\n return lc.Type.function(self.return_type.to_llvm(context), [\n arg_type.to_llvm(context) for arg_type in self.args], self.\n is_vararg)\n\n def __str__(self):\n args = map(str, self.args)\n if self.is_vararg:\n args.append('...')\n return '%s (*)(%s)' % (self.return_type, ', '.join(args))\n\n\nclass VectorType(Type):\n subtypes = ['element_type']\n is_vector = True\n vector_size = None\n\n def __init__(self, element_type, vector_size, **kwds):\n super(VectorType, self).__init__(**kwds)\n assert (element_type.is_int or element_type.is_float\n ) and element_type.itemsize in (4, 8), element_type\n self.element_type = element_type\n self.vector_size = vector_size\n\n def to_llvm(self, context):\n return lc.Type.vector(self.element_type.to_llvm(context), self.\n vector_size)\n\n @property\n def comparison_type_list(self):\n return self.subtype_list + [self.vector_size]\n\n def __str__(self):\n itemsize = self.element_type.itemsize\n if self.element_type.is_float:\n if itemsize == 4:\n return '__m128'\n else:\n return '__m128d'\n elif itemsize == 4:\n return '__m128i'\n else:\n raise NotImplementedError\n\n\n<assignment token>\n<code token>\n",
"<docstring token>\n<assignment token>\n<import token>\n<code token>\n<import token>\n<code token>\n<class token>\n<function token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass FloatType(NumericType):\n <assignment token>\n <assignment token>\n <function token>\n <function token>\n\n\nclass ComplexType(NumericType):\n is_complex = True\n subtypes = ['base_type']\n kind = COMPLEX_KIND\n\n\nclass Py_ssize_t_Type(IntType):\n is_py_ssize_t = True\n name = 'Py_ssize_t'\n rank = 9\n signed = True\n\n def __init__(self, **kwds):\n super(Py_ssize_t_Type, self).__init__(**kwds)\n self.itemsize = _plat_bits / 8\n\n\nclass NPyIntp(IntType):\n is_numpy_intp = True\n name = 'npy_intp'\n\n def __init__(self, **kwds):\n super(NPyIntp, self).__init__(**kwds)\n import numpy as np\n ctypes_array = np.empty(0).ctypes.strides\n self.itemsize = ctypes.sizeof(ctypes_array._type_)\n\n\nclass CharType(IntType):\n is_char = True\n name = 'char'\n rank = 1\n signed = True\n\n def to_llvm(self, context):\n return lc.Type.int(8)\n\n\nclass CStringType(Type):\n is_c_string = True\n\n def __repr__(self):\n return 'const char *'\n\n def to_llvm(self, context):\n return char.pointer().to_llvm(context)\n\n\nclass VoidType(NamedType):\n is_void = True\n name = 'void'\n\n def to_llvm(self, context):\n return lc.Type.void()\n\n\nclass ObjectType(Type):\n is_object = True\n\n def __repr__(self):\n return 'PyObject *'\n\n\nclass FunctionType(Type):\n subtypes = ['return_type', 'args']\n is_function = True\n is_vararg = False\n\n def to_llvm(self, context):\n return lc.Type.function(self.return_type.to_llvm(context), [\n arg_type.to_llvm(context) for arg_type in self.args], self.\n is_vararg)\n\n def __str__(self):\n args = map(str, self.args)\n if self.is_vararg:\n args.append('...')\n return '%s (*)(%s)' % (self.return_type, ', '.join(args))\n\n\nclass VectorType(Type):\n subtypes = ['element_type']\n is_vector = True\n vector_size = None\n\n def __init__(self, element_type, vector_size, **kwds):\n super(VectorType, self).__init__(**kwds)\n assert (element_type.is_int or element_type.is_float\n ) and element_type.itemsize in (4, 8), element_type\n self.element_type = element_type\n self.vector_size = vector_size\n\n def to_llvm(self, context):\n return lc.Type.vector(self.element_type.to_llvm(context), self.\n vector_size)\n\n @property\n def comparison_type_list(self):\n return self.subtype_list + [self.vector_size]\n\n def __str__(self):\n itemsize = self.element_type.itemsize\n if self.element_type.is_float:\n if itemsize == 4:\n return '__m128'\n else:\n return '__m128d'\n elif itemsize == 4:\n return '__m128i'\n else:\n raise NotImplementedError\n\n\n<assignment token>\n<code token>\n",
"<docstring token>\n<assignment token>\n<import token>\n<code token>\n<import token>\n<code token>\n<class token>\n<function token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass ComplexType(NumericType):\n is_complex = True\n subtypes = ['base_type']\n kind = COMPLEX_KIND\n\n\nclass Py_ssize_t_Type(IntType):\n is_py_ssize_t = True\n name = 'Py_ssize_t'\n rank = 9\n signed = True\n\n def __init__(self, **kwds):\n super(Py_ssize_t_Type, self).__init__(**kwds)\n self.itemsize = _plat_bits / 8\n\n\nclass NPyIntp(IntType):\n is_numpy_intp = True\n name = 'npy_intp'\n\n def __init__(self, **kwds):\n super(NPyIntp, self).__init__(**kwds)\n import numpy as np\n ctypes_array = np.empty(0).ctypes.strides\n self.itemsize = ctypes.sizeof(ctypes_array._type_)\n\n\nclass CharType(IntType):\n is_char = True\n name = 'char'\n rank = 1\n signed = True\n\n def to_llvm(self, context):\n return lc.Type.int(8)\n\n\nclass CStringType(Type):\n is_c_string = True\n\n def __repr__(self):\n return 'const char *'\n\n def to_llvm(self, context):\n return char.pointer().to_llvm(context)\n\n\nclass VoidType(NamedType):\n is_void = True\n name = 'void'\n\n def to_llvm(self, context):\n return lc.Type.void()\n\n\nclass ObjectType(Type):\n is_object = True\n\n def __repr__(self):\n return 'PyObject *'\n\n\nclass FunctionType(Type):\n subtypes = ['return_type', 'args']\n is_function = True\n is_vararg = False\n\n def to_llvm(self, context):\n return lc.Type.function(self.return_type.to_llvm(context), [\n arg_type.to_llvm(context) for arg_type in self.args], self.\n is_vararg)\n\n def __str__(self):\n args = map(str, self.args)\n if self.is_vararg:\n args.append('...')\n return '%s (*)(%s)' % (self.return_type, ', '.join(args))\n\n\nclass VectorType(Type):\n subtypes = ['element_type']\n is_vector = True\n vector_size = None\n\n def __init__(self, element_type, vector_size, **kwds):\n super(VectorType, self).__init__(**kwds)\n assert (element_type.is_int or element_type.is_float\n ) and element_type.itemsize in (4, 8), element_type\n self.element_type = element_type\n self.vector_size = vector_size\n\n def to_llvm(self, context):\n return lc.Type.vector(self.element_type.to_llvm(context), self.\n vector_size)\n\n @property\n def comparison_type_list(self):\n return self.subtype_list + [self.vector_size]\n\n def __str__(self):\n itemsize = self.element_type.itemsize\n if self.element_type.is_float:\n if itemsize == 4:\n return '__m128'\n else:\n return '__m128d'\n elif itemsize == 4:\n return '__m128i'\n else:\n raise NotImplementedError\n\n\n<assignment token>\n<code token>\n",
"<docstring token>\n<assignment token>\n<import token>\n<code token>\n<import token>\n<code token>\n<class token>\n<function token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass ComplexType(NumericType):\n <assignment token>\n <assignment token>\n <assignment token>\n\n\nclass Py_ssize_t_Type(IntType):\n is_py_ssize_t = True\n name = 'Py_ssize_t'\n rank = 9\n signed = True\n\n def __init__(self, **kwds):\n super(Py_ssize_t_Type, self).__init__(**kwds)\n self.itemsize = _plat_bits / 8\n\n\nclass NPyIntp(IntType):\n is_numpy_intp = True\n name = 'npy_intp'\n\n def __init__(self, **kwds):\n super(NPyIntp, self).__init__(**kwds)\n import numpy as np\n ctypes_array = np.empty(0).ctypes.strides\n self.itemsize = ctypes.sizeof(ctypes_array._type_)\n\n\nclass CharType(IntType):\n is_char = True\n name = 'char'\n rank = 1\n signed = True\n\n def to_llvm(self, context):\n return lc.Type.int(8)\n\n\nclass CStringType(Type):\n is_c_string = True\n\n def __repr__(self):\n return 'const char *'\n\n def to_llvm(self, context):\n return char.pointer().to_llvm(context)\n\n\nclass VoidType(NamedType):\n is_void = True\n name = 'void'\n\n def to_llvm(self, context):\n return lc.Type.void()\n\n\nclass ObjectType(Type):\n is_object = True\n\n def __repr__(self):\n return 'PyObject *'\n\n\nclass FunctionType(Type):\n subtypes = ['return_type', 'args']\n is_function = True\n is_vararg = False\n\n def to_llvm(self, context):\n return lc.Type.function(self.return_type.to_llvm(context), [\n arg_type.to_llvm(context) for arg_type in self.args], self.\n is_vararg)\n\n def __str__(self):\n args = map(str, self.args)\n if self.is_vararg:\n args.append('...')\n return '%s (*)(%s)' % (self.return_type, ', '.join(args))\n\n\nclass VectorType(Type):\n subtypes = ['element_type']\n is_vector = True\n vector_size = None\n\n def __init__(self, element_type, vector_size, **kwds):\n super(VectorType, self).__init__(**kwds)\n assert (element_type.is_int or element_type.is_float\n ) and element_type.itemsize in (4, 8), element_type\n self.element_type = element_type\n self.vector_size = vector_size\n\n def to_llvm(self, context):\n return lc.Type.vector(self.element_type.to_llvm(context), self.\n vector_size)\n\n @property\n def comparison_type_list(self):\n return self.subtype_list + [self.vector_size]\n\n def __str__(self):\n itemsize = self.element_type.itemsize\n if self.element_type.is_float:\n if itemsize == 4:\n return '__m128'\n else:\n return '__m128d'\n elif itemsize == 4:\n return '__m128i'\n else:\n raise NotImplementedError\n\n\n<assignment token>\n<code token>\n",
"<docstring token>\n<assignment token>\n<import token>\n<code token>\n<import token>\n<code token>\n<class token>\n<function token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass Py_ssize_t_Type(IntType):\n is_py_ssize_t = True\n name = 'Py_ssize_t'\n rank = 9\n signed = True\n\n def __init__(self, **kwds):\n super(Py_ssize_t_Type, self).__init__(**kwds)\n self.itemsize = _plat_bits / 8\n\n\nclass NPyIntp(IntType):\n is_numpy_intp = True\n name = 'npy_intp'\n\n def __init__(self, **kwds):\n super(NPyIntp, self).__init__(**kwds)\n import numpy as np\n ctypes_array = np.empty(0).ctypes.strides\n self.itemsize = ctypes.sizeof(ctypes_array._type_)\n\n\nclass CharType(IntType):\n is_char = True\n name = 'char'\n rank = 1\n signed = True\n\n def to_llvm(self, context):\n return lc.Type.int(8)\n\n\nclass CStringType(Type):\n is_c_string = True\n\n def __repr__(self):\n return 'const char *'\n\n def to_llvm(self, context):\n return char.pointer().to_llvm(context)\n\n\nclass VoidType(NamedType):\n is_void = True\n name = 'void'\n\n def to_llvm(self, context):\n return lc.Type.void()\n\n\nclass ObjectType(Type):\n is_object = True\n\n def __repr__(self):\n return 'PyObject *'\n\n\nclass FunctionType(Type):\n subtypes = ['return_type', 'args']\n is_function = True\n is_vararg = False\n\n def to_llvm(self, context):\n return lc.Type.function(self.return_type.to_llvm(context), [\n arg_type.to_llvm(context) for arg_type in self.args], self.\n is_vararg)\n\n def __str__(self):\n args = map(str, self.args)\n if self.is_vararg:\n args.append('...')\n return '%s (*)(%s)' % (self.return_type, ', '.join(args))\n\n\nclass VectorType(Type):\n subtypes = ['element_type']\n is_vector = True\n vector_size = None\n\n def __init__(self, element_type, vector_size, **kwds):\n super(VectorType, self).__init__(**kwds)\n assert (element_type.is_int or element_type.is_float\n ) and element_type.itemsize in (4, 8), element_type\n self.element_type = element_type\n self.vector_size = vector_size\n\n def to_llvm(self, context):\n return lc.Type.vector(self.element_type.to_llvm(context), self.\n vector_size)\n\n @property\n def comparison_type_list(self):\n return self.subtype_list + [self.vector_size]\n\n def __str__(self):\n itemsize = self.element_type.itemsize\n if self.element_type.is_float:\n if itemsize == 4:\n return '__m128'\n else:\n return '__m128d'\n elif itemsize == 4:\n return '__m128i'\n else:\n raise NotImplementedError\n\n\n<assignment token>\n<code token>\n",
"<docstring token>\n<assignment token>\n<import token>\n<code token>\n<import token>\n<code token>\n<class token>\n<function token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass Py_ssize_t_Type(IntType):\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n\n def __init__(self, **kwds):\n super(Py_ssize_t_Type, self).__init__(**kwds)\n self.itemsize = _plat_bits / 8\n\n\nclass NPyIntp(IntType):\n is_numpy_intp = True\n name = 'npy_intp'\n\n def __init__(self, **kwds):\n super(NPyIntp, self).__init__(**kwds)\n import numpy as np\n ctypes_array = np.empty(0).ctypes.strides\n self.itemsize = ctypes.sizeof(ctypes_array._type_)\n\n\nclass CharType(IntType):\n is_char = True\n name = 'char'\n rank = 1\n signed = True\n\n def to_llvm(self, context):\n return lc.Type.int(8)\n\n\nclass CStringType(Type):\n is_c_string = True\n\n def __repr__(self):\n return 'const char *'\n\n def to_llvm(self, context):\n return char.pointer().to_llvm(context)\n\n\nclass VoidType(NamedType):\n is_void = True\n name = 'void'\n\n def to_llvm(self, context):\n return lc.Type.void()\n\n\nclass ObjectType(Type):\n is_object = True\n\n def __repr__(self):\n return 'PyObject *'\n\n\nclass FunctionType(Type):\n subtypes = ['return_type', 'args']\n is_function = True\n is_vararg = False\n\n def to_llvm(self, context):\n return lc.Type.function(self.return_type.to_llvm(context), [\n arg_type.to_llvm(context) for arg_type in self.args], self.\n is_vararg)\n\n def __str__(self):\n args = map(str, self.args)\n if self.is_vararg:\n args.append('...')\n return '%s (*)(%s)' % (self.return_type, ', '.join(args))\n\n\nclass VectorType(Type):\n subtypes = ['element_type']\n is_vector = True\n vector_size = None\n\n def __init__(self, element_type, vector_size, **kwds):\n super(VectorType, self).__init__(**kwds)\n assert (element_type.is_int or element_type.is_float\n ) and element_type.itemsize in (4, 8), element_type\n self.element_type = element_type\n self.vector_size = vector_size\n\n def to_llvm(self, context):\n return lc.Type.vector(self.element_type.to_llvm(context), self.\n vector_size)\n\n @property\n def comparison_type_list(self):\n return self.subtype_list + [self.vector_size]\n\n def __str__(self):\n itemsize = self.element_type.itemsize\n if self.element_type.is_float:\n if itemsize == 4:\n return '__m128'\n else:\n return '__m128d'\n elif itemsize == 4:\n return '__m128i'\n else:\n raise NotImplementedError\n\n\n<assignment token>\n<code token>\n",
"<docstring token>\n<assignment token>\n<import token>\n<code token>\n<import token>\n<code token>\n<class token>\n<function token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass Py_ssize_t_Type(IntType):\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n\n\nclass NPyIntp(IntType):\n is_numpy_intp = True\n name = 'npy_intp'\n\n def __init__(self, **kwds):\n super(NPyIntp, self).__init__(**kwds)\n import numpy as np\n ctypes_array = np.empty(0).ctypes.strides\n self.itemsize = ctypes.sizeof(ctypes_array._type_)\n\n\nclass CharType(IntType):\n is_char = True\n name = 'char'\n rank = 1\n signed = True\n\n def to_llvm(self, context):\n return lc.Type.int(8)\n\n\nclass CStringType(Type):\n is_c_string = True\n\n def __repr__(self):\n return 'const char *'\n\n def to_llvm(self, context):\n return char.pointer().to_llvm(context)\n\n\nclass VoidType(NamedType):\n is_void = True\n name = 'void'\n\n def to_llvm(self, context):\n return lc.Type.void()\n\n\nclass ObjectType(Type):\n is_object = True\n\n def __repr__(self):\n return 'PyObject *'\n\n\nclass FunctionType(Type):\n subtypes = ['return_type', 'args']\n is_function = True\n is_vararg = False\n\n def to_llvm(self, context):\n return lc.Type.function(self.return_type.to_llvm(context), [\n arg_type.to_llvm(context) for arg_type in self.args], self.\n is_vararg)\n\n def __str__(self):\n args = map(str, self.args)\n if self.is_vararg:\n args.append('...')\n return '%s (*)(%s)' % (self.return_type, ', '.join(args))\n\n\nclass VectorType(Type):\n subtypes = ['element_type']\n is_vector = True\n vector_size = None\n\n def __init__(self, element_type, vector_size, **kwds):\n super(VectorType, self).__init__(**kwds)\n assert (element_type.is_int or element_type.is_float\n ) and element_type.itemsize in (4, 8), element_type\n self.element_type = element_type\n self.vector_size = vector_size\n\n def to_llvm(self, context):\n return lc.Type.vector(self.element_type.to_llvm(context), self.\n vector_size)\n\n @property\n def comparison_type_list(self):\n return self.subtype_list + [self.vector_size]\n\n def __str__(self):\n itemsize = self.element_type.itemsize\n if self.element_type.is_float:\n if itemsize == 4:\n return '__m128'\n else:\n return '__m128d'\n elif itemsize == 4:\n return '__m128i'\n else:\n raise NotImplementedError\n\n\n<assignment token>\n<code token>\n",
"<docstring token>\n<assignment token>\n<import token>\n<code token>\n<import token>\n<code token>\n<class token>\n<function token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass NPyIntp(IntType):\n is_numpy_intp = True\n name = 'npy_intp'\n\n def __init__(self, **kwds):\n super(NPyIntp, self).__init__(**kwds)\n import numpy as np\n ctypes_array = np.empty(0).ctypes.strides\n self.itemsize = ctypes.sizeof(ctypes_array._type_)\n\n\nclass CharType(IntType):\n is_char = True\n name = 'char'\n rank = 1\n signed = True\n\n def to_llvm(self, context):\n return lc.Type.int(8)\n\n\nclass CStringType(Type):\n is_c_string = True\n\n def __repr__(self):\n return 'const char *'\n\n def to_llvm(self, context):\n return char.pointer().to_llvm(context)\n\n\nclass VoidType(NamedType):\n is_void = True\n name = 'void'\n\n def to_llvm(self, context):\n return lc.Type.void()\n\n\nclass ObjectType(Type):\n is_object = True\n\n def __repr__(self):\n return 'PyObject *'\n\n\nclass FunctionType(Type):\n subtypes = ['return_type', 'args']\n is_function = True\n is_vararg = False\n\n def to_llvm(self, context):\n return lc.Type.function(self.return_type.to_llvm(context), [\n arg_type.to_llvm(context) for arg_type in self.args], self.\n is_vararg)\n\n def __str__(self):\n args = map(str, self.args)\n if self.is_vararg:\n args.append('...')\n return '%s (*)(%s)' % (self.return_type, ', '.join(args))\n\n\nclass VectorType(Type):\n subtypes = ['element_type']\n is_vector = True\n vector_size = None\n\n def __init__(self, element_type, vector_size, **kwds):\n super(VectorType, self).__init__(**kwds)\n assert (element_type.is_int or element_type.is_float\n ) and element_type.itemsize in (4, 8), element_type\n self.element_type = element_type\n self.vector_size = vector_size\n\n def to_llvm(self, context):\n return lc.Type.vector(self.element_type.to_llvm(context), self.\n vector_size)\n\n @property\n def comparison_type_list(self):\n return self.subtype_list + [self.vector_size]\n\n def __str__(self):\n itemsize = self.element_type.itemsize\n if self.element_type.is_float:\n if itemsize == 4:\n return '__m128'\n else:\n return '__m128d'\n elif itemsize == 4:\n return '__m128i'\n else:\n raise NotImplementedError\n\n\n<assignment token>\n<code token>\n",
"<docstring token>\n<assignment token>\n<import token>\n<code token>\n<import token>\n<code token>\n<class token>\n<function token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass NPyIntp(IntType):\n <assignment token>\n <assignment token>\n\n def __init__(self, **kwds):\n super(NPyIntp, self).__init__(**kwds)\n import numpy as np\n ctypes_array = np.empty(0).ctypes.strides\n self.itemsize = ctypes.sizeof(ctypes_array._type_)\n\n\nclass CharType(IntType):\n is_char = True\n name = 'char'\n rank = 1\n signed = True\n\n def to_llvm(self, context):\n return lc.Type.int(8)\n\n\nclass CStringType(Type):\n is_c_string = True\n\n def __repr__(self):\n return 'const char *'\n\n def to_llvm(self, context):\n return char.pointer().to_llvm(context)\n\n\nclass VoidType(NamedType):\n is_void = True\n name = 'void'\n\n def to_llvm(self, context):\n return lc.Type.void()\n\n\nclass ObjectType(Type):\n is_object = True\n\n def __repr__(self):\n return 'PyObject *'\n\n\nclass FunctionType(Type):\n subtypes = ['return_type', 'args']\n is_function = True\n is_vararg = False\n\n def to_llvm(self, context):\n return lc.Type.function(self.return_type.to_llvm(context), [\n arg_type.to_llvm(context) for arg_type in self.args], self.\n is_vararg)\n\n def __str__(self):\n args = map(str, self.args)\n if self.is_vararg:\n args.append('...')\n return '%s (*)(%s)' % (self.return_type, ', '.join(args))\n\n\nclass VectorType(Type):\n subtypes = ['element_type']\n is_vector = True\n vector_size = None\n\n def __init__(self, element_type, vector_size, **kwds):\n super(VectorType, self).__init__(**kwds)\n assert (element_type.is_int or element_type.is_float\n ) and element_type.itemsize in (4, 8), element_type\n self.element_type = element_type\n self.vector_size = vector_size\n\n def to_llvm(self, context):\n return lc.Type.vector(self.element_type.to_llvm(context), self.\n vector_size)\n\n @property\n def comparison_type_list(self):\n return self.subtype_list + [self.vector_size]\n\n def __str__(self):\n itemsize = self.element_type.itemsize\n if self.element_type.is_float:\n if itemsize == 4:\n return '__m128'\n else:\n return '__m128d'\n elif itemsize == 4:\n return '__m128i'\n else:\n raise NotImplementedError\n\n\n<assignment token>\n<code token>\n",
"<docstring token>\n<assignment token>\n<import token>\n<code token>\n<import token>\n<code token>\n<class token>\n<function token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass NPyIntp(IntType):\n <assignment token>\n <assignment token>\n <function token>\n\n\nclass CharType(IntType):\n is_char = True\n name = 'char'\n rank = 1\n signed = True\n\n def to_llvm(self, context):\n return lc.Type.int(8)\n\n\nclass CStringType(Type):\n is_c_string = True\n\n def __repr__(self):\n return 'const char *'\n\n def to_llvm(self, context):\n return char.pointer().to_llvm(context)\n\n\nclass VoidType(NamedType):\n is_void = True\n name = 'void'\n\n def to_llvm(self, context):\n return lc.Type.void()\n\n\nclass ObjectType(Type):\n is_object = True\n\n def __repr__(self):\n return 'PyObject *'\n\n\nclass FunctionType(Type):\n subtypes = ['return_type', 'args']\n is_function = True\n is_vararg = False\n\n def to_llvm(self, context):\n return lc.Type.function(self.return_type.to_llvm(context), [\n arg_type.to_llvm(context) for arg_type in self.args], self.\n is_vararg)\n\n def __str__(self):\n args = map(str, self.args)\n if self.is_vararg:\n args.append('...')\n return '%s (*)(%s)' % (self.return_type, ', '.join(args))\n\n\nclass VectorType(Type):\n subtypes = ['element_type']\n is_vector = True\n vector_size = None\n\n def __init__(self, element_type, vector_size, **kwds):\n super(VectorType, self).__init__(**kwds)\n assert (element_type.is_int or element_type.is_float\n ) and element_type.itemsize in (4, 8), element_type\n self.element_type = element_type\n self.vector_size = vector_size\n\n def to_llvm(self, context):\n return lc.Type.vector(self.element_type.to_llvm(context), self.\n vector_size)\n\n @property\n def comparison_type_list(self):\n return self.subtype_list + [self.vector_size]\n\n def __str__(self):\n itemsize = self.element_type.itemsize\n if self.element_type.is_float:\n if itemsize == 4:\n return '__m128'\n else:\n return '__m128d'\n elif itemsize == 4:\n return '__m128i'\n else:\n raise NotImplementedError\n\n\n<assignment token>\n<code token>\n",
"<docstring token>\n<assignment token>\n<import token>\n<code token>\n<import token>\n<code token>\n<class token>\n<function token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass CharType(IntType):\n is_char = True\n name = 'char'\n rank = 1\n signed = True\n\n def to_llvm(self, context):\n return lc.Type.int(8)\n\n\nclass CStringType(Type):\n is_c_string = True\n\n def __repr__(self):\n return 'const char *'\n\n def to_llvm(self, context):\n return char.pointer().to_llvm(context)\n\n\nclass VoidType(NamedType):\n is_void = True\n name = 'void'\n\n def to_llvm(self, context):\n return lc.Type.void()\n\n\nclass ObjectType(Type):\n is_object = True\n\n def __repr__(self):\n return 'PyObject *'\n\n\nclass FunctionType(Type):\n subtypes = ['return_type', 'args']\n is_function = True\n is_vararg = False\n\n def to_llvm(self, context):\n return lc.Type.function(self.return_type.to_llvm(context), [\n arg_type.to_llvm(context) for arg_type in self.args], self.\n is_vararg)\n\n def __str__(self):\n args = map(str, self.args)\n if self.is_vararg:\n args.append('...')\n return '%s (*)(%s)' % (self.return_type, ', '.join(args))\n\n\nclass VectorType(Type):\n subtypes = ['element_type']\n is_vector = True\n vector_size = None\n\n def __init__(self, element_type, vector_size, **kwds):\n super(VectorType, self).__init__(**kwds)\n assert (element_type.is_int or element_type.is_float\n ) and element_type.itemsize in (4, 8), element_type\n self.element_type = element_type\n self.vector_size = vector_size\n\n def to_llvm(self, context):\n return lc.Type.vector(self.element_type.to_llvm(context), self.\n vector_size)\n\n @property\n def comparison_type_list(self):\n return self.subtype_list + [self.vector_size]\n\n def __str__(self):\n itemsize = self.element_type.itemsize\n if self.element_type.is_float:\n if itemsize == 4:\n return '__m128'\n else:\n return '__m128d'\n elif itemsize == 4:\n return '__m128i'\n else:\n raise NotImplementedError\n\n\n<assignment token>\n<code token>\n",
"<docstring token>\n<assignment token>\n<import token>\n<code token>\n<import token>\n<code token>\n<class token>\n<function token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass CharType(IntType):\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n\n def to_llvm(self, context):\n return lc.Type.int(8)\n\n\nclass CStringType(Type):\n is_c_string = True\n\n def __repr__(self):\n return 'const char *'\n\n def to_llvm(self, context):\n return char.pointer().to_llvm(context)\n\n\nclass VoidType(NamedType):\n is_void = True\n name = 'void'\n\n def to_llvm(self, context):\n return lc.Type.void()\n\n\nclass ObjectType(Type):\n is_object = True\n\n def __repr__(self):\n return 'PyObject *'\n\n\nclass FunctionType(Type):\n subtypes = ['return_type', 'args']\n is_function = True\n is_vararg = False\n\n def to_llvm(self, context):\n return lc.Type.function(self.return_type.to_llvm(context), [\n arg_type.to_llvm(context) for arg_type in self.args], self.\n is_vararg)\n\n def __str__(self):\n args = map(str, self.args)\n if self.is_vararg:\n args.append('...')\n return '%s (*)(%s)' % (self.return_type, ', '.join(args))\n\n\nclass VectorType(Type):\n subtypes = ['element_type']\n is_vector = True\n vector_size = None\n\n def __init__(self, element_type, vector_size, **kwds):\n super(VectorType, self).__init__(**kwds)\n assert (element_type.is_int or element_type.is_float\n ) and element_type.itemsize in (4, 8), element_type\n self.element_type = element_type\n self.vector_size = vector_size\n\n def to_llvm(self, context):\n return lc.Type.vector(self.element_type.to_llvm(context), self.\n vector_size)\n\n @property\n def comparison_type_list(self):\n return self.subtype_list + [self.vector_size]\n\n def __str__(self):\n itemsize = self.element_type.itemsize\n if self.element_type.is_float:\n if itemsize == 4:\n return '__m128'\n else:\n return '__m128d'\n elif itemsize == 4:\n return '__m128i'\n else:\n raise NotImplementedError\n\n\n<assignment token>\n<code token>\n",
"<docstring token>\n<assignment token>\n<import token>\n<code token>\n<import token>\n<code token>\n<class token>\n<function token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass CharType(IntType):\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n\n\nclass CStringType(Type):\n is_c_string = True\n\n def __repr__(self):\n return 'const char *'\n\n def to_llvm(self, context):\n return char.pointer().to_llvm(context)\n\n\nclass VoidType(NamedType):\n is_void = True\n name = 'void'\n\n def to_llvm(self, context):\n return lc.Type.void()\n\n\nclass ObjectType(Type):\n is_object = True\n\n def __repr__(self):\n return 'PyObject *'\n\n\nclass FunctionType(Type):\n subtypes = ['return_type', 'args']\n is_function = True\n is_vararg = False\n\n def to_llvm(self, context):\n return lc.Type.function(self.return_type.to_llvm(context), [\n arg_type.to_llvm(context) for arg_type in self.args], self.\n is_vararg)\n\n def __str__(self):\n args = map(str, self.args)\n if self.is_vararg:\n args.append('...')\n return '%s (*)(%s)' % (self.return_type, ', '.join(args))\n\n\nclass VectorType(Type):\n subtypes = ['element_type']\n is_vector = True\n vector_size = None\n\n def __init__(self, element_type, vector_size, **kwds):\n super(VectorType, self).__init__(**kwds)\n assert (element_type.is_int or element_type.is_float\n ) and element_type.itemsize in (4, 8), element_type\n self.element_type = element_type\n self.vector_size = vector_size\n\n def to_llvm(self, context):\n return lc.Type.vector(self.element_type.to_llvm(context), self.\n vector_size)\n\n @property\n def comparison_type_list(self):\n return self.subtype_list + [self.vector_size]\n\n def __str__(self):\n itemsize = self.element_type.itemsize\n if self.element_type.is_float:\n if itemsize == 4:\n return '__m128'\n else:\n return '__m128d'\n elif itemsize == 4:\n return '__m128i'\n else:\n raise NotImplementedError\n\n\n<assignment token>\n<code token>\n",
"<docstring token>\n<assignment token>\n<import token>\n<code token>\n<import token>\n<code token>\n<class token>\n<function token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass CStringType(Type):\n is_c_string = True\n\n def __repr__(self):\n return 'const char *'\n\n def to_llvm(self, context):\n return char.pointer().to_llvm(context)\n\n\nclass VoidType(NamedType):\n is_void = True\n name = 'void'\n\n def to_llvm(self, context):\n return lc.Type.void()\n\n\nclass ObjectType(Type):\n is_object = True\n\n def __repr__(self):\n return 'PyObject *'\n\n\nclass FunctionType(Type):\n subtypes = ['return_type', 'args']\n is_function = True\n is_vararg = False\n\n def to_llvm(self, context):\n return lc.Type.function(self.return_type.to_llvm(context), [\n arg_type.to_llvm(context) for arg_type in self.args], self.\n is_vararg)\n\n def __str__(self):\n args = map(str, self.args)\n if self.is_vararg:\n args.append('...')\n return '%s (*)(%s)' % (self.return_type, ', '.join(args))\n\n\nclass VectorType(Type):\n subtypes = ['element_type']\n is_vector = True\n vector_size = None\n\n def __init__(self, element_type, vector_size, **kwds):\n super(VectorType, self).__init__(**kwds)\n assert (element_type.is_int or element_type.is_float\n ) and element_type.itemsize in (4, 8), element_type\n self.element_type = element_type\n self.vector_size = vector_size\n\n def to_llvm(self, context):\n return lc.Type.vector(self.element_type.to_llvm(context), self.\n vector_size)\n\n @property\n def comparison_type_list(self):\n return self.subtype_list + [self.vector_size]\n\n def __str__(self):\n itemsize = self.element_type.itemsize\n if self.element_type.is_float:\n if itemsize == 4:\n return '__m128'\n else:\n return '__m128d'\n elif itemsize == 4:\n return '__m128i'\n else:\n raise NotImplementedError\n\n\n<assignment token>\n<code token>\n",
"<docstring token>\n<assignment token>\n<import token>\n<code token>\n<import token>\n<code token>\n<class token>\n<function token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass CStringType(Type):\n <assignment token>\n\n def __repr__(self):\n return 'const char *'\n\n def to_llvm(self, context):\n return char.pointer().to_llvm(context)\n\n\nclass VoidType(NamedType):\n is_void = True\n name = 'void'\n\n def to_llvm(self, context):\n return lc.Type.void()\n\n\nclass ObjectType(Type):\n is_object = True\n\n def __repr__(self):\n return 'PyObject *'\n\n\nclass FunctionType(Type):\n subtypes = ['return_type', 'args']\n is_function = True\n is_vararg = False\n\n def to_llvm(self, context):\n return lc.Type.function(self.return_type.to_llvm(context), [\n arg_type.to_llvm(context) for arg_type in self.args], self.\n is_vararg)\n\n def __str__(self):\n args = map(str, self.args)\n if self.is_vararg:\n args.append('...')\n return '%s (*)(%s)' % (self.return_type, ', '.join(args))\n\n\nclass VectorType(Type):\n subtypes = ['element_type']\n is_vector = True\n vector_size = None\n\n def __init__(self, element_type, vector_size, **kwds):\n super(VectorType, self).__init__(**kwds)\n assert (element_type.is_int or element_type.is_float\n ) and element_type.itemsize in (4, 8), element_type\n self.element_type = element_type\n self.vector_size = vector_size\n\n def to_llvm(self, context):\n return lc.Type.vector(self.element_type.to_llvm(context), self.\n vector_size)\n\n @property\n def comparison_type_list(self):\n return self.subtype_list + [self.vector_size]\n\n def __str__(self):\n itemsize = self.element_type.itemsize\n if self.element_type.is_float:\n if itemsize == 4:\n return '__m128'\n else:\n return '__m128d'\n elif itemsize == 4:\n return '__m128i'\n else:\n raise NotImplementedError\n\n\n<assignment token>\n<code token>\n",
"<docstring token>\n<assignment token>\n<import token>\n<code token>\n<import token>\n<code token>\n<class token>\n<function token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass CStringType(Type):\n <assignment token>\n\n def __repr__(self):\n return 'const char *'\n <function token>\n\n\nclass VoidType(NamedType):\n is_void = True\n name = 'void'\n\n def to_llvm(self, context):\n return lc.Type.void()\n\n\nclass ObjectType(Type):\n is_object = True\n\n def __repr__(self):\n return 'PyObject *'\n\n\nclass FunctionType(Type):\n subtypes = ['return_type', 'args']\n is_function = True\n is_vararg = False\n\n def to_llvm(self, context):\n return lc.Type.function(self.return_type.to_llvm(context), [\n arg_type.to_llvm(context) for arg_type in self.args], self.\n is_vararg)\n\n def __str__(self):\n args = map(str, self.args)\n if self.is_vararg:\n args.append('...')\n return '%s (*)(%s)' % (self.return_type, ', '.join(args))\n\n\nclass VectorType(Type):\n subtypes = ['element_type']\n is_vector = True\n vector_size = None\n\n def __init__(self, element_type, vector_size, **kwds):\n super(VectorType, self).__init__(**kwds)\n assert (element_type.is_int or element_type.is_float\n ) and element_type.itemsize in (4, 8), element_type\n self.element_type = element_type\n self.vector_size = vector_size\n\n def to_llvm(self, context):\n return lc.Type.vector(self.element_type.to_llvm(context), self.\n vector_size)\n\n @property\n def comparison_type_list(self):\n return self.subtype_list + [self.vector_size]\n\n def __str__(self):\n itemsize = self.element_type.itemsize\n if self.element_type.is_float:\n if itemsize == 4:\n return '__m128'\n else:\n return '__m128d'\n elif itemsize == 4:\n return '__m128i'\n else:\n raise NotImplementedError\n\n\n<assignment token>\n<code token>\n",
"<docstring token>\n<assignment token>\n<import token>\n<code token>\n<import token>\n<code token>\n<class token>\n<function token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass CStringType(Type):\n <assignment token>\n <function token>\n <function token>\n\n\nclass VoidType(NamedType):\n is_void = True\n name = 'void'\n\n def to_llvm(self, context):\n return lc.Type.void()\n\n\nclass ObjectType(Type):\n is_object = True\n\n def __repr__(self):\n return 'PyObject *'\n\n\nclass FunctionType(Type):\n subtypes = ['return_type', 'args']\n is_function = True\n is_vararg = False\n\n def to_llvm(self, context):\n return lc.Type.function(self.return_type.to_llvm(context), [\n arg_type.to_llvm(context) for arg_type in self.args], self.\n is_vararg)\n\n def __str__(self):\n args = map(str, self.args)\n if self.is_vararg:\n args.append('...')\n return '%s (*)(%s)' % (self.return_type, ', '.join(args))\n\n\nclass VectorType(Type):\n subtypes = ['element_type']\n is_vector = True\n vector_size = None\n\n def __init__(self, element_type, vector_size, **kwds):\n super(VectorType, self).__init__(**kwds)\n assert (element_type.is_int or element_type.is_float\n ) and element_type.itemsize in (4, 8), element_type\n self.element_type = element_type\n self.vector_size = vector_size\n\n def to_llvm(self, context):\n return lc.Type.vector(self.element_type.to_llvm(context), self.\n vector_size)\n\n @property\n def comparison_type_list(self):\n return self.subtype_list + [self.vector_size]\n\n def __str__(self):\n itemsize = self.element_type.itemsize\n if self.element_type.is_float:\n if itemsize == 4:\n return '__m128'\n else:\n return '__m128d'\n elif itemsize == 4:\n return '__m128i'\n else:\n raise NotImplementedError\n\n\n<assignment token>\n<code token>\n",
"<docstring token>\n<assignment token>\n<import token>\n<code token>\n<import token>\n<code token>\n<class token>\n<function token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass VoidType(NamedType):\n is_void = True\n name = 'void'\n\n def to_llvm(self, context):\n return lc.Type.void()\n\n\nclass ObjectType(Type):\n is_object = True\n\n def __repr__(self):\n return 'PyObject *'\n\n\nclass FunctionType(Type):\n subtypes = ['return_type', 'args']\n is_function = True\n is_vararg = False\n\n def to_llvm(self, context):\n return lc.Type.function(self.return_type.to_llvm(context), [\n arg_type.to_llvm(context) for arg_type in self.args], self.\n is_vararg)\n\n def __str__(self):\n args = map(str, self.args)\n if self.is_vararg:\n args.append('...')\n return '%s (*)(%s)' % (self.return_type, ', '.join(args))\n\n\nclass VectorType(Type):\n subtypes = ['element_type']\n is_vector = True\n vector_size = None\n\n def __init__(self, element_type, vector_size, **kwds):\n super(VectorType, self).__init__(**kwds)\n assert (element_type.is_int or element_type.is_float\n ) and element_type.itemsize in (4, 8), element_type\n self.element_type = element_type\n self.vector_size = vector_size\n\n def to_llvm(self, context):\n return lc.Type.vector(self.element_type.to_llvm(context), self.\n vector_size)\n\n @property\n def comparison_type_list(self):\n return self.subtype_list + [self.vector_size]\n\n def __str__(self):\n itemsize = self.element_type.itemsize\n if self.element_type.is_float:\n if itemsize == 4:\n return '__m128'\n else:\n return '__m128d'\n elif itemsize == 4:\n return '__m128i'\n else:\n raise NotImplementedError\n\n\n<assignment token>\n<code token>\n",
"<docstring token>\n<assignment token>\n<import token>\n<code token>\n<import token>\n<code token>\n<class token>\n<function token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass VoidType(NamedType):\n <assignment token>\n <assignment token>\n\n def to_llvm(self, context):\n return lc.Type.void()\n\n\nclass ObjectType(Type):\n is_object = True\n\n def __repr__(self):\n return 'PyObject *'\n\n\nclass FunctionType(Type):\n subtypes = ['return_type', 'args']\n is_function = True\n is_vararg = False\n\n def to_llvm(self, context):\n return lc.Type.function(self.return_type.to_llvm(context), [\n arg_type.to_llvm(context) for arg_type in self.args], self.\n is_vararg)\n\n def __str__(self):\n args = map(str, self.args)\n if self.is_vararg:\n args.append('...')\n return '%s (*)(%s)' % (self.return_type, ', '.join(args))\n\n\nclass VectorType(Type):\n subtypes = ['element_type']\n is_vector = True\n vector_size = None\n\n def __init__(self, element_type, vector_size, **kwds):\n super(VectorType, self).__init__(**kwds)\n assert (element_type.is_int or element_type.is_float\n ) and element_type.itemsize in (4, 8), element_type\n self.element_type = element_type\n self.vector_size = vector_size\n\n def to_llvm(self, context):\n return lc.Type.vector(self.element_type.to_llvm(context), self.\n vector_size)\n\n @property\n def comparison_type_list(self):\n return self.subtype_list + [self.vector_size]\n\n def __str__(self):\n itemsize = self.element_type.itemsize\n if self.element_type.is_float:\n if itemsize == 4:\n return '__m128'\n else:\n return '__m128d'\n elif itemsize == 4:\n return '__m128i'\n else:\n raise NotImplementedError\n\n\n<assignment token>\n<code token>\n",
"<docstring token>\n<assignment token>\n<import token>\n<code token>\n<import token>\n<code token>\n<class token>\n<function token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass VoidType(NamedType):\n <assignment token>\n <assignment token>\n <function token>\n\n\nclass ObjectType(Type):\n is_object = True\n\n def __repr__(self):\n return 'PyObject *'\n\n\nclass FunctionType(Type):\n subtypes = ['return_type', 'args']\n is_function = True\n is_vararg = False\n\n def to_llvm(self, context):\n return lc.Type.function(self.return_type.to_llvm(context), [\n arg_type.to_llvm(context) for arg_type in self.args], self.\n is_vararg)\n\n def __str__(self):\n args = map(str, self.args)\n if self.is_vararg:\n args.append('...')\n return '%s (*)(%s)' % (self.return_type, ', '.join(args))\n\n\nclass VectorType(Type):\n subtypes = ['element_type']\n is_vector = True\n vector_size = None\n\n def __init__(self, element_type, vector_size, **kwds):\n super(VectorType, self).__init__(**kwds)\n assert (element_type.is_int or element_type.is_float\n ) and element_type.itemsize in (4, 8), element_type\n self.element_type = element_type\n self.vector_size = vector_size\n\n def to_llvm(self, context):\n return lc.Type.vector(self.element_type.to_llvm(context), self.\n vector_size)\n\n @property\n def comparison_type_list(self):\n return self.subtype_list + [self.vector_size]\n\n def __str__(self):\n itemsize = self.element_type.itemsize\n if self.element_type.is_float:\n if itemsize == 4:\n return '__m128'\n else:\n return '__m128d'\n elif itemsize == 4:\n return '__m128i'\n else:\n raise NotImplementedError\n\n\n<assignment token>\n<code token>\n",
"<docstring token>\n<assignment token>\n<import token>\n<code token>\n<import token>\n<code token>\n<class token>\n<function token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass ObjectType(Type):\n is_object = True\n\n def __repr__(self):\n return 'PyObject *'\n\n\nclass FunctionType(Type):\n subtypes = ['return_type', 'args']\n is_function = True\n is_vararg = False\n\n def to_llvm(self, context):\n return lc.Type.function(self.return_type.to_llvm(context), [\n arg_type.to_llvm(context) for arg_type in self.args], self.\n is_vararg)\n\n def __str__(self):\n args = map(str, self.args)\n if self.is_vararg:\n args.append('...')\n return '%s (*)(%s)' % (self.return_type, ', '.join(args))\n\n\nclass VectorType(Type):\n subtypes = ['element_type']\n is_vector = True\n vector_size = None\n\n def __init__(self, element_type, vector_size, **kwds):\n super(VectorType, self).__init__(**kwds)\n assert (element_type.is_int or element_type.is_float\n ) and element_type.itemsize in (4, 8), element_type\n self.element_type = element_type\n self.vector_size = vector_size\n\n def to_llvm(self, context):\n return lc.Type.vector(self.element_type.to_llvm(context), self.\n vector_size)\n\n @property\n def comparison_type_list(self):\n return self.subtype_list + [self.vector_size]\n\n def __str__(self):\n itemsize = self.element_type.itemsize\n if self.element_type.is_float:\n if itemsize == 4:\n return '__m128'\n else:\n return '__m128d'\n elif itemsize == 4:\n return '__m128i'\n else:\n raise NotImplementedError\n\n\n<assignment token>\n<code token>\n",
"<docstring token>\n<assignment token>\n<import token>\n<code token>\n<import token>\n<code token>\n<class token>\n<function token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass ObjectType(Type):\n <assignment token>\n\n def __repr__(self):\n return 'PyObject *'\n\n\nclass FunctionType(Type):\n subtypes = ['return_type', 'args']\n is_function = True\n is_vararg = False\n\n def to_llvm(self, context):\n return lc.Type.function(self.return_type.to_llvm(context), [\n arg_type.to_llvm(context) for arg_type in self.args], self.\n is_vararg)\n\n def __str__(self):\n args = map(str, self.args)\n if self.is_vararg:\n args.append('...')\n return '%s (*)(%s)' % (self.return_type, ', '.join(args))\n\n\nclass VectorType(Type):\n subtypes = ['element_type']\n is_vector = True\n vector_size = None\n\n def __init__(self, element_type, vector_size, **kwds):\n super(VectorType, self).__init__(**kwds)\n assert (element_type.is_int or element_type.is_float\n ) and element_type.itemsize in (4, 8), element_type\n self.element_type = element_type\n self.vector_size = vector_size\n\n def to_llvm(self, context):\n return lc.Type.vector(self.element_type.to_llvm(context), self.\n vector_size)\n\n @property\n def comparison_type_list(self):\n return self.subtype_list + [self.vector_size]\n\n def __str__(self):\n itemsize = self.element_type.itemsize\n if self.element_type.is_float:\n if itemsize == 4:\n return '__m128'\n else:\n return '__m128d'\n elif itemsize == 4:\n return '__m128i'\n else:\n raise NotImplementedError\n\n\n<assignment token>\n<code token>\n",
"<docstring token>\n<assignment token>\n<import token>\n<code token>\n<import token>\n<code token>\n<class token>\n<function token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass ObjectType(Type):\n <assignment token>\n <function token>\n\n\nclass FunctionType(Type):\n subtypes = ['return_type', 'args']\n is_function = True\n is_vararg = False\n\n def to_llvm(self, context):\n return lc.Type.function(self.return_type.to_llvm(context), [\n arg_type.to_llvm(context) for arg_type in self.args], self.\n is_vararg)\n\n def __str__(self):\n args = map(str, self.args)\n if self.is_vararg:\n args.append('...')\n return '%s (*)(%s)' % (self.return_type, ', '.join(args))\n\n\nclass VectorType(Type):\n subtypes = ['element_type']\n is_vector = True\n vector_size = None\n\n def __init__(self, element_type, vector_size, **kwds):\n super(VectorType, self).__init__(**kwds)\n assert (element_type.is_int or element_type.is_float\n ) and element_type.itemsize in (4, 8), element_type\n self.element_type = element_type\n self.vector_size = vector_size\n\n def to_llvm(self, context):\n return lc.Type.vector(self.element_type.to_llvm(context), self.\n vector_size)\n\n @property\n def comparison_type_list(self):\n return self.subtype_list + [self.vector_size]\n\n def __str__(self):\n itemsize = self.element_type.itemsize\n if self.element_type.is_float:\n if itemsize == 4:\n return '__m128'\n else:\n return '__m128d'\n elif itemsize == 4:\n return '__m128i'\n else:\n raise NotImplementedError\n\n\n<assignment token>\n<code token>\n",
"<docstring token>\n<assignment token>\n<import token>\n<code token>\n<import token>\n<code token>\n<class token>\n<function token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass FunctionType(Type):\n subtypes = ['return_type', 'args']\n is_function = True\n is_vararg = False\n\n def to_llvm(self, context):\n return lc.Type.function(self.return_type.to_llvm(context), [\n arg_type.to_llvm(context) for arg_type in self.args], self.\n is_vararg)\n\n def __str__(self):\n args = map(str, self.args)\n if self.is_vararg:\n args.append('...')\n return '%s (*)(%s)' % (self.return_type, ', '.join(args))\n\n\nclass VectorType(Type):\n subtypes = ['element_type']\n is_vector = True\n vector_size = None\n\n def __init__(self, element_type, vector_size, **kwds):\n super(VectorType, self).__init__(**kwds)\n assert (element_type.is_int or element_type.is_float\n ) and element_type.itemsize in (4, 8), element_type\n self.element_type = element_type\n self.vector_size = vector_size\n\n def to_llvm(self, context):\n return lc.Type.vector(self.element_type.to_llvm(context), self.\n vector_size)\n\n @property\n def comparison_type_list(self):\n return self.subtype_list + [self.vector_size]\n\n def __str__(self):\n itemsize = self.element_type.itemsize\n if self.element_type.is_float:\n if itemsize == 4:\n return '__m128'\n else:\n return '__m128d'\n elif itemsize == 4:\n return '__m128i'\n else:\n raise NotImplementedError\n\n\n<assignment token>\n<code token>\n",
"<docstring token>\n<assignment token>\n<import token>\n<code token>\n<import token>\n<code token>\n<class token>\n<function token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass FunctionType(Type):\n <assignment token>\n <assignment token>\n <assignment token>\n\n def to_llvm(self, context):\n return lc.Type.function(self.return_type.to_llvm(context), [\n arg_type.to_llvm(context) for arg_type in self.args], self.\n is_vararg)\n\n def __str__(self):\n args = map(str, self.args)\n if self.is_vararg:\n args.append('...')\n return '%s (*)(%s)' % (self.return_type, ', '.join(args))\n\n\nclass VectorType(Type):\n subtypes = ['element_type']\n is_vector = True\n vector_size = None\n\n def __init__(self, element_type, vector_size, **kwds):\n super(VectorType, self).__init__(**kwds)\n assert (element_type.is_int or element_type.is_float\n ) and element_type.itemsize in (4, 8), element_type\n self.element_type = element_type\n self.vector_size = vector_size\n\n def to_llvm(self, context):\n return lc.Type.vector(self.element_type.to_llvm(context), self.\n vector_size)\n\n @property\n def comparison_type_list(self):\n return self.subtype_list + [self.vector_size]\n\n def __str__(self):\n itemsize = self.element_type.itemsize\n if self.element_type.is_float:\n if itemsize == 4:\n return '__m128'\n else:\n return '__m128d'\n elif itemsize == 4:\n return '__m128i'\n else:\n raise NotImplementedError\n\n\n<assignment token>\n<code token>\n",
"<docstring token>\n<assignment token>\n<import token>\n<code token>\n<import token>\n<code token>\n<class token>\n<function token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass FunctionType(Type):\n <assignment token>\n <assignment token>\n <assignment token>\n\n def to_llvm(self, context):\n return lc.Type.function(self.return_type.to_llvm(context), [\n arg_type.to_llvm(context) for arg_type in self.args], self.\n is_vararg)\n <function token>\n\n\nclass VectorType(Type):\n subtypes = ['element_type']\n is_vector = True\n vector_size = None\n\n def __init__(self, element_type, vector_size, **kwds):\n super(VectorType, self).__init__(**kwds)\n assert (element_type.is_int or element_type.is_float\n ) and element_type.itemsize in (4, 8), element_type\n self.element_type = element_type\n self.vector_size = vector_size\n\n def to_llvm(self, context):\n return lc.Type.vector(self.element_type.to_llvm(context), self.\n vector_size)\n\n @property\n def comparison_type_list(self):\n return self.subtype_list + [self.vector_size]\n\n def __str__(self):\n itemsize = self.element_type.itemsize\n if self.element_type.is_float:\n if itemsize == 4:\n return '__m128'\n else:\n return '__m128d'\n elif itemsize == 4:\n return '__m128i'\n else:\n raise NotImplementedError\n\n\n<assignment token>\n<code token>\n",
"<docstring token>\n<assignment token>\n<import token>\n<code token>\n<import token>\n<code token>\n<class token>\n<function token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass FunctionType(Type):\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n <function token>\n\n\nclass VectorType(Type):\n subtypes = ['element_type']\n is_vector = True\n vector_size = None\n\n def __init__(self, element_type, vector_size, **kwds):\n super(VectorType, self).__init__(**kwds)\n assert (element_type.is_int or element_type.is_float\n ) and element_type.itemsize in (4, 8), element_type\n self.element_type = element_type\n self.vector_size = vector_size\n\n def to_llvm(self, context):\n return lc.Type.vector(self.element_type.to_llvm(context), self.\n vector_size)\n\n @property\n def comparison_type_list(self):\n return self.subtype_list + [self.vector_size]\n\n def __str__(self):\n itemsize = self.element_type.itemsize\n if self.element_type.is_float:\n if itemsize == 4:\n return '__m128'\n else:\n return '__m128d'\n elif itemsize == 4:\n return '__m128i'\n else:\n raise NotImplementedError\n\n\n<assignment token>\n<code token>\n",
"<docstring token>\n<assignment token>\n<import token>\n<code token>\n<import token>\n<code token>\n<class token>\n<function token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass VectorType(Type):\n subtypes = ['element_type']\n is_vector = True\n vector_size = None\n\n def __init__(self, element_type, vector_size, **kwds):\n super(VectorType, self).__init__(**kwds)\n assert (element_type.is_int or element_type.is_float\n ) and element_type.itemsize in (4, 8), element_type\n self.element_type = element_type\n self.vector_size = vector_size\n\n def to_llvm(self, context):\n return lc.Type.vector(self.element_type.to_llvm(context), self.\n vector_size)\n\n @property\n def comparison_type_list(self):\n return self.subtype_list + [self.vector_size]\n\n def __str__(self):\n itemsize = self.element_type.itemsize\n if self.element_type.is_float:\n if itemsize == 4:\n return '__m128'\n else:\n return '__m128d'\n elif itemsize == 4:\n return '__m128i'\n else:\n raise NotImplementedError\n\n\n<assignment token>\n<code token>\n",
"<docstring token>\n<assignment token>\n<import token>\n<code token>\n<import token>\n<code token>\n<class token>\n<function token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass VectorType(Type):\n <assignment token>\n <assignment token>\n <assignment token>\n\n def __init__(self, element_type, vector_size, **kwds):\n super(VectorType, self).__init__(**kwds)\n assert (element_type.is_int or element_type.is_float\n ) and element_type.itemsize in (4, 8), element_type\n self.element_type = element_type\n self.vector_size = vector_size\n\n def to_llvm(self, context):\n return lc.Type.vector(self.element_type.to_llvm(context), self.\n vector_size)\n\n @property\n def comparison_type_list(self):\n return self.subtype_list + [self.vector_size]\n\n def __str__(self):\n itemsize = self.element_type.itemsize\n if self.element_type.is_float:\n if itemsize == 4:\n return '__m128'\n else:\n return '__m128d'\n elif itemsize == 4:\n return '__m128i'\n else:\n raise NotImplementedError\n\n\n<assignment token>\n<code token>\n",
"<docstring token>\n<assignment token>\n<import token>\n<code token>\n<import token>\n<code token>\n<class token>\n<function token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass VectorType(Type):\n <assignment token>\n <assignment token>\n <assignment token>\n\n def __init__(self, element_type, vector_size, **kwds):\n super(VectorType, self).__init__(**kwds)\n assert (element_type.is_int or element_type.is_float\n ) and element_type.itemsize in (4, 8), element_type\n self.element_type = element_type\n self.vector_size = vector_size\n\n def to_llvm(self, context):\n return lc.Type.vector(self.element_type.to_llvm(context), self.\n vector_size)\n\n @property\n def comparison_type_list(self):\n return self.subtype_list + [self.vector_size]\n <function token>\n\n\n<assignment token>\n<code token>\n",
"<docstring token>\n<assignment token>\n<import token>\n<code token>\n<import token>\n<code token>\n<class token>\n<function token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass VectorType(Type):\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n\n def to_llvm(self, context):\n return lc.Type.vector(self.element_type.to_llvm(context), self.\n vector_size)\n\n @property\n def comparison_type_list(self):\n return self.subtype_list + [self.vector_size]\n <function token>\n\n\n<assignment token>\n<code token>\n",
"<docstring token>\n<assignment token>\n<import token>\n<code token>\n<import token>\n<code token>\n<class token>\n<function token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass VectorType(Type):\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n\n def to_llvm(self, context):\n return lc.Type.vector(self.element_type.to_llvm(context), self.\n vector_size)\n <function token>\n <function token>\n\n\n<assignment token>\n<code token>\n",
"<docstring token>\n<assignment token>\n<import token>\n<code token>\n<import token>\n<code token>\n<class token>\n<function token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass VectorType(Type):\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n\n<assignment token>\n<code token>\n",
"<docstring token>\n<assignment token>\n<import token>\n<code token>\n<import token>\n<code token>\n<class token>\n<function token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<assignment token>\n<code token>\n"
] | false |
98,361 |
f477aeba54b2b66a40967b347de05ac961b00298
|
import keras
from perturbation import *
from sklearn.metrics import accuracy_score
model = keras.models.load_model('nn_model/std_model.dat')
model.load_weights('nn_model/std_model_weights.dat')
pertMan = get_pertubated_test_data()
pertMan.show_content()
for i in range(pertMan.get_num_groups()):
pred_Y = np.argmax(model.predict(pertMan.data[i]), axis=1)
Y = np.argmax(pertMan.Y, axis=1)
print('acc: {}'.format(accuracy_score(Y, pred_Y)))
|
[
"import keras\nfrom perturbation import *\nfrom sklearn.metrics import accuracy_score\n\nmodel = keras.models.load_model('nn_model/std_model.dat')\nmodel.load_weights('nn_model/std_model_weights.dat')\n\npertMan = get_pertubated_test_data()\npertMan.show_content()\n\nfor i in range(pertMan.get_num_groups()):\n pred_Y = np.argmax(model.predict(pertMan.data[i]), axis=1)\n Y = np.argmax(pertMan.Y, axis=1)\n print('acc: {}'.format(accuracy_score(Y, pred_Y)))\n",
"import keras\nfrom perturbation import *\nfrom sklearn.metrics import accuracy_score\nmodel = keras.models.load_model('nn_model/std_model.dat')\nmodel.load_weights('nn_model/std_model_weights.dat')\npertMan = get_pertubated_test_data()\npertMan.show_content()\nfor i in range(pertMan.get_num_groups()):\n pred_Y = np.argmax(model.predict(pertMan.data[i]), axis=1)\n Y = np.argmax(pertMan.Y, axis=1)\n print('acc: {}'.format(accuracy_score(Y, pred_Y)))\n",
"<import token>\nmodel = keras.models.load_model('nn_model/std_model.dat')\nmodel.load_weights('nn_model/std_model_weights.dat')\npertMan = get_pertubated_test_data()\npertMan.show_content()\nfor i in range(pertMan.get_num_groups()):\n pred_Y = np.argmax(model.predict(pertMan.data[i]), axis=1)\n Y = np.argmax(pertMan.Y, axis=1)\n print('acc: {}'.format(accuracy_score(Y, pred_Y)))\n",
"<import token>\n<assignment token>\nmodel.load_weights('nn_model/std_model_weights.dat')\n<assignment token>\npertMan.show_content()\nfor i in range(pertMan.get_num_groups()):\n pred_Y = np.argmax(model.predict(pertMan.data[i]), axis=1)\n Y = np.argmax(pertMan.Y, axis=1)\n print('acc: {}'.format(accuracy_score(Y, pred_Y)))\n",
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n"
] | false |
98,362 |
cb713f5ed627abfd8d2ef153c3a2adc1d2db7045
|
from easel import *
from browser import *
def discardCanvases():
for cs in document.getElementsByTagName("canvas"):
cs.parentNode.removeChild(cs)
discardCanvases()
canvas = document.createElement("canvas")
canvas.width = 960
canvas.height = 400
container = document.getElementById("canvas-container")
container.appendChild(canvas)
stage = Stage(canvas)
Ticker.addEventListener("tick", stage)
movieClip = MovieClip(None, 0, True, {"start": 0, "middle": 40})
stage.addChild(movieClip)
child1 = Shape(Graphics().beginFill("#999999").drawCircle(100, 100, 100))
child2 = Shape(Graphics().beginFill("#5a9cfb").drawCircle(100, 100, 100))
timeline = movieClip.timeline
timeline.addTween(Tween.get(child1).to({"x": 0}).to({"x":760}, 40).to({"x": 0}, 40))
timeline.addTween(Tween.get(child2).to({"x": 760}).to({"x":0}, 40).to({"x": 760}, 40))
movieClip.gotoAndPlay("middle")
|
[
"from easel import *\nfrom browser import *\n\ndef discardCanvases():\n for cs in document.getElementsByTagName(\"canvas\"):\n cs.parentNode.removeChild(cs)\n \ndiscardCanvases()\ncanvas = document.createElement(\"canvas\")\ncanvas.width = 960\ncanvas.height = 400\ncontainer = document.getElementById(\"canvas-container\")\ncontainer.appendChild(canvas)\n\nstage = Stage(canvas)\n\nTicker.addEventListener(\"tick\", stage)\n\nmovieClip = MovieClip(None, 0, True, {\"start\": 0, \"middle\": 40})\nstage.addChild(movieClip)\n\nchild1 = Shape(Graphics().beginFill(\"#999999\").drawCircle(100, 100, 100))\nchild2 = Shape(Graphics().beginFill(\"#5a9cfb\").drawCircle(100, 100, 100))\n\ntimeline = movieClip.timeline\n\ntimeline.addTween(Tween.get(child1).to({\"x\": 0}).to({\"x\":760}, 40).to({\"x\": 0}, 40))\ntimeline.addTween(Tween.get(child2).to({\"x\": 760}).to({\"x\":0}, 40).to({\"x\": 760}, 40))\n\nmovieClip.gotoAndPlay(\"middle\")",
"from easel import *\nfrom browser import *\n\n\ndef discardCanvases():\n for cs in document.getElementsByTagName('canvas'):\n cs.parentNode.removeChild(cs)\n\n\ndiscardCanvases()\ncanvas = document.createElement('canvas')\ncanvas.width = 960\ncanvas.height = 400\ncontainer = document.getElementById('canvas-container')\ncontainer.appendChild(canvas)\nstage = Stage(canvas)\nTicker.addEventListener('tick', stage)\nmovieClip = MovieClip(None, 0, True, {'start': 0, 'middle': 40})\nstage.addChild(movieClip)\nchild1 = Shape(Graphics().beginFill('#999999').drawCircle(100, 100, 100))\nchild2 = Shape(Graphics().beginFill('#5a9cfb').drawCircle(100, 100, 100))\ntimeline = movieClip.timeline\ntimeline.addTween(Tween.get(child1).to({'x': 0}).to({'x': 760}, 40).to({'x':\n 0}, 40))\ntimeline.addTween(Tween.get(child2).to({'x': 760}).to({'x': 0}, 40).to({'x':\n 760}, 40))\nmovieClip.gotoAndPlay('middle')\n",
"<import token>\n\n\ndef discardCanvases():\n for cs in document.getElementsByTagName('canvas'):\n cs.parentNode.removeChild(cs)\n\n\ndiscardCanvases()\ncanvas = document.createElement('canvas')\ncanvas.width = 960\ncanvas.height = 400\ncontainer = document.getElementById('canvas-container')\ncontainer.appendChild(canvas)\nstage = Stage(canvas)\nTicker.addEventListener('tick', stage)\nmovieClip = MovieClip(None, 0, True, {'start': 0, 'middle': 40})\nstage.addChild(movieClip)\nchild1 = Shape(Graphics().beginFill('#999999').drawCircle(100, 100, 100))\nchild2 = Shape(Graphics().beginFill('#5a9cfb').drawCircle(100, 100, 100))\ntimeline = movieClip.timeline\ntimeline.addTween(Tween.get(child1).to({'x': 0}).to({'x': 760}, 40).to({'x':\n 0}, 40))\ntimeline.addTween(Tween.get(child2).to({'x': 760}).to({'x': 0}, 40).to({'x':\n 760}, 40))\nmovieClip.gotoAndPlay('middle')\n",
"<import token>\n\n\ndef discardCanvases():\n for cs in document.getElementsByTagName('canvas'):\n cs.parentNode.removeChild(cs)\n\n\ndiscardCanvases()\n<assignment token>\ncontainer.appendChild(canvas)\n<assignment token>\nTicker.addEventListener('tick', stage)\n<assignment token>\nstage.addChild(movieClip)\n<assignment token>\ntimeline.addTween(Tween.get(child1).to({'x': 0}).to({'x': 760}, 40).to({'x':\n 0}, 40))\ntimeline.addTween(Tween.get(child2).to({'x': 760}).to({'x': 0}, 40).to({'x':\n 760}, 40))\nmovieClip.gotoAndPlay('middle')\n",
"<import token>\n\n\ndef discardCanvases():\n for cs in document.getElementsByTagName('canvas'):\n cs.parentNode.removeChild(cs)\n\n\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n",
"<import token>\n<function token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n"
] | false |
98,363 |
0da0ab41e0369b4719f9df20290641f26acdd1a7
|
import os
import json
import datetime
BASE_PATH = '/Users/maxfowler/Desktop/pw-posts'
POSTS_PATH = os.path.join(BASE_PATH, '2017-posts')
def load_posts():
f_names = os.listdir(POSTS_PATH)
all_posts = []
for f_name in f_names:
f_path = os.path.join(POSTS_PATH, f_name)
posts = json.loads(open(f_path, 'r').read())
all_posts += posts
return all_posts
def exp1():
MONTHS_PATH = os.path.join(BASE_PATH, 'posts-by-month')
if not os.path.exists(MONTHS_PATH):
os.makedirs(MONTHS_PATH)
posts = load_posts()
posts_by_month = {}
for post in posts:
dt = datetime.datetime.fromtimestamp(post['date'])
month_posts = posts_by_month.setdefault(dt.strftime('%B'), [])
month_posts.append(post)
for month, month_posts in posts_by_month.items():
month_path = os.path.join(MONTHS_PATH, '{}.json').format(month)
print '++ writing to {}'.format(month_path)
with open(month_path, 'w') as f:
f.write(json.dumps(month_posts))
def by_day():
DAYS_PATH = os.path.join(BASE_PATH, 'posts-by-day')
if not os.path.exists(DAYS_PATH):
os.makedirs(DAYS_PATH)
posts = load_posts()
posts_by_day = {}
for post in posts:
dt = datetime.datetime.fromtimestamp(post['date'])
day_posts = posts_by_day.setdefault(dt.strftime('%B-%d'), [])
day_posts.append(post)
for day, day_posts in posts_by_day.items():
day_path = os.path.join(DAYS_PATH, '{}.json').format(day)
print '++ writing to {}'.format(day_path)
with open(day_path, 'w') as f:
f.write(json.dumps(day_posts))
def by_week():
DAYS_PATH = os.path.join(BASE_PATH, 'posts-by-day')
if not os.path.exists(DAYS_PATH):
os.makedirs(DAYS_PATH)
posts = load_posts()
posts_by_day = {}
for post in posts:
dt = datetime.datetime.fromtimestamp(post['date'])
day_posts = posts_by_day.setdefault(dt.strftime('%B-%d'), [])
day_posts.append(post)
for day, day_posts in posts_by_day.items():
day_path = os.path.join(DAYS_PATH, '{}.json').format(day)
print '++ writing to {}'.format(day_path)
with open(day_path, 'w') as f:
f.write(json.dumps(day_posts))
if __name__ == '__main__':
by_day()
|
[
"import os\nimport json\nimport datetime\n\nBASE_PATH = '/Users/maxfowler/Desktop/pw-posts'\nPOSTS_PATH = os.path.join(BASE_PATH, '2017-posts')\n\n\ndef load_posts():\n f_names = os.listdir(POSTS_PATH)\n all_posts = []\n for f_name in f_names:\n f_path = os.path.join(POSTS_PATH, f_name)\n posts = json.loads(open(f_path, 'r').read())\n all_posts += posts\n return all_posts\n\n\ndef exp1():\n MONTHS_PATH = os.path.join(BASE_PATH, 'posts-by-month')\n if not os.path.exists(MONTHS_PATH):\n os.makedirs(MONTHS_PATH)\n posts = load_posts()\n posts_by_month = {}\n for post in posts:\n dt = datetime.datetime.fromtimestamp(post['date'])\n month_posts = posts_by_month.setdefault(dt.strftime('%B'), [])\n month_posts.append(post)\n for month, month_posts in posts_by_month.items():\n month_path = os.path.join(MONTHS_PATH, '{}.json').format(month)\n print '++ writing to {}'.format(month_path)\n with open(month_path, 'w') as f:\n f.write(json.dumps(month_posts))\n\n\ndef by_day():\n DAYS_PATH = os.path.join(BASE_PATH, 'posts-by-day')\n if not os.path.exists(DAYS_PATH):\n os.makedirs(DAYS_PATH)\n posts = load_posts()\n posts_by_day = {}\n for post in posts:\n dt = datetime.datetime.fromtimestamp(post['date'])\n day_posts = posts_by_day.setdefault(dt.strftime('%B-%d'), [])\n day_posts.append(post)\n for day, day_posts in posts_by_day.items():\n day_path = os.path.join(DAYS_PATH, '{}.json').format(day)\n print '++ writing to {}'.format(day_path)\n with open(day_path, 'w') as f:\n f.write(json.dumps(day_posts))\n\ndef by_week():\n DAYS_PATH = os.path.join(BASE_PATH, 'posts-by-day')\n if not os.path.exists(DAYS_PATH):\n os.makedirs(DAYS_PATH)\n posts = load_posts()\n posts_by_day = {}\n for post in posts:\n dt = datetime.datetime.fromtimestamp(post['date'])\n day_posts = posts_by_day.setdefault(dt.strftime('%B-%d'), [])\n day_posts.append(post)\n for day, day_posts in posts_by_day.items():\n day_path = os.path.join(DAYS_PATH, '{}.json').format(day)\n print '++ writing to {}'.format(day_path)\n with open(day_path, 'w') as f:\n f.write(json.dumps(day_posts))\n\n\nif __name__ == '__main__':\n by_day()\n"
] | true |
98,364 |
6742539490ff8d5314dfacf524e03dd3abcf67d5
|
# -*- coding: utf-8 -*-
from eorsky import pspec_funcs, comoving_voxel_volume, comoving_radial_length, comoving_transverse_length
from astropy.cosmology import WMAP9
import nose.tools as nt
import numpy as np
import pylab as pl
import matplotlib.colors
from matplotlib.ticker import FormatStrFormatter
import matplotlib.gridspec as gridspec
import matplotlib.colors as mcolors
import matplotlib.cm as cm
def compare_averages_shell_pspec_dft():
"""
Take a gaussian shell and confirm its power spectrum using shell_project_pspec.
"""
select_radius = 5. #degrees
Nside=256
Npix = 12 * Nside**2
Omega = 4*np.pi/float(Npix)
Nfreq = 100
freqs = np.linspace(167.0, 177.0, Nfreq)
dnu = np.diff(freqs)[0]
Z = 1420/freqs - 1.
sig = 2.0
mu = 0.0
shell = np.random.normal(mu, sig, (Npix, Nfreq))
dV = comoving_voxel_volume(Z[Nfreq/2], dnu, Omega)
variances = []
means = []
pks = []
gs = gridspec.GridSpec(2, 3)
fig = pl.figure()
ax0 = pl.subplot(gs[0, 0:2])
ax1 = pl.subplot(gs[1, 0])
ax3 = pl.subplot(gs[1, 1])
ax2 = pl.subplot(gs[:, 2])
steps = range(10,110,10)
vmin,vmax = min(steps),max(steps)
normalize = mcolors.Normalize(vmin=vmin, vmax=vmax)
colormap = cm.viridis
for n in steps:
Nkbins = 100
kbins, pk = pspec_funcs.shell_project_pspec(shell, Nside, select_radius, freqs=freqs, Nkbins=Nkbins, N_sections=n, cosmo=True, method='dft', error=False)
variances.append(np.var(pk[0:Nkbins-5]))
means.append(np.mean(pk[0:Nkbins-5]))
pks.append(pk)
ax0.plot(kbins, pk, label=str(n), color=colormap(normalize(n)))
ax0.axhline(y=dV*sig**2, color='k', lw=2.0)
# ax0.legend()
scalarmappable = cm.ScalarMappable(norm=normalize, cmap=colormap)
scalarmappable.set_array(steps)
fig.colorbar(scalarmappable,label=r'Number of snapshots', ax=ax0)
ax0.set_ylabel(r"P(k) [mK$^2$ Mpc$^{3}]$")
ax0.set_xlabel(r"k [Mpc$^{-1}]$")
ax1.plot(steps, np.array(variances), label="Variance")
ax1.set_ylabel(r"Variance(P(k)) [mK$^4$ Mpc$^{6}]$")
ax1.set_xlabel(u"Number of 5° snapshots")
ax3.plot(steps, means, label="Mean")
ax3.set_ylabel(r"Mean(P(k)) [mK$^2$ Mpc$^{3}]$")
ax3.set_xlabel(u"Number of 5° snapshots")
ax1.legend()
ax3.legend()
im = ax2.imshow(np.array(pks)[:,0:Nkbins-5], aspect='auto')#, norm=mcolors.LogNorm())
fig.colorbar(im, ax=ax2)
print('Fractional deviation: ', np.mean(np.abs(pk - dV*sig**2)))
pl.show()
def compare_selection_radii_shell_pspec_dft():
N_sections=10
Nside=256
Npix = 12 * Nside**2
Omega = 4*np.pi/float(Npix)
Nfreq = 100
freqs = np.linspace(167.0, 177.0, Nfreq)
dnu = np.diff(freqs)[0]
Z = 1420/freqs - 1.
sig = 2.0
mu = 0.0
shell = np.random.normal(mu, sig, (Npix, Nfreq))
dV = comoving_voxel_volume(Z[Nfreq/2], dnu, Omega)
variances = []
pks = []
means = []
gs = gridspec.GridSpec(2, 3)
fig = pl.figure()
ax0 = pl.subplot(gs[0, 0:2])
ax1 = pl.subplot(gs[1, 0])
ax3 = pl.subplot(gs[1, 1])
ax2 = pl.subplot(gs[:, 2])
steps = np.linspace(2,20,20)
vmin,vmax = min(steps),max(steps)
normalize = mcolors.Normalize(vmin=vmin, vmax=vmax)
colormap = cm.viridis
for s in steps:
Nkbins = 100
kbins, pk = pspec_funcs.shell_project_pspec(shell, Nside, s, freqs=freqs, Nkbins=Nkbins, N_sections=N_sections, cosmo=True, method='dft', error=False)
variances.append(np.var(pk[0:Nkbins-5]))
pks.append(pk)
means.append(np.mean(pk[0:Nkbins-5]))
ax0.plot(kbins, pk, label=u'{:0.2f}°'.format(s), color=colormap(normalize(s)))
ax0.axhline(y=dV*sig**2)
# ax0.legend(ncol=2,loc=3)
scalarmappable = cm.ScalarMappable(norm=normalize, cmap=colormap)
scalarmappable.set_array(steps)
fig.colorbar(scalarmappable,label=r'Selection radius', ax=ax0)
ax0.set_ylabel(r"P(k) [mK$^2$ Mpc$^{3}]$")
ax0.set_xlabel(r"k [Mpc$^{-1}]$")
ax1.plot(steps, np.array(variances), label="Variance")
ax1.set_ylabel(r"Variance(P(k)) [mK$^4$ Mpc$^{6}]$")
ax1.set_xlabel(u"Selection radius (degrees)")
ax3.plot(steps, means, label="Mean")
ax3.set_ylabel(r"Mean(P(k)) [mK$^2$ Mpc$^{3}]$")
ax3.set_xlabel(u"Selection radius (degrees)")
ax1.legend()
ax3.legend()
ax1.xaxis.set_major_formatter(FormatStrFormatter(u'%0.2f°'))
ax3.xaxis.set_major_formatter(FormatStrFormatter(u'%0.2f°'))
im = ax2.imshow(np.array(pks)[:,0:Nkbins-5], aspect='auto', norm=mcolors.LogNorm())
fig.colorbar(im, ax=ax2)
pl.show()
if __name__ == '__main__':
#compare_averages_shell_pspec_dft()
compare_selection_radii_shell_pspec_dft()
|
[
"# -*- coding: utf-8 -*-\nfrom eorsky import pspec_funcs, comoving_voxel_volume, comoving_radial_length, comoving_transverse_length\nfrom astropy.cosmology import WMAP9\nimport nose.tools as nt\nimport numpy as np\nimport pylab as pl\nimport matplotlib.colors\nfrom matplotlib.ticker import FormatStrFormatter\nimport matplotlib.gridspec as gridspec\nimport matplotlib.colors as mcolors\nimport matplotlib.cm as cm\n\n\ndef compare_averages_shell_pspec_dft():\n \"\"\"\n Take a gaussian shell and confirm its power spectrum using shell_project_pspec.\n \"\"\"\n\n select_radius = 5. #degrees\n\n Nside=256\n Npix = 12 * Nside**2\n Omega = 4*np.pi/float(Npix)\n\n Nfreq = 100\n freqs = np.linspace(167.0, 177.0, Nfreq)\n dnu = np.diff(freqs)[0]\n Z = 1420/freqs - 1.\n\n sig = 2.0\n mu = 0.0\n shell = np.random.normal(mu, sig, (Npix, Nfreq))\n\n dV = comoving_voxel_volume(Z[Nfreq/2], dnu, Omega)\n variances = []\n means = []\n pks = []\n\n gs = gridspec.GridSpec(2, 3)\n fig = pl.figure()\n\n ax0 = pl.subplot(gs[0, 0:2])\n ax1 = pl.subplot(gs[1, 0])\n ax3 = pl.subplot(gs[1, 1])\n ax2 = pl.subplot(gs[:, 2])\n\n steps = range(10,110,10)\n vmin,vmax = min(steps),max(steps)\n normalize = mcolors.Normalize(vmin=vmin, vmax=vmax)\n colormap = cm.viridis\n\n for n in steps:\n Nkbins = 100\n kbins, pk = pspec_funcs.shell_project_pspec(shell, Nside, select_radius, freqs=freqs, Nkbins=Nkbins, N_sections=n, cosmo=True, method='dft', error=False)\n variances.append(np.var(pk[0:Nkbins-5]))\n means.append(np.mean(pk[0:Nkbins-5]))\n pks.append(pk)\n ax0.plot(kbins, pk, label=str(n), color=colormap(normalize(n)))\n\n ax0.axhline(y=dV*sig**2, color='k', lw=2.0)\n# ax0.legend()\n scalarmappable = cm.ScalarMappable(norm=normalize, cmap=colormap)\n scalarmappable.set_array(steps)\n fig.colorbar(scalarmappable,label=r'Number of snapshots', ax=ax0)\n ax0.set_ylabel(r\"P(k) [mK$^2$ Mpc$^{3}]$\")\n ax0.set_xlabel(r\"k [Mpc$^{-1}]$\")\n ax1.plot(steps, np.array(variances), label=\"Variance\")\n ax1.set_ylabel(r\"Variance(P(k)) [mK$^4$ Mpc$^{6}]$\")\n ax1.set_xlabel(u\"Number of 5° snapshots\")\n ax3.plot(steps, means, label=\"Mean\")\n ax3.set_ylabel(r\"Mean(P(k)) [mK$^2$ Mpc$^{3}]$\")\n ax3.set_xlabel(u\"Number of 5° snapshots\")\n ax1.legend()\n ax3.legend()\n im = ax2.imshow(np.array(pks)[:,0:Nkbins-5], aspect='auto')#, norm=mcolors.LogNorm())\n fig.colorbar(im, ax=ax2)\n print('Fractional deviation: ', np.mean(np.abs(pk - dV*sig**2)))\n pl.show()\n\ndef compare_selection_radii_shell_pspec_dft():\n N_sections=10\n\n Nside=256\n Npix = 12 * Nside**2\n Omega = 4*np.pi/float(Npix)\n\n Nfreq = 100\n freqs = np.linspace(167.0, 177.0, Nfreq)\n dnu = np.diff(freqs)[0]\n Z = 1420/freqs - 1.\n\n sig = 2.0\n mu = 0.0\n shell = np.random.normal(mu, sig, (Npix, Nfreq))\n\n dV = comoving_voxel_volume(Z[Nfreq/2], dnu, Omega)\n variances = []\n pks = []\n means = []\n\n gs = gridspec.GridSpec(2, 3)\n fig = pl.figure()\n\n ax0 = pl.subplot(gs[0, 0:2])\n ax1 = pl.subplot(gs[1, 0])\n ax3 = pl.subplot(gs[1, 1])\n ax2 = pl.subplot(gs[:, 2])\n\n steps = np.linspace(2,20,20)\n vmin,vmax = min(steps),max(steps)\n normalize = mcolors.Normalize(vmin=vmin, vmax=vmax)\n colormap = cm.viridis\n\n for s in steps:\n Nkbins = 100\n kbins, pk = pspec_funcs.shell_project_pspec(shell, Nside, s, freqs=freqs, Nkbins=Nkbins, N_sections=N_sections, cosmo=True, method='dft', error=False)\n variances.append(np.var(pk[0:Nkbins-5]))\n pks.append(pk)\n means.append(np.mean(pk[0:Nkbins-5]))\n ax0.plot(kbins, pk, label=u'{:0.2f}°'.format(s), color=colormap(normalize(s)))\n\n ax0.axhline(y=dV*sig**2)\n# ax0.legend(ncol=2,loc=3)\n scalarmappable = cm.ScalarMappable(norm=normalize, cmap=colormap)\n scalarmappable.set_array(steps)\n fig.colorbar(scalarmappable,label=r'Selection radius', ax=ax0)\n ax0.set_ylabel(r\"P(k) [mK$^2$ Mpc$^{3}]$\")\n ax0.set_xlabel(r\"k [Mpc$^{-1}]$\")\n ax1.plot(steps, np.array(variances), label=\"Variance\")\n ax1.set_ylabel(r\"Variance(P(k)) [mK$^4$ Mpc$^{6}]$\")\n ax1.set_xlabel(u\"Selection radius (degrees)\")\n ax3.plot(steps, means, label=\"Mean\")\n ax3.set_ylabel(r\"Mean(P(k)) [mK$^2$ Mpc$^{3}]$\")\n ax3.set_xlabel(u\"Selection radius (degrees)\")\n ax1.legend()\n ax3.legend()\n ax1.xaxis.set_major_formatter(FormatStrFormatter(u'%0.2f°'))\n ax3.xaxis.set_major_formatter(FormatStrFormatter(u'%0.2f°'))\n im = ax2.imshow(np.array(pks)[:,0:Nkbins-5], aspect='auto', norm=mcolors.LogNorm())\n fig.colorbar(im, ax=ax2)\n pl.show()\n\n\n\nif __name__ == '__main__':\n #compare_averages_shell_pspec_dft()\n compare_selection_radii_shell_pspec_dft()\n",
"from eorsky import pspec_funcs, comoving_voxel_volume, comoving_radial_length, comoving_transverse_length\nfrom astropy.cosmology import WMAP9\nimport nose.tools as nt\nimport numpy as np\nimport pylab as pl\nimport matplotlib.colors\nfrom matplotlib.ticker import FormatStrFormatter\nimport matplotlib.gridspec as gridspec\nimport matplotlib.colors as mcolors\nimport matplotlib.cm as cm\n\n\ndef compare_averages_shell_pspec_dft():\n \"\"\"\n Take a gaussian shell and confirm its power spectrum using shell_project_pspec.\n \"\"\"\n select_radius = 5.0\n Nside = 256\n Npix = 12 * Nside ** 2\n Omega = 4 * np.pi / float(Npix)\n Nfreq = 100\n freqs = np.linspace(167.0, 177.0, Nfreq)\n dnu = np.diff(freqs)[0]\n Z = 1420 / freqs - 1.0\n sig = 2.0\n mu = 0.0\n shell = np.random.normal(mu, sig, (Npix, Nfreq))\n dV = comoving_voxel_volume(Z[Nfreq / 2], dnu, Omega)\n variances = []\n means = []\n pks = []\n gs = gridspec.GridSpec(2, 3)\n fig = pl.figure()\n ax0 = pl.subplot(gs[0, 0:2])\n ax1 = pl.subplot(gs[1, 0])\n ax3 = pl.subplot(gs[1, 1])\n ax2 = pl.subplot(gs[:, 2])\n steps = range(10, 110, 10)\n vmin, vmax = min(steps), max(steps)\n normalize = mcolors.Normalize(vmin=vmin, vmax=vmax)\n colormap = cm.viridis\n for n in steps:\n Nkbins = 100\n kbins, pk = pspec_funcs.shell_project_pspec(shell, Nside,\n select_radius, freqs=freqs, Nkbins=Nkbins, N_sections=n, cosmo=\n True, method='dft', error=False)\n variances.append(np.var(pk[0:Nkbins - 5]))\n means.append(np.mean(pk[0:Nkbins - 5]))\n pks.append(pk)\n ax0.plot(kbins, pk, label=str(n), color=colormap(normalize(n)))\n ax0.axhline(y=dV * sig ** 2, color='k', lw=2.0)\n scalarmappable = cm.ScalarMappable(norm=normalize, cmap=colormap)\n scalarmappable.set_array(steps)\n fig.colorbar(scalarmappable, label='Number of snapshots', ax=ax0)\n ax0.set_ylabel('P(k) [mK$^2$ Mpc$^{3}]$')\n ax0.set_xlabel('k [Mpc$^{-1}]$')\n ax1.plot(steps, np.array(variances), label='Variance')\n ax1.set_ylabel('Variance(P(k)) [mK$^4$ Mpc$^{6}]$')\n ax1.set_xlabel(u'Number of 5° snapshots')\n ax3.plot(steps, means, label='Mean')\n ax3.set_ylabel('Mean(P(k)) [mK$^2$ Mpc$^{3}]$')\n ax3.set_xlabel(u'Number of 5° snapshots')\n ax1.legend()\n ax3.legend()\n im = ax2.imshow(np.array(pks)[:, 0:Nkbins - 5], aspect='auto')\n fig.colorbar(im, ax=ax2)\n print('Fractional deviation: ', np.mean(np.abs(pk - dV * sig ** 2)))\n pl.show()\n\n\ndef compare_selection_radii_shell_pspec_dft():\n N_sections = 10\n Nside = 256\n Npix = 12 * Nside ** 2\n Omega = 4 * np.pi / float(Npix)\n Nfreq = 100\n freqs = np.linspace(167.0, 177.0, Nfreq)\n dnu = np.diff(freqs)[0]\n Z = 1420 / freqs - 1.0\n sig = 2.0\n mu = 0.0\n shell = np.random.normal(mu, sig, (Npix, Nfreq))\n dV = comoving_voxel_volume(Z[Nfreq / 2], dnu, Omega)\n variances = []\n pks = []\n means = []\n gs = gridspec.GridSpec(2, 3)\n fig = pl.figure()\n ax0 = pl.subplot(gs[0, 0:2])\n ax1 = pl.subplot(gs[1, 0])\n ax3 = pl.subplot(gs[1, 1])\n ax2 = pl.subplot(gs[:, 2])\n steps = np.linspace(2, 20, 20)\n vmin, vmax = min(steps), max(steps)\n normalize = mcolors.Normalize(vmin=vmin, vmax=vmax)\n colormap = cm.viridis\n for s in steps:\n Nkbins = 100\n kbins, pk = pspec_funcs.shell_project_pspec(shell, Nside, s, freqs=\n freqs, Nkbins=Nkbins, N_sections=N_sections, cosmo=True, method\n ='dft', error=False)\n variances.append(np.var(pk[0:Nkbins - 5]))\n pks.append(pk)\n means.append(np.mean(pk[0:Nkbins - 5]))\n ax0.plot(kbins, pk, label=u'{:0.2f}°'.format(s), color=colormap(\n normalize(s)))\n ax0.axhline(y=dV * sig ** 2)\n scalarmappable = cm.ScalarMappable(norm=normalize, cmap=colormap)\n scalarmappable.set_array(steps)\n fig.colorbar(scalarmappable, label='Selection radius', ax=ax0)\n ax0.set_ylabel('P(k) [mK$^2$ Mpc$^{3}]$')\n ax0.set_xlabel('k [Mpc$^{-1}]$')\n ax1.plot(steps, np.array(variances), label='Variance')\n ax1.set_ylabel('Variance(P(k)) [mK$^4$ Mpc$^{6}]$')\n ax1.set_xlabel(u'Selection radius (degrees)')\n ax3.plot(steps, means, label='Mean')\n ax3.set_ylabel('Mean(P(k)) [mK$^2$ Mpc$^{3}]$')\n ax3.set_xlabel(u'Selection radius (degrees)')\n ax1.legend()\n ax3.legend()\n ax1.xaxis.set_major_formatter(FormatStrFormatter(u'%0.2f°'))\n ax3.xaxis.set_major_formatter(FormatStrFormatter(u'%0.2f°'))\n im = ax2.imshow(np.array(pks)[:, 0:Nkbins - 5], aspect='auto', norm=\n mcolors.LogNorm())\n fig.colorbar(im, ax=ax2)\n pl.show()\n\n\nif __name__ == '__main__':\n compare_selection_radii_shell_pspec_dft()\n",
"<import token>\n\n\ndef compare_averages_shell_pspec_dft():\n \"\"\"\n Take a gaussian shell and confirm its power spectrum using shell_project_pspec.\n \"\"\"\n select_radius = 5.0\n Nside = 256\n Npix = 12 * Nside ** 2\n Omega = 4 * np.pi / float(Npix)\n Nfreq = 100\n freqs = np.linspace(167.0, 177.0, Nfreq)\n dnu = np.diff(freqs)[0]\n Z = 1420 / freqs - 1.0\n sig = 2.0\n mu = 0.0\n shell = np.random.normal(mu, sig, (Npix, Nfreq))\n dV = comoving_voxel_volume(Z[Nfreq / 2], dnu, Omega)\n variances = []\n means = []\n pks = []\n gs = gridspec.GridSpec(2, 3)\n fig = pl.figure()\n ax0 = pl.subplot(gs[0, 0:2])\n ax1 = pl.subplot(gs[1, 0])\n ax3 = pl.subplot(gs[1, 1])\n ax2 = pl.subplot(gs[:, 2])\n steps = range(10, 110, 10)\n vmin, vmax = min(steps), max(steps)\n normalize = mcolors.Normalize(vmin=vmin, vmax=vmax)\n colormap = cm.viridis\n for n in steps:\n Nkbins = 100\n kbins, pk = pspec_funcs.shell_project_pspec(shell, Nside,\n select_radius, freqs=freqs, Nkbins=Nkbins, N_sections=n, cosmo=\n True, method='dft', error=False)\n variances.append(np.var(pk[0:Nkbins - 5]))\n means.append(np.mean(pk[0:Nkbins - 5]))\n pks.append(pk)\n ax0.plot(kbins, pk, label=str(n), color=colormap(normalize(n)))\n ax0.axhline(y=dV * sig ** 2, color='k', lw=2.0)\n scalarmappable = cm.ScalarMappable(norm=normalize, cmap=colormap)\n scalarmappable.set_array(steps)\n fig.colorbar(scalarmappable, label='Number of snapshots', ax=ax0)\n ax0.set_ylabel('P(k) [mK$^2$ Mpc$^{3}]$')\n ax0.set_xlabel('k [Mpc$^{-1}]$')\n ax1.plot(steps, np.array(variances), label='Variance')\n ax1.set_ylabel('Variance(P(k)) [mK$^4$ Mpc$^{6}]$')\n ax1.set_xlabel(u'Number of 5° snapshots')\n ax3.plot(steps, means, label='Mean')\n ax3.set_ylabel('Mean(P(k)) [mK$^2$ Mpc$^{3}]$')\n ax3.set_xlabel(u'Number of 5° snapshots')\n ax1.legend()\n ax3.legend()\n im = ax2.imshow(np.array(pks)[:, 0:Nkbins - 5], aspect='auto')\n fig.colorbar(im, ax=ax2)\n print('Fractional deviation: ', np.mean(np.abs(pk - dV * sig ** 2)))\n pl.show()\n\n\ndef compare_selection_radii_shell_pspec_dft():\n N_sections = 10\n Nside = 256\n Npix = 12 * Nside ** 2\n Omega = 4 * np.pi / float(Npix)\n Nfreq = 100\n freqs = np.linspace(167.0, 177.0, Nfreq)\n dnu = np.diff(freqs)[0]\n Z = 1420 / freqs - 1.0\n sig = 2.0\n mu = 0.0\n shell = np.random.normal(mu, sig, (Npix, Nfreq))\n dV = comoving_voxel_volume(Z[Nfreq / 2], dnu, Omega)\n variances = []\n pks = []\n means = []\n gs = gridspec.GridSpec(2, 3)\n fig = pl.figure()\n ax0 = pl.subplot(gs[0, 0:2])\n ax1 = pl.subplot(gs[1, 0])\n ax3 = pl.subplot(gs[1, 1])\n ax2 = pl.subplot(gs[:, 2])\n steps = np.linspace(2, 20, 20)\n vmin, vmax = min(steps), max(steps)\n normalize = mcolors.Normalize(vmin=vmin, vmax=vmax)\n colormap = cm.viridis\n for s in steps:\n Nkbins = 100\n kbins, pk = pspec_funcs.shell_project_pspec(shell, Nside, s, freqs=\n freqs, Nkbins=Nkbins, N_sections=N_sections, cosmo=True, method\n ='dft', error=False)\n variances.append(np.var(pk[0:Nkbins - 5]))\n pks.append(pk)\n means.append(np.mean(pk[0:Nkbins - 5]))\n ax0.plot(kbins, pk, label=u'{:0.2f}°'.format(s), color=colormap(\n normalize(s)))\n ax0.axhline(y=dV * sig ** 2)\n scalarmappable = cm.ScalarMappable(norm=normalize, cmap=colormap)\n scalarmappable.set_array(steps)\n fig.colorbar(scalarmappable, label='Selection radius', ax=ax0)\n ax0.set_ylabel('P(k) [mK$^2$ Mpc$^{3}]$')\n ax0.set_xlabel('k [Mpc$^{-1}]$')\n ax1.plot(steps, np.array(variances), label='Variance')\n ax1.set_ylabel('Variance(P(k)) [mK$^4$ Mpc$^{6}]$')\n ax1.set_xlabel(u'Selection radius (degrees)')\n ax3.plot(steps, means, label='Mean')\n ax3.set_ylabel('Mean(P(k)) [mK$^2$ Mpc$^{3}]$')\n ax3.set_xlabel(u'Selection radius (degrees)')\n ax1.legend()\n ax3.legend()\n ax1.xaxis.set_major_formatter(FormatStrFormatter(u'%0.2f°'))\n ax3.xaxis.set_major_formatter(FormatStrFormatter(u'%0.2f°'))\n im = ax2.imshow(np.array(pks)[:, 0:Nkbins - 5], aspect='auto', norm=\n mcolors.LogNorm())\n fig.colorbar(im, ax=ax2)\n pl.show()\n\n\nif __name__ == '__main__':\n compare_selection_radii_shell_pspec_dft()\n",
"<import token>\n\n\ndef compare_averages_shell_pspec_dft():\n \"\"\"\n Take a gaussian shell and confirm its power spectrum using shell_project_pspec.\n \"\"\"\n select_radius = 5.0\n Nside = 256\n Npix = 12 * Nside ** 2\n Omega = 4 * np.pi / float(Npix)\n Nfreq = 100\n freqs = np.linspace(167.0, 177.0, Nfreq)\n dnu = np.diff(freqs)[0]\n Z = 1420 / freqs - 1.0\n sig = 2.0\n mu = 0.0\n shell = np.random.normal(mu, sig, (Npix, Nfreq))\n dV = comoving_voxel_volume(Z[Nfreq / 2], dnu, Omega)\n variances = []\n means = []\n pks = []\n gs = gridspec.GridSpec(2, 3)\n fig = pl.figure()\n ax0 = pl.subplot(gs[0, 0:2])\n ax1 = pl.subplot(gs[1, 0])\n ax3 = pl.subplot(gs[1, 1])\n ax2 = pl.subplot(gs[:, 2])\n steps = range(10, 110, 10)\n vmin, vmax = min(steps), max(steps)\n normalize = mcolors.Normalize(vmin=vmin, vmax=vmax)\n colormap = cm.viridis\n for n in steps:\n Nkbins = 100\n kbins, pk = pspec_funcs.shell_project_pspec(shell, Nside,\n select_radius, freqs=freqs, Nkbins=Nkbins, N_sections=n, cosmo=\n True, method='dft', error=False)\n variances.append(np.var(pk[0:Nkbins - 5]))\n means.append(np.mean(pk[0:Nkbins - 5]))\n pks.append(pk)\n ax0.plot(kbins, pk, label=str(n), color=colormap(normalize(n)))\n ax0.axhline(y=dV * sig ** 2, color='k', lw=2.0)\n scalarmappable = cm.ScalarMappable(norm=normalize, cmap=colormap)\n scalarmappable.set_array(steps)\n fig.colorbar(scalarmappable, label='Number of snapshots', ax=ax0)\n ax0.set_ylabel('P(k) [mK$^2$ Mpc$^{3}]$')\n ax0.set_xlabel('k [Mpc$^{-1}]$')\n ax1.plot(steps, np.array(variances), label='Variance')\n ax1.set_ylabel('Variance(P(k)) [mK$^4$ Mpc$^{6}]$')\n ax1.set_xlabel(u'Number of 5° snapshots')\n ax3.plot(steps, means, label='Mean')\n ax3.set_ylabel('Mean(P(k)) [mK$^2$ Mpc$^{3}]$')\n ax3.set_xlabel(u'Number of 5° snapshots')\n ax1.legend()\n ax3.legend()\n im = ax2.imshow(np.array(pks)[:, 0:Nkbins - 5], aspect='auto')\n fig.colorbar(im, ax=ax2)\n print('Fractional deviation: ', np.mean(np.abs(pk - dV * sig ** 2)))\n pl.show()\n\n\ndef compare_selection_radii_shell_pspec_dft():\n N_sections = 10\n Nside = 256\n Npix = 12 * Nside ** 2\n Omega = 4 * np.pi / float(Npix)\n Nfreq = 100\n freqs = np.linspace(167.0, 177.0, Nfreq)\n dnu = np.diff(freqs)[0]\n Z = 1420 / freqs - 1.0\n sig = 2.0\n mu = 0.0\n shell = np.random.normal(mu, sig, (Npix, Nfreq))\n dV = comoving_voxel_volume(Z[Nfreq / 2], dnu, Omega)\n variances = []\n pks = []\n means = []\n gs = gridspec.GridSpec(2, 3)\n fig = pl.figure()\n ax0 = pl.subplot(gs[0, 0:2])\n ax1 = pl.subplot(gs[1, 0])\n ax3 = pl.subplot(gs[1, 1])\n ax2 = pl.subplot(gs[:, 2])\n steps = np.linspace(2, 20, 20)\n vmin, vmax = min(steps), max(steps)\n normalize = mcolors.Normalize(vmin=vmin, vmax=vmax)\n colormap = cm.viridis\n for s in steps:\n Nkbins = 100\n kbins, pk = pspec_funcs.shell_project_pspec(shell, Nside, s, freqs=\n freqs, Nkbins=Nkbins, N_sections=N_sections, cosmo=True, method\n ='dft', error=False)\n variances.append(np.var(pk[0:Nkbins - 5]))\n pks.append(pk)\n means.append(np.mean(pk[0:Nkbins - 5]))\n ax0.plot(kbins, pk, label=u'{:0.2f}°'.format(s), color=colormap(\n normalize(s)))\n ax0.axhline(y=dV * sig ** 2)\n scalarmappable = cm.ScalarMappable(norm=normalize, cmap=colormap)\n scalarmappable.set_array(steps)\n fig.colorbar(scalarmappable, label='Selection radius', ax=ax0)\n ax0.set_ylabel('P(k) [mK$^2$ Mpc$^{3}]$')\n ax0.set_xlabel('k [Mpc$^{-1}]$')\n ax1.plot(steps, np.array(variances), label='Variance')\n ax1.set_ylabel('Variance(P(k)) [mK$^4$ Mpc$^{6}]$')\n ax1.set_xlabel(u'Selection radius (degrees)')\n ax3.plot(steps, means, label='Mean')\n ax3.set_ylabel('Mean(P(k)) [mK$^2$ Mpc$^{3}]$')\n ax3.set_xlabel(u'Selection radius (degrees)')\n ax1.legend()\n ax3.legend()\n ax1.xaxis.set_major_formatter(FormatStrFormatter(u'%0.2f°'))\n ax3.xaxis.set_major_formatter(FormatStrFormatter(u'%0.2f°'))\n im = ax2.imshow(np.array(pks)[:, 0:Nkbins - 5], aspect='auto', norm=\n mcolors.LogNorm())\n fig.colorbar(im, ax=ax2)\n pl.show()\n\n\n<code token>\n",
"<import token>\n<function token>\n\n\ndef compare_selection_radii_shell_pspec_dft():\n N_sections = 10\n Nside = 256\n Npix = 12 * Nside ** 2\n Omega = 4 * np.pi / float(Npix)\n Nfreq = 100\n freqs = np.linspace(167.0, 177.0, Nfreq)\n dnu = np.diff(freqs)[0]\n Z = 1420 / freqs - 1.0\n sig = 2.0\n mu = 0.0\n shell = np.random.normal(mu, sig, (Npix, Nfreq))\n dV = comoving_voxel_volume(Z[Nfreq / 2], dnu, Omega)\n variances = []\n pks = []\n means = []\n gs = gridspec.GridSpec(2, 3)\n fig = pl.figure()\n ax0 = pl.subplot(gs[0, 0:2])\n ax1 = pl.subplot(gs[1, 0])\n ax3 = pl.subplot(gs[1, 1])\n ax2 = pl.subplot(gs[:, 2])\n steps = np.linspace(2, 20, 20)\n vmin, vmax = min(steps), max(steps)\n normalize = mcolors.Normalize(vmin=vmin, vmax=vmax)\n colormap = cm.viridis\n for s in steps:\n Nkbins = 100\n kbins, pk = pspec_funcs.shell_project_pspec(shell, Nside, s, freqs=\n freqs, Nkbins=Nkbins, N_sections=N_sections, cosmo=True, method\n ='dft', error=False)\n variances.append(np.var(pk[0:Nkbins - 5]))\n pks.append(pk)\n means.append(np.mean(pk[0:Nkbins - 5]))\n ax0.plot(kbins, pk, label=u'{:0.2f}°'.format(s), color=colormap(\n normalize(s)))\n ax0.axhline(y=dV * sig ** 2)\n scalarmappable = cm.ScalarMappable(norm=normalize, cmap=colormap)\n scalarmappable.set_array(steps)\n fig.colorbar(scalarmappable, label='Selection radius', ax=ax0)\n ax0.set_ylabel('P(k) [mK$^2$ Mpc$^{3}]$')\n ax0.set_xlabel('k [Mpc$^{-1}]$')\n ax1.plot(steps, np.array(variances), label='Variance')\n ax1.set_ylabel('Variance(P(k)) [mK$^4$ Mpc$^{6}]$')\n ax1.set_xlabel(u'Selection radius (degrees)')\n ax3.plot(steps, means, label='Mean')\n ax3.set_ylabel('Mean(P(k)) [mK$^2$ Mpc$^{3}]$')\n ax3.set_xlabel(u'Selection radius (degrees)')\n ax1.legend()\n ax3.legend()\n ax1.xaxis.set_major_formatter(FormatStrFormatter(u'%0.2f°'))\n ax3.xaxis.set_major_formatter(FormatStrFormatter(u'%0.2f°'))\n im = ax2.imshow(np.array(pks)[:, 0:Nkbins - 5], aspect='auto', norm=\n mcolors.LogNorm())\n fig.colorbar(im, ax=ax2)\n pl.show()\n\n\n<code token>\n",
"<import token>\n<function token>\n<function token>\n<code token>\n"
] | false |
98,365 |
846a2bab34898acc0fa5d51bdc4231b554eba675
|
'''
Created on Oct 9, 2018
@author: tongq
'''
class Solution(object):
def new21Game(self, N, K, W):
"""
:type N: int
:type K: int
:type W: int
:rtype: float
"""
n, k, w = N, K, W
if k == 0 or n >= k+w: return 1
dp = [1.0]+[0.0]*n
wSum = 1.0
for i in range(1, n+1):
dp[i] = wSum/w
if i < k: wSum += dp[i]
if i - w >= 0: wSum -= dp[i-w]
return sum(dp[k:])
def test(self):
testCases = [
[10, 1, 10],
[6, 1, 10],
[21, 17, 10],
]
for n, k, w in testCases:
result = self.new21Game(n, k, w)
print('result: %s' % result)
print('-='*30+'-')
if __name__ == '__main__':
Solution().test()
|
[
"'''\nCreated on Oct 9, 2018\n\n@author: tongq\n'''\nclass Solution(object):\n def new21Game(self, N, K, W):\n \"\"\"\n :type N: int\n :type K: int\n :type W: int\n :rtype: float\n \"\"\"\n n, k, w = N, K, W\n if k == 0 or n >= k+w: return 1\n dp = [1.0]+[0.0]*n\n wSum = 1.0\n for i in range(1, n+1):\n dp[i] = wSum/w\n if i < k: wSum += dp[i]\n if i - w >= 0: wSum -= dp[i-w]\n return sum(dp[k:])\n \n def test(self):\n testCases = [\n [10, 1, 10],\n [6, 1, 10],\n [21, 17, 10],\n ]\n for n, k, w in testCases:\n result = self.new21Game(n, k, w)\n print('result: %s' % result)\n print('-='*30+'-')\n\nif __name__ == '__main__':\n Solution().test()\n",
"<docstring token>\n\n\nclass Solution(object):\n\n def new21Game(self, N, K, W):\n \"\"\"\n :type N: int\n :type K: int\n :type W: int\n :rtype: float\n \"\"\"\n n, k, w = N, K, W\n if k == 0 or n >= k + w:\n return 1\n dp = [1.0] + [0.0] * n\n wSum = 1.0\n for i in range(1, n + 1):\n dp[i] = wSum / w\n if i < k:\n wSum += dp[i]\n if i - w >= 0:\n wSum -= dp[i - w]\n return sum(dp[k:])\n\n def test(self):\n testCases = [[10, 1, 10], [6, 1, 10], [21, 17, 10]]\n for n, k, w in testCases:\n result = self.new21Game(n, k, w)\n print('result: %s' % result)\n print('-=' * 30 + '-')\n\n\nif __name__ == '__main__':\n Solution().test()\n",
"<docstring token>\n\n\nclass Solution(object):\n\n def new21Game(self, N, K, W):\n \"\"\"\n :type N: int\n :type K: int\n :type W: int\n :rtype: float\n \"\"\"\n n, k, w = N, K, W\n if k == 0 or n >= k + w:\n return 1\n dp = [1.0] + [0.0] * n\n wSum = 1.0\n for i in range(1, n + 1):\n dp[i] = wSum / w\n if i < k:\n wSum += dp[i]\n if i - w >= 0:\n wSum -= dp[i - w]\n return sum(dp[k:])\n\n def test(self):\n testCases = [[10, 1, 10], [6, 1, 10], [21, 17, 10]]\n for n, k, w in testCases:\n result = self.new21Game(n, k, w)\n print('result: %s' % result)\n print('-=' * 30 + '-')\n\n\n<code token>\n",
"<docstring token>\n\n\nclass Solution(object):\n\n def new21Game(self, N, K, W):\n \"\"\"\n :type N: int\n :type K: int\n :type W: int\n :rtype: float\n \"\"\"\n n, k, w = N, K, W\n if k == 0 or n >= k + w:\n return 1\n dp = [1.0] + [0.0] * n\n wSum = 1.0\n for i in range(1, n + 1):\n dp[i] = wSum / w\n if i < k:\n wSum += dp[i]\n if i - w >= 0:\n wSum -= dp[i - w]\n return sum(dp[k:])\n <function token>\n\n\n<code token>\n",
"<docstring token>\n\n\nclass Solution(object):\n <function token>\n <function token>\n\n\n<code token>\n",
"<docstring token>\n<class token>\n<code token>\n"
] | false |
98,366 |
fff4209166c7e27c6e4118afa2282919fd3eeb38
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Original source: github.com/okfn/bibserver
# Authors:
# markmacgillivray
# Etienne Posthumus (epoz)
# Francois Boulogne <fboulogne at april dot org>
import sys
import logging
import io
import re
from bibtexparser.bibdatabase import BibDatabase
logger = logging.getLogger(__name__)
__all__ = ['BibTexParser']
if sys.version_info >= (3, 0):
from io import StringIO
ustr = str
else:
from StringIO import StringIO
ustr = unicode
class BibTexParser(object):
"""
A parser for reading BibTeX bibliographic data files.
Example::
from bibtexparser.bparser import BibTexParser
bibtex_str = ...
parser = BibTexParser()
parser.ignore_nonstandard_types = False
parser.homogenise_fields = False
bib_database = bibtexparser.loads(bibtex_str, parser)
"""
def __new__(cls, data=None,
customization=None,
ignore_nonstandard_types=True,
homogenise_fields=True):
"""
To catch the old API structure in which creating the parser would immediately parse and return data.
"""
if data is None:
return super(BibTexParser, cls).__new__(cls)
else:
# For backwards compatibility: if data is given, parse and return the `BibDatabase` object instead of the
# parser.
parser = BibTexParser()
parser.customization = customization
parser.ignore_nonstandard_types = ignore_nonstandard_types
parser.homogenise_fields = homogenise_fields
return parser.parse(data)
def __init__(self):
"""
Creates a parser for rading BibTeX files
:return: parser
:rtype: `BibTexParser`
"""
self.bib_database = BibDatabase()
#: Callback function to process BibTeX entries after parsing, for example to create a list from a string with
#: multiple values. By default all BibTeX values are treated as simple strings. Default: `None`.
self.customization = None
#: Ignore non-standard BibTeX types (`book`, `article`, etc). Default: `True`.
self.ignore_nonstandard_types = True
#: Sanitise BibTeX field names, for example change `url` to `link` etc. Field names are always converted to
#: lowercase names. Default: `True`.
self.homogenise_fields = True
# On some sample data files, the character encoding detection simply
# hangs We are going to default to utf8, and mandate it.
self.encoding = 'utf8'
# pre-defined set of key changes
self.alt_dict = {
'keyw': 'keyword',
'keywords': 'keyword',
'authors': 'author',
'editors': 'editor',
'url': 'link',
'urls': 'link',
'links': 'link',
'subjects': 'subject'
}
self.replace_all_re = re.compile(r'((?P<pre>"?)\s*(#|^)\s*(?P<id>[^\d\W]\w*)\s*(#|$)\s*(?P<post>"?))', re.UNICODE)
def _bibtex_file_obj(self, bibtex_str):
# Some files have Byte-order marks inserted at the start
byte = '\xef\xbb\xbf'
if not isinstance(byte, ustr):
byte = ustr('\xef\xbb\xbf', self.encoding, 'ignore')
if bibtex_str[:3] == byte:
bibtex_str = bibtex_str[3:]
return StringIO(bibtex_str)
def parse(self, bibtex_str):
"""Parse a BibTeX string into an object
:param bibtex_str: BibTeX string
:type: str or unicode
:return: bibliographic database
:rtype: BibDatabase
"""
self.bibtex_file_obj = self._bibtex_file_obj(bibtex_str)
self._parse_records(customization=self.customization)
return self.bib_database
def parse_file(self, file):
"""Parse a BibTeX file into an object
:param file: BibTeX file or file-like object
:type: file
:return: bibliographic database
:rtype: BibDatabase
"""
return self.parse(file.read())
def _parse_records(self, customization=None):
"""Parse the bibtex into a list of records.
:param customization: a function
"""
def _add_parsed_record(record, records):
"""
Atomic function to parse a record
and append the result in records
"""
if record != "":
logger.debug('The record is not empty. Let\'s parse it.')
parsed = self._parse_record(record, customization=customization)
if parsed:
logger.debug('Store the result of the parsed record')
records.append(parsed)
else:
logger.debug('Nothing returned from the parsed record!')
else:
logger.debug('The record is empty')
records = []
record = ""
# read each line, bundle them up until they form an object, then send for parsing
for linenumber, line in enumerate(self.bibtex_file_obj):
logger.debug('Inspect line %s', linenumber)
if line.strip().startswith('@'):
# Remove leading whitespaces
line = line.lstrip()
logger.debug('Line starts with @')
# Parse previous record
_add_parsed_record(record, records)
# Start new record
logger.debug('The record is set to empty')
record = ""
# Keep adding lines to the record
record += line
# catch any remaining record and send it for parsing
_add_parsed_record(record, records)
logger.debug('Set the list of entries')
self.bib_database.entries = records
def _parse_record(self, record, customization=None):
"""Parse a record.
* tidy whitespace and other rubbish
* parse out the bibtype and citekey
* find all the key-value pairs it contains
:param record: a record
:param customization: a function
:returns: dict --
"""
d = {}
if not record.startswith('@'):
logger.debug('The record does not start with @. Return empty dict.')
return {}
# if a comment record, add to bib_database.comments
if record.lower().startswith('@comment'):
logger.debug('The record startswith @comment')
logger.debug('Store comment in list of comments')
self.bib_database.comments.append(re.search('\{(.*)\}', record, re.DOTALL).group(1))
logger.debug('Return an empty dict')
return {}
# if a preamble record, add to bib_database.preambles
if record.lower().startswith('@preamble'):
logger.debug('The record startswith @preamble')
logger.debug('Store preamble in list of preambles')
self.bib_database.preambles.append(re.search('\{(.*)\}', record, re.DOTALL).group(1))
logger.debug('Return an empty dict')
return {}
# prepare record
record = '\n'.join([i.strip() for i in record.split('\n')])
if '}\n' in record:
logger.debug('}\\n detected in the record. Clean up.')
record = record.replace('\r\n', '\n').replace('\r', '\n').rstrip('\n')
# treat the case for which the last line of the record
# does not have a coma
if record.endswith('}\n}') or record.endswith('}}'):
logger.debug('Missing coma in the last line of the record. Fix it.')
record = re.sub('}(\n|)}$', '},\n}', record)
# if a string record, put it in the replace_dict
if record.lower().startswith('@string'):
logger.debug('The record startswith @string')
key, val = [i.strip().strip('{').strip('}').replace('\n', ' ') for i in record.split('{', 1)[1].strip('\n').strip(',').strip('}').split('=')]
key = key.lower() # key is case insensitive
val = self._string_subst_partial(val)
if val.startswith('"') or val.lower() not in self.bib_database.strings:
self.bib_database.strings[key] = val.strip('"')
else:
self.bib_database.strings[key] = self.bib_database.strings[val.lower()]
logger.debug('Return a dict')
return d
# for each line in record
logger.debug('Split the record of its lines and treat them')
kvs = [i.strip() for i in record.split(',\n')]
inkey = ""
inval = ""
for kv in kvs:
logger.debug('Inspect: %s', kv)
# TODO: We may check that the keyword belongs to a known type
if kv.startswith('@') and not inkey:
# it is the start of the record - set the bibtype and citekey (id)
logger.debug('Line starts with @ and the key is not stored yet.')
bibtype, id = kv.split('{', 1)
bibtype = self._add_key(bibtype)
id = id.strip('}').strip(',')
logger.debug('bibtype = %s', bibtype)
logger.debug('id = %s', id)
if self.ignore_nonstandard_types and bibtype not in ('article',
'book',
'booklet',
'conference',
'inbook',
'incollection',
'inproceedings',
'manual',
'mastersthesis',
'misc',
'phdthesis',
'proceedings',
'techreport',
'unpublished'):
logger.warning('Entry type %s not standard. Not considered.', bibtype)
break
elif '=' in kv and not inkey:
# it is a line with a key value pair on it
logger.debug('Line contains a key-pair value and the key is not stored yet.')
key, val = [i.strip() for i in kv.split('=', 1)]
key = self._add_key(key)
val = self._string_subst_partial(val)
# if it looks like the value spans lines, store details for next loop
if (val.count('{') != val.count('}')) or (val.startswith('"') and not val.replace('}', '').endswith('"')):
logger.debug('The line is not ending the record.')
inkey = key
inval = val
else:
logger.debug('The line is the end of the record.')
d[key] = self._add_val(val)
elif inkey:
logger.debug('Continues the previous line to complete the key pair value...')
# if this line continues the value from a previous line, append
inval += ', ' + kv
# if it looks like this line finishes the value, store it and clear for next loop
if (inval.startswith('{') and inval.endswith('}')) or (inval.startswith('"') and inval.endswith('"')):
logger.debug('This line represents the end of the current key-pair value')
d[inkey] = self._add_val(inval)
inkey = ""
inval = ""
else:
logger.debug('This line does NOT represent the end of the current key-pair value')
logger.debug('All lines have been treated')
if not d:
logger.debug('The dict is empty, return it.')
return d
d['ENTRYTYPE'] = bibtype
d['ID'] = id
if customization is None:
logger.debug('No customization to apply, return dict')
return d
else:
# apply any customizations to the record object then return it
logger.debug('Apply customizations and return dict')
return customization(d)
def _strip_quotes(self, val):
"""Strip double quotes enclosing string
:param val: a value
:type val: string
:returns: string -- value
"""
logger.debug('Strip quotes')
val = val.strip()
if val.startswith('"') and val.endswith('"'):
return val[1:-1]
return val
def _strip_braces(self, val):
"""Strip braces enclosing string
:param val: a value
:type val: string
:returns: string -- value
"""
logger.debug('Strip braces')
val = val.strip()
if val.startswith('{') and val.endswith('}') and self._full_span(val):
return val[1:-1]
return val
def _full_span(self, val):
cnt = 0
for i in range(0, len(val)):
if val[i] == '{':
cnt += 1
elif val[i] == '}':
cnt -= 1
if cnt == 0:
break
if i == len(val) - 1:
return True
else:
return False
def _string_subst(self, val):
""" Substitute string definitions
:param val: a value
:type val: string
:returns: string -- value
"""
logger.debug('Substitute string definitions')
if not val:
return ''
for k in list(self.bib_database.strings.keys()):
if val.lower() == k:
val = self.bib_database.strings[k]
if not isinstance(val, ustr):
val = ustr(val, self.encoding, 'ignore')
return val
def _string_subst_partial(self, val):
""" Substitute string definitions inside larger expressions
:param val: a value
:type val: string
:returns: string -- value
"""
def repl(m):
k = m.group('id')
replacement = self.bib_database.strings[k.lower()] if k.lower() in self.bib_database.strings else k
pre = '"' if m.group('pre') != '"' else ''
post = '"' if m.group('post') != '"' else ''
return pre + replacement + post
logger.debug('Substitute string definitions inside larger expressions')
if '#' not in val:
return val
# TODO?: Does not match two subsequent variables or strings, such as "start" # foo # bar # "end" or "start" # "end".
# TODO: Does not support braces instead of quotes, e.g.: {start} # foo # {bar}
# TODO: Does not support strings like: "te#s#t"
return self.replace_all_re.sub(repl, val)
def _add_val(self, val):
""" Clean instring before adding to dictionary
:param val: a value
:type val: string
:returns: string -- value
"""
if not val or val == "{}":
return ''
val = self._strip_braces(val)
val = self._strip_quotes(val)
val = self._strip_braces(val)
val = self._string_subst(val)
return val
def _add_key(self, key):
""" Add a key and homogeneize alternative forms.
:param key: a key
:type key: string
:returns: string -- value
"""
key = key.strip().strip('@').lower()
if self.homogenise_fields:
if key in list(self.alt_dict.keys()):
key = self.alt_dict[key]
if not isinstance(key, ustr):
return ustr(key, 'utf-8')
else:
return key
|
[
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# Original source: github.com/okfn/bibserver\n# Authors:\n# markmacgillivray\n# Etienne Posthumus (epoz)\n# Francois Boulogne <fboulogne at april dot org>\n\nimport sys\nimport logging\nimport io\nimport re\nfrom bibtexparser.bibdatabase import BibDatabase\n\nlogger = logging.getLogger(__name__)\n\n__all__ = ['BibTexParser']\n\n\nif sys.version_info >= (3, 0):\n from io import StringIO\n ustr = str\nelse:\n from StringIO import StringIO\n ustr = unicode\n\n\nclass BibTexParser(object):\n \"\"\"\n A parser for reading BibTeX bibliographic data files.\n\n Example::\n\n from bibtexparser.bparser import BibTexParser\n\n bibtex_str = ...\n\n parser = BibTexParser()\n parser.ignore_nonstandard_types = False\n parser.homogenise_fields = False\n bib_database = bibtexparser.loads(bibtex_str, parser)\n \"\"\"\n\n def __new__(cls, data=None,\n customization=None,\n ignore_nonstandard_types=True,\n homogenise_fields=True):\n \"\"\"\n To catch the old API structure in which creating the parser would immediately parse and return data.\n \"\"\"\n\n if data is None:\n return super(BibTexParser, cls).__new__(cls)\n else:\n # For backwards compatibility: if data is given, parse and return the `BibDatabase` object instead of the\n # parser.\n parser = BibTexParser()\n parser.customization = customization\n parser.ignore_nonstandard_types = ignore_nonstandard_types\n parser.homogenise_fields = homogenise_fields\n return parser.parse(data)\n\n def __init__(self):\n \"\"\"\n Creates a parser for rading BibTeX files\n\n :return: parser\n :rtype: `BibTexParser`\n \"\"\"\n self.bib_database = BibDatabase()\n #: Callback function to process BibTeX entries after parsing, for example to create a list from a string with\n #: multiple values. By default all BibTeX values are treated as simple strings. Default: `None`.\n self.customization = None\n\n #: Ignore non-standard BibTeX types (`book`, `article`, etc). Default: `True`.\n self.ignore_nonstandard_types = True\n\n #: Sanitise BibTeX field names, for example change `url` to `link` etc. Field names are always converted to\n #: lowercase names. Default: `True`.\n self.homogenise_fields = True\n\n # On some sample data files, the character encoding detection simply\n # hangs We are going to default to utf8, and mandate it.\n self.encoding = 'utf8'\n\n # pre-defined set of key changes\n self.alt_dict = {\n 'keyw': 'keyword',\n 'keywords': 'keyword',\n 'authors': 'author',\n 'editors': 'editor',\n 'url': 'link',\n 'urls': 'link',\n 'links': 'link',\n 'subjects': 'subject'\n }\n\n self.replace_all_re = re.compile(r'((?P<pre>\"?)\\s*(#|^)\\s*(?P<id>[^\\d\\W]\\w*)\\s*(#|$)\\s*(?P<post>\"?))', re.UNICODE)\n\n def _bibtex_file_obj(self, bibtex_str):\n # Some files have Byte-order marks inserted at the start\n byte = '\\xef\\xbb\\xbf'\n if not isinstance(byte, ustr):\n byte = ustr('\\xef\\xbb\\xbf', self.encoding, 'ignore')\n if bibtex_str[:3] == byte:\n bibtex_str = bibtex_str[3:]\n return StringIO(bibtex_str)\n\n def parse(self, bibtex_str):\n \"\"\"Parse a BibTeX string into an object\n\n :param bibtex_str: BibTeX string\n :type: str or unicode\n :return: bibliographic database\n :rtype: BibDatabase\n \"\"\"\n self.bibtex_file_obj = self._bibtex_file_obj(bibtex_str)\n self._parse_records(customization=self.customization)\n return self.bib_database\n\n def parse_file(self, file):\n \"\"\"Parse a BibTeX file into an object\n\n :param file: BibTeX file or file-like object\n :type: file\n :return: bibliographic database\n :rtype: BibDatabase\n \"\"\"\n return self.parse(file.read())\n\n def _parse_records(self, customization=None):\n \"\"\"Parse the bibtex into a list of records.\n\n :param customization: a function\n \"\"\"\n def _add_parsed_record(record, records):\n \"\"\"\n Atomic function to parse a record\n and append the result in records\n \"\"\"\n if record != \"\":\n logger.debug('The record is not empty. Let\\'s parse it.')\n parsed = self._parse_record(record, customization=customization)\n if parsed:\n logger.debug('Store the result of the parsed record')\n records.append(parsed)\n else:\n logger.debug('Nothing returned from the parsed record!')\n else:\n logger.debug('The record is empty')\n\n records = []\n record = \"\"\n # read each line, bundle them up until they form an object, then send for parsing\n for linenumber, line in enumerate(self.bibtex_file_obj):\n logger.debug('Inspect line %s', linenumber)\n if line.strip().startswith('@'):\n # Remove leading whitespaces\n line = line.lstrip()\n logger.debug('Line starts with @')\n # Parse previous record\n _add_parsed_record(record, records)\n # Start new record\n logger.debug('The record is set to empty')\n record = \"\"\n # Keep adding lines to the record\n record += line\n\n # catch any remaining record and send it for parsing\n _add_parsed_record(record, records)\n logger.debug('Set the list of entries')\n self.bib_database.entries = records\n\n def _parse_record(self, record, customization=None):\n \"\"\"Parse a record.\n\n * tidy whitespace and other rubbish\n * parse out the bibtype and citekey\n * find all the key-value pairs it contains\n\n :param record: a record\n :param customization: a function\n\n :returns: dict --\n \"\"\"\n d = {}\n\n if not record.startswith('@'):\n logger.debug('The record does not start with @. Return empty dict.')\n return {}\n\n # if a comment record, add to bib_database.comments\n if record.lower().startswith('@comment'):\n logger.debug('The record startswith @comment')\n logger.debug('Store comment in list of comments')\n\n self.bib_database.comments.append(re.search('\\{(.*)\\}', record, re.DOTALL).group(1))\n\n logger.debug('Return an empty dict')\n return {}\n\n # if a preamble record, add to bib_database.preambles\n if record.lower().startswith('@preamble'):\n logger.debug('The record startswith @preamble')\n logger.debug('Store preamble in list of preambles')\n\n self.bib_database.preambles.append(re.search('\\{(.*)\\}', record, re.DOTALL).group(1))\n\n logger.debug('Return an empty dict')\n return {}\n\n # prepare record\n record = '\\n'.join([i.strip() for i in record.split('\\n')])\n if '}\\n' in record:\n logger.debug('}\\\\n detected in the record. Clean up.')\n record = record.replace('\\r\\n', '\\n').replace('\\r', '\\n').rstrip('\\n')\n # treat the case for which the last line of the record\n # does not have a coma\n if record.endswith('}\\n}') or record.endswith('}}'):\n logger.debug('Missing coma in the last line of the record. Fix it.')\n record = re.sub('}(\\n|)}$', '},\\n}', record)\n\n # if a string record, put it in the replace_dict\n if record.lower().startswith('@string'):\n logger.debug('The record startswith @string')\n key, val = [i.strip().strip('{').strip('}').replace('\\n', ' ') for i in record.split('{', 1)[1].strip('\\n').strip(',').strip('}').split('=')]\n key = key.lower() # key is case insensitive\n val = self._string_subst_partial(val)\n if val.startswith('\"') or val.lower() not in self.bib_database.strings:\n self.bib_database.strings[key] = val.strip('\"')\n else:\n self.bib_database.strings[key] = self.bib_database.strings[val.lower()]\n logger.debug('Return a dict')\n return d\n\n # for each line in record\n logger.debug('Split the record of its lines and treat them')\n kvs = [i.strip() for i in record.split(',\\n')]\n inkey = \"\"\n inval = \"\"\n for kv in kvs:\n logger.debug('Inspect: %s', kv)\n # TODO: We may check that the keyword belongs to a known type\n if kv.startswith('@') and not inkey:\n # it is the start of the record - set the bibtype and citekey (id)\n logger.debug('Line starts with @ and the key is not stored yet.')\n bibtype, id = kv.split('{', 1)\n bibtype = self._add_key(bibtype)\n id = id.strip('}').strip(',')\n logger.debug('bibtype = %s', bibtype)\n logger.debug('id = %s', id)\n if self.ignore_nonstandard_types and bibtype not in ('article',\n 'book',\n 'booklet',\n 'conference',\n 'inbook',\n 'incollection',\n 'inproceedings',\n 'manual',\n 'mastersthesis',\n 'misc',\n 'phdthesis',\n 'proceedings',\n 'techreport',\n 'unpublished'):\n logger.warning('Entry type %s not standard. Not considered.', bibtype)\n break\n elif '=' in kv and not inkey:\n # it is a line with a key value pair on it\n logger.debug('Line contains a key-pair value and the key is not stored yet.')\n key, val = [i.strip() for i in kv.split('=', 1)]\n key = self._add_key(key)\n val = self._string_subst_partial(val)\n # if it looks like the value spans lines, store details for next loop\n if (val.count('{') != val.count('}')) or (val.startswith('\"') and not val.replace('}', '').endswith('\"')):\n logger.debug('The line is not ending the record.')\n inkey = key\n inval = val\n else:\n logger.debug('The line is the end of the record.')\n d[key] = self._add_val(val)\n elif inkey:\n logger.debug('Continues the previous line to complete the key pair value...')\n # if this line continues the value from a previous line, append\n inval += ', ' + kv\n # if it looks like this line finishes the value, store it and clear for next loop\n if (inval.startswith('{') and inval.endswith('}')) or (inval.startswith('\"') and inval.endswith('\"')):\n logger.debug('This line represents the end of the current key-pair value')\n d[inkey] = self._add_val(inval)\n inkey = \"\"\n inval = \"\"\n else:\n logger.debug('This line does NOT represent the end of the current key-pair value')\n\n logger.debug('All lines have been treated')\n if not d:\n logger.debug('The dict is empty, return it.')\n return d\n\n d['ENTRYTYPE'] = bibtype\n d['ID'] = id\n\n if customization is None:\n logger.debug('No customization to apply, return dict')\n return d\n else:\n # apply any customizations to the record object then return it\n logger.debug('Apply customizations and return dict')\n return customization(d)\n\n def _strip_quotes(self, val):\n \"\"\"Strip double quotes enclosing string\n\n :param val: a value\n :type val: string\n :returns: string -- value\n \"\"\"\n logger.debug('Strip quotes')\n val = val.strip()\n if val.startswith('\"') and val.endswith('\"'):\n return val[1:-1]\n return val\n\n def _strip_braces(self, val):\n \"\"\"Strip braces enclosing string\n\n :param val: a value\n :type val: string\n :returns: string -- value\n \"\"\"\n logger.debug('Strip braces')\n val = val.strip()\n if val.startswith('{') and val.endswith('}') and self._full_span(val):\n return val[1:-1]\n return val\n\n def _full_span(self, val):\n cnt = 0\n for i in range(0, len(val)):\n if val[i] == '{':\n cnt += 1\n elif val[i] == '}':\n cnt -= 1\n if cnt == 0:\n break\n if i == len(val) - 1:\n return True\n else:\n return False\n\n def _string_subst(self, val):\n \"\"\" Substitute string definitions\n\n :param val: a value\n :type val: string\n :returns: string -- value\n \"\"\"\n logger.debug('Substitute string definitions')\n if not val:\n return ''\n for k in list(self.bib_database.strings.keys()):\n if val.lower() == k:\n val = self.bib_database.strings[k]\n if not isinstance(val, ustr):\n val = ustr(val, self.encoding, 'ignore')\n\n return val\n\n def _string_subst_partial(self, val):\n \"\"\" Substitute string definitions inside larger expressions\n\n :param val: a value\n :type val: string\n :returns: string -- value\n \"\"\"\n def repl(m):\n k = m.group('id')\n replacement = self.bib_database.strings[k.lower()] if k.lower() in self.bib_database.strings else k\n pre = '\"' if m.group('pre') != '\"' else ''\n post = '\"' if m.group('post') != '\"' else ''\n return pre + replacement + post\n\n logger.debug('Substitute string definitions inside larger expressions')\n if '#' not in val:\n return val\n\n # TODO?: Does not match two subsequent variables or strings, such as \"start\" # foo # bar # \"end\" or \"start\" # \"end\".\n # TODO: Does not support braces instead of quotes, e.g.: {start} # foo # {bar}\n # TODO: Does not support strings like: \"te#s#t\"\n return self.replace_all_re.sub(repl, val)\n\n def _add_val(self, val):\n \"\"\" Clean instring before adding to dictionary\n\n :param val: a value\n :type val: string\n :returns: string -- value\n \"\"\"\n if not val or val == \"{}\":\n return ''\n val = self._strip_braces(val)\n val = self._strip_quotes(val)\n val = self._strip_braces(val)\n val = self._string_subst(val)\n return val\n\n def _add_key(self, key):\n \"\"\" Add a key and homogeneize alternative forms.\n\n :param key: a key\n :type key: string\n :returns: string -- value\n \"\"\"\n key = key.strip().strip('@').lower()\n if self.homogenise_fields:\n if key in list(self.alt_dict.keys()):\n key = self.alt_dict[key]\n if not isinstance(key, ustr):\n return ustr(key, 'utf-8')\n else:\n return key\n",
"import sys\nimport logging\nimport io\nimport re\nfrom bibtexparser.bibdatabase import BibDatabase\nlogger = logging.getLogger(__name__)\n__all__ = ['BibTexParser']\nif sys.version_info >= (3, 0):\n from io import StringIO\n ustr = str\nelse:\n from StringIO import StringIO\n ustr = unicode\n\n\nclass BibTexParser(object):\n \"\"\"\n A parser for reading BibTeX bibliographic data files.\n\n Example::\n\n from bibtexparser.bparser import BibTexParser\n\n bibtex_str = ...\n\n parser = BibTexParser()\n parser.ignore_nonstandard_types = False\n parser.homogenise_fields = False\n bib_database = bibtexparser.loads(bibtex_str, parser)\n \"\"\"\n\n def __new__(cls, data=None, customization=None,\n ignore_nonstandard_types=True, homogenise_fields=True):\n \"\"\"\n To catch the old API structure in which creating the parser would immediately parse and return data.\n \"\"\"\n if data is None:\n return super(BibTexParser, cls).__new__(cls)\n else:\n parser = BibTexParser()\n parser.customization = customization\n parser.ignore_nonstandard_types = ignore_nonstandard_types\n parser.homogenise_fields = homogenise_fields\n return parser.parse(data)\n\n def __init__(self):\n \"\"\"\n Creates a parser for rading BibTeX files\n\n :return: parser\n :rtype: `BibTexParser`\n \"\"\"\n self.bib_database = BibDatabase()\n self.customization = None\n self.ignore_nonstandard_types = True\n self.homogenise_fields = True\n self.encoding = 'utf8'\n self.alt_dict = {'keyw': 'keyword', 'keywords': 'keyword',\n 'authors': 'author', 'editors': 'editor', 'url': 'link', 'urls':\n 'link', 'links': 'link', 'subjects': 'subject'}\n self.replace_all_re = re.compile(\n '((?P<pre>\"?)\\\\s*(#|^)\\\\s*(?P<id>[^\\\\d\\\\W]\\\\w*)\\\\s*(#|$)\\\\s*(?P<post>\"?))'\n , re.UNICODE)\n\n def _bibtex_file_obj(self, bibtex_str):\n byte = ''\n if not isinstance(byte, ustr):\n byte = ustr('', self.encoding, 'ignore')\n if bibtex_str[:3] == byte:\n bibtex_str = bibtex_str[3:]\n return StringIO(bibtex_str)\n\n def parse(self, bibtex_str):\n \"\"\"Parse a BibTeX string into an object\n\n :param bibtex_str: BibTeX string\n :type: str or unicode\n :return: bibliographic database\n :rtype: BibDatabase\n \"\"\"\n self.bibtex_file_obj = self._bibtex_file_obj(bibtex_str)\n self._parse_records(customization=self.customization)\n return self.bib_database\n\n def parse_file(self, file):\n \"\"\"Parse a BibTeX file into an object\n\n :param file: BibTeX file or file-like object\n :type: file\n :return: bibliographic database\n :rtype: BibDatabase\n \"\"\"\n return self.parse(file.read())\n\n def _parse_records(self, customization=None):\n \"\"\"Parse the bibtex into a list of records.\n\n :param customization: a function\n \"\"\"\n\n def _add_parsed_record(record, records):\n \"\"\"\n Atomic function to parse a record\n and append the result in records\n \"\"\"\n if record != '':\n logger.debug(\"The record is not empty. Let's parse it.\")\n parsed = self._parse_record(record, customization=customization\n )\n if parsed:\n logger.debug('Store the result of the parsed record')\n records.append(parsed)\n else:\n logger.debug('Nothing returned from the parsed record!')\n else:\n logger.debug('The record is empty')\n records = []\n record = ''\n for linenumber, line in enumerate(self.bibtex_file_obj):\n logger.debug('Inspect line %s', linenumber)\n if line.strip().startswith('@'):\n line = line.lstrip()\n logger.debug('Line starts with @')\n _add_parsed_record(record, records)\n logger.debug('The record is set to empty')\n record = ''\n record += line\n _add_parsed_record(record, records)\n logger.debug('Set the list of entries')\n self.bib_database.entries = records\n\n def _parse_record(self, record, customization=None):\n \"\"\"Parse a record.\n\n * tidy whitespace and other rubbish\n * parse out the bibtype and citekey\n * find all the key-value pairs it contains\n\n :param record: a record\n :param customization: a function\n\n :returns: dict --\n \"\"\"\n d = {}\n if not record.startswith('@'):\n logger.debug('The record does not start with @. Return empty dict.'\n )\n return {}\n if record.lower().startswith('@comment'):\n logger.debug('The record startswith @comment')\n logger.debug('Store comment in list of comments')\n self.bib_database.comments.append(re.search('\\\\{(.*)\\\\}',\n record, re.DOTALL).group(1))\n logger.debug('Return an empty dict')\n return {}\n if record.lower().startswith('@preamble'):\n logger.debug('The record startswith @preamble')\n logger.debug('Store preamble in list of preambles')\n self.bib_database.preambles.append(re.search('\\\\{(.*)\\\\}',\n record, re.DOTALL).group(1))\n logger.debug('Return an empty dict')\n return {}\n record = '\\n'.join([i.strip() for i in record.split('\\n')])\n if '}\\n' in record:\n logger.debug('}\\\\n detected in the record. Clean up.')\n record = record.replace('\\r\\n', '\\n').replace('\\r', '\\n').rstrip(\n '\\n')\n if record.endswith('}\\n}') or record.endswith('}}'):\n logger.debug(\n 'Missing coma in the last line of the record. Fix it.')\n record = re.sub('}(\\n|)}$', '},\\n}', record)\n if record.lower().startswith('@string'):\n logger.debug('The record startswith @string')\n key, val = [i.strip().strip('{').strip('}').replace('\\n', ' ') for\n i in record.split('{', 1)[1].strip('\\n').strip(',').strip(\n '}').split('=')]\n key = key.lower()\n val = self._string_subst_partial(val)\n if val.startswith('\"') or val.lower(\n ) not in self.bib_database.strings:\n self.bib_database.strings[key] = val.strip('\"')\n else:\n self.bib_database.strings[key] = self.bib_database.strings[val\n .lower()]\n logger.debug('Return a dict')\n return d\n logger.debug('Split the record of its lines and treat them')\n kvs = [i.strip() for i in record.split(',\\n')]\n inkey = ''\n inval = ''\n for kv in kvs:\n logger.debug('Inspect: %s', kv)\n if kv.startswith('@') and not inkey:\n logger.debug(\n 'Line starts with @ and the key is not stored yet.')\n bibtype, id = kv.split('{', 1)\n bibtype = self._add_key(bibtype)\n id = id.strip('}').strip(',')\n logger.debug('bibtype = %s', bibtype)\n logger.debug('id = %s', id)\n if self.ignore_nonstandard_types and bibtype not in ('article',\n 'book', 'booklet', 'conference', 'inbook',\n 'incollection', 'inproceedings', 'manual',\n 'mastersthesis', 'misc', 'phdthesis', 'proceedings',\n 'techreport', 'unpublished'):\n logger.warning(\n 'Entry type %s not standard. Not considered.', bibtype)\n break\n elif '=' in kv and not inkey:\n logger.debug(\n 'Line contains a key-pair value and the key is not stored yet.'\n )\n key, val = [i.strip() for i in kv.split('=', 1)]\n key = self._add_key(key)\n val = self._string_subst_partial(val)\n if val.count('{') != val.count('}') or val.startswith('\"'\n ) and not val.replace('}', '').endswith('\"'):\n logger.debug('The line is not ending the record.')\n inkey = key\n inval = val\n else:\n logger.debug('The line is the end of the record.')\n d[key] = self._add_val(val)\n elif inkey:\n logger.debug(\n 'Continues the previous line to complete the key pair value...'\n )\n inval += ', ' + kv\n if inval.startswith('{') and inval.endswith('}'\n ) or inval.startswith('\"') and inval.endswith('\"'):\n logger.debug(\n 'This line represents the end of the current key-pair value'\n )\n d[inkey] = self._add_val(inval)\n inkey = ''\n inval = ''\n else:\n logger.debug(\n 'This line does NOT represent the end of the current key-pair value'\n )\n logger.debug('All lines have been treated')\n if not d:\n logger.debug('The dict is empty, return it.')\n return d\n d['ENTRYTYPE'] = bibtype\n d['ID'] = id\n if customization is None:\n logger.debug('No customization to apply, return dict')\n return d\n else:\n logger.debug('Apply customizations and return dict')\n return customization(d)\n\n def _strip_quotes(self, val):\n \"\"\"Strip double quotes enclosing string\n\n :param val: a value\n :type val: string\n :returns: string -- value\n \"\"\"\n logger.debug('Strip quotes')\n val = val.strip()\n if val.startswith('\"') and val.endswith('\"'):\n return val[1:-1]\n return val\n\n def _strip_braces(self, val):\n \"\"\"Strip braces enclosing string\n\n :param val: a value\n :type val: string\n :returns: string -- value\n \"\"\"\n logger.debug('Strip braces')\n val = val.strip()\n if val.startswith('{') and val.endswith('}') and self._full_span(val):\n return val[1:-1]\n return val\n\n def _full_span(self, val):\n cnt = 0\n for i in range(0, len(val)):\n if val[i] == '{':\n cnt += 1\n elif val[i] == '}':\n cnt -= 1\n if cnt == 0:\n break\n if i == len(val) - 1:\n return True\n else:\n return False\n\n def _string_subst(self, val):\n \"\"\" Substitute string definitions\n\n :param val: a value\n :type val: string\n :returns: string -- value\n \"\"\"\n logger.debug('Substitute string definitions')\n if not val:\n return ''\n for k in list(self.bib_database.strings.keys()):\n if val.lower() == k:\n val = self.bib_database.strings[k]\n if not isinstance(val, ustr):\n val = ustr(val, self.encoding, 'ignore')\n return val\n\n def _string_subst_partial(self, val):\n \"\"\" Substitute string definitions inside larger expressions\n\n :param val: a value\n :type val: string\n :returns: string -- value\n \"\"\"\n\n def repl(m):\n k = m.group('id')\n replacement = self.bib_database.strings[k.lower()] if k.lower(\n ) in self.bib_database.strings else k\n pre = '\"' if m.group('pre') != '\"' else ''\n post = '\"' if m.group('post') != '\"' else ''\n return pre + replacement + post\n logger.debug('Substitute string definitions inside larger expressions')\n if '#' not in val:\n return val\n return self.replace_all_re.sub(repl, val)\n\n def _add_val(self, val):\n \"\"\" Clean instring before adding to dictionary\n\n :param val: a value\n :type val: string\n :returns: string -- value\n \"\"\"\n if not val or val == '{}':\n return ''\n val = self._strip_braces(val)\n val = self._strip_quotes(val)\n val = self._strip_braces(val)\n val = self._string_subst(val)\n return val\n\n def _add_key(self, key):\n \"\"\" Add a key and homogeneize alternative forms.\n\n :param key: a key\n :type key: string\n :returns: string -- value\n \"\"\"\n key = key.strip().strip('@').lower()\n if self.homogenise_fields:\n if key in list(self.alt_dict.keys()):\n key = self.alt_dict[key]\n if not isinstance(key, ustr):\n return ustr(key, 'utf-8')\n else:\n return key\n",
"<import token>\nlogger = logging.getLogger(__name__)\n__all__ = ['BibTexParser']\nif sys.version_info >= (3, 0):\n from io import StringIO\n ustr = str\nelse:\n from StringIO import StringIO\n ustr = unicode\n\n\nclass BibTexParser(object):\n \"\"\"\n A parser for reading BibTeX bibliographic data files.\n\n Example::\n\n from bibtexparser.bparser import BibTexParser\n\n bibtex_str = ...\n\n parser = BibTexParser()\n parser.ignore_nonstandard_types = False\n parser.homogenise_fields = False\n bib_database = bibtexparser.loads(bibtex_str, parser)\n \"\"\"\n\n def __new__(cls, data=None, customization=None,\n ignore_nonstandard_types=True, homogenise_fields=True):\n \"\"\"\n To catch the old API structure in which creating the parser would immediately parse and return data.\n \"\"\"\n if data is None:\n return super(BibTexParser, cls).__new__(cls)\n else:\n parser = BibTexParser()\n parser.customization = customization\n parser.ignore_nonstandard_types = ignore_nonstandard_types\n parser.homogenise_fields = homogenise_fields\n return parser.parse(data)\n\n def __init__(self):\n \"\"\"\n Creates a parser for rading BibTeX files\n\n :return: parser\n :rtype: `BibTexParser`\n \"\"\"\n self.bib_database = BibDatabase()\n self.customization = None\n self.ignore_nonstandard_types = True\n self.homogenise_fields = True\n self.encoding = 'utf8'\n self.alt_dict = {'keyw': 'keyword', 'keywords': 'keyword',\n 'authors': 'author', 'editors': 'editor', 'url': 'link', 'urls':\n 'link', 'links': 'link', 'subjects': 'subject'}\n self.replace_all_re = re.compile(\n '((?P<pre>\"?)\\\\s*(#|^)\\\\s*(?P<id>[^\\\\d\\\\W]\\\\w*)\\\\s*(#|$)\\\\s*(?P<post>\"?))'\n , re.UNICODE)\n\n def _bibtex_file_obj(self, bibtex_str):\n byte = ''\n if not isinstance(byte, ustr):\n byte = ustr('', self.encoding, 'ignore')\n if bibtex_str[:3] == byte:\n bibtex_str = bibtex_str[3:]\n return StringIO(bibtex_str)\n\n def parse(self, bibtex_str):\n \"\"\"Parse a BibTeX string into an object\n\n :param bibtex_str: BibTeX string\n :type: str or unicode\n :return: bibliographic database\n :rtype: BibDatabase\n \"\"\"\n self.bibtex_file_obj = self._bibtex_file_obj(bibtex_str)\n self._parse_records(customization=self.customization)\n return self.bib_database\n\n def parse_file(self, file):\n \"\"\"Parse a BibTeX file into an object\n\n :param file: BibTeX file or file-like object\n :type: file\n :return: bibliographic database\n :rtype: BibDatabase\n \"\"\"\n return self.parse(file.read())\n\n def _parse_records(self, customization=None):\n \"\"\"Parse the bibtex into a list of records.\n\n :param customization: a function\n \"\"\"\n\n def _add_parsed_record(record, records):\n \"\"\"\n Atomic function to parse a record\n and append the result in records\n \"\"\"\n if record != '':\n logger.debug(\"The record is not empty. Let's parse it.\")\n parsed = self._parse_record(record, customization=customization\n )\n if parsed:\n logger.debug('Store the result of the parsed record')\n records.append(parsed)\n else:\n logger.debug('Nothing returned from the parsed record!')\n else:\n logger.debug('The record is empty')\n records = []\n record = ''\n for linenumber, line in enumerate(self.bibtex_file_obj):\n logger.debug('Inspect line %s', linenumber)\n if line.strip().startswith('@'):\n line = line.lstrip()\n logger.debug('Line starts with @')\n _add_parsed_record(record, records)\n logger.debug('The record is set to empty')\n record = ''\n record += line\n _add_parsed_record(record, records)\n logger.debug('Set the list of entries')\n self.bib_database.entries = records\n\n def _parse_record(self, record, customization=None):\n \"\"\"Parse a record.\n\n * tidy whitespace and other rubbish\n * parse out the bibtype and citekey\n * find all the key-value pairs it contains\n\n :param record: a record\n :param customization: a function\n\n :returns: dict --\n \"\"\"\n d = {}\n if not record.startswith('@'):\n logger.debug('The record does not start with @. Return empty dict.'\n )\n return {}\n if record.lower().startswith('@comment'):\n logger.debug('The record startswith @comment')\n logger.debug('Store comment in list of comments')\n self.bib_database.comments.append(re.search('\\\\{(.*)\\\\}',\n record, re.DOTALL).group(1))\n logger.debug('Return an empty dict')\n return {}\n if record.lower().startswith('@preamble'):\n logger.debug('The record startswith @preamble')\n logger.debug('Store preamble in list of preambles')\n self.bib_database.preambles.append(re.search('\\\\{(.*)\\\\}',\n record, re.DOTALL).group(1))\n logger.debug('Return an empty dict')\n return {}\n record = '\\n'.join([i.strip() for i in record.split('\\n')])\n if '}\\n' in record:\n logger.debug('}\\\\n detected in the record. Clean up.')\n record = record.replace('\\r\\n', '\\n').replace('\\r', '\\n').rstrip(\n '\\n')\n if record.endswith('}\\n}') or record.endswith('}}'):\n logger.debug(\n 'Missing coma in the last line of the record. Fix it.')\n record = re.sub('}(\\n|)}$', '},\\n}', record)\n if record.lower().startswith('@string'):\n logger.debug('The record startswith @string')\n key, val = [i.strip().strip('{').strip('}').replace('\\n', ' ') for\n i in record.split('{', 1)[1].strip('\\n').strip(',').strip(\n '}').split('=')]\n key = key.lower()\n val = self._string_subst_partial(val)\n if val.startswith('\"') or val.lower(\n ) not in self.bib_database.strings:\n self.bib_database.strings[key] = val.strip('\"')\n else:\n self.bib_database.strings[key] = self.bib_database.strings[val\n .lower()]\n logger.debug('Return a dict')\n return d\n logger.debug('Split the record of its lines and treat them')\n kvs = [i.strip() for i in record.split(',\\n')]\n inkey = ''\n inval = ''\n for kv in kvs:\n logger.debug('Inspect: %s', kv)\n if kv.startswith('@') and not inkey:\n logger.debug(\n 'Line starts with @ and the key is not stored yet.')\n bibtype, id = kv.split('{', 1)\n bibtype = self._add_key(bibtype)\n id = id.strip('}').strip(',')\n logger.debug('bibtype = %s', bibtype)\n logger.debug('id = %s', id)\n if self.ignore_nonstandard_types and bibtype not in ('article',\n 'book', 'booklet', 'conference', 'inbook',\n 'incollection', 'inproceedings', 'manual',\n 'mastersthesis', 'misc', 'phdthesis', 'proceedings',\n 'techreport', 'unpublished'):\n logger.warning(\n 'Entry type %s not standard. Not considered.', bibtype)\n break\n elif '=' in kv and not inkey:\n logger.debug(\n 'Line contains a key-pair value and the key is not stored yet.'\n )\n key, val = [i.strip() for i in kv.split('=', 1)]\n key = self._add_key(key)\n val = self._string_subst_partial(val)\n if val.count('{') != val.count('}') or val.startswith('\"'\n ) and not val.replace('}', '').endswith('\"'):\n logger.debug('The line is not ending the record.')\n inkey = key\n inval = val\n else:\n logger.debug('The line is the end of the record.')\n d[key] = self._add_val(val)\n elif inkey:\n logger.debug(\n 'Continues the previous line to complete the key pair value...'\n )\n inval += ', ' + kv\n if inval.startswith('{') and inval.endswith('}'\n ) or inval.startswith('\"') and inval.endswith('\"'):\n logger.debug(\n 'This line represents the end of the current key-pair value'\n )\n d[inkey] = self._add_val(inval)\n inkey = ''\n inval = ''\n else:\n logger.debug(\n 'This line does NOT represent the end of the current key-pair value'\n )\n logger.debug('All lines have been treated')\n if not d:\n logger.debug('The dict is empty, return it.')\n return d\n d['ENTRYTYPE'] = bibtype\n d['ID'] = id\n if customization is None:\n logger.debug('No customization to apply, return dict')\n return d\n else:\n logger.debug('Apply customizations and return dict')\n return customization(d)\n\n def _strip_quotes(self, val):\n \"\"\"Strip double quotes enclosing string\n\n :param val: a value\n :type val: string\n :returns: string -- value\n \"\"\"\n logger.debug('Strip quotes')\n val = val.strip()\n if val.startswith('\"') and val.endswith('\"'):\n return val[1:-1]\n return val\n\n def _strip_braces(self, val):\n \"\"\"Strip braces enclosing string\n\n :param val: a value\n :type val: string\n :returns: string -- value\n \"\"\"\n logger.debug('Strip braces')\n val = val.strip()\n if val.startswith('{') and val.endswith('}') and self._full_span(val):\n return val[1:-1]\n return val\n\n def _full_span(self, val):\n cnt = 0\n for i in range(0, len(val)):\n if val[i] == '{':\n cnt += 1\n elif val[i] == '}':\n cnt -= 1\n if cnt == 0:\n break\n if i == len(val) - 1:\n return True\n else:\n return False\n\n def _string_subst(self, val):\n \"\"\" Substitute string definitions\n\n :param val: a value\n :type val: string\n :returns: string -- value\n \"\"\"\n logger.debug('Substitute string definitions')\n if not val:\n return ''\n for k in list(self.bib_database.strings.keys()):\n if val.lower() == k:\n val = self.bib_database.strings[k]\n if not isinstance(val, ustr):\n val = ustr(val, self.encoding, 'ignore')\n return val\n\n def _string_subst_partial(self, val):\n \"\"\" Substitute string definitions inside larger expressions\n\n :param val: a value\n :type val: string\n :returns: string -- value\n \"\"\"\n\n def repl(m):\n k = m.group('id')\n replacement = self.bib_database.strings[k.lower()] if k.lower(\n ) in self.bib_database.strings else k\n pre = '\"' if m.group('pre') != '\"' else ''\n post = '\"' if m.group('post') != '\"' else ''\n return pre + replacement + post\n logger.debug('Substitute string definitions inside larger expressions')\n if '#' not in val:\n return val\n return self.replace_all_re.sub(repl, val)\n\n def _add_val(self, val):\n \"\"\" Clean instring before adding to dictionary\n\n :param val: a value\n :type val: string\n :returns: string -- value\n \"\"\"\n if not val or val == '{}':\n return ''\n val = self._strip_braces(val)\n val = self._strip_quotes(val)\n val = self._strip_braces(val)\n val = self._string_subst(val)\n return val\n\n def _add_key(self, key):\n \"\"\" Add a key and homogeneize alternative forms.\n\n :param key: a key\n :type key: string\n :returns: string -- value\n \"\"\"\n key = key.strip().strip('@').lower()\n if self.homogenise_fields:\n if key in list(self.alt_dict.keys()):\n key = self.alt_dict[key]\n if not isinstance(key, ustr):\n return ustr(key, 'utf-8')\n else:\n return key\n",
"<import token>\n<assignment token>\nif sys.version_info >= (3, 0):\n from io import StringIO\n ustr = str\nelse:\n from StringIO import StringIO\n ustr = unicode\n\n\nclass BibTexParser(object):\n \"\"\"\n A parser for reading BibTeX bibliographic data files.\n\n Example::\n\n from bibtexparser.bparser import BibTexParser\n\n bibtex_str = ...\n\n parser = BibTexParser()\n parser.ignore_nonstandard_types = False\n parser.homogenise_fields = False\n bib_database = bibtexparser.loads(bibtex_str, parser)\n \"\"\"\n\n def __new__(cls, data=None, customization=None,\n ignore_nonstandard_types=True, homogenise_fields=True):\n \"\"\"\n To catch the old API structure in which creating the parser would immediately parse and return data.\n \"\"\"\n if data is None:\n return super(BibTexParser, cls).__new__(cls)\n else:\n parser = BibTexParser()\n parser.customization = customization\n parser.ignore_nonstandard_types = ignore_nonstandard_types\n parser.homogenise_fields = homogenise_fields\n return parser.parse(data)\n\n def __init__(self):\n \"\"\"\n Creates a parser for rading BibTeX files\n\n :return: parser\n :rtype: `BibTexParser`\n \"\"\"\n self.bib_database = BibDatabase()\n self.customization = None\n self.ignore_nonstandard_types = True\n self.homogenise_fields = True\n self.encoding = 'utf8'\n self.alt_dict = {'keyw': 'keyword', 'keywords': 'keyword',\n 'authors': 'author', 'editors': 'editor', 'url': 'link', 'urls':\n 'link', 'links': 'link', 'subjects': 'subject'}\n self.replace_all_re = re.compile(\n '((?P<pre>\"?)\\\\s*(#|^)\\\\s*(?P<id>[^\\\\d\\\\W]\\\\w*)\\\\s*(#|$)\\\\s*(?P<post>\"?))'\n , re.UNICODE)\n\n def _bibtex_file_obj(self, bibtex_str):\n byte = ''\n if not isinstance(byte, ustr):\n byte = ustr('', self.encoding, 'ignore')\n if bibtex_str[:3] == byte:\n bibtex_str = bibtex_str[3:]\n return StringIO(bibtex_str)\n\n def parse(self, bibtex_str):\n \"\"\"Parse a BibTeX string into an object\n\n :param bibtex_str: BibTeX string\n :type: str or unicode\n :return: bibliographic database\n :rtype: BibDatabase\n \"\"\"\n self.bibtex_file_obj = self._bibtex_file_obj(bibtex_str)\n self._parse_records(customization=self.customization)\n return self.bib_database\n\n def parse_file(self, file):\n \"\"\"Parse a BibTeX file into an object\n\n :param file: BibTeX file or file-like object\n :type: file\n :return: bibliographic database\n :rtype: BibDatabase\n \"\"\"\n return self.parse(file.read())\n\n def _parse_records(self, customization=None):\n \"\"\"Parse the bibtex into a list of records.\n\n :param customization: a function\n \"\"\"\n\n def _add_parsed_record(record, records):\n \"\"\"\n Atomic function to parse a record\n and append the result in records\n \"\"\"\n if record != '':\n logger.debug(\"The record is not empty. Let's parse it.\")\n parsed = self._parse_record(record, customization=customization\n )\n if parsed:\n logger.debug('Store the result of the parsed record')\n records.append(parsed)\n else:\n logger.debug('Nothing returned from the parsed record!')\n else:\n logger.debug('The record is empty')\n records = []\n record = ''\n for linenumber, line in enumerate(self.bibtex_file_obj):\n logger.debug('Inspect line %s', linenumber)\n if line.strip().startswith('@'):\n line = line.lstrip()\n logger.debug('Line starts with @')\n _add_parsed_record(record, records)\n logger.debug('The record is set to empty')\n record = ''\n record += line\n _add_parsed_record(record, records)\n logger.debug('Set the list of entries')\n self.bib_database.entries = records\n\n def _parse_record(self, record, customization=None):\n \"\"\"Parse a record.\n\n * tidy whitespace and other rubbish\n * parse out the bibtype and citekey\n * find all the key-value pairs it contains\n\n :param record: a record\n :param customization: a function\n\n :returns: dict --\n \"\"\"\n d = {}\n if not record.startswith('@'):\n logger.debug('The record does not start with @. Return empty dict.'\n )\n return {}\n if record.lower().startswith('@comment'):\n logger.debug('The record startswith @comment')\n logger.debug('Store comment in list of comments')\n self.bib_database.comments.append(re.search('\\\\{(.*)\\\\}',\n record, re.DOTALL).group(1))\n logger.debug('Return an empty dict')\n return {}\n if record.lower().startswith('@preamble'):\n logger.debug('The record startswith @preamble')\n logger.debug('Store preamble in list of preambles')\n self.bib_database.preambles.append(re.search('\\\\{(.*)\\\\}',\n record, re.DOTALL).group(1))\n logger.debug('Return an empty dict')\n return {}\n record = '\\n'.join([i.strip() for i in record.split('\\n')])\n if '}\\n' in record:\n logger.debug('}\\\\n detected in the record. Clean up.')\n record = record.replace('\\r\\n', '\\n').replace('\\r', '\\n').rstrip(\n '\\n')\n if record.endswith('}\\n}') or record.endswith('}}'):\n logger.debug(\n 'Missing coma in the last line of the record. Fix it.')\n record = re.sub('}(\\n|)}$', '},\\n}', record)\n if record.lower().startswith('@string'):\n logger.debug('The record startswith @string')\n key, val = [i.strip().strip('{').strip('}').replace('\\n', ' ') for\n i in record.split('{', 1)[1].strip('\\n').strip(',').strip(\n '}').split('=')]\n key = key.lower()\n val = self._string_subst_partial(val)\n if val.startswith('\"') or val.lower(\n ) not in self.bib_database.strings:\n self.bib_database.strings[key] = val.strip('\"')\n else:\n self.bib_database.strings[key] = self.bib_database.strings[val\n .lower()]\n logger.debug('Return a dict')\n return d\n logger.debug('Split the record of its lines and treat them')\n kvs = [i.strip() for i in record.split(',\\n')]\n inkey = ''\n inval = ''\n for kv in kvs:\n logger.debug('Inspect: %s', kv)\n if kv.startswith('@') and not inkey:\n logger.debug(\n 'Line starts with @ and the key is not stored yet.')\n bibtype, id = kv.split('{', 1)\n bibtype = self._add_key(bibtype)\n id = id.strip('}').strip(',')\n logger.debug('bibtype = %s', bibtype)\n logger.debug('id = %s', id)\n if self.ignore_nonstandard_types and bibtype not in ('article',\n 'book', 'booklet', 'conference', 'inbook',\n 'incollection', 'inproceedings', 'manual',\n 'mastersthesis', 'misc', 'phdthesis', 'proceedings',\n 'techreport', 'unpublished'):\n logger.warning(\n 'Entry type %s not standard. Not considered.', bibtype)\n break\n elif '=' in kv and not inkey:\n logger.debug(\n 'Line contains a key-pair value and the key is not stored yet.'\n )\n key, val = [i.strip() for i in kv.split('=', 1)]\n key = self._add_key(key)\n val = self._string_subst_partial(val)\n if val.count('{') != val.count('}') or val.startswith('\"'\n ) and not val.replace('}', '').endswith('\"'):\n logger.debug('The line is not ending the record.')\n inkey = key\n inval = val\n else:\n logger.debug('The line is the end of the record.')\n d[key] = self._add_val(val)\n elif inkey:\n logger.debug(\n 'Continues the previous line to complete the key pair value...'\n )\n inval += ', ' + kv\n if inval.startswith('{') and inval.endswith('}'\n ) or inval.startswith('\"') and inval.endswith('\"'):\n logger.debug(\n 'This line represents the end of the current key-pair value'\n )\n d[inkey] = self._add_val(inval)\n inkey = ''\n inval = ''\n else:\n logger.debug(\n 'This line does NOT represent the end of the current key-pair value'\n )\n logger.debug('All lines have been treated')\n if not d:\n logger.debug('The dict is empty, return it.')\n return d\n d['ENTRYTYPE'] = bibtype\n d['ID'] = id\n if customization is None:\n logger.debug('No customization to apply, return dict')\n return d\n else:\n logger.debug('Apply customizations and return dict')\n return customization(d)\n\n def _strip_quotes(self, val):\n \"\"\"Strip double quotes enclosing string\n\n :param val: a value\n :type val: string\n :returns: string -- value\n \"\"\"\n logger.debug('Strip quotes')\n val = val.strip()\n if val.startswith('\"') and val.endswith('\"'):\n return val[1:-1]\n return val\n\n def _strip_braces(self, val):\n \"\"\"Strip braces enclosing string\n\n :param val: a value\n :type val: string\n :returns: string -- value\n \"\"\"\n logger.debug('Strip braces')\n val = val.strip()\n if val.startswith('{') and val.endswith('}') and self._full_span(val):\n return val[1:-1]\n return val\n\n def _full_span(self, val):\n cnt = 0\n for i in range(0, len(val)):\n if val[i] == '{':\n cnt += 1\n elif val[i] == '}':\n cnt -= 1\n if cnt == 0:\n break\n if i == len(val) - 1:\n return True\n else:\n return False\n\n def _string_subst(self, val):\n \"\"\" Substitute string definitions\n\n :param val: a value\n :type val: string\n :returns: string -- value\n \"\"\"\n logger.debug('Substitute string definitions')\n if not val:\n return ''\n for k in list(self.bib_database.strings.keys()):\n if val.lower() == k:\n val = self.bib_database.strings[k]\n if not isinstance(val, ustr):\n val = ustr(val, self.encoding, 'ignore')\n return val\n\n def _string_subst_partial(self, val):\n \"\"\" Substitute string definitions inside larger expressions\n\n :param val: a value\n :type val: string\n :returns: string -- value\n \"\"\"\n\n def repl(m):\n k = m.group('id')\n replacement = self.bib_database.strings[k.lower()] if k.lower(\n ) in self.bib_database.strings else k\n pre = '\"' if m.group('pre') != '\"' else ''\n post = '\"' if m.group('post') != '\"' else ''\n return pre + replacement + post\n logger.debug('Substitute string definitions inside larger expressions')\n if '#' not in val:\n return val\n return self.replace_all_re.sub(repl, val)\n\n def _add_val(self, val):\n \"\"\" Clean instring before adding to dictionary\n\n :param val: a value\n :type val: string\n :returns: string -- value\n \"\"\"\n if not val or val == '{}':\n return ''\n val = self._strip_braces(val)\n val = self._strip_quotes(val)\n val = self._strip_braces(val)\n val = self._string_subst(val)\n return val\n\n def _add_key(self, key):\n \"\"\" Add a key and homogeneize alternative forms.\n\n :param key: a key\n :type key: string\n :returns: string -- value\n \"\"\"\n key = key.strip().strip('@').lower()\n if self.homogenise_fields:\n if key in list(self.alt_dict.keys()):\n key = self.alt_dict[key]\n if not isinstance(key, ustr):\n return ustr(key, 'utf-8')\n else:\n return key\n",
"<import token>\n<assignment token>\n<code token>\n\n\nclass BibTexParser(object):\n \"\"\"\n A parser for reading BibTeX bibliographic data files.\n\n Example::\n\n from bibtexparser.bparser import BibTexParser\n\n bibtex_str = ...\n\n parser = BibTexParser()\n parser.ignore_nonstandard_types = False\n parser.homogenise_fields = False\n bib_database = bibtexparser.loads(bibtex_str, parser)\n \"\"\"\n\n def __new__(cls, data=None, customization=None,\n ignore_nonstandard_types=True, homogenise_fields=True):\n \"\"\"\n To catch the old API structure in which creating the parser would immediately parse and return data.\n \"\"\"\n if data is None:\n return super(BibTexParser, cls).__new__(cls)\n else:\n parser = BibTexParser()\n parser.customization = customization\n parser.ignore_nonstandard_types = ignore_nonstandard_types\n parser.homogenise_fields = homogenise_fields\n return parser.parse(data)\n\n def __init__(self):\n \"\"\"\n Creates a parser for rading BibTeX files\n\n :return: parser\n :rtype: `BibTexParser`\n \"\"\"\n self.bib_database = BibDatabase()\n self.customization = None\n self.ignore_nonstandard_types = True\n self.homogenise_fields = True\n self.encoding = 'utf8'\n self.alt_dict = {'keyw': 'keyword', 'keywords': 'keyword',\n 'authors': 'author', 'editors': 'editor', 'url': 'link', 'urls':\n 'link', 'links': 'link', 'subjects': 'subject'}\n self.replace_all_re = re.compile(\n '((?P<pre>\"?)\\\\s*(#|^)\\\\s*(?P<id>[^\\\\d\\\\W]\\\\w*)\\\\s*(#|$)\\\\s*(?P<post>\"?))'\n , re.UNICODE)\n\n def _bibtex_file_obj(self, bibtex_str):\n byte = ''\n if not isinstance(byte, ustr):\n byte = ustr('', self.encoding, 'ignore')\n if bibtex_str[:3] == byte:\n bibtex_str = bibtex_str[3:]\n return StringIO(bibtex_str)\n\n def parse(self, bibtex_str):\n \"\"\"Parse a BibTeX string into an object\n\n :param bibtex_str: BibTeX string\n :type: str or unicode\n :return: bibliographic database\n :rtype: BibDatabase\n \"\"\"\n self.bibtex_file_obj = self._bibtex_file_obj(bibtex_str)\n self._parse_records(customization=self.customization)\n return self.bib_database\n\n def parse_file(self, file):\n \"\"\"Parse a BibTeX file into an object\n\n :param file: BibTeX file or file-like object\n :type: file\n :return: bibliographic database\n :rtype: BibDatabase\n \"\"\"\n return self.parse(file.read())\n\n def _parse_records(self, customization=None):\n \"\"\"Parse the bibtex into a list of records.\n\n :param customization: a function\n \"\"\"\n\n def _add_parsed_record(record, records):\n \"\"\"\n Atomic function to parse a record\n and append the result in records\n \"\"\"\n if record != '':\n logger.debug(\"The record is not empty. Let's parse it.\")\n parsed = self._parse_record(record, customization=customization\n )\n if parsed:\n logger.debug('Store the result of the parsed record')\n records.append(parsed)\n else:\n logger.debug('Nothing returned from the parsed record!')\n else:\n logger.debug('The record is empty')\n records = []\n record = ''\n for linenumber, line in enumerate(self.bibtex_file_obj):\n logger.debug('Inspect line %s', linenumber)\n if line.strip().startswith('@'):\n line = line.lstrip()\n logger.debug('Line starts with @')\n _add_parsed_record(record, records)\n logger.debug('The record is set to empty')\n record = ''\n record += line\n _add_parsed_record(record, records)\n logger.debug('Set the list of entries')\n self.bib_database.entries = records\n\n def _parse_record(self, record, customization=None):\n \"\"\"Parse a record.\n\n * tidy whitespace and other rubbish\n * parse out the bibtype and citekey\n * find all the key-value pairs it contains\n\n :param record: a record\n :param customization: a function\n\n :returns: dict --\n \"\"\"\n d = {}\n if not record.startswith('@'):\n logger.debug('The record does not start with @. Return empty dict.'\n )\n return {}\n if record.lower().startswith('@comment'):\n logger.debug('The record startswith @comment')\n logger.debug('Store comment in list of comments')\n self.bib_database.comments.append(re.search('\\\\{(.*)\\\\}',\n record, re.DOTALL).group(1))\n logger.debug('Return an empty dict')\n return {}\n if record.lower().startswith('@preamble'):\n logger.debug('The record startswith @preamble')\n logger.debug('Store preamble in list of preambles')\n self.bib_database.preambles.append(re.search('\\\\{(.*)\\\\}',\n record, re.DOTALL).group(1))\n logger.debug('Return an empty dict')\n return {}\n record = '\\n'.join([i.strip() for i in record.split('\\n')])\n if '}\\n' in record:\n logger.debug('}\\\\n detected in the record. Clean up.')\n record = record.replace('\\r\\n', '\\n').replace('\\r', '\\n').rstrip(\n '\\n')\n if record.endswith('}\\n}') or record.endswith('}}'):\n logger.debug(\n 'Missing coma in the last line of the record. Fix it.')\n record = re.sub('}(\\n|)}$', '},\\n}', record)\n if record.lower().startswith('@string'):\n logger.debug('The record startswith @string')\n key, val = [i.strip().strip('{').strip('}').replace('\\n', ' ') for\n i in record.split('{', 1)[1].strip('\\n').strip(',').strip(\n '}').split('=')]\n key = key.lower()\n val = self._string_subst_partial(val)\n if val.startswith('\"') or val.lower(\n ) not in self.bib_database.strings:\n self.bib_database.strings[key] = val.strip('\"')\n else:\n self.bib_database.strings[key] = self.bib_database.strings[val\n .lower()]\n logger.debug('Return a dict')\n return d\n logger.debug('Split the record of its lines and treat them')\n kvs = [i.strip() for i in record.split(',\\n')]\n inkey = ''\n inval = ''\n for kv in kvs:\n logger.debug('Inspect: %s', kv)\n if kv.startswith('@') and not inkey:\n logger.debug(\n 'Line starts with @ and the key is not stored yet.')\n bibtype, id = kv.split('{', 1)\n bibtype = self._add_key(bibtype)\n id = id.strip('}').strip(',')\n logger.debug('bibtype = %s', bibtype)\n logger.debug('id = %s', id)\n if self.ignore_nonstandard_types and bibtype not in ('article',\n 'book', 'booklet', 'conference', 'inbook',\n 'incollection', 'inproceedings', 'manual',\n 'mastersthesis', 'misc', 'phdthesis', 'proceedings',\n 'techreport', 'unpublished'):\n logger.warning(\n 'Entry type %s not standard. Not considered.', bibtype)\n break\n elif '=' in kv and not inkey:\n logger.debug(\n 'Line contains a key-pair value and the key is not stored yet.'\n )\n key, val = [i.strip() for i in kv.split('=', 1)]\n key = self._add_key(key)\n val = self._string_subst_partial(val)\n if val.count('{') != val.count('}') or val.startswith('\"'\n ) and not val.replace('}', '').endswith('\"'):\n logger.debug('The line is not ending the record.')\n inkey = key\n inval = val\n else:\n logger.debug('The line is the end of the record.')\n d[key] = self._add_val(val)\n elif inkey:\n logger.debug(\n 'Continues the previous line to complete the key pair value...'\n )\n inval += ', ' + kv\n if inval.startswith('{') and inval.endswith('}'\n ) or inval.startswith('\"') and inval.endswith('\"'):\n logger.debug(\n 'This line represents the end of the current key-pair value'\n )\n d[inkey] = self._add_val(inval)\n inkey = ''\n inval = ''\n else:\n logger.debug(\n 'This line does NOT represent the end of the current key-pair value'\n )\n logger.debug('All lines have been treated')\n if not d:\n logger.debug('The dict is empty, return it.')\n return d\n d['ENTRYTYPE'] = bibtype\n d['ID'] = id\n if customization is None:\n logger.debug('No customization to apply, return dict')\n return d\n else:\n logger.debug('Apply customizations and return dict')\n return customization(d)\n\n def _strip_quotes(self, val):\n \"\"\"Strip double quotes enclosing string\n\n :param val: a value\n :type val: string\n :returns: string -- value\n \"\"\"\n logger.debug('Strip quotes')\n val = val.strip()\n if val.startswith('\"') and val.endswith('\"'):\n return val[1:-1]\n return val\n\n def _strip_braces(self, val):\n \"\"\"Strip braces enclosing string\n\n :param val: a value\n :type val: string\n :returns: string -- value\n \"\"\"\n logger.debug('Strip braces')\n val = val.strip()\n if val.startswith('{') and val.endswith('}') and self._full_span(val):\n return val[1:-1]\n return val\n\n def _full_span(self, val):\n cnt = 0\n for i in range(0, len(val)):\n if val[i] == '{':\n cnt += 1\n elif val[i] == '}':\n cnt -= 1\n if cnt == 0:\n break\n if i == len(val) - 1:\n return True\n else:\n return False\n\n def _string_subst(self, val):\n \"\"\" Substitute string definitions\n\n :param val: a value\n :type val: string\n :returns: string -- value\n \"\"\"\n logger.debug('Substitute string definitions')\n if not val:\n return ''\n for k in list(self.bib_database.strings.keys()):\n if val.lower() == k:\n val = self.bib_database.strings[k]\n if not isinstance(val, ustr):\n val = ustr(val, self.encoding, 'ignore')\n return val\n\n def _string_subst_partial(self, val):\n \"\"\" Substitute string definitions inside larger expressions\n\n :param val: a value\n :type val: string\n :returns: string -- value\n \"\"\"\n\n def repl(m):\n k = m.group('id')\n replacement = self.bib_database.strings[k.lower()] if k.lower(\n ) in self.bib_database.strings else k\n pre = '\"' if m.group('pre') != '\"' else ''\n post = '\"' if m.group('post') != '\"' else ''\n return pre + replacement + post\n logger.debug('Substitute string definitions inside larger expressions')\n if '#' not in val:\n return val\n return self.replace_all_re.sub(repl, val)\n\n def _add_val(self, val):\n \"\"\" Clean instring before adding to dictionary\n\n :param val: a value\n :type val: string\n :returns: string -- value\n \"\"\"\n if not val or val == '{}':\n return ''\n val = self._strip_braces(val)\n val = self._strip_quotes(val)\n val = self._strip_braces(val)\n val = self._string_subst(val)\n return val\n\n def _add_key(self, key):\n \"\"\" Add a key and homogeneize alternative forms.\n\n :param key: a key\n :type key: string\n :returns: string -- value\n \"\"\"\n key = key.strip().strip('@').lower()\n if self.homogenise_fields:\n if key in list(self.alt_dict.keys()):\n key = self.alt_dict[key]\n if not isinstance(key, ustr):\n return ustr(key, 'utf-8')\n else:\n return key\n",
"<import token>\n<assignment token>\n<code token>\n\n\nclass BibTexParser(object):\n <docstring token>\n\n def __new__(cls, data=None, customization=None,\n ignore_nonstandard_types=True, homogenise_fields=True):\n \"\"\"\n To catch the old API structure in which creating the parser would immediately parse and return data.\n \"\"\"\n if data is None:\n return super(BibTexParser, cls).__new__(cls)\n else:\n parser = BibTexParser()\n parser.customization = customization\n parser.ignore_nonstandard_types = ignore_nonstandard_types\n parser.homogenise_fields = homogenise_fields\n return parser.parse(data)\n\n def __init__(self):\n \"\"\"\n Creates a parser for rading BibTeX files\n\n :return: parser\n :rtype: `BibTexParser`\n \"\"\"\n self.bib_database = BibDatabase()\n self.customization = None\n self.ignore_nonstandard_types = True\n self.homogenise_fields = True\n self.encoding = 'utf8'\n self.alt_dict = {'keyw': 'keyword', 'keywords': 'keyword',\n 'authors': 'author', 'editors': 'editor', 'url': 'link', 'urls':\n 'link', 'links': 'link', 'subjects': 'subject'}\n self.replace_all_re = re.compile(\n '((?P<pre>\"?)\\\\s*(#|^)\\\\s*(?P<id>[^\\\\d\\\\W]\\\\w*)\\\\s*(#|$)\\\\s*(?P<post>\"?))'\n , re.UNICODE)\n\n def _bibtex_file_obj(self, bibtex_str):\n byte = ''\n if not isinstance(byte, ustr):\n byte = ustr('', self.encoding, 'ignore')\n if bibtex_str[:3] == byte:\n bibtex_str = bibtex_str[3:]\n return StringIO(bibtex_str)\n\n def parse(self, bibtex_str):\n \"\"\"Parse a BibTeX string into an object\n\n :param bibtex_str: BibTeX string\n :type: str or unicode\n :return: bibliographic database\n :rtype: BibDatabase\n \"\"\"\n self.bibtex_file_obj = self._bibtex_file_obj(bibtex_str)\n self._parse_records(customization=self.customization)\n return self.bib_database\n\n def parse_file(self, file):\n \"\"\"Parse a BibTeX file into an object\n\n :param file: BibTeX file or file-like object\n :type: file\n :return: bibliographic database\n :rtype: BibDatabase\n \"\"\"\n return self.parse(file.read())\n\n def _parse_records(self, customization=None):\n \"\"\"Parse the bibtex into a list of records.\n\n :param customization: a function\n \"\"\"\n\n def _add_parsed_record(record, records):\n \"\"\"\n Atomic function to parse a record\n and append the result in records\n \"\"\"\n if record != '':\n logger.debug(\"The record is not empty. Let's parse it.\")\n parsed = self._parse_record(record, customization=customization\n )\n if parsed:\n logger.debug('Store the result of the parsed record')\n records.append(parsed)\n else:\n logger.debug('Nothing returned from the parsed record!')\n else:\n logger.debug('The record is empty')\n records = []\n record = ''\n for linenumber, line in enumerate(self.bibtex_file_obj):\n logger.debug('Inspect line %s', linenumber)\n if line.strip().startswith('@'):\n line = line.lstrip()\n logger.debug('Line starts with @')\n _add_parsed_record(record, records)\n logger.debug('The record is set to empty')\n record = ''\n record += line\n _add_parsed_record(record, records)\n logger.debug('Set the list of entries')\n self.bib_database.entries = records\n\n def _parse_record(self, record, customization=None):\n \"\"\"Parse a record.\n\n * tidy whitespace and other rubbish\n * parse out the bibtype and citekey\n * find all the key-value pairs it contains\n\n :param record: a record\n :param customization: a function\n\n :returns: dict --\n \"\"\"\n d = {}\n if not record.startswith('@'):\n logger.debug('The record does not start with @. Return empty dict.'\n )\n return {}\n if record.lower().startswith('@comment'):\n logger.debug('The record startswith @comment')\n logger.debug('Store comment in list of comments')\n self.bib_database.comments.append(re.search('\\\\{(.*)\\\\}',\n record, re.DOTALL).group(1))\n logger.debug('Return an empty dict')\n return {}\n if record.lower().startswith('@preamble'):\n logger.debug('The record startswith @preamble')\n logger.debug('Store preamble in list of preambles')\n self.bib_database.preambles.append(re.search('\\\\{(.*)\\\\}',\n record, re.DOTALL).group(1))\n logger.debug('Return an empty dict')\n return {}\n record = '\\n'.join([i.strip() for i in record.split('\\n')])\n if '}\\n' in record:\n logger.debug('}\\\\n detected in the record. Clean up.')\n record = record.replace('\\r\\n', '\\n').replace('\\r', '\\n').rstrip(\n '\\n')\n if record.endswith('}\\n}') or record.endswith('}}'):\n logger.debug(\n 'Missing coma in the last line of the record. Fix it.')\n record = re.sub('}(\\n|)}$', '},\\n}', record)\n if record.lower().startswith('@string'):\n logger.debug('The record startswith @string')\n key, val = [i.strip().strip('{').strip('}').replace('\\n', ' ') for\n i in record.split('{', 1)[1].strip('\\n').strip(',').strip(\n '}').split('=')]\n key = key.lower()\n val = self._string_subst_partial(val)\n if val.startswith('\"') or val.lower(\n ) not in self.bib_database.strings:\n self.bib_database.strings[key] = val.strip('\"')\n else:\n self.bib_database.strings[key] = self.bib_database.strings[val\n .lower()]\n logger.debug('Return a dict')\n return d\n logger.debug('Split the record of its lines and treat them')\n kvs = [i.strip() for i in record.split(',\\n')]\n inkey = ''\n inval = ''\n for kv in kvs:\n logger.debug('Inspect: %s', kv)\n if kv.startswith('@') and not inkey:\n logger.debug(\n 'Line starts with @ and the key is not stored yet.')\n bibtype, id = kv.split('{', 1)\n bibtype = self._add_key(bibtype)\n id = id.strip('}').strip(',')\n logger.debug('bibtype = %s', bibtype)\n logger.debug('id = %s', id)\n if self.ignore_nonstandard_types and bibtype not in ('article',\n 'book', 'booklet', 'conference', 'inbook',\n 'incollection', 'inproceedings', 'manual',\n 'mastersthesis', 'misc', 'phdthesis', 'proceedings',\n 'techreport', 'unpublished'):\n logger.warning(\n 'Entry type %s not standard. Not considered.', bibtype)\n break\n elif '=' in kv and not inkey:\n logger.debug(\n 'Line contains a key-pair value and the key is not stored yet.'\n )\n key, val = [i.strip() for i in kv.split('=', 1)]\n key = self._add_key(key)\n val = self._string_subst_partial(val)\n if val.count('{') != val.count('}') or val.startswith('\"'\n ) and not val.replace('}', '').endswith('\"'):\n logger.debug('The line is not ending the record.')\n inkey = key\n inval = val\n else:\n logger.debug('The line is the end of the record.')\n d[key] = self._add_val(val)\n elif inkey:\n logger.debug(\n 'Continues the previous line to complete the key pair value...'\n )\n inval += ', ' + kv\n if inval.startswith('{') and inval.endswith('}'\n ) or inval.startswith('\"') and inval.endswith('\"'):\n logger.debug(\n 'This line represents the end of the current key-pair value'\n )\n d[inkey] = self._add_val(inval)\n inkey = ''\n inval = ''\n else:\n logger.debug(\n 'This line does NOT represent the end of the current key-pair value'\n )\n logger.debug('All lines have been treated')\n if not d:\n logger.debug('The dict is empty, return it.')\n return d\n d['ENTRYTYPE'] = bibtype\n d['ID'] = id\n if customization is None:\n logger.debug('No customization to apply, return dict')\n return d\n else:\n logger.debug('Apply customizations and return dict')\n return customization(d)\n\n def _strip_quotes(self, val):\n \"\"\"Strip double quotes enclosing string\n\n :param val: a value\n :type val: string\n :returns: string -- value\n \"\"\"\n logger.debug('Strip quotes')\n val = val.strip()\n if val.startswith('\"') and val.endswith('\"'):\n return val[1:-1]\n return val\n\n def _strip_braces(self, val):\n \"\"\"Strip braces enclosing string\n\n :param val: a value\n :type val: string\n :returns: string -- value\n \"\"\"\n logger.debug('Strip braces')\n val = val.strip()\n if val.startswith('{') and val.endswith('}') and self._full_span(val):\n return val[1:-1]\n return val\n\n def _full_span(self, val):\n cnt = 0\n for i in range(0, len(val)):\n if val[i] == '{':\n cnt += 1\n elif val[i] == '}':\n cnt -= 1\n if cnt == 0:\n break\n if i == len(val) - 1:\n return True\n else:\n return False\n\n def _string_subst(self, val):\n \"\"\" Substitute string definitions\n\n :param val: a value\n :type val: string\n :returns: string -- value\n \"\"\"\n logger.debug('Substitute string definitions')\n if not val:\n return ''\n for k in list(self.bib_database.strings.keys()):\n if val.lower() == k:\n val = self.bib_database.strings[k]\n if not isinstance(val, ustr):\n val = ustr(val, self.encoding, 'ignore')\n return val\n\n def _string_subst_partial(self, val):\n \"\"\" Substitute string definitions inside larger expressions\n\n :param val: a value\n :type val: string\n :returns: string -- value\n \"\"\"\n\n def repl(m):\n k = m.group('id')\n replacement = self.bib_database.strings[k.lower()] if k.lower(\n ) in self.bib_database.strings else k\n pre = '\"' if m.group('pre') != '\"' else ''\n post = '\"' if m.group('post') != '\"' else ''\n return pre + replacement + post\n logger.debug('Substitute string definitions inside larger expressions')\n if '#' not in val:\n return val\n return self.replace_all_re.sub(repl, val)\n\n def _add_val(self, val):\n \"\"\" Clean instring before adding to dictionary\n\n :param val: a value\n :type val: string\n :returns: string -- value\n \"\"\"\n if not val or val == '{}':\n return ''\n val = self._strip_braces(val)\n val = self._strip_quotes(val)\n val = self._strip_braces(val)\n val = self._string_subst(val)\n return val\n\n def _add_key(self, key):\n \"\"\" Add a key and homogeneize alternative forms.\n\n :param key: a key\n :type key: string\n :returns: string -- value\n \"\"\"\n key = key.strip().strip('@').lower()\n if self.homogenise_fields:\n if key in list(self.alt_dict.keys()):\n key = self.alt_dict[key]\n if not isinstance(key, ustr):\n return ustr(key, 'utf-8')\n else:\n return key\n",
"<import token>\n<assignment token>\n<code token>\n\n\nclass BibTexParser(object):\n <docstring token>\n\n def __new__(cls, data=None, customization=None,\n ignore_nonstandard_types=True, homogenise_fields=True):\n \"\"\"\n To catch the old API structure in which creating the parser would immediately parse and return data.\n \"\"\"\n if data is None:\n return super(BibTexParser, cls).__new__(cls)\n else:\n parser = BibTexParser()\n parser.customization = customization\n parser.ignore_nonstandard_types = ignore_nonstandard_types\n parser.homogenise_fields = homogenise_fields\n return parser.parse(data)\n\n def __init__(self):\n \"\"\"\n Creates a parser for rading BibTeX files\n\n :return: parser\n :rtype: `BibTexParser`\n \"\"\"\n self.bib_database = BibDatabase()\n self.customization = None\n self.ignore_nonstandard_types = True\n self.homogenise_fields = True\n self.encoding = 'utf8'\n self.alt_dict = {'keyw': 'keyword', 'keywords': 'keyword',\n 'authors': 'author', 'editors': 'editor', 'url': 'link', 'urls':\n 'link', 'links': 'link', 'subjects': 'subject'}\n self.replace_all_re = re.compile(\n '((?P<pre>\"?)\\\\s*(#|^)\\\\s*(?P<id>[^\\\\d\\\\W]\\\\w*)\\\\s*(#|$)\\\\s*(?P<post>\"?))'\n , re.UNICODE)\n\n def _bibtex_file_obj(self, bibtex_str):\n byte = ''\n if not isinstance(byte, ustr):\n byte = ustr('', self.encoding, 'ignore')\n if bibtex_str[:3] == byte:\n bibtex_str = bibtex_str[3:]\n return StringIO(bibtex_str)\n\n def parse(self, bibtex_str):\n \"\"\"Parse a BibTeX string into an object\n\n :param bibtex_str: BibTeX string\n :type: str or unicode\n :return: bibliographic database\n :rtype: BibDatabase\n \"\"\"\n self.bibtex_file_obj = self._bibtex_file_obj(bibtex_str)\n self._parse_records(customization=self.customization)\n return self.bib_database\n\n def parse_file(self, file):\n \"\"\"Parse a BibTeX file into an object\n\n :param file: BibTeX file or file-like object\n :type: file\n :return: bibliographic database\n :rtype: BibDatabase\n \"\"\"\n return self.parse(file.read())\n\n def _parse_records(self, customization=None):\n \"\"\"Parse the bibtex into a list of records.\n\n :param customization: a function\n \"\"\"\n\n def _add_parsed_record(record, records):\n \"\"\"\n Atomic function to parse a record\n and append the result in records\n \"\"\"\n if record != '':\n logger.debug(\"The record is not empty. Let's parse it.\")\n parsed = self._parse_record(record, customization=customization\n )\n if parsed:\n logger.debug('Store the result of the parsed record')\n records.append(parsed)\n else:\n logger.debug('Nothing returned from the parsed record!')\n else:\n logger.debug('The record is empty')\n records = []\n record = ''\n for linenumber, line in enumerate(self.bibtex_file_obj):\n logger.debug('Inspect line %s', linenumber)\n if line.strip().startswith('@'):\n line = line.lstrip()\n logger.debug('Line starts with @')\n _add_parsed_record(record, records)\n logger.debug('The record is set to empty')\n record = ''\n record += line\n _add_parsed_record(record, records)\n logger.debug('Set the list of entries')\n self.bib_database.entries = records\n\n def _parse_record(self, record, customization=None):\n \"\"\"Parse a record.\n\n * tidy whitespace and other rubbish\n * parse out the bibtype and citekey\n * find all the key-value pairs it contains\n\n :param record: a record\n :param customization: a function\n\n :returns: dict --\n \"\"\"\n d = {}\n if not record.startswith('@'):\n logger.debug('The record does not start with @. Return empty dict.'\n )\n return {}\n if record.lower().startswith('@comment'):\n logger.debug('The record startswith @comment')\n logger.debug('Store comment in list of comments')\n self.bib_database.comments.append(re.search('\\\\{(.*)\\\\}',\n record, re.DOTALL).group(1))\n logger.debug('Return an empty dict')\n return {}\n if record.lower().startswith('@preamble'):\n logger.debug('The record startswith @preamble')\n logger.debug('Store preamble in list of preambles')\n self.bib_database.preambles.append(re.search('\\\\{(.*)\\\\}',\n record, re.DOTALL).group(1))\n logger.debug('Return an empty dict')\n return {}\n record = '\\n'.join([i.strip() for i in record.split('\\n')])\n if '}\\n' in record:\n logger.debug('}\\\\n detected in the record. Clean up.')\n record = record.replace('\\r\\n', '\\n').replace('\\r', '\\n').rstrip(\n '\\n')\n if record.endswith('}\\n}') or record.endswith('}}'):\n logger.debug(\n 'Missing coma in the last line of the record. Fix it.')\n record = re.sub('}(\\n|)}$', '},\\n}', record)\n if record.lower().startswith('@string'):\n logger.debug('The record startswith @string')\n key, val = [i.strip().strip('{').strip('}').replace('\\n', ' ') for\n i in record.split('{', 1)[1].strip('\\n').strip(',').strip(\n '}').split('=')]\n key = key.lower()\n val = self._string_subst_partial(val)\n if val.startswith('\"') or val.lower(\n ) not in self.bib_database.strings:\n self.bib_database.strings[key] = val.strip('\"')\n else:\n self.bib_database.strings[key] = self.bib_database.strings[val\n .lower()]\n logger.debug('Return a dict')\n return d\n logger.debug('Split the record of its lines and treat them')\n kvs = [i.strip() for i in record.split(',\\n')]\n inkey = ''\n inval = ''\n for kv in kvs:\n logger.debug('Inspect: %s', kv)\n if kv.startswith('@') and not inkey:\n logger.debug(\n 'Line starts with @ and the key is not stored yet.')\n bibtype, id = kv.split('{', 1)\n bibtype = self._add_key(bibtype)\n id = id.strip('}').strip(',')\n logger.debug('bibtype = %s', bibtype)\n logger.debug('id = %s', id)\n if self.ignore_nonstandard_types and bibtype not in ('article',\n 'book', 'booklet', 'conference', 'inbook',\n 'incollection', 'inproceedings', 'manual',\n 'mastersthesis', 'misc', 'phdthesis', 'proceedings',\n 'techreport', 'unpublished'):\n logger.warning(\n 'Entry type %s not standard. Not considered.', bibtype)\n break\n elif '=' in kv and not inkey:\n logger.debug(\n 'Line contains a key-pair value and the key is not stored yet.'\n )\n key, val = [i.strip() for i in kv.split('=', 1)]\n key = self._add_key(key)\n val = self._string_subst_partial(val)\n if val.count('{') != val.count('}') or val.startswith('\"'\n ) and not val.replace('}', '').endswith('\"'):\n logger.debug('The line is not ending the record.')\n inkey = key\n inval = val\n else:\n logger.debug('The line is the end of the record.')\n d[key] = self._add_val(val)\n elif inkey:\n logger.debug(\n 'Continues the previous line to complete the key pair value...'\n )\n inval += ', ' + kv\n if inval.startswith('{') and inval.endswith('}'\n ) or inval.startswith('\"') and inval.endswith('\"'):\n logger.debug(\n 'This line represents the end of the current key-pair value'\n )\n d[inkey] = self._add_val(inval)\n inkey = ''\n inval = ''\n else:\n logger.debug(\n 'This line does NOT represent the end of the current key-pair value'\n )\n logger.debug('All lines have been treated')\n if not d:\n logger.debug('The dict is empty, return it.')\n return d\n d['ENTRYTYPE'] = bibtype\n d['ID'] = id\n if customization is None:\n logger.debug('No customization to apply, return dict')\n return d\n else:\n logger.debug('Apply customizations and return dict')\n return customization(d)\n\n def _strip_quotes(self, val):\n \"\"\"Strip double quotes enclosing string\n\n :param val: a value\n :type val: string\n :returns: string -- value\n \"\"\"\n logger.debug('Strip quotes')\n val = val.strip()\n if val.startswith('\"') and val.endswith('\"'):\n return val[1:-1]\n return val\n\n def _strip_braces(self, val):\n \"\"\"Strip braces enclosing string\n\n :param val: a value\n :type val: string\n :returns: string -- value\n \"\"\"\n logger.debug('Strip braces')\n val = val.strip()\n if val.startswith('{') and val.endswith('}') and self._full_span(val):\n return val[1:-1]\n return val\n <function token>\n\n def _string_subst(self, val):\n \"\"\" Substitute string definitions\n\n :param val: a value\n :type val: string\n :returns: string -- value\n \"\"\"\n logger.debug('Substitute string definitions')\n if not val:\n return ''\n for k in list(self.bib_database.strings.keys()):\n if val.lower() == k:\n val = self.bib_database.strings[k]\n if not isinstance(val, ustr):\n val = ustr(val, self.encoding, 'ignore')\n return val\n\n def _string_subst_partial(self, val):\n \"\"\" Substitute string definitions inside larger expressions\n\n :param val: a value\n :type val: string\n :returns: string -- value\n \"\"\"\n\n def repl(m):\n k = m.group('id')\n replacement = self.bib_database.strings[k.lower()] if k.lower(\n ) in self.bib_database.strings else k\n pre = '\"' if m.group('pre') != '\"' else ''\n post = '\"' if m.group('post') != '\"' else ''\n return pre + replacement + post\n logger.debug('Substitute string definitions inside larger expressions')\n if '#' not in val:\n return val\n return self.replace_all_re.sub(repl, val)\n\n def _add_val(self, val):\n \"\"\" Clean instring before adding to dictionary\n\n :param val: a value\n :type val: string\n :returns: string -- value\n \"\"\"\n if not val or val == '{}':\n return ''\n val = self._strip_braces(val)\n val = self._strip_quotes(val)\n val = self._strip_braces(val)\n val = self._string_subst(val)\n return val\n\n def _add_key(self, key):\n \"\"\" Add a key and homogeneize alternative forms.\n\n :param key: a key\n :type key: string\n :returns: string -- value\n \"\"\"\n key = key.strip().strip('@').lower()\n if self.homogenise_fields:\n if key in list(self.alt_dict.keys()):\n key = self.alt_dict[key]\n if not isinstance(key, ustr):\n return ustr(key, 'utf-8')\n else:\n return key\n",
"<import token>\n<assignment token>\n<code token>\n\n\nclass BibTexParser(object):\n <docstring token>\n\n def __new__(cls, data=None, customization=None,\n ignore_nonstandard_types=True, homogenise_fields=True):\n \"\"\"\n To catch the old API structure in which creating the parser would immediately parse and return data.\n \"\"\"\n if data is None:\n return super(BibTexParser, cls).__new__(cls)\n else:\n parser = BibTexParser()\n parser.customization = customization\n parser.ignore_nonstandard_types = ignore_nonstandard_types\n parser.homogenise_fields = homogenise_fields\n return parser.parse(data)\n\n def __init__(self):\n \"\"\"\n Creates a parser for rading BibTeX files\n\n :return: parser\n :rtype: `BibTexParser`\n \"\"\"\n self.bib_database = BibDatabase()\n self.customization = None\n self.ignore_nonstandard_types = True\n self.homogenise_fields = True\n self.encoding = 'utf8'\n self.alt_dict = {'keyw': 'keyword', 'keywords': 'keyword',\n 'authors': 'author', 'editors': 'editor', 'url': 'link', 'urls':\n 'link', 'links': 'link', 'subjects': 'subject'}\n self.replace_all_re = re.compile(\n '((?P<pre>\"?)\\\\s*(#|^)\\\\s*(?P<id>[^\\\\d\\\\W]\\\\w*)\\\\s*(#|$)\\\\s*(?P<post>\"?))'\n , re.UNICODE)\n\n def _bibtex_file_obj(self, bibtex_str):\n byte = ''\n if not isinstance(byte, ustr):\n byte = ustr('', self.encoding, 'ignore')\n if bibtex_str[:3] == byte:\n bibtex_str = bibtex_str[3:]\n return StringIO(bibtex_str)\n\n def parse(self, bibtex_str):\n \"\"\"Parse a BibTeX string into an object\n\n :param bibtex_str: BibTeX string\n :type: str or unicode\n :return: bibliographic database\n :rtype: BibDatabase\n \"\"\"\n self.bibtex_file_obj = self._bibtex_file_obj(bibtex_str)\n self._parse_records(customization=self.customization)\n return self.bib_database\n\n def parse_file(self, file):\n \"\"\"Parse a BibTeX file into an object\n\n :param file: BibTeX file or file-like object\n :type: file\n :return: bibliographic database\n :rtype: BibDatabase\n \"\"\"\n return self.parse(file.read())\n\n def _parse_records(self, customization=None):\n \"\"\"Parse the bibtex into a list of records.\n\n :param customization: a function\n \"\"\"\n\n def _add_parsed_record(record, records):\n \"\"\"\n Atomic function to parse a record\n and append the result in records\n \"\"\"\n if record != '':\n logger.debug(\"The record is not empty. Let's parse it.\")\n parsed = self._parse_record(record, customization=customization\n )\n if parsed:\n logger.debug('Store the result of the parsed record')\n records.append(parsed)\n else:\n logger.debug('Nothing returned from the parsed record!')\n else:\n logger.debug('The record is empty')\n records = []\n record = ''\n for linenumber, line in enumerate(self.bibtex_file_obj):\n logger.debug('Inspect line %s', linenumber)\n if line.strip().startswith('@'):\n line = line.lstrip()\n logger.debug('Line starts with @')\n _add_parsed_record(record, records)\n logger.debug('The record is set to empty')\n record = ''\n record += line\n _add_parsed_record(record, records)\n logger.debug('Set the list of entries')\n self.bib_database.entries = records\n <function token>\n\n def _strip_quotes(self, val):\n \"\"\"Strip double quotes enclosing string\n\n :param val: a value\n :type val: string\n :returns: string -- value\n \"\"\"\n logger.debug('Strip quotes')\n val = val.strip()\n if val.startswith('\"') and val.endswith('\"'):\n return val[1:-1]\n return val\n\n def _strip_braces(self, val):\n \"\"\"Strip braces enclosing string\n\n :param val: a value\n :type val: string\n :returns: string -- value\n \"\"\"\n logger.debug('Strip braces')\n val = val.strip()\n if val.startswith('{') and val.endswith('}') and self._full_span(val):\n return val[1:-1]\n return val\n <function token>\n\n def _string_subst(self, val):\n \"\"\" Substitute string definitions\n\n :param val: a value\n :type val: string\n :returns: string -- value\n \"\"\"\n logger.debug('Substitute string definitions')\n if not val:\n return ''\n for k in list(self.bib_database.strings.keys()):\n if val.lower() == k:\n val = self.bib_database.strings[k]\n if not isinstance(val, ustr):\n val = ustr(val, self.encoding, 'ignore')\n return val\n\n def _string_subst_partial(self, val):\n \"\"\" Substitute string definitions inside larger expressions\n\n :param val: a value\n :type val: string\n :returns: string -- value\n \"\"\"\n\n def repl(m):\n k = m.group('id')\n replacement = self.bib_database.strings[k.lower()] if k.lower(\n ) in self.bib_database.strings else k\n pre = '\"' if m.group('pre') != '\"' else ''\n post = '\"' if m.group('post') != '\"' else ''\n return pre + replacement + post\n logger.debug('Substitute string definitions inside larger expressions')\n if '#' not in val:\n return val\n return self.replace_all_re.sub(repl, val)\n\n def _add_val(self, val):\n \"\"\" Clean instring before adding to dictionary\n\n :param val: a value\n :type val: string\n :returns: string -- value\n \"\"\"\n if not val or val == '{}':\n return ''\n val = self._strip_braces(val)\n val = self._strip_quotes(val)\n val = self._strip_braces(val)\n val = self._string_subst(val)\n return val\n\n def _add_key(self, key):\n \"\"\" Add a key and homogeneize alternative forms.\n\n :param key: a key\n :type key: string\n :returns: string -- value\n \"\"\"\n key = key.strip().strip('@').lower()\n if self.homogenise_fields:\n if key in list(self.alt_dict.keys()):\n key = self.alt_dict[key]\n if not isinstance(key, ustr):\n return ustr(key, 'utf-8')\n else:\n return key\n",
"<import token>\n<assignment token>\n<code token>\n\n\nclass BibTexParser(object):\n <docstring token>\n\n def __new__(cls, data=None, customization=None,\n ignore_nonstandard_types=True, homogenise_fields=True):\n \"\"\"\n To catch the old API structure in which creating the parser would immediately parse and return data.\n \"\"\"\n if data is None:\n return super(BibTexParser, cls).__new__(cls)\n else:\n parser = BibTexParser()\n parser.customization = customization\n parser.ignore_nonstandard_types = ignore_nonstandard_types\n parser.homogenise_fields = homogenise_fields\n return parser.parse(data)\n\n def __init__(self):\n \"\"\"\n Creates a parser for rading BibTeX files\n\n :return: parser\n :rtype: `BibTexParser`\n \"\"\"\n self.bib_database = BibDatabase()\n self.customization = None\n self.ignore_nonstandard_types = True\n self.homogenise_fields = True\n self.encoding = 'utf8'\n self.alt_dict = {'keyw': 'keyword', 'keywords': 'keyword',\n 'authors': 'author', 'editors': 'editor', 'url': 'link', 'urls':\n 'link', 'links': 'link', 'subjects': 'subject'}\n self.replace_all_re = re.compile(\n '((?P<pre>\"?)\\\\s*(#|^)\\\\s*(?P<id>[^\\\\d\\\\W]\\\\w*)\\\\s*(#|$)\\\\s*(?P<post>\"?))'\n , re.UNICODE)\n <function token>\n\n def parse(self, bibtex_str):\n \"\"\"Parse a BibTeX string into an object\n\n :param bibtex_str: BibTeX string\n :type: str or unicode\n :return: bibliographic database\n :rtype: BibDatabase\n \"\"\"\n self.bibtex_file_obj = self._bibtex_file_obj(bibtex_str)\n self._parse_records(customization=self.customization)\n return self.bib_database\n\n def parse_file(self, file):\n \"\"\"Parse a BibTeX file into an object\n\n :param file: BibTeX file or file-like object\n :type: file\n :return: bibliographic database\n :rtype: BibDatabase\n \"\"\"\n return self.parse(file.read())\n\n def _parse_records(self, customization=None):\n \"\"\"Parse the bibtex into a list of records.\n\n :param customization: a function\n \"\"\"\n\n def _add_parsed_record(record, records):\n \"\"\"\n Atomic function to parse a record\n and append the result in records\n \"\"\"\n if record != '':\n logger.debug(\"The record is not empty. Let's parse it.\")\n parsed = self._parse_record(record, customization=customization\n )\n if parsed:\n logger.debug('Store the result of the parsed record')\n records.append(parsed)\n else:\n logger.debug('Nothing returned from the parsed record!')\n else:\n logger.debug('The record is empty')\n records = []\n record = ''\n for linenumber, line in enumerate(self.bibtex_file_obj):\n logger.debug('Inspect line %s', linenumber)\n if line.strip().startswith('@'):\n line = line.lstrip()\n logger.debug('Line starts with @')\n _add_parsed_record(record, records)\n logger.debug('The record is set to empty')\n record = ''\n record += line\n _add_parsed_record(record, records)\n logger.debug('Set the list of entries')\n self.bib_database.entries = records\n <function token>\n\n def _strip_quotes(self, val):\n \"\"\"Strip double quotes enclosing string\n\n :param val: a value\n :type val: string\n :returns: string -- value\n \"\"\"\n logger.debug('Strip quotes')\n val = val.strip()\n if val.startswith('\"') and val.endswith('\"'):\n return val[1:-1]\n return val\n\n def _strip_braces(self, val):\n \"\"\"Strip braces enclosing string\n\n :param val: a value\n :type val: string\n :returns: string -- value\n \"\"\"\n logger.debug('Strip braces')\n val = val.strip()\n if val.startswith('{') and val.endswith('}') and self._full_span(val):\n return val[1:-1]\n return val\n <function token>\n\n def _string_subst(self, val):\n \"\"\" Substitute string definitions\n\n :param val: a value\n :type val: string\n :returns: string -- value\n \"\"\"\n logger.debug('Substitute string definitions')\n if not val:\n return ''\n for k in list(self.bib_database.strings.keys()):\n if val.lower() == k:\n val = self.bib_database.strings[k]\n if not isinstance(val, ustr):\n val = ustr(val, self.encoding, 'ignore')\n return val\n\n def _string_subst_partial(self, val):\n \"\"\" Substitute string definitions inside larger expressions\n\n :param val: a value\n :type val: string\n :returns: string -- value\n \"\"\"\n\n def repl(m):\n k = m.group('id')\n replacement = self.bib_database.strings[k.lower()] if k.lower(\n ) in self.bib_database.strings else k\n pre = '\"' if m.group('pre') != '\"' else ''\n post = '\"' if m.group('post') != '\"' else ''\n return pre + replacement + post\n logger.debug('Substitute string definitions inside larger expressions')\n if '#' not in val:\n return val\n return self.replace_all_re.sub(repl, val)\n\n def _add_val(self, val):\n \"\"\" Clean instring before adding to dictionary\n\n :param val: a value\n :type val: string\n :returns: string -- value\n \"\"\"\n if not val or val == '{}':\n return ''\n val = self._strip_braces(val)\n val = self._strip_quotes(val)\n val = self._strip_braces(val)\n val = self._string_subst(val)\n return val\n\n def _add_key(self, key):\n \"\"\" Add a key and homogeneize alternative forms.\n\n :param key: a key\n :type key: string\n :returns: string -- value\n \"\"\"\n key = key.strip().strip('@').lower()\n if self.homogenise_fields:\n if key in list(self.alt_dict.keys()):\n key = self.alt_dict[key]\n if not isinstance(key, ustr):\n return ustr(key, 'utf-8')\n else:\n return key\n",
"<import token>\n<assignment token>\n<code token>\n\n\nclass BibTexParser(object):\n <docstring token>\n\n def __new__(cls, data=None, customization=None,\n ignore_nonstandard_types=True, homogenise_fields=True):\n \"\"\"\n To catch the old API structure in which creating the parser would immediately parse and return data.\n \"\"\"\n if data is None:\n return super(BibTexParser, cls).__new__(cls)\n else:\n parser = BibTexParser()\n parser.customization = customization\n parser.ignore_nonstandard_types = ignore_nonstandard_types\n parser.homogenise_fields = homogenise_fields\n return parser.parse(data)\n\n def __init__(self):\n \"\"\"\n Creates a parser for rading BibTeX files\n\n :return: parser\n :rtype: `BibTexParser`\n \"\"\"\n self.bib_database = BibDatabase()\n self.customization = None\n self.ignore_nonstandard_types = True\n self.homogenise_fields = True\n self.encoding = 'utf8'\n self.alt_dict = {'keyw': 'keyword', 'keywords': 'keyword',\n 'authors': 'author', 'editors': 'editor', 'url': 'link', 'urls':\n 'link', 'links': 'link', 'subjects': 'subject'}\n self.replace_all_re = re.compile(\n '((?P<pre>\"?)\\\\s*(#|^)\\\\s*(?P<id>[^\\\\d\\\\W]\\\\w*)\\\\s*(#|$)\\\\s*(?P<post>\"?))'\n , re.UNICODE)\n <function token>\n\n def parse(self, bibtex_str):\n \"\"\"Parse a BibTeX string into an object\n\n :param bibtex_str: BibTeX string\n :type: str or unicode\n :return: bibliographic database\n :rtype: BibDatabase\n \"\"\"\n self.bibtex_file_obj = self._bibtex_file_obj(bibtex_str)\n self._parse_records(customization=self.customization)\n return self.bib_database\n\n def parse_file(self, file):\n \"\"\"Parse a BibTeX file into an object\n\n :param file: BibTeX file or file-like object\n :type: file\n :return: bibliographic database\n :rtype: BibDatabase\n \"\"\"\n return self.parse(file.read())\n\n def _parse_records(self, customization=None):\n \"\"\"Parse the bibtex into a list of records.\n\n :param customization: a function\n \"\"\"\n\n def _add_parsed_record(record, records):\n \"\"\"\n Atomic function to parse a record\n and append the result in records\n \"\"\"\n if record != '':\n logger.debug(\"The record is not empty. Let's parse it.\")\n parsed = self._parse_record(record, customization=customization\n )\n if parsed:\n logger.debug('Store the result of the parsed record')\n records.append(parsed)\n else:\n logger.debug('Nothing returned from the parsed record!')\n else:\n logger.debug('The record is empty')\n records = []\n record = ''\n for linenumber, line in enumerate(self.bibtex_file_obj):\n logger.debug('Inspect line %s', linenumber)\n if line.strip().startswith('@'):\n line = line.lstrip()\n logger.debug('Line starts with @')\n _add_parsed_record(record, records)\n logger.debug('The record is set to empty')\n record = ''\n record += line\n _add_parsed_record(record, records)\n logger.debug('Set the list of entries')\n self.bib_database.entries = records\n <function token>\n <function token>\n\n def _strip_braces(self, val):\n \"\"\"Strip braces enclosing string\n\n :param val: a value\n :type val: string\n :returns: string -- value\n \"\"\"\n logger.debug('Strip braces')\n val = val.strip()\n if val.startswith('{') and val.endswith('}') and self._full_span(val):\n return val[1:-1]\n return val\n <function token>\n\n def _string_subst(self, val):\n \"\"\" Substitute string definitions\n\n :param val: a value\n :type val: string\n :returns: string -- value\n \"\"\"\n logger.debug('Substitute string definitions')\n if not val:\n return ''\n for k in list(self.bib_database.strings.keys()):\n if val.lower() == k:\n val = self.bib_database.strings[k]\n if not isinstance(val, ustr):\n val = ustr(val, self.encoding, 'ignore')\n return val\n\n def _string_subst_partial(self, val):\n \"\"\" Substitute string definitions inside larger expressions\n\n :param val: a value\n :type val: string\n :returns: string -- value\n \"\"\"\n\n def repl(m):\n k = m.group('id')\n replacement = self.bib_database.strings[k.lower()] if k.lower(\n ) in self.bib_database.strings else k\n pre = '\"' if m.group('pre') != '\"' else ''\n post = '\"' if m.group('post') != '\"' else ''\n return pre + replacement + post\n logger.debug('Substitute string definitions inside larger expressions')\n if '#' not in val:\n return val\n return self.replace_all_re.sub(repl, val)\n\n def _add_val(self, val):\n \"\"\" Clean instring before adding to dictionary\n\n :param val: a value\n :type val: string\n :returns: string -- value\n \"\"\"\n if not val or val == '{}':\n return ''\n val = self._strip_braces(val)\n val = self._strip_quotes(val)\n val = self._strip_braces(val)\n val = self._string_subst(val)\n return val\n\n def _add_key(self, key):\n \"\"\" Add a key and homogeneize alternative forms.\n\n :param key: a key\n :type key: string\n :returns: string -- value\n \"\"\"\n key = key.strip().strip('@').lower()\n if self.homogenise_fields:\n if key in list(self.alt_dict.keys()):\n key = self.alt_dict[key]\n if not isinstance(key, ustr):\n return ustr(key, 'utf-8')\n else:\n return key\n",
"<import token>\n<assignment token>\n<code token>\n\n\nclass BibTexParser(object):\n <docstring token>\n\n def __new__(cls, data=None, customization=None,\n ignore_nonstandard_types=True, homogenise_fields=True):\n \"\"\"\n To catch the old API structure in which creating the parser would immediately parse and return data.\n \"\"\"\n if data is None:\n return super(BibTexParser, cls).__new__(cls)\n else:\n parser = BibTexParser()\n parser.customization = customization\n parser.ignore_nonstandard_types = ignore_nonstandard_types\n parser.homogenise_fields = homogenise_fields\n return parser.parse(data)\n\n def __init__(self):\n \"\"\"\n Creates a parser for rading BibTeX files\n\n :return: parser\n :rtype: `BibTexParser`\n \"\"\"\n self.bib_database = BibDatabase()\n self.customization = None\n self.ignore_nonstandard_types = True\n self.homogenise_fields = True\n self.encoding = 'utf8'\n self.alt_dict = {'keyw': 'keyword', 'keywords': 'keyword',\n 'authors': 'author', 'editors': 'editor', 'url': 'link', 'urls':\n 'link', 'links': 'link', 'subjects': 'subject'}\n self.replace_all_re = re.compile(\n '((?P<pre>\"?)\\\\s*(#|^)\\\\s*(?P<id>[^\\\\d\\\\W]\\\\w*)\\\\s*(#|$)\\\\s*(?P<post>\"?))'\n , re.UNICODE)\n <function token>\n\n def parse(self, bibtex_str):\n \"\"\"Parse a BibTeX string into an object\n\n :param bibtex_str: BibTeX string\n :type: str or unicode\n :return: bibliographic database\n :rtype: BibDatabase\n \"\"\"\n self.bibtex_file_obj = self._bibtex_file_obj(bibtex_str)\n self._parse_records(customization=self.customization)\n return self.bib_database\n\n def parse_file(self, file):\n \"\"\"Parse a BibTeX file into an object\n\n :param file: BibTeX file or file-like object\n :type: file\n :return: bibliographic database\n :rtype: BibDatabase\n \"\"\"\n return self.parse(file.read())\n\n def _parse_records(self, customization=None):\n \"\"\"Parse the bibtex into a list of records.\n\n :param customization: a function\n \"\"\"\n\n def _add_parsed_record(record, records):\n \"\"\"\n Atomic function to parse a record\n and append the result in records\n \"\"\"\n if record != '':\n logger.debug(\"The record is not empty. Let's parse it.\")\n parsed = self._parse_record(record, customization=customization\n )\n if parsed:\n logger.debug('Store the result of the parsed record')\n records.append(parsed)\n else:\n logger.debug('Nothing returned from the parsed record!')\n else:\n logger.debug('The record is empty')\n records = []\n record = ''\n for linenumber, line in enumerate(self.bibtex_file_obj):\n logger.debug('Inspect line %s', linenumber)\n if line.strip().startswith('@'):\n line = line.lstrip()\n logger.debug('Line starts with @')\n _add_parsed_record(record, records)\n logger.debug('The record is set to empty')\n record = ''\n record += line\n _add_parsed_record(record, records)\n logger.debug('Set the list of entries')\n self.bib_database.entries = records\n <function token>\n <function token>\n <function token>\n <function token>\n\n def _string_subst(self, val):\n \"\"\" Substitute string definitions\n\n :param val: a value\n :type val: string\n :returns: string -- value\n \"\"\"\n logger.debug('Substitute string definitions')\n if not val:\n return ''\n for k in list(self.bib_database.strings.keys()):\n if val.lower() == k:\n val = self.bib_database.strings[k]\n if not isinstance(val, ustr):\n val = ustr(val, self.encoding, 'ignore')\n return val\n\n def _string_subst_partial(self, val):\n \"\"\" Substitute string definitions inside larger expressions\n\n :param val: a value\n :type val: string\n :returns: string -- value\n \"\"\"\n\n def repl(m):\n k = m.group('id')\n replacement = self.bib_database.strings[k.lower()] if k.lower(\n ) in self.bib_database.strings else k\n pre = '\"' if m.group('pre') != '\"' else ''\n post = '\"' if m.group('post') != '\"' else ''\n return pre + replacement + post\n logger.debug('Substitute string definitions inside larger expressions')\n if '#' not in val:\n return val\n return self.replace_all_re.sub(repl, val)\n\n def _add_val(self, val):\n \"\"\" Clean instring before adding to dictionary\n\n :param val: a value\n :type val: string\n :returns: string -- value\n \"\"\"\n if not val or val == '{}':\n return ''\n val = self._strip_braces(val)\n val = self._strip_quotes(val)\n val = self._strip_braces(val)\n val = self._string_subst(val)\n return val\n\n def _add_key(self, key):\n \"\"\" Add a key and homogeneize alternative forms.\n\n :param key: a key\n :type key: string\n :returns: string -- value\n \"\"\"\n key = key.strip().strip('@').lower()\n if self.homogenise_fields:\n if key in list(self.alt_dict.keys()):\n key = self.alt_dict[key]\n if not isinstance(key, ustr):\n return ustr(key, 'utf-8')\n else:\n return key\n",
"<import token>\n<assignment token>\n<code token>\n\n\nclass BibTexParser(object):\n <docstring token>\n\n def __new__(cls, data=None, customization=None,\n ignore_nonstandard_types=True, homogenise_fields=True):\n \"\"\"\n To catch the old API structure in which creating the parser would immediately parse and return data.\n \"\"\"\n if data is None:\n return super(BibTexParser, cls).__new__(cls)\n else:\n parser = BibTexParser()\n parser.customization = customization\n parser.ignore_nonstandard_types = ignore_nonstandard_types\n parser.homogenise_fields = homogenise_fields\n return parser.parse(data)\n\n def __init__(self):\n \"\"\"\n Creates a parser for rading BibTeX files\n\n :return: parser\n :rtype: `BibTexParser`\n \"\"\"\n self.bib_database = BibDatabase()\n self.customization = None\n self.ignore_nonstandard_types = True\n self.homogenise_fields = True\n self.encoding = 'utf8'\n self.alt_dict = {'keyw': 'keyword', 'keywords': 'keyword',\n 'authors': 'author', 'editors': 'editor', 'url': 'link', 'urls':\n 'link', 'links': 'link', 'subjects': 'subject'}\n self.replace_all_re = re.compile(\n '((?P<pre>\"?)\\\\s*(#|^)\\\\s*(?P<id>[^\\\\d\\\\W]\\\\w*)\\\\s*(#|$)\\\\s*(?P<post>\"?))'\n , re.UNICODE)\n <function token>\n\n def parse(self, bibtex_str):\n \"\"\"Parse a BibTeX string into an object\n\n :param bibtex_str: BibTeX string\n :type: str or unicode\n :return: bibliographic database\n :rtype: BibDatabase\n \"\"\"\n self.bibtex_file_obj = self._bibtex_file_obj(bibtex_str)\n self._parse_records(customization=self.customization)\n return self.bib_database\n\n def parse_file(self, file):\n \"\"\"Parse a BibTeX file into an object\n\n :param file: BibTeX file or file-like object\n :type: file\n :return: bibliographic database\n :rtype: BibDatabase\n \"\"\"\n return self.parse(file.read())\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def _string_subst(self, val):\n \"\"\" Substitute string definitions\n\n :param val: a value\n :type val: string\n :returns: string -- value\n \"\"\"\n logger.debug('Substitute string definitions')\n if not val:\n return ''\n for k in list(self.bib_database.strings.keys()):\n if val.lower() == k:\n val = self.bib_database.strings[k]\n if not isinstance(val, ustr):\n val = ustr(val, self.encoding, 'ignore')\n return val\n\n def _string_subst_partial(self, val):\n \"\"\" Substitute string definitions inside larger expressions\n\n :param val: a value\n :type val: string\n :returns: string -- value\n \"\"\"\n\n def repl(m):\n k = m.group('id')\n replacement = self.bib_database.strings[k.lower()] if k.lower(\n ) in self.bib_database.strings else k\n pre = '\"' if m.group('pre') != '\"' else ''\n post = '\"' if m.group('post') != '\"' else ''\n return pre + replacement + post\n logger.debug('Substitute string definitions inside larger expressions')\n if '#' not in val:\n return val\n return self.replace_all_re.sub(repl, val)\n\n def _add_val(self, val):\n \"\"\" Clean instring before adding to dictionary\n\n :param val: a value\n :type val: string\n :returns: string -- value\n \"\"\"\n if not val or val == '{}':\n return ''\n val = self._strip_braces(val)\n val = self._strip_quotes(val)\n val = self._strip_braces(val)\n val = self._string_subst(val)\n return val\n\n def _add_key(self, key):\n \"\"\" Add a key and homogeneize alternative forms.\n\n :param key: a key\n :type key: string\n :returns: string -- value\n \"\"\"\n key = key.strip().strip('@').lower()\n if self.homogenise_fields:\n if key in list(self.alt_dict.keys()):\n key = self.alt_dict[key]\n if not isinstance(key, ustr):\n return ustr(key, 'utf-8')\n else:\n return key\n",
"<import token>\n<assignment token>\n<code token>\n\n\nclass BibTexParser(object):\n <docstring token>\n\n def __new__(cls, data=None, customization=None,\n ignore_nonstandard_types=True, homogenise_fields=True):\n \"\"\"\n To catch the old API structure in which creating the parser would immediately parse and return data.\n \"\"\"\n if data is None:\n return super(BibTexParser, cls).__new__(cls)\n else:\n parser = BibTexParser()\n parser.customization = customization\n parser.ignore_nonstandard_types = ignore_nonstandard_types\n parser.homogenise_fields = homogenise_fields\n return parser.parse(data)\n <function token>\n <function token>\n\n def parse(self, bibtex_str):\n \"\"\"Parse a BibTeX string into an object\n\n :param bibtex_str: BibTeX string\n :type: str or unicode\n :return: bibliographic database\n :rtype: BibDatabase\n \"\"\"\n self.bibtex_file_obj = self._bibtex_file_obj(bibtex_str)\n self._parse_records(customization=self.customization)\n return self.bib_database\n\n def parse_file(self, file):\n \"\"\"Parse a BibTeX file into an object\n\n :param file: BibTeX file or file-like object\n :type: file\n :return: bibliographic database\n :rtype: BibDatabase\n \"\"\"\n return self.parse(file.read())\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def _string_subst(self, val):\n \"\"\" Substitute string definitions\n\n :param val: a value\n :type val: string\n :returns: string -- value\n \"\"\"\n logger.debug('Substitute string definitions')\n if not val:\n return ''\n for k in list(self.bib_database.strings.keys()):\n if val.lower() == k:\n val = self.bib_database.strings[k]\n if not isinstance(val, ustr):\n val = ustr(val, self.encoding, 'ignore')\n return val\n\n def _string_subst_partial(self, val):\n \"\"\" Substitute string definitions inside larger expressions\n\n :param val: a value\n :type val: string\n :returns: string -- value\n \"\"\"\n\n def repl(m):\n k = m.group('id')\n replacement = self.bib_database.strings[k.lower()] if k.lower(\n ) in self.bib_database.strings else k\n pre = '\"' if m.group('pre') != '\"' else ''\n post = '\"' if m.group('post') != '\"' else ''\n return pre + replacement + post\n logger.debug('Substitute string definitions inside larger expressions')\n if '#' not in val:\n return val\n return self.replace_all_re.sub(repl, val)\n\n def _add_val(self, val):\n \"\"\" Clean instring before adding to dictionary\n\n :param val: a value\n :type val: string\n :returns: string -- value\n \"\"\"\n if not val or val == '{}':\n return ''\n val = self._strip_braces(val)\n val = self._strip_quotes(val)\n val = self._strip_braces(val)\n val = self._string_subst(val)\n return val\n\n def _add_key(self, key):\n \"\"\" Add a key and homogeneize alternative forms.\n\n :param key: a key\n :type key: string\n :returns: string -- value\n \"\"\"\n key = key.strip().strip('@').lower()\n if self.homogenise_fields:\n if key in list(self.alt_dict.keys()):\n key = self.alt_dict[key]\n if not isinstance(key, ustr):\n return ustr(key, 'utf-8')\n else:\n return key\n",
"<import token>\n<assignment token>\n<code token>\n\n\nclass BibTexParser(object):\n <docstring token>\n\n def __new__(cls, data=None, customization=None,\n ignore_nonstandard_types=True, homogenise_fields=True):\n \"\"\"\n To catch the old API structure in which creating the parser would immediately parse and return data.\n \"\"\"\n if data is None:\n return super(BibTexParser, cls).__new__(cls)\n else:\n parser = BibTexParser()\n parser.customization = customization\n parser.ignore_nonstandard_types = ignore_nonstandard_types\n parser.homogenise_fields = homogenise_fields\n return parser.parse(data)\n <function token>\n <function token>\n\n def parse(self, bibtex_str):\n \"\"\"Parse a BibTeX string into an object\n\n :param bibtex_str: BibTeX string\n :type: str or unicode\n :return: bibliographic database\n :rtype: BibDatabase\n \"\"\"\n self.bibtex_file_obj = self._bibtex_file_obj(bibtex_str)\n self._parse_records(customization=self.customization)\n return self.bib_database\n\n def parse_file(self, file):\n \"\"\"Parse a BibTeX file into an object\n\n :param file: BibTeX file or file-like object\n :type: file\n :return: bibliographic database\n :rtype: BibDatabase\n \"\"\"\n return self.parse(file.read())\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def _string_subst(self, val):\n \"\"\" Substitute string definitions\n\n :param val: a value\n :type val: string\n :returns: string -- value\n \"\"\"\n logger.debug('Substitute string definitions')\n if not val:\n return ''\n for k in list(self.bib_database.strings.keys()):\n if val.lower() == k:\n val = self.bib_database.strings[k]\n if not isinstance(val, ustr):\n val = ustr(val, self.encoding, 'ignore')\n return val\n <function token>\n\n def _add_val(self, val):\n \"\"\" Clean instring before adding to dictionary\n\n :param val: a value\n :type val: string\n :returns: string -- value\n \"\"\"\n if not val or val == '{}':\n return ''\n val = self._strip_braces(val)\n val = self._strip_quotes(val)\n val = self._strip_braces(val)\n val = self._string_subst(val)\n return val\n\n def _add_key(self, key):\n \"\"\" Add a key and homogeneize alternative forms.\n\n :param key: a key\n :type key: string\n :returns: string -- value\n \"\"\"\n key = key.strip().strip('@').lower()\n if self.homogenise_fields:\n if key in list(self.alt_dict.keys()):\n key = self.alt_dict[key]\n if not isinstance(key, ustr):\n return ustr(key, 'utf-8')\n else:\n return key\n",
"<import token>\n<assignment token>\n<code token>\n\n\nclass BibTexParser(object):\n <docstring token>\n\n def __new__(cls, data=None, customization=None,\n ignore_nonstandard_types=True, homogenise_fields=True):\n \"\"\"\n To catch the old API structure in which creating the parser would immediately parse and return data.\n \"\"\"\n if data is None:\n return super(BibTexParser, cls).__new__(cls)\n else:\n parser = BibTexParser()\n parser.customization = customization\n parser.ignore_nonstandard_types = ignore_nonstandard_types\n parser.homogenise_fields = homogenise_fields\n return parser.parse(data)\n <function token>\n <function token>\n\n def parse(self, bibtex_str):\n \"\"\"Parse a BibTeX string into an object\n\n :param bibtex_str: BibTeX string\n :type: str or unicode\n :return: bibliographic database\n :rtype: BibDatabase\n \"\"\"\n self.bibtex_file_obj = self._bibtex_file_obj(bibtex_str)\n self._parse_records(customization=self.customization)\n return self.bib_database\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def _string_subst(self, val):\n \"\"\" Substitute string definitions\n\n :param val: a value\n :type val: string\n :returns: string -- value\n \"\"\"\n logger.debug('Substitute string definitions')\n if not val:\n return ''\n for k in list(self.bib_database.strings.keys()):\n if val.lower() == k:\n val = self.bib_database.strings[k]\n if not isinstance(val, ustr):\n val = ustr(val, self.encoding, 'ignore')\n return val\n <function token>\n\n def _add_val(self, val):\n \"\"\" Clean instring before adding to dictionary\n\n :param val: a value\n :type val: string\n :returns: string -- value\n \"\"\"\n if not val or val == '{}':\n return ''\n val = self._strip_braces(val)\n val = self._strip_quotes(val)\n val = self._strip_braces(val)\n val = self._string_subst(val)\n return val\n\n def _add_key(self, key):\n \"\"\" Add a key and homogeneize alternative forms.\n\n :param key: a key\n :type key: string\n :returns: string -- value\n \"\"\"\n key = key.strip().strip('@').lower()\n if self.homogenise_fields:\n if key in list(self.alt_dict.keys()):\n key = self.alt_dict[key]\n if not isinstance(key, ustr):\n return ustr(key, 'utf-8')\n else:\n return key\n",
"<import token>\n<assignment token>\n<code token>\n\n\nclass BibTexParser(object):\n <docstring token>\n <function token>\n <function token>\n <function token>\n\n def parse(self, bibtex_str):\n \"\"\"Parse a BibTeX string into an object\n\n :param bibtex_str: BibTeX string\n :type: str or unicode\n :return: bibliographic database\n :rtype: BibDatabase\n \"\"\"\n self.bibtex_file_obj = self._bibtex_file_obj(bibtex_str)\n self._parse_records(customization=self.customization)\n return self.bib_database\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def _string_subst(self, val):\n \"\"\" Substitute string definitions\n\n :param val: a value\n :type val: string\n :returns: string -- value\n \"\"\"\n logger.debug('Substitute string definitions')\n if not val:\n return ''\n for k in list(self.bib_database.strings.keys()):\n if val.lower() == k:\n val = self.bib_database.strings[k]\n if not isinstance(val, ustr):\n val = ustr(val, self.encoding, 'ignore')\n return val\n <function token>\n\n def _add_val(self, val):\n \"\"\" Clean instring before adding to dictionary\n\n :param val: a value\n :type val: string\n :returns: string -- value\n \"\"\"\n if not val or val == '{}':\n return ''\n val = self._strip_braces(val)\n val = self._strip_quotes(val)\n val = self._strip_braces(val)\n val = self._string_subst(val)\n return val\n\n def _add_key(self, key):\n \"\"\" Add a key and homogeneize alternative forms.\n\n :param key: a key\n :type key: string\n :returns: string -- value\n \"\"\"\n key = key.strip().strip('@').lower()\n if self.homogenise_fields:\n if key in list(self.alt_dict.keys()):\n key = self.alt_dict[key]\n if not isinstance(key, ustr):\n return ustr(key, 'utf-8')\n else:\n return key\n",
"<import token>\n<assignment token>\n<code token>\n\n\nclass BibTexParser(object):\n <docstring token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def _string_subst(self, val):\n \"\"\" Substitute string definitions\n\n :param val: a value\n :type val: string\n :returns: string -- value\n \"\"\"\n logger.debug('Substitute string definitions')\n if not val:\n return ''\n for k in list(self.bib_database.strings.keys()):\n if val.lower() == k:\n val = self.bib_database.strings[k]\n if not isinstance(val, ustr):\n val = ustr(val, self.encoding, 'ignore')\n return val\n <function token>\n\n def _add_val(self, val):\n \"\"\" Clean instring before adding to dictionary\n\n :param val: a value\n :type val: string\n :returns: string -- value\n \"\"\"\n if not val or val == '{}':\n return ''\n val = self._strip_braces(val)\n val = self._strip_quotes(val)\n val = self._strip_braces(val)\n val = self._string_subst(val)\n return val\n\n def _add_key(self, key):\n \"\"\" Add a key and homogeneize alternative forms.\n\n :param key: a key\n :type key: string\n :returns: string -- value\n \"\"\"\n key = key.strip().strip('@').lower()\n if self.homogenise_fields:\n if key in list(self.alt_dict.keys()):\n key = self.alt_dict[key]\n if not isinstance(key, ustr):\n return ustr(key, 'utf-8')\n else:\n return key\n",
"<import token>\n<assignment token>\n<code token>\n\n\nclass BibTexParser(object):\n <docstring token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def _string_subst(self, val):\n \"\"\" Substitute string definitions\n\n :param val: a value\n :type val: string\n :returns: string -- value\n \"\"\"\n logger.debug('Substitute string definitions')\n if not val:\n return ''\n for k in list(self.bib_database.strings.keys()):\n if val.lower() == k:\n val = self.bib_database.strings[k]\n if not isinstance(val, ustr):\n val = ustr(val, self.encoding, 'ignore')\n return val\n <function token>\n\n def _add_val(self, val):\n \"\"\" Clean instring before adding to dictionary\n\n :param val: a value\n :type val: string\n :returns: string -- value\n \"\"\"\n if not val or val == '{}':\n return ''\n val = self._strip_braces(val)\n val = self._strip_quotes(val)\n val = self._strip_braces(val)\n val = self._string_subst(val)\n return val\n <function token>\n",
"<import token>\n<assignment token>\n<code token>\n\n\nclass BibTexParser(object):\n <docstring token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def _string_subst(self, val):\n \"\"\" Substitute string definitions\n\n :param val: a value\n :type val: string\n :returns: string -- value\n \"\"\"\n logger.debug('Substitute string definitions')\n if not val:\n return ''\n for k in list(self.bib_database.strings.keys()):\n if val.lower() == k:\n val = self.bib_database.strings[k]\n if not isinstance(val, ustr):\n val = ustr(val, self.encoding, 'ignore')\n return val\n <function token>\n <function token>\n <function token>\n",
"<import token>\n<assignment token>\n<code token>\n\n\nclass BibTexParser(object):\n <docstring token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n",
"<import token>\n<assignment token>\n<code token>\n<class token>\n"
] | false |
98,367 |
f11e7a7fce37d5ecc0b031084277a3e7f406b743
|
# Copyright 2014 - Dark Secret Software Inc.
# All Rights Reserved.
def get_event():
return {"event_type": "compute.instance.exists",
'_context_request_id': "req-1234",
'_context_project_id': "tenant-1",
"timestamp": "2013-06-20 17:31:57.939614",
"publisher_id": "compute.global.preprod-ord.ohthree.com",
"payload": {
'instance_id': "ins-4567",
"status": "saving",
"container_format": "ovf",
"properties": {
"image_type": "snapshot",
},
"options": [
"one",
"two",
{"server": "bart",
"region": "springfield"},
"three"
],
"tenant": "5877054",
"old_state": 'old_state',
"old_task_state": 'old_task',
"image_meta": {
"org.openstack__1__architecture": 'os_arch',
"org.openstack__1__os_distro": 'os_distro',
"org.openstack__1__os_version": 'os_version',
"com.rackspace__1__options": 'rax_opt',
},
"state": 'state',
"new_task_state": 'task',
"bandwidth": {
"private": {"bw_in": 0, "bw_out": 264902},
"public": {"bw_in": 0, "bw_out": 1697240969}
}
}
}
|
[
"# Copyright 2014 - Dark Secret Software Inc.\n# All Rights Reserved.\n\n\ndef get_event():\n return {\"event_type\": \"compute.instance.exists\",\n '_context_request_id': \"req-1234\",\n '_context_project_id': \"tenant-1\",\n \"timestamp\": \"2013-06-20 17:31:57.939614\",\n \"publisher_id\": \"compute.global.preprod-ord.ohthree.com\",\n \"payload\": {\n 'instance_id': \"ins-4567\",\n \"status\": \"saving\",\n \"container_format\": \"ovf\",\n \"properties\": {\n \"image_type\": \"snapshot\",\n },\n \"options\": [\n \"one\",\n \"two\",\n {\"server\": \"bart\",\n \"region\": \"springfield\"},\n \"three\"\n ],\n \"tenant\": \"5877054\",\n \"old_state\": 'old_state',\n \"old_task_state\": 'old_task',\n \"image_meta\": {\n \"org.openstack__1__architecture\": 'os_arch',\n \"org.openstack__1__os_distro\": 'os_distro',\n \"org.openstack__1__os_version\": 'os_version',\n \"com.rackspace__1__options\": 'rax_opt',\n },\n \"state\": 'state',\n \"new_task_state\": 'task',\n \"bandwidth\": {\n \"private\": {\"bw_in\": 0, \"bw_out\": 264902},\n \"public\": {\"bw_in\": 0, \"bw_out\": 1697240969}\n }\n }\n }\n",
"def get_event():\n return {'event_type': 'compute.instance.exists', '_context_request_id':\n 'req-1234', '_context_project_id': 'tenant-1', 'timestamp':\n '2013-06-20 17:31:57.939614', 'publisher_id':\n 'compute.global.preprod-ord.ohthree.com', 'payload': {'instance_id':\n 'ins-4567', 'status': 'saving', 'container_format': 'ovf',\n 'properties': {'image_type': 'snapshot'}, 'options': ['one', 'two',\n {'server': 'bart', 'region': 'springfield'}, 'three'], 'tenant':\n '5877054', 'old_state': 'old_state', 'old_task_state': 'old_task',\n 'image_meta': {'org.openstack__1__architecture': 'os_arch',\n 'org.openstack__1__os_distro': 'os_distro',\n 'org.openstack__1__os_version': 'os_version',\n 'com.rackspace__1__options': 'rax_opt'}, 'state': 'state',\n 'new_task_state': 'task', 'bandwidth': {'private': {'bw_in': 0,\n 'bw_out': 264902}, 'public': {'bw_in': 0, 'bw_out': 1697240969}}}}\n",
"<function token>\n"
] | false |
98,368 |
1d3ea427cc52f8bd6be63deb43a4e34f723f68c4
|
import BaseClasses as bc
class RNN(bc.GraphStructure):
rnn_cell_from_tensorflow = ['GRUCell', 'LSTMCell']
custom_rnn_cell = ['GRUPartialCell']
def __init__(self, dtype):
super(RNN, self).__init__(dtype)
pass
|
[
"import BaseClasses as bc\n\n\nclass RNN(bc.GraphStructure):\n\n rnn_cell_from_tensorflow = ['GRUCell', 'LSTMCell']\n custom_rnn_cell = ['GRUPartialCell']\n\n def __init__(self, dtype):\n super(RNN, self).__init__(dtype)\n pass\n\n\n",
"import BaseClasses as bc\n\n\nclass RNN(bc.GraphStructure):\n rnn_cell_from_tensorflow = ['GRUCell', 'LSTMCell']\n custom_rnn_cell = ['GRUPartialCell']\n\n def __init__(self, dtype):\n super(RNN, self).__init__(dtype)\n pass\n",
"<import token>\n\n\nclass RNN(bc.GraphStructure):\n rnn_cell_from_tensorflow = ['GRUCell', 'LSTMCell']\n custom_rnn_cell = ['GRUPartialCell']\n\n def __init__(self, dtype):\n super(RNN, self).__init__(dtype)\n pass\n",
"<import token>\n\n\nclass RNN(bc.GraphStructure):\n <assignment token>\n <assignment token>\n\n def __init__(self, dtype):\n super(RNN, self).__init__(dtype)\n pass\n",
"<import token>\n\n\nclass RNN(bc.GraphStructure):\n <assignment token>\n <assignment token>\n <function token>\n",
"<import token>\n<class token>\n"
] | false |
98,369 |
3d565d3316c05bdb5c37f45ea2b362bf7ee3b7c1
|
with open("in.txt","r") as f:
lines = f.readlines()
l1 = lines[0].strip()
l2 = lines[1].strip()
ct = 0
for i in range(0,len(l1)):
if l1[i] != l2[i]:
ct += 1
print ct
|
[
"with open(\"in.txt\",\"r\") as f:\n\tlines = f.readlines()\n\tl1 = lines[0].strip()\n\tl2 = lines[1].strip()\n\tct = 0\n\tfor i in range(0,len(l1)):\n\t\tif l1[i] != l2[i]:\n\t\t\tct += 1\n\tprint ct\t\n"
] | true |
98,370 |
df6b3b2f64ebf5662c1703e51bf262f39a1a7ead
|
import pygame
class Settings(object):
def __init__(self):
self.image = pygame.image.load("images/map.jpg")
self.map_size = 3
self.block_space = 5
self.block_width = (self.image.get_rect().width - (self.map_size - 1) * self.block_space) // self.map_size
self.block_height = (self.image.get_rect().height - (self.map_size - 1) * self.block_space) // self.map_size
self.screen_size = ((self.block_width + self.block_space) * self.map_size + self.block_space,
(self.block_height + self.block_space) * self.map_size + self.block_space)
self.background = (0, 0, 0)
self.font_color = (255, 255, 255)
self.font = pygame.font.SysFont(None, 48)
self.button_color = (0, 255, 0)
|
[
"import pygame\n\n\nclass Settings(object):\n def __init__(self):\n self.image = pygame.image.load(\"images/map.jpg\")\n\n self.map_size = 3\n\n self.block_space = 5\n self.block_width = (self.image.get_rect().width - (self.map_size - 1) * self.block_space) // self.map_size\n self.block_height = (self.image.get_rect().height - (self.map_size - 1) * self.block_space) // self.map_size\n\n self.screen_size = ((self.block_width + self.block_space) * self.map_size + self.block_space,\n (self.block_height + self.block_space) * self.map_size + self.block_space)\n self.background = (0, 0, 0)\n\n self.font_color = (255, 255, 255)\n self.font = pygame.font.SysFont(None, 48)\n\n self.button_color = (0, 255, 0)\n",
"import pygame\n\n\nclass Settings(object):\n\n def __init__(self):\n self.image = pygame.image.load('images/map.jpg')\n self.map_size = 3\n self.block_space = 5\n self.block_width = (self.image.get_rect().width - (self.map_size - \n 1) * self.block_space) // self.map_size\n self.block_height = (self.image.get_rect().height - (self.map_size -\n 1) * self.block_space) // self.map_size\n self.screen_size = (self.block_width + self.block_space\n ) * self.map_size + self.block_space, (self.block_height + self\n .block_space) * self.map_size + self.block_space\n self.background = 0, 0, 0\n self.font_color = 255, 255, 255\n self.font = pygame.font.SysFont(None, 48)\n self.button_color = 0, 255, 0\n",
"<import token>\n\n\nclass Settings(object):\n\n def __init__(self):\n self.image = pygame.image.load('images/map.jpg')\n self.map_size = 3\n self.block_space = 5\n self.block_width = (self.image.get_rect().width - (self.map_size - \n 1) * self.block_space) // self.map_size\n self.block_height = (self.image.get_rect().height - (self.map_size -\n 1) * self.block_space) // self.map_size\n self.screen_size = (self.block_width + self.block_space\n ) * self.map_size + self.block_space, (self.block_height + self\n .block_space) * self.map_size + self.block_space\n self.background = 0, 0, 0\n self.font_color = 255, 255, 255\n self.font = pygame.font.SysFont(None, 48)\n self.button_color = 0, 255, 0\n",
"<import token>\n\n\nclass Settings(object):\n <function token>\n",
"<import token>\n<class token>\n"
] | false |
98,371 |
108149b71c98ba58395adfef347a77f5b922abc4
|
#cultivandostrings.py
x = int(input())
while x != 0:
x = int(input())
|
[
"#cultivandostrings.py\r\n\r\nx = int(input())\r\n\r\nwhile x != 0:\r\n\tx = int(input())",
"x = int(input())\nwhile x != 0:\n x = int(input())\n",
"<assignment token>\nwhile x != 0:\n x = int(input())\n",
"<assignment token>\n<code token>\n"
] | false |
98,372 |
a4d0675e0c8309e1902054f41f2d04fadacf5224
|
# AlCaReco for track based alignment using min. bias events
import FWCore.ParameterSet.Config as cms
import HLTrigger.HLTfilters.hltHighLevel_cfi
ALCARECOTkAlBeamHaloHLT = HLTrigger.HLTfilters.hltHighLevel_cfi.hltHighLevel.clone(
andOr = True, ## choose logical OR between Triggerbits
eventSetupPathsKey = 'TkAlBeamHalo',
throw = False # tolerate triggers not available
)
# DCS partitions
# "EBp","EBm","EEp","EEm","HBHEa","HBHEb","HBHEc","HF","HO","RPC"
# "DT0","DTp","DTm","CSCp","CSCm","CASTOR","TIBTID","TOB","TECp","TECm"
# "BPIX","FPIX","ESp","ESm"
import DPGAnalysis.Skims.skim_detstatus_cfi
ALCARECOTkAlBeamHaloDCSFilter = DPGAnalysis.Skims.skim_detstatus_cfi.dcsstatus.clone(
DetectorType = cms.vstring('TIBTID','TOB','TECp','TECm','BPIX','FPIX'),
ApplyFilter = cms.bool(True),
AndOr = cms.bool(True),
DebugOn = cms.untracked.bool(False)
)
import Alignment.CommonAlignmentProducer.AlignmentTrackSelector_cfi
ALCARECOTkAlBeamHalo = Alignment.CommonAlignmentProducer.AlignmentTrackSelector_cfi.AlignmentTrackSelector.clone()
ALCARECOTkAlBeamHalo.src = 'beamhaloTracks'
ALCARECOTkAlBeamHalo.filter = True ##do not store empty events
ALCARECOTkAlBeamHalo.applyBasicCuts = True
ALCARECOTkAlBeamHalo.ptMin = 0.0 ##GeV
ALCARECOTkAlBeamHalo.etaMin = -9999
ALCARECOTkAlBeamHalo.etaMax = 9999
ALCARECOTkAlBeamHalo.nHitMin = 3
ALCARECOTkAlBeamHalo.GlobalSelector.applyIsolationtest = False
ALCARECOTkAlBeamHalo.GlobalSelector.applyGlobalMuonFilter = False
ALCARECOTkAlBeamHalo.TwoBodyDecaySelector.applyMassrangeFilter = False
ALCARECOTkAlBeamHalo.TwoBodyDecaySelector.applyChargeFilter = False
ALCARECOTkAlBeamHalo.TwoBodyDecaySelector.applyAcoplanarityFilter = False
seqALCARECOTkAlBeamHalo = cms.Sequence(ALCARECOTkAlBeamHaloDCSFilter+ALCARECOTkAlBeamHalo)
|
[
"# AlCaReco for track based alignment using min. bias events\nimport FWCore.ParameterSet.Config as cms\n\nimport HLTrigger.HLTfilters.hltHighLevel_cfi\nALCARECOTkAlBeamHaloHLT = HLTrigger.HLTfilters.hltHighLevel_cfi.hltHighLevel.clone(\n andOr = True, ## choose logical OR between Triggerbits\n eventSetupPathsKey = 'TkAlBeamHalo',\n throw = False # tolerate triggers not available\n )\n\n# DCS partitions\n# \"EBp\",\"EBm\",\"EEp\",\"EEm\",\"HBHEa\",\"HBHEb\",\"HBHEc\",\"HF\",\"HO\",\"RPC\"\n# \"DT0\",\"DTp\",\"DTm\",\"CSCp\",\"CSCm\",\"CASTOR\",\"TIBTID\",\"TOB\",\"TECp\",\"TECm\"\n# \"BPIX\",\"FPIX\",\"ESp\",\"ESm\"\nimport DPGAnalysis.Skims.skim_detstatus_cfi\nALCARECOTkAlBeamHaloDCSFilter = DPGAnalysis.Skims.skim_detstatus_cfi.dcsstatus.clone(\n DetectorType = cms.vstring('TIBTID','TOB','TECp','TECm','BPIX','FPIX'),\n ApplyFilter = cms.bool(True),\n AndOr = cms.bool(True),\n DebugOn = cms.untracked.bool(False)\n)\n\nimport Alignment.CommonAlignmentProducer.AlignmentTrackSelector_cfi\nALCARECOTkAlBeamHalo = Alignment.CommonAlignmentProducer.AlignmentTrackSelector_cfi.AlignmentTrackSelector.clone()\n\nALCARECOTkAlBeamHalo.src = 'beamhaloTracks'\nALCARECOTkAlBeamHalo.filter = True ##do not store empty events\n\nALCARECOTkAlBeamHalo.applyBasicCuts = True\nALCARECOTkAlBeamHalo.ptMin = 0.0 ##GeV\n\nALCARECOTkAlBeamHalo.etaMin = -9999\nALCARECOTkAlBeamHalo.etaMax = 9999\nALCARECOTkAlBeamHalo.nHitMin = 3\nALCARECOTkAlBeamHalo.GlobalSelector.applyIsolationtest = False\nALCARECOTkAlBeamHalo.GlobalSelector.applyGlobalMuonFilter = False\nALCARECOTkAlBeamHalo.TwoBodyDecaySelector.applyMassrangeFilter = False\nALCARECOTkAlBeamHalo.TwoBodyDecaySelector.applyChargeFilter = False\nALCARECOTkAlBeamHalo.TwoBodyDecaySelector.applyAcoplanarityFilter = False\n\nseqALCARECOTkAlBeamHalo = cms.Sequence(ALCARECOTkAlBeamHaloDCSFilter+ALCARECOTkAlBeamHalo)\n",
"import FWCore.ParameterSet.Config as cms\nimport HLTrigger.HLTfilters.hltHighLevel_cfi\nALCARECOTkAlBeamHaloHLT = (HLTrigger.HLTfilters.hltHighLevel_cfi.\n hltHighLevel.clone(andOr=True, eventSetupPathsKey='TkAlBeamHalo', throw\n =False))\nimport DPGAnalysis.Skims.skim_detstatus_cfi\nALCARECOTkAlBeamHaloDCSFilter = (DPGAnalysis.Skims.skim_detstatus_cfi.\n dcsstatus.clone(DetectorType=cms.vstring('TIBTID', 'TOB', 'TECp',\n 'TECm', 'BPIX', 'FPIX'), ApplyFilter=cms.bool(True), AndOr=cms.bool(\n True), DebugOn=cms.untracked.bool(False)))\nimport Alignment.CommonAlignmentProducer.AlignmentTrackSelector_cfi\nALCARECOTkAlBeamHalo = (Alignment.CommonAlignmentProducer.\n AlignmentTrackSelector_cfi.AlignmentTrackSelector.clone())\nALCARECOTkAlBeamHalo.src = 'beamhaloTracks'\nALCARECOTkAlBeamHalo.filter = True\nALCARECOTkAlBeamHalo.applyBasicCuts = True\nALCARECOTkAlBeamHalo.ptMin = 0.0\nALCARECOTkAlBeamHalo.etaMin = -9999\nALCARECOTkAlBeamHalo.etaMax = 9999\nALCARECOTkAlBeamHalo.nHitMin = 3\nALCARECOTkAlBeamHalo.GlobalSelector.applyIsolationtest = False\nALCARECOTkAlBeamHalo.GlobalSelector.applyGlobalMuonFilter = False\nALCARECOTkAlBeamHalo.TwoBodyDecaySelector.applyMassrangeFilter = False\nALCARECOTkAlBeamHalo.TwoBodyDecaySelector.applyChargeFilter = False\nALCARECOTkAlBeamHalo.TwoBodyDecaySelector.applyAcoplanarityFilter = False\nseqALCARECOTkAlBeamHalo = cms.Sequence(ALCARECOTkAlBeamHaloDCSFilter +\n ALCARECOTkAlBeamHalo)\n",
"<import token>\nALCARECOTkAlBeamHaloHLT = (HLTrigger.HLTfilters.hltHighLevel_cfi.\n hltHighLevel.clone(andOr=True, eventSetupPathsKey='TkAlBeamHalo', throw\n =False))\n<import token>\nALCARECOTkAlBeamHaloDCSFilter = (DPGAnalysis.Skims.skim_detstatus_cfi.\n dcsstatus.clone(DetectorType=cms.vstring('TIBTID', 'TOB', 'TECp',\n 'TECm', 'BPIX', 'FPIX'), ApplyFilter=cms.bool(True), AndOr=cms.bool(\n True), DebugOn=cms.untracked.bool(False)))\n<import token>\nALCARECOTkAlBeamHalo = (Alignment.CommonAlignmentProducer.\n AlignmentTrackSelector_cfi.AlignmentTrackSelector.clone())\nALCARECOTkAlBeamHalo.src = 'beamhaloTracks'\nALCARECOTkAlBeamHalo.filter = True\nALCARECOTkAlBeamHalo.applyBasicCuts = True\nALCARECOTkAlBeamHalo.ptMin = 0.0\nALCARECOTkAlBeamHalo.etaMin = -9999\nALCARECOTkAlBeamHalo.etaMax = 9999\nALCARECOTkAlBeamHalo.nHitMin = 3\nALCARECOTkAlBeamHalo.GlobalSelector.applyIsolationtest = False\nALCARECOTkAlBeamHalo.GlobalSelector.applyGlobalMuonFilter = False\nALCARECOTkAlBeamHalo.TwoBodyDecaySelector.applyMassrangeFilter = False\nALCARECOTkAlBeamHalo.TwoBodyDecaySelector.applyChargeFilter = False\nALCARECOTkAlBeamHalo.TwoBodyDecaySelector.applyAcoplanarityFilter = False\nseqALCARECOTkAlBeamHalo = cms.Sequence(ALCARECOTkAlBeamHaloDCSFilter +\n ALCARECOTkAlBeamHalo)\n",
"<import token>\n<assignment token>\n<import token>\n<assignment token>\n<import token>\n<assignment token>\n"
] | false |
98,373 |
710deae8ea3a1c4a76d1ad9c08ada4f188979df8
|
from django.shortcuts import render_to_response
from django.http import HttpResponse
from django.http import HttpResponseRedirect
from suggestion.models import Filetype, Tool, StandaloneTool, ErgatisTool, GalaxyTool, ToolFiletype
from forms import UserQueryForm, FiletypeForm, ToolForm, ToolFiletypeForm
from django.core.context_processors import csrf
# Create your views here.
def biotools(request):
#diplays tools loaded into the database
return render_to_response('biotools.html', {'tools': Tool.objects.order_by('name')})
def biotool(request, tool_id=1):
#displays the attributes of the selected tool
toolfiletype = ToolFiletype.objects.filter(tool_id__in=tool_id)
return render_to_response('biotool.html', {'tool': Tool.objects.get(id=tool_id), 'toolfileype': toolfiletype})
def add_biotool(request):
if request.POST:
form = ToolForm(request.POST, request.FILES)
if form.is_valid():
form.save()
return HttpResponseRedirect('/biotools/all')
else:
form = ToolForm()
args = {}
args.update(csrf(request))
args['form'] = form
return render_to_response('add_biotool.html', args)
def user_query(request):
#collects the user's query and stores it for use in 'suggestion' view
if request.POST:
form = UserQueryForm(request.POST)
if form.is_valid():
u_formatname = request.POST.getlist('user_formatname')
request.session['u_formatname'] = u_formatname
return HttpResponseRedirect('/biotools/suggestion', u_formatname)
else:
form = UserQueryForm()
args = {}
args.update(csrf(request))
args['form'] = form
return render_to_response('user_query.html', args)
def suggestion(request, init_input=(), tools=()):
#passes the user's initial file selection from user_query view into this view
init_input = request.session['u_formatname']
user_input = Filetype.objects.filter(id__in=init_input).values_list('name', flat=True)
#Gathers the tool id's that are entered as 'input' and ONLY take the selected filetype as input
toolfiletype_inputs_list = ToolFiletype.objects.filter(io_type = "i", required=False).filter(filetype_id__in = init_input).values_list('tool_id')
#Returns the name(s) of the tool(s) that only need selected filetype to run
tools = Tool.objects.filter(id__in=toolfiletype_inputs_list).values_list('name', flat=True).order_by('name')
#Find additional possible filetypes could added to suggestion
add_tools = ToolFiletype.objects.filter(io_type="i", required=True).filter(filetype_id__in = init_input).values_list('tool_id')
add_tool_names = Tool.objects.filter(id__in=add_tools).values_list('name', flat=True)
#Gathers entries that require filetypes in addition to the selected filetype
toolfiletype_entries = ToolFiletype.objects.filter(io_type="i", required=True).exclude(filetype_id__in= init_input)
return render_to_response('suggestion.html', {'user_input': user_input, 'tools': tools, 'add_tool_names': add_tool_names, 'toolfiletype_entries': toolfiletype_entries})
|
[
"from django.shortcuts import render_to_response\nfrom django.http import HttpResponse\nfrom django.http import HttpResponseRedirect\nfrom suggestion.models import Filetype, Tool, StandaloneTool, ErgatisTool, GalaxyTool, ToolFiletype\nfrom forms import UserQueryForm, FiletypeForm, ToolForm, ToolFiletypeForm\nfrom django.core.context_processors import csrf\n\n\n# Create your views here.\n\ndef biotools(request):\n\t#diplays tools loaded into the database\n\treturn render_to_response('biotools.html', {'tools': Tool.objects.order_by('name')})\n\n\ndef biotool(request, tool_id=1):\n\t#displays the attributes of the selected tool\n\ttoolfiletype = ToolFiletype.objects.filter(tool_id__in=tool_id)\n\treturn render_to_response('biotool.html', {'tool': Tool.objects.get(id=tool_id), 'toolfileype': toolfiletype})\n\n\ndef add_biotool(request):\n\tif request.POST:\n\t form = ToolForm(request.POST, request.FILES)\n\t if form.is_valid():\n\t\tform.save()\n\n\t\treturn HttpResponseRedirect('/biotools/all')\n\telse:\n\t form = ToolForm()\n\n\targs = {}\n\targs.update(csrf(request))\n\t\n\targs['form'] = form\n\n\treturn render_to_response('add_biotool.html', args)\n\n\ndef user_query(request):\n\t#collects the user's query and stores it for use in 'suggestion' view\n\tif request.POST:\n\t\tform = UserQueryForm(request.POST)\n\t\tif form.is_valid():\n\t\t\tu_formatname = request.POST.getlist('user_formatname')\t\t\t\n\t\t\trequest.session['u_formatname'] = u_formatname\n\t\t\t\n\t\t\treturn HttpResponseRedirect('/biotools/suggestion', u_formatname)\n\telse:\n\t\tform = UserQueryForm()\n\n\targs = {}\n\targs.update(csrf(request))\n\t\t\n\targs['form'] = form\n\n\treturn render_to_response('user_query.html', args)\n\n\ndef suggestion(request, init_input=(), tools=()):\n\t#passes the user's initial file selection from user_query view into this view\n\tinit_input = request.session['u_formatname']\n\tuser_input = Filetype.objects.filter(id__in=init_input).values_list('name', flat=True)\n\t\n\t#Gathers the tool id's that are entered as 'input' and ONLY take the selected filetype as input\n\ttoolfiletype_inputs_list = ToolFiletype.objects.filter(io_type = \"i\", required=False).filter(filetype_id__in = init_input).values_list('tool_id')\n\n\t#Returns the name(s) of the tool(s) that only need selected filetype to run\n\ttools = Tool.objects.filter(id__in=toolfiletype_inputs_list).values_list('name', flat=True).order_by('name')\n\n\t#Find additional possible filetypes could added to suggestion \n\tadd_tools = ToolFiletype.objects.filter(io_type=\"i\", required=True).filter(filetype_id__in = init_input).values_list('tool_id')\n\tadd_tool_names = Tool.objects.filter(id__in=add_tools).values_list('name', flat=True)\n\n\t#Gathers entries that require filetypes in addition to the selected filetype \n\ttoolfiletype_entries = ToolFiletype.objects.filter(io_type=\"i\", required=True).exclude(filetype_id__in= init_input)\n\n\treturn render_to_response('suggestion.html', {'user_input': user_input, 'tools': tools, 'add_tool_names': add_tool_names, 'toolfiletype_entries': toolfiletype_entries})\n\n\n\n\n\n\n"
] | true |
98,374 |
2f397ac6518b2e3a7282e1472bb053e0eebd3eba
|
class ListNode:
def __init__(self, x):
self.val = x
self.next = None
class Solution:
def addTwoNumbers(self, l1: ListNode, l2: ListNode) -> ListNode:
# 核心思想:
# 类似加法的原理, 我们从低位(链条第一位)开始,同位相加,满10高位+1
# 当两个链表和进位都为0时,则退出
# 设置头节点 ans = ListNode(0)
# 设置一个中间变量tmp,tmp = ans
# tmp和ans此时同时指向 ListNode(0),共用一片内存空间
# 在计算一位加法完成后,得出值为x,则tmp = ans = ListNode(x)
# 设置tmp的下一个节点tmp.next,并令其值为ListNode(0) ,即ListNode(x).next= ListNode(0)
# 即tmp.next = ListNode(0) ,tmp和ans共有一片内存空间,则ans.next=ListNode(0)
# tmp向后移动,令 tmp = tmp.next,此时ans和tmp不相等了,ans.next = tmp
# 再进行依次加法运算,得出值为x1,则tmp = ListNode(x1)
# 设置tmp的下一个节点tmp.next,并令其值为ListNode(0)
# 即tmp.next = ListNode(0),即ListNode(x1).next= ListNode(0)
# 再使tmp向后移动,令 tmp = tmp.next,
# 此时构建出链表:ListNode(x)-->ListNode(x1)-->ListNode(0)
# 先设置tep的下一个节点的值,再令tmp = tmp.next,tmp向后移动
# 以此类推构建出一条列表
ans = ListNode(0) # 头结点,无存储,指向链表第一个结点
tmp = ans # 初始化链表结点
tmpsum = 0 # 初始化 进一 的数
while True:
# 依次遍历l1 l2,对应位相加
if l1 != None:
tmpsum += l1.val
l1 = l1.next
if l2 != None:
tmpsum += l2.val
l2 = l2.next
tmp.val = tmpsum % 10 # 除10取余作为当前位的值
tmpsum //= 10 #除10取整,即高位,作为指针的下个结点 进行加法运算
if l1 == None and l2 == None and tmpsum == 0:
break
tmp.next = ListNode(0)
# 指向链表的下一位,这个值随意,在tmp.val = tmpsum % 10会改变这个值
tmp = tmp.next # 更新指针,往后移动
return ans
var1 = ListNode(2)
var2 = ListNode(4)
var3 = ListNode(3)
var1.next = var2
var2.next = var3
# var3.next = None
var4 = ListNode(5)
var5 = ListNode(6)
var6 = ListNode(4)
var4.next = var5
var5.next = var6
# var6.next = None
result = Solution().addTwoNumbers(var1, var4)
print(result.val)
print(result.next.val)
print(result.next.val)
while result:
if result is not None:
print(result.val)
result = result.next
|
[
"class ListNode:\r\n def __init__(self, x):\r\n self.val = x\r\n self.next = None\r\n\r\n\r\nclass Solution:\r\n def addTwoNumbers(self, l1: ListNode, l2: ListNode) -> ListNode:\r\n \r\n # 核心思想:\r\n # 类似加法的原理, 我们从低位(链条第一位)开始,同位相加,满10高位+1\r\n # 当两个链表和进位都为0时,则退出\r\n\r\n # 设置头节点 ans = ListNode(0)\r\n # 设置一个中间变量tmp,tmp = ans\r\n # tmp和ans此时同时指向 ListNode(0),共用一片内存空间\r\n # 在计算一位加法完成后,得出值为x,则tmp = ans = ListNode(x)\r\n # 设置tmp的下一个节点tmp.next,并令其值为ListNode(0) ,即ListNode(x).next= ListNode(0)\r\n # 即tmp.next = ListNode(0) ,tmp和ans共有一片内存空间,则ans.next=ListNode(0) \r\n # tmp向后移动,令 tmp = tmp.next,此时ans和tmp不相等了,ans.next = tmp\r\n \r\n # 再进行依次加法运算,得出值为x1,则tmp = ListNode(x1)\r\n # 设置tmp的下一个节点tmp.next,并令其值为ListNode(0) \r\n # 即tmp.next = ListNode(0),即ListNode(x1).next= ListNode(0)\r\n # 再使tmp向后移动,令 tmp = tmp.next,\r\n\r\n # 此时构建出链表:ListNode(x)-->ListNode(x1)-->ListNode(0) \r\n \r\n # 先设置tep的下一个节点的值,再令tmp = tmp.next,tmp向后移动\r\n # 以此类推构建出一条列表\r\n\r\n\r\n ans = ListNode(0) # 头结点,无存储,指向链表第一个结点\r\n tmp = ans # 初始化链表结点\r\n tmpsum = 0 # 初始化 进一 的数\r\n\r\n while True:\r\n # 依次遍历l1 l2,对应位相加\r\n if l1 != None:\r\n tmpsum += l1.val\r\n l1 = l1.next\r\n if l2 != None:\r\n tmpsum += l2.val\r\n l2 = l2.next\r\n tmp.val = tmpsum % 10 # 除10取余作为当前位的值\r\n tmpsum //= 10 #除10取整,即高位,作为指针的下个结点 进行加法运算\r\n if l1 == None and l2 == None and tmpsum == 0:\r\n break\r\n tmp.next = ListNode(0) \r\n # 指向链表的下一位,这个值随意,在tmp.val = tmpsum % 10会改变这个值\r\n tmp = tmp.next # 更新指针,往后移动\r\n return ans\r\n\r\n\r\n\r\nvar1 = ListNode(2)\r\nvar2 = ListNode(4)\r\nvar3 = ListNode(3)\r\n\r\nvar1.next = var2\r\nvar2.next = var3\r\n# var3.next = None\r\n\r\n\r\nvar4 = ListNode(5)\r\nvar5 = ListNode(6)\r\nvar6 = ListNode(4)\r\n\r\nvar4.next = var5\r\nvar5.next = var6\r\n# var6.next = None\r\n\r\nresult = Solution().addTwoNumbers(var1, var4)\r\n\r\nprint(result.val)\r\nprint(result.next.val)\r\nprint(result.next.val)\r\n\r\nwhile result:\r\n if result is not None:\r\n print(result.val)\r\n result = result.next",
"class ListNode:\n\n def __init__(self, x):\n self.val = x\n self.next = None\n\n\nclass Solution:\n\n def addTwoNumbers(self, l1: ListNode, l2: ListNode) ->ListNode:\n ans = ListNode(0)\n tmp = ans\n tmpsum = 0\n while True:\n if l1 != None:\n tmpsum += l1.val\n l1 = l1.next\n if l2 != None:\n tmpsum += l2.val\n l2 = l2.next\n tmp.val = tmpsum % 10\n tmpsum //= 10\n if l1 == None and l2 == None and tmpsum == 0:\n break\n tmp.next = ListNode(0)\n tmp = tmp.next\n return ans\n\n\nvar1 = ListNode(2)\nvar2 = ListNode(4)\nvar3 = ListNode(3)\nvar1.next = var2\nvar2.next = var3\nvar4 = ListNode(5)\nvar5 = ListNode(6)\nvar6 = ListNode(4)\nvar4.next = var5\nvar5.next = var6\nresult = Solution().addTwoNumbers(var1, var4)\nprint(result.val)\nprint(result.next.val)\nprint(result.next.val)\nwhile result:\n if result is not None:\n print(result.val)\n result = result.next\n",
"class ListNode:\n\n def __init__(self, x):\n self.val = x\n self.next = None\n\n\nclass Solution:\n\n def addTwoNumbers(self, l1: ListNode, l2: ListNode) ->ListNode:\n ans = ListNode(0)\n tmp = ans\n tmpsum = 0\n while True:\n if l1 != None:\n tmpsum += l1.val\n l1 = l1.next\n if l2 != None:\n tmpsum += l2.val\n l2 = l2.next\n tmp.val = tmpsum % 10\n tmpsum //= 10\n if l1 == None and l2 == None and tmpsum == 0:\n break\n tmp.next = ListNode(0)\n tmp = tmp.next\n return ans\n\n\n<assignment token>\nprint(result.val)\nprint(result.next.val)\nprint(result.next.val)\nwhile result:\n if result is not None:\n print(result.val)\n result = result.next\n",
"class ListNode:\n\n def __init__(self, x):\n self.val = x\n self.next = None\n\n\nclass Solution:\n\n def addTwoNumbers(self, l1: ListNode, l2: ListNode) ->ListNode:\n ans = ListNode(0)\n tmp = ans\n tmpsum = 0\n while True:\n if l1 != None:\n tmpsum += l1.val\n l1 = l1.next\n if l2 != None:\n tmpsum += l2.val\n l2 = l2.next\n tmp.val = tmpsum % 10\n tmpsum //= 10\n if l1 == None and l2 == None and tmpsum == 0:\n break\n tmp.next = ListNode(0)\n tmp = tmp.next\n return ans\n\n\n<assignment token>\n<code token>\n",
"class ListNode:\n <function token>\n\n\nclass Solution:\n\n def addTwoNumbers(self, l1: ListNode, l2: ListNode) ->ListNode:\n ans = ListNode(0)\n tmp = ans\n tmpsum = 0\n while True:\n if l1 != None:\n tmpsum += l1.val\n l1 = l1.next\n if l2 != None:\n tmpsum += l2.val\n l2 = l2.next\n tmp.val = tmpsum % 10\n tmpsum //= 10\n if l1 == None and l2 == None and tmpsum == 0:\n break\n tmp.next = ListNode(0)\n tmp = tmp.next\n return ans\n\n\n<assignment token>\n<code token>\n",
"<class token>\n\n\nclass Solution:\n\n def addTwoNumbers(self, l1: ListNode, l2: ListNode) ->ListNode:\n ans = ListNode(0)\n tmp = ans\n tmpsum = 0\n while True:\n if l1 != None:\n tmpsum += l1.val\n l1 = l1.next\n if l2 != None:\n tmpsum += l2.val\n l2 = l2.next\n tmp.val = tmpsum % 10\n tmpsum //= 10\n if l1 == None and l2 == None and tmpsum == 0:\n break\n tmp.next = ListNode(0)\n tmp = tmp.next\n return ans\n\n\n<assignment token>\n<code token>\n",
"<class token>\n\n\nclass Solution:\n <function token>\n\n\n<assignment token>\n<code token>\n",
"<class token>\n<class token>\n<assignment token>\n<code token>\n"
] | false |
98,375 |
0771e64ce4ee1cfdbecd17146f6a5a2a00bf1959
|
suma = 0
for x in range(50):
if x%2==1:
suma = suma + x
print ("La suma de los 50 numeros impares: " + str(suma))
|
[
"\r\nsuma = 0\r\n\r\nfor x in range(50):\r\n if x%2==1:\r\n suma = suma + x\r\nprint (\"La suma de los 50 numeros impares: \" + str(suma))",
"suma = 0\nfor x in range(50):\n if x % 2 == 1:\n suma = suma + x\nprint('La suma de los 50 numeros impares: ' + str(suma))\n",
"<assignment token>\nfor x in range(50):\n if x % 2 == 1:\n suma = suma + x\nprint('La suma de los 50 numeros impares: ' + str(suma))\n",
"<assignment token>\n<code token>\n"
] | false |
98,376 |
3bb2a4d20fbc197362e6dd360a193e400d8cf937
|
from node import *
import maze as mz
import score
import student
import numpy as np
import pandas
import time
import sys
import os
def main():
maze = mz.Maze("maze_2.csv")
now_nd = maze.getStartPoint()
car_dir = Direction.SOUTH
point = score.Scoreboard("UID_score_maze2.csv")
interface = student.interface() #the part of calling student.py was commented out.
if(sys.argv[1] == '0'):
while (1):
#TODO: Impliment your algorithm here and return the UID for evaluation function
ndList = maze.strategy(now_nd,1,1,0.8) #the whole list of nodes should go
get_UID=interface.wait_for_node()
while get_UID == '0':
get_UID=interface.wait_for_node()
print(type(get_UID))
print('UID: ',get_UID) #UID from BT
point.add_UID(get_UID)
print('1 motion done')
for next_nd in ndList: #nd: the node should go to // type : node
act=int(maze.getAction(car_dir,now_nd,next_nd))
print('action: ',act)
interface.send_action(act) #send action
car_dir=now_nd.getDirection(next_nd)
now_nd=next_nd
get_UID=interface.wait_for_node()
while get_UID == '0':
get_UID=interface.wait_for_node()
print(type(get_UID))
print('UID: ',get_UID) #UID from BT
point.add_UID(get_UID)
print('1 motion done')
break
# ================================================
# Basically, you will get a list of nodes and corresponding UID strings after the end of algorithm.
# The function add_UID() would convert the UID string score and add it to the total score.
# In the sample code, we call this function after getting the returned list.
# You may place it to other places, just make sure that all the UID strings you get would be converted.
# ================================================
elif(sys.argv[1] == '1'):
while (1):
#TODO: Implement your algorithm here and return the UID for evaluation function
input_nd = int(input("destination: "))
if(input_nd == 0):
print("end process")
print('')
break
end_nd=maze.nd_dict[input_nd]
ndList = maze.stategy_2(now_nd,end_nd)
for next_nd in ndList: #nd: the node should go to // type : node
interface.send_action(maze.getAction(car_dir,now_nd,next_nd))#send action
car_dir=now_nd.getDirection(next_nd)
now_nd=next_nd
get_UID=interface.wait_for_node()
while get_UID == '0':
get_UID=interface.wait_for_node()
print(type(get_UID))
print('UID: ',get_UID) #UID from BT
point.add_UID(get_UID)
print('1 motion done')
"""
node = 0
while(not node):
node = interface.wait_for_node()
interface.end_process()
"""
print("complete")
print("")
a = point.getCurrentScore()
print("The total score: ", a)
if __name__=='__main__':
main()
|
[
"from node import *\nimport maze as mz\nimport score\nimport student\n\nimport numpy as np\nimport pandas\nimport time\nimport sys\nimport os\n\ndef main():\n maze = mz.Maze(\"maze_2.csv\")\n now_nd = maze.getStartPoint()\n car_dir = Direction.SOUTH\n point = score.Scoreboard(\"UID_score_maze2.csv\")\n interface = student.interface() #the part of calling student.py was commented out.\n\n if(sys.argv[1] == '0'):\n\n while (1):\n\n #TODO: Impliment your algorithm here and return the UID for evaluation function\n ndList = maze.strategy(now_nd,1,1,0.8) #the whole list of nodes should go\n get_UID=interface.wait_for_node()\n while get_UID == '0':\n get_UID=interface.wait_for_node()\n print(type(get_UID))\n print('UID: ',get_UID) #UID from BT\n point.add_UID(get_UID)\n print('1 motion done')\n\n for next_nd in ndList: #nd: the node should go to // type : node\n act=int(maze.getAction(car_dir,now_nd,next_nd))\n print('action: ',act)\n interface.send_action(act) #send action\n car_dir=now_nd.getDirection(next_nd)\n now_nd=next_nd\n get_UID=interface.wait_for_node()\n while get_UID == '0':\n get_UID=interface.wait_for_node()\n print(type(get_UID))\n print('UID: ',get_UID) #UID from BT\n point.add_UID(get_UID)\n print('1 motion done')\n break\n\n # ================================================\n # Basically, you will get a list of nodes and corresponding UID strings after the end of algorithm.\n\t\t\t# The function add_UID() would convert the UID string score and add it to the total score.\n\t\t\t# In the sample code, we call this function after getting the returned list. \n # You may place it to other places, just make sure that all the UID strings you get would be converted.\n # ================================================\n \n\n elif(sys.argv[1] == '1'):\n\n while (1):\n\n #TODO: Implement your algorithm here and return the UID for evaluation function\n input_nd = int(input(\"destination: \"))\n \n if(input_nd == 0):\n \tprint(\"end process\")\n \tprint('')\n \tbreak\n end_nd=maze.nd_dict[input_nd]\n ndList = maze.stategy_2(now_nd,end_nd)\n\n for next_nd in ndList: #nd: the node should go to // type : node\n interface.send_action(maze.getAction(car_dir,now_nd,next_nd))#send action\n car_dir=now_nd.getDirection(next_nd)\n now_nd=next_nd\n get_UID=interface.wait_for_node()\n while get_UID == '0':\n get_UID=interface.wait_for_node()\n print(type(get_UID))\n print('UID: ',get_UID) #UID from BT\n point.add_UID(get_UID)\n print('1 motion done')\n\n \"\"\"\n node = 0\n while(not node):\n node = interface.wait_for_node()\n\n interface.end_process()\n \"\"\"\n print(\"complete\")\n print(\"\")\n a = point.getCurrentScore()\n print(\"The total score: \", a)\n\nif __name__=='__main__':\n main()\n",
"from node import *\nimport maze as mz\nimport score\nimport student\nimport numpy as np\nimport pandas\nimport time\nimport sys\nimport os\n\n\ndef main():\n maze = mz.Maze('maze_2.csv')\n now_nd = maze.getStartPoint()\n car_dir = Direction.SOUTH\n point = score.Scoreboard('UID_score_maze2.csv')\n interface = student.interface()\n if sys.argv[1] == '0':\n while 1:\n ndList = maze.strategy(now_nd, 1, 1, 0.8)\n get_UID = interface.wait_for_node()\n while get_UID == '0':\n get_UID = interface.wait_for_node()\n print(type(get_UID))\n print('UID: ', get_UID)\n point.add_UID(get_UID)\n print('1 motion done')\n for next_nd in ndList:\n act = int(maze.getAction(car_dir, now_nd, next_nd))\n print('action: ', act)\n interface.send_action(act)\n car_dir = now_nd.getDirection(next_nd)\n now_nd = next_nd\n get_UID = interface.wait_for_node()\n while get_UID == '0':\n get_UID = interface.wait_for_node()\n print(type(get_UID))\n print('UID: ', get_UID)\n point.add_UID(get_UID)\n print('1 motion done')\n break\n elif sys.argv[1] == '1':\n while 1:\n input_nd = int(input('destination: '))\n if input_nd == 0:\n print('end process')\n print('')\n break\n end_nd = maze.nd_dict[input_nd]\n ndList = maze.stategy_2(now_nd, end_nd)\n for next_nd in ndList:\n interface.send_action(maze.getAction(car_dir, now_nd, next_nd))\n car_dir = now_nd.getDirection(next_nd)\n now_nd = next_nd\n get_UID = interface.wait_for_node()\n while get_UID == '0':\n get_UID = interface.wait_for_node()\n print(type(get_UID))\n print('UID: ', get_UID)\n point.add_UID(get_UID)\n print('1 motion done')\n \"\"\"\n node = 0\n while(not node):\n node = interface.wait_for_node()\n\n interface.end_process()\n \"\"\"\n print('complete')\n print('')\n a = point.getCurrentScore()\n print('The total score: ', a)\n\n\nif __name__ == '__main__':\n main()\n",
"<import token>\n\n\ndef main():\n maze = mz.Maze('maze_2.csv')\n now_nd = maze.getStartPoint()\n car_dir = Direction.SOUTH\n point = score.Scoreboard('UID_score_maze2.csv')\n interface = student.interface()\n if sys.argv[1] == '0':\n while 1:\n ndList = maze.strategy(now_nd, 1, 1, 0.8)\n get_UID = interface.wait_for_node()\n while get_UID == '0':\n get_UID = interface.wait_for_node()\n print(type(get_UID))\n print('UID: ', get_UID)\n point.add_UID(get_UID)\n print('1 motion done')\n for next_nd in ndList:\n act = int(maze.getAction(car_dir, now_nd, next_nd))\n print('action: ', act)\n interface.send_action(act)\n car_dir = now_nd.getDirection(next_nd)\n now_nd = next_nd\n get_UID = interface.wait_for_node()\n while get_UID == '0':\n get_UID = interface.wait_for_node()\n print(type(get_UID))\n print('UID: ', get_UID)\n point.add_UID(get_UID)\n print('1 motion done')\n break\n elif sys.argv[1] == '1':\n while 1:\n input_nd = int(input('destination: '))\n if input_nd == 0:\n print('end process')\n print('')\n break\n end_nd = maze.nd_dict[input_nd]\n ndList = maze.stategy_2(now_nd, end_nd)\n for next_nd in ndList:\n interface.send_action(maze.getAction(car_dir, now_nd, next_nd))\n car_dir = now_nd.getDirection(next_nd)\n now_nd = next_nd\n get_UID = interface.wait_for_node()\n while get_UID == '0':\n get_UID = interface.wait_for_node()\n print(type(get_UID))\n print('UID: ', get_UID)\n point.add_UID(get_UID)\n print('1 motion done')\n \"\"\"\n node = 0\n while(not node):\n node = interface.wait_for_node()\n\n interface.end_process()\n \"\"\"\n print('complete')\n print('')\n a = point.getCurrentScore()\n print('The total score: ', a)\n\n\nif __name__ == '__main__':\n main()\n",
"<import token>\n\n\ndef main():\n maze = mz.Maze('maze_2.csv')\n now_nd = maze.getStartPoint()\n car_dir = Direction.SOUTH\n point = score.Scoreboard('UID_score_maze2.csv')\n interface = student.interface()\n if sys.argv[1] == '0':\n while 1:\n ndList = maze.strategy(now_nd, 1, 1, 0.8)\n get_UID = interface.wait_for_node()\n while get_UID == '0':\n get_UID = interface.wait_for_node()\n print(type(get_UID))\n print('UID: ', get_UID)\n point.add_UID(get_UID)\n print('1 motion done')\n for next_nd in ndList:\n act = int(maze.getAction(car_dir, now_nd, next_nd))\n print('action: ', act)\n interface.send_action(act)\n car_dir = now_nd.getDirection(next_nd)\n now_nd = next_nd\n get_UID = interface.wait_for_node()\n while get_UID == '0':\n get_UID = interface.wait_for_node()\n print(type(get_UID))\n print('UID: ', get_UID)\n point.add_UID(get_UID)\n print('1 motion done')\n break\n elif sys.argv[1] == '1':\n while 1:\n input_nd = int(input('destination: '))\n if input_nd == 0:\n print('end process')\n print('')\n break\n end_nd = maze.nd_dict[input_nd]\n ndList = maze.stategy_2(now_nd, end_nd)\n for next_nd in ndList:\n interface.send_action(maze.getAction(car_dir, now_nd, next_nd))\n car_dir = now_nd.getDirection(next_nd)\n now_nd = next_nd\n get_UID = interface.wait_for_node()\n while get_UID == '0':\n get_UID = interface.wait_for_node()\n print(type(get_UID))\n print('UID: ', get_UID)\n point.add_UID(get_UID)\n print('1 motion done')\n \"\"\"\n node = 0\n while(not node):\n node = interface.wait_for_node()\n\n interface.end_process()\n \"\"\"\n print('complete')\n print('')\n a = point.getCurrentScore()\n print('The total score: ', a)\n\n\n<code token>\n",
"<import token>\n<function token>\n<code token>\n"
] | false |
98,377 |
e84caf514c5d5802f5cee9d28882eebcbd47e6ca
|
#Design an algorithm that finds the maximum positive integer input by a user.
#The user repeatedly inputs numbers until a negative value is entered
num_int = int(input("Input a number: ")) # Do not change this line
# Fill in the missing code
# viljum geyma stærsta gildið og prenta það út í lokin þegar notandinn setur inn neikvæða tölu
max_int = 0
while num_int > 0:
if num_int > max_int:
max_int = num_int
num_int = int(input("Input a number: "))
print("The maximum is", max_int) # Do not change this line
|
[
"#Design an algorithm that finds the maximum positive integer input by a user. \n#The user repeatedly inputs numbers until a negative value is entered\n\n\nnum_int = int(input(\"Input a number: \")) # Do not change this line\n# Fill in the missing code\n\n# viljum geyma stærsta gildið og prenta það út í lokin þegar notandinn setur inn neikvæða tölu\nmax_int = 0\n\nwhile num_int > 0:\n if num_int > max_int:\n max_int = num_int\n num_int = int(input(\"Input a number: \")) \n \n\nprint(\"The maximum is\", max_int) # Do not change this line\n",
"num_int = int(input('Input a number: '))\nmax_int = 0\nwhile num_int > 0:\n if num_int > max_int:\n max_int = num_int\n num_int = int(input('Input a number: '))\nprint('The maximum is', max_int)\n",
"<assignment token>\nwhile num_int > 0:\n if num_int > max_int:\n max_int = num_int\n num_int = int(input('Input a number: '))\nprint('The maximum is', max_int)\n",
"<assignment token>\n<code token>\n"
] | false |
98,378 |
c244a5934c8cbe55b85b8e8e9c8de19b0ae00a7f
|
/Users/allo0o2a/anaconda/lib/python3.6/shutil.py
|
[
"/Users/allo0o2a/anaconda/lib/python3.6/shutil.py"
] | true |
98,379 |
c5db8be1de3aa04c7514e64792672217cd00f507
|
#!/usr/bin/env python3
import sys
import numpy as np
import struct
filename = sys.argv[1]
with open(filename, 'rb') as f:
ndim, cell_size, ivar_min, ivar_max = struct.unpack('4i', f.read(16))
cells = np.fromfile(f, dtype=np.float64)
cells_shape = [cell_size, cell_size, cell_size, ivar_max-ivar_min+1]
if (ndim==1):
cells_shape[0] = 1
cells_shape[1] = 1
if (ndim==2):
cells_shape[0] = 1
cells = cells.reshape(cells_shape, order='F')
if filename.endswith('.dat'):
filename = filename[:-4]
filename = filename + '.npy'
np.save(filename, cells)
|
[
"#!/usr/bin/env python3\n\nimport sys\nimport numpy as np\nimport struct\n\nfilename = sys.argv[1]\n\nwith open(filename, 'rb') as f:\n ndim, cell_size, ivar_min, ivar_max = struct.unpack('4i', f.read(16))\n \n cells = np.fromfile(f, dtype=np.float64)\n \ncells_shape = [cell_size, cell_size, cell_size, ivar_max-ivar_min+1]\nif (ndim==1):\n cells_shape[0] = 1\n cells_shape[1] = 1\nif (ndim==2):\n cells_shape[0] = 1\ncells = cells.reshape(cells_shape, order='F')\n\nif filename.endswith('.dat'):\n filename = filename[:-4]\n\nfilename = filename + '.npy'\n\nnp.save(filename, cells)\n",
"import sys\nimport numpy as np\nimport struct\nfilename = sys.argv[1]\nwith open(filename, 'rb') as f:\n ndim, cell_size, ivar_min, ivar_max = struct.unpack('4i', f.read(16))\n cells = np.fromfile(f, dtype=np.float64)\ncells_shape = [cell_size, cell_size, cell_size, ivar_max - ivar_min + 1]\nif ndim == 1:\n cells_shape[0] = 1\n cells_shape[1] = 1\nif ndim == 2:\n cells_shape[0] = 1\ncells = cells.reshape(cells_shape, order='F')\nif filename.endswith('.dat'):\n filename = filename[:-4]\nfilename = filename + '.npy'\nnp.save(filename, cells)\n",
"<import token>\nfilename = sys.argv[1]\nwith open(filename, 'rb') as f:\n ndim, cell_size, ivar_min, ivar_max = struct.unpack('4i', f.read(16))\n cells = np.fromfile(f, dtype=np.float64)\ncells_shape = [cell_size, cell_size, cell_size, ivar_max - ivar_min + 1]\nif ndim == 1:\n cells_shape[0] = 1\n cells_shape[1] = 1\nif ndim == 2:\n cells_shape[0] = 1\ncells = cells.reshape(cells_shape, order='F')\nif filename.endswith('.dat'):\n filename = filename[:-4]\nfilename = filename + '.npy'\nnp.save(filename, cells)\n",
"<import token>\n<assignment token>\nwith open(filename, 'rb') as f:\n ndim, cell_size, ivar_min, ivar_max = struct.unpack('4i', f.read(16))\n cells = np.fromfile(f, dtype=np.float64)\n<assignment token>\nif ndim == 1:\n cells_shape[0] = 1\n cells_shape[1] = 1\nif ndim == 2:\n cells_shape[0] = 1\n<assignment token>\nif filename.endswith('.dat'):\n filename = filename[:-4]\n<assignment token>\nnp.save(filename, cells)\n",
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n"
] | false |
98,380 |
9db2c581e38b53653672eb8773b0c3b46ba07d22
|
from rest_framework import serializers
from .models import support_team, configure_item, application
class STSerializer(serializers.ModelSerializer):
class Meta:
model = support_team
fields = '__all__'
class CISerializer(serializers.ModelSerializer):
class Meta:
model = configure_item
fields = '__all__'
class APPSerializer(serializers.ModelSerializer):
class Meta:
model = application
fields = '__all__'
|
[
"from rest_framework import serializers\r\nfrom .models import support_team, configure_item, application\r\n\r\n\r\nclass STSerializer(serializers.ModelSerializer):\r\n\r\n class Meta:\r\n model = support_team\r\n fields = '__all__'\r\n\r\n\r\nclass CISerializer(serializers.ModelSerializer):\r\n\r\n class Meta:\r\n model = configure_item\r\n fields = '__all__'\r\n\r\n\r\nclass APPSerializer(serializers.ModelSerializer):\r\n class Meta:\r\n model = application\r\n fields = '__all__'",
"from rest_framework import serializers\nfrom .models import support_team, configure_item, application\n\n\nclass STSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = support_team\n fields = '__all__'\n\n\nclass CISerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = configure_item\n fields = '__all__'\n\n\nclass APPSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = application\n fields = '__all__'\n",
"<import token>\n\n\nclass STSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = support_team\n fields = '__all__'\n\n\nclass CISerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = configure_item\n fields = '__all__'\n\n\nclass APPSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = application\n fields = '__all__'\n",
"<import token>\n<class token>\n\n\nclass CISerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = configure_item\n fields = '__all__'\n\n\nclass APPSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = application\n fields = '__all__'\n",
"<import token>\n<class token>\n<class token>\n\n\nclass APPSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = application\n fields = '__all__'\n",
"<import token>\n<class token>\n<class token>\n<class token>\n"
] | false |
98,381 |
4ed8aa2be5d78b8c11269cceac4062fc85e79369
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Hive Netius System
# Copyright (c) 2008-2020 Hive Solutions Lda.
#
# This file is part of Hive Netius System.
#
# Hive Netius System is free software: you can redistribute it and/or modify
# it under the terms of the Apache License as published by the Apache
# Foundation, either version 2.0 of the License, or (at your option) any
# later version.
#
# Hive Netius System is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Apache License for more details.
#
# You should have received a copy of the Apache License along with
# Hive Netius System. If not, see <http://www.apache.org/licenses/>.
__author__ = "João Magalhães <[email protected]>"
""" The author(s) of the module """
__version__ = "1.0.0"
""" The version of the module """
__revision__ = "$LastChangedRevision$"
""" The revision number of the module """
__date__ = "$LastChangedDate$"
""" The last change date of the module """
__copyright__ = "Copyright (c) 2008-2020 Hive Solutions Lda."
""" The copyright for the module """
__license__ = "Apache License, Version 2.0"
""" The license for the module """
import netius
from . import common
FILE_WORK = 20
ERROR_ACTION = -1
OPEN_ACTION = 1
CLOSE_ACTION = 2
READ_ACTION = 3
WRITE_ACTION = 4
class FileThread(common.Thread):
def execute(self, work):
type = work[0]
if not type == FILE_WORK: netius.NotImplemented(
"Cannot execute type '%d'" % type
)
try:
self._execute(work)
except BaseException as exception:
self.owner.push_event((ERROR_ACTION, exception, work[-1]))
def open(self, path, mode, data):
file = open(path)
self.owner.push_event((OPEN_ACTION, file, data))
def close(self, file, data):
file.close()
self.owner.push_event((CLOSE_ACTION, file, data))
def read(self, file, count, data):
result = file.read(count)
self.owner.push_event((READ_ACTION, result, data))
def write(self, file, buffer, data):
file.write(buffer)
self.owner.push_event((WRITE_ACTION, len(buffer), data))
def _execute(self, work):
action = work[1]
if action == OPEN_ACTION: self.open(*work[2:])
elif action == CLOSE_ACTION: self.close(*work[2:])
elif action == READ_ACTION: self.read(*work[2:])
elif action == WRITE_ACTION: self.read(*work[2:])
else: netius.NotImplemented("Undefined file action '%d'" % action)
class FilePool(common.EventPool):
def __init__(self, base = FileThread, count = 10):
common.EventPool.__init__(self, base = base, count = count)
def open(self, path, mode = "r", data = None):
work = (FILE_WORK, OPEN_ACTION, path, mode, data)
self.push(work)
def close(self, file, data = None):
work = (FILE_WORK, CLOSE_ACTION, file, data)
self.push(work)
def read(self, file, count = -1, data = None):
work = (FILE_WORK, READ_ACTION, file, count, data)
self.push(work)
def write(self, file, buffer, data = None):
work = (FILE_WORK, WRITE_ACTION, file, buffer, data)
self.push(work)
|
[
"#!/usr/bin/python\r\n# -*- coding: utf-8 -*-\r\n\r\n# Hive Netius System\r\n# Copyright (c) 2008-2020 Hive Solutions Lda.\r\n#\r\n# This file is part of Hive Netius System.\r\n#\r\n# Hive Netius System is free software: you can redistribute it and/or modify\r\n# it under the terms of the Apache License as published by the Apache\r\n# Foundation, either version 2.0 of the License, or (at your option) any\r\n# later version.\r\n#\r\n# Hive Netius System is distributed in the hope that it will be useful,\r\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\r\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\r\n# Apache License for more details.\r\n#\r\n# You should have received a copy of the Apache License along with\r\n# Hive Netius System. If not, see <http://www.apache.org/licenses/>.\r\n\r\n__author__ = \"João Magalhães <[email protected]>\"\r\n\"\"\" The author(s) of the module \"\"\"\r\n\r\n__version__ = \"1.0.0\"\r\n\"\"\" The version of the module \"\"\"\r\n\r\n__revision__ = \"$LastChangedRevision$\"\r\n\"\"\" The revision number of the module \"\"\"\r\n\r\n__date__ = \"$LastChangedDate$\"\r\n\"\"\" The last change date of the module \"\"\"\r\n\r\n__copyright__ = \"Copyright (c) 2008-2020 Hive Solutions Lda.\"\r\n\"\"\" The copyright for the module \"\"\"\r\n\r\n__license__ = \"Apache License, Version 2.0\"\r\n\"\"\" The license for the module \"\"\"\r\n\r\nimport netius\r\n\r\nfrom . import common\r\n\r\nFILE_WORK = 20\r\n\r\nERROR_ACTION = -1\r\nOPEN_ACTION = 1\r\nCLOSE_ACTION = 2\r\nREAD_ACTION = 3\r\nWRITE_ACTION = 4\r\n\r\nclass FileThread(common.Thread):\r\n\r\n def execute(self, work):\r\n type = work[0]\r\n if not type == FILE_WORK: netius.NotImplemented(\r\n \"Cannot execute type '%d'\" % type\r\n )\r\n\r\n try:\r\n self._execute(work)\r\n except BaseException as exception:\r\n self.owner.push_event((ERROR_ACTION, exception, work[-1]))\r\n\r\n def open(self, path, mode, data):\r\n file = open(path)\r\n self.owner.push_event((OPEN_ACTION, file, data))\r\n\r\n def close(self, file, data):\r\n file.close()\r\n self.owner.push_event((CLOSE_ACTION, file, data))\r\n\r\n def read(self, file, count, data):\r\n result = file.read(count)\r\n self.owner.push_event((READ_ACTION, result, data))\r\n\r\n def write(self, file, buffer, data):\r\n file.write(buffer)\r\n self.owner.push_event((WRITE_ACTION, len(buffer), data))\r\n\r\n def _execute(self, work):\r\n action = work[1]\r\n if action == OPEN_ACTION: self.open(*work[2:])\r\n elif action == CLOSE_ACTION: self.close(*work[2:])\r\n elif action == READ_ACTION: self.read(*work[2:])\r\n elif action == WRITE_ACTION: self.read(*work[2:])\r\n else: netius.NotImplemented(\"Undefined file action '%d'\" % action)\r\n\r\nclass FilePool(common.EventPool):\r\n\r\n def __init__(self, base = FileThread, count = 10):\r\n common.EventPool.__init__(self, base = base, count = count)\r\n\r\n def open(self, path, mode = \"r\", data = None):\r\n work = (FILE_WORK, OPEN_ACTION, path, mode, data)\r\n self.push(work)\r\n\r\n def close(self, file, data = None):\r\n work = (FILE_WORK, CLOSE_ACTION, file, data)\r\n self.push(work)\r\n\r\n def read(self, file, count = -1, data = None):\r\n work = (FILE_WORK, READ_ACTION, file, count, data)\r\n self.push(work)\r\n\r\n def write(self, file, buffer, data = None):\r\n work = (FILE_WORK, WRITE_ACTION, file, buffer, data)\r\n self.push(work)\r\n",
"__author__ = 'João Magalhães <[email protected]>'\n<docstring token>\n__version__ = '1.0.0'\n<docstring token>\n__revision__ = '$LastChangedRevision$'\n<docstring token>\n__date__ = '$LastChangedDate$'\n<docstring token>\n__copyright__ = 'Copyright (c) 2008-2020 Hive Solutions Lda.'\n<docstring token>\n__license__ = 'Apache License, Version 2.0'\n<docstring token>\nimport netius\nfrom . import common\nFILE_WORK = 20\nERROR_ACTION = -1\nOPEN_ACTION = 1\nCLOSE_ACTION = 2\nREAD_ACTION = 3\nWRITE_ACTION = 4\n\n\nclass FileThread(common.Thread):\n\n def execute(self, work):\n type = work[0]\n if not type == FILE_WORK:\n netius.NotImplemented(\"Cannot execute type '%d'\" % type)\n try:\n self._execute(work)\n except BaseException as exception:\n self.owner.push_event((ERROR_ACTION, exception, work[-1]))\n\n def open(self, path, mode, data):\n file = open(path)\n self.owner.push_event((OPEN_ACTION, file, data))\n\n def close(self, file, data):\n file.close()\n self.owner.push_event((CLOSE_ACTION, file, data))\n\n def read(self, file, count, data):\n result = file.read(count)\n self.owner.push_event((READ_ACTION, result, data))\n\n def write(self, file, buffer, data):\n file.write(buffer)\n self.owner.push_event((WRITE_ACTION, len(buffer), data))\n\n def _execute(self, work):\n action = work[1]\n if action == OPEN_ACTION:\n self.open(*work[2:])\n elif action == CLOSE_ACTION:\n self.close(*work[2:])\n elif action == READ_ACTION:\n self.read(*work[2:])\n elif action == WRITE_ACTION:\n self.read(*work[2:])\n else:\n netius.NotImplemented(\"Undefined file action '%d'\" % action)\n\n\nclass FilePool(common.EventPool):\n\n def __init__(self, base=FileThread, count=10):\n common.EventPool.__init__(self, base=base, count=count)\n\n def open(self, path, mode='r', data=None):\n work = FILE_WORK, OPEN_ACTION, path, mode, data\n self.push(work)\n\n def close(self, file, data=None):\n work = FILE_WORK, CLOSE_ACTION, file, data\n self.push(work)\n\n def read(self, file, count=-1, data=None):\n work = FILE_WORK, READ_ACTION, file, count, data\n self.push(work)\n\n def write(self, file, buffer, data=None):\n work = FILE_WORK, WRITE_ACTION, file, buffer, data\n self.push(work)\n",
"__author__ = 'João Magalhães <[email protected]>'\n<docstring token>\n__version__ = '1.0.0'\n<docstring token>\n__revision__ = '$LastChangedRevision$'\n<docstring token>\n__date__ = '$LastChangedDate$'\n<docstring token>\n__copyright__ = 'Copyright (c) 2008-2020 Hive Solutions Lda.'\n<docstring token>\n__license__ = 'Apache License, Version 2.0'\n<docstring token>\n<import token>\nFILE_WORK = 20\nERROR_ACTION = -1\nOPEN_ACTION = 1\nCLOSE_ACTION = 2\nREAD_ACTION = 3\nWRITE_ACTION = 4\n\n\nclass FileThread(common.Thread):\n\n def execute(self, work):\n type = work[0]\n if not type == FILE_WORK:\n netius.NotImplemented(\"Cannot execute type '%d'\" % type)\n try:\n self._execute(work)\n except BaseException as exception:\n self.owner.push_event((ERROR_ACTION, exception, work[-1]))\n\n def open(self, path, mode, data):\n file = open(path)\n self.owner.push_event((OPEN_ACTION, file, data))\n\n def close(self, file, data):\n file.close()\n self.owner.push_event((CLOSE_ACTION, file, data))\n\n def read(self, file, count, data):\n result = file.read(count)\n self.owner.push_event((READ_ACTION, result, data))\n\n def write(self, file, buffer, data):\n file.write(buffer)\n self.owner.push_event((WRITE_ACTION, len(buffer), data))\n\n def _execute(self, work):\n action = work[1]\n if action == OPEN_ACTION:\n self.open(*work[2:])\n elif action == CLOSE_ACTION:\n self.close(*work[2:])\n elif action == READ_ACTION:\n self.read(*work[2:])\n elif action == WRITE_ACTION:\n self.read(*work[2:])\n else:\n netius.NotImplemented(\"Undefined file action '%d'\" % action)\n\n\nclass FilePool(common.EventPool):\n\n def __init__(self, base=FileThread, count=10):\n common.EventPool.__init__(self, base=base, count=count)\n\n def open(self, path, mode='r', data=None):\n work = FILE_WORK, OPEN_ACTION, path, mode, data\n self.push(work)\n\n def close(self, file, data=None):\n work = FILE_WORK, CLOSE_ACTION, file, data\n self.push(work)\n\n def read(self, file, count=-1, data=None):\n work = FILE_WORK, READ_ACTION, file, count, data\n self.push(work)\n\n def write(self, file, buffer, data=None):\n work = FILE_WORK, WRITE_ACTION, file, buffer, data\n self.push(work)\n",
"<assignment token>\n<docstring token>\n<assignment token>\n<docstring token>\n<assignment token>\n<docstring token>\n<assignment token>\n<docstring token>\n<assignment token>\n<docstring token>\n<assignment token>\n<docstring token>\n<import token>\n<assignment token>\n\n\nclass FileThread(common.Thread):\n\n def execute(self, work):\n type = work[0]\n if not type == FILE_WORK:\n netius.NotImplemented(\"Cannot execute type '%d'\" % type)\n try:\n self._execute(work)\n except BaseException as exception:\n self.owner.push_event((ERROR_ACTION, exception, work[-1]))\n\n def open(self, path, mode, data):\n file = open(path)\n self.owner.push_event((OPEN_ACTION, file, data))\n\n def close(self, file, data):\n file.close()\n self.owner.push_event((CLOSE_ACTION, file, data))\n\n def read(self, file, count, data):\n result = file.read(count)\n self.owner.push_event((READ_ACTION, result, data))\n\n def write(self, file, buffer, data):\n file.write(buffer)\n self.owner.push_event((WRITE_ACTION, len(buffer), data))\n\n def _execute(self, work):\n action = work[1]\n if action == OPEN_ACTION:\n self.open(*work[2:])\n elif action == CLOSE_ACTION:\n self.close(*work[2:])\n elif action == READ_ACTION:\n self.read(*work[2:])\n elif action == WRITE_ACTION:\n self.read(*work[2:])\n else:\n netius.NotImplemented(\"Undefined file action '%d'\" % action)\n\n\nclass FilePool(common.EventPool):\n\n def __init__(self, base=FileThread, count=10):\n common.EventPool.__init__(self, base=base, count=count)\n\n def open(self, path, mode='r', data=None):\n work = FILE_WORK, OPEN_ACTION, path, mode, data\n self.push(work)\n\n def close(self, file, data=None):\n work = FILE_WORK, CLOSE_ACTION, file, data\n self.push(work)\n\n def read(self, file, count=-1, data=None):\n work = FILE_WORK, READ_ACTION, file, count, data\n self.push(work)\n\n def write(self, file, buffer, data=None):\n work = FILE_WORK, WRITE_ACTION, file, buffer, data\n self.push(work)\n",
"<assignment token>\n<docstring token>\n<assignment token>\n<docstring token>\n<assignment token>\n<docstring token>\n<assignment token>\n<docstring token>\n<assignment token>\n<docstring token>\n<assignment token>\n<docstring token>\n<import token>\n<assignment token>\n\n\nclass FileThread(common.Thread):\n\n def execute(self, work):\n type = work[0]\n if not type == FILE_WORK:\n netius.NotImplemented(\"Cannot execute type '%d'\" % type)\n try:\n self._execute(work)\n except BaseException as exception:\n self.owner.push_event((ERROR_ACTION, exception, work[-1]))\n <function token>\n\n def close(self, file, data):\n file.close()\n self.owner.push_event((CLOSE_ACTION, file, data))\n\n def read(self, file, count, data):\n result = file.read(count)\n self.owner.push_event((READ_ACTION, result, data))\n\n def write(self, file, buffer, data):\n file.write(buffer)\n self.owner.push_event((WRITE_ACTION, len(buffer), data))\n\n def _execute(self, work):\n action = work[1]\n if action == OPEN_ACTION:\n self.open(*work[2:])\n elif action == CLOSE_ACTION:\n self.close(*work[2:])\n elif action == READ_ACTION:\n self.read(*work[2:])\n elif action == WRITE_ACTION:\n self.read(*work[2:])\n else:\n netius.NotImplemented(\"Undefined file action '%d'\" % action)\n\n\nclass FilePool(common.EventPool):\n\n def __init__(self, base=FileThread, count=10):\n common.EventPool.__init__(self, base=base, count=count)\n\n def open(self, path, mode='r', data=None):\n work = FILE_WORK, OPEN_ACTION, path, mode, data\n self.push(work)\n\n def close(self, file, data=None):\n work = FILE_WORK, CLOSE_ACTION, file, data\n self.push(work)\n\n def read(self, file, count=-1, data=None):\n work = FILE_WORK, READ_ACTION, file, count, data\n self.push(work)\n\n def write(self, file, buffer, data=None):\n work = FILE_WORK, WRITE_ACTION, file, buffer, data\n self.push(work)\n",
"<assignment token>\n<docstring token>\n<assignment token>\n<docstring token>\n<assignment token>\n<docstring token>\n<assignment token>\n<docstring token>\n<assignment token>\n<docstring token>\n<assignment token>\n<docstring token>\n<import token>\n<assignment token>\n\n\nclass FileThread(common.Thread):\n\n def execute(self, work):\n type = work[0]\n if not type == FILE_WORK:\n netius.NotImplemented(\"Cannot execute type '%d'\" % type)\n try:\n self._execute(work)\n except BaseException as exception:\n self.owner.push_event((ERROR_ACTION, exception, work[-1]))\n <function token>\n <function token>\n\n def read(self, file, count, data):\n result = file.read(count)\n self.owner.push_event((READ_ACTION, result, data))\n\n def write(self, file, buffer, data):\n file.write(buffer)\n self.owner.push_event((WRITE_ACTION, len(buffer), data))\n\n def _execute(self, work):\n action = work[1]\n if action == OPEN_ACTION:\n self.open(*work[2:])\n elif action == CLOSE_ACTION:\n self.close(*work[2:])\n elif action == READ_ACTION:\n self.read(*work[2:])\n elif action == WRITE_ACTION:\n self.read(*work[2:])\n else:\n netius.NotImplemented(\"Undefined file action '%d'\" % action)\n\n\nclass FilePool(common.EventPool):\n\n def __init__(self, base=FileThread, count=10):\n common.EventPool.__init__(self, base=base, count=count)\n\n def open(self, path, mode='r', data=None):\n work = FILE_WORK, OPEN_ACTION, path, mode, data\n self.push(work)\n\n def close(self, file, data=None):\n work = FILE_WORK, CLOSE_ACTION, file, data\n self.push(work)\n\n def read(self, file, count=-1, data=None):\n work = FILE_WORK, READ_ACTION, file, count, data\n self.push(work)\n\n def write(self, file, buffer, data=None):\n work = FILE_WORK, WRITE_ACTION, file, buffer, data\n self.push(work)\n",
"<assignment token>\n<docstring token>\n<assignment token>\n<docstring token>\n<assignment token>\n<docstring token>\n<assignment token>\n<docstring token>\n<assignment token>\n<docstring token>\n<assignment token>\n<docstring token>\n<import token>\n<assignment token>\n\n\nclass FileThread(common.Thread):\n\n def execute(self, work):\n type = work[0]\n if not type == FILE_WORK:\n netius.NotImplemented(\"Cannot execute type '%d'\" % type)\n try:\n self._execute(work)\n except BaseException as exception:\n self.owner.push_event((ERROR_ACTION, exception, work[-1]))\n <function token>\n <function token>\n\n def read(self, file, count, data):\n result = file.read(count)\n self.owner.push_event((READ_ACTION, result, data))\n <function token>\n\n def _execute(self, work):\n action = work[1]\n if action == OPEN_ACTION:\n self.open(*work[2:])\n elif action == CLOSE_ACTION:\n self.close(*work[2:])\n elif action == READ_ACTION:\n self.read(*work[2:])\n elif action == WRITE_ACTION:\n self.read(*work[2:])\n else:\n netius.NotImplemented(\"Undefined file action '%d'\" % action)\n\n\nclass FilePool(common.EventPool):\n\n def __init__(self, base=FileThread, count=10):\n common.EventPool.__init__(self, base=base, count=count)\n\n def open(self, path, mode='r', data=None):\n work = FILE_WORK, OPEN_ACTION, path, mode, data\n self.push(work)\n\n def close(self, file, data=None):\n work = FILE_WORK, CLOSE_ACTION, file, data\n self.push(work)\n\n def read(self, file, count=-1, data=None):\n work = FILE_WORK, READ_ACTION, file, count, data\n self.push(work)\n\n def write(self, file, buffer, data=None):\n work = FILE_WORK, WRITE_ACTION, file, buffer, data\n self.push(work)\n",
"<assignment token>\n<docstring token>\n<assignment token>\n<docstring token>\n<assignment token>\n<docstring token>\n<assignment token>\n<docstring token>\n<assignment token>\n<docstring token>\n<assignment token>\n<docstring token>\n<import token>\n<assignment token>\n\n\nclass FileThread(common.Thread):\n\n def execute(self, work):\n type = work[0]\n if not type == FILE_WORK:\n netius.NotImplemented(\"Cannot execute type '%d'\" % type)\n try:\n self._execute(work)\n except BaseException as exception:\n self.owner.push_event((ERROR_ACTION, exception, work[-1]))\n <function token>\n <function token>\n <function token>\n <function token>\n\n def _execute(self, work):\n action = work[1]\n if action == OPEN_ACTION:\n self.open(*work[2:])\n elif action == CLOSE_ACTION:\n self.close(*work[2:])\n elif action == READ_ACTION:\n self.read(*work[2:])\n elif action == WRITE_ACTION:\n self.read(*work[2:])\n else:\n netius.NotImplemented(\"Undefined file action '%d'\" % action)\n\n\nclass FilePool(common.EventPool):\n\n def __init__(self, base=FileThread, count=10):\n common.EventPool.__init__(self, base=base, count=count)\n\n def open(self, path, mode='r', data=None):\n work = FILE_WORK, OPEN_ACTION, path, mode, data\n self.push(work)\n\n def close(self, file, data=None):\n work = FILE_WORK, CLOSE_ACTION, file, data\n self.push(work)\n\n def read(self, file, count=-1, data=None):\n work = FILE_WORK, READ_ACTION, file, count, data\n self.push(work)\n\n def write(self, file, buffer, data=None):\n work = FILE_WORK, WRITE_ACTION, file, buffer, data\n self.push(work)\n",
"<assignment token>\n<docstring token>\n<assignment token>\n<docstring token>\n<assignment token>\n<docstring token>\n<assignment token>\n<docstring token>\n<assignment token>\n<docstring token>\n<assignment token>\n<docstring token>\n<import token>\n<assignment token>\n\n\nclass FileThread(common.Thread):\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def _execute(self, work):\n action = work[1]\n if action == OPEN_ACTION:\n self.open(*work[2:])\n elif action == CLOSE_ACTION:\n self.close(*work[2:])\n elif action == READ_ACTION:\n self.read(*work[2:])\n elif action == WRITE_ACTION:\n self.read(*work[2:])\n else:\n netius.NotImplemented(\"Undefined file action '%d'\" % action)\n\n\nclass FilePool(common.EventPool):\n\n def __init__(self, base=FileThread, count=10):\n common.EventPool.__init__(self, base=base, count=count)\n\n def open(self, path, mode='r', data=None):\n work = FILE_WORK, OPEN_ACTION, path, mode, data\n self.push(work)\n\n def close(self, file, data=None):\n work = FILE_WORK, CLOSE_ACTION, file, data\n self.push(work)\n\n def read(self, file, count=-1, data=None):\n work = FILE_WORK, READ_ACTION, file, count, data\n self.push(work)\n\n def write(self, file, buffer, data=None):\n work = FILE_WORK, WRITE_ACTION, file, buffer, data\n self.push(work)\n",
"<assignment token>\n<docstring token>\n<assignment token>\n<docstring token>\n<assignment token>\n<docstring token>\n<assignment token>\n<docstring token>\n<assignment token>\n<docstring token>\n<assignment token>\n<docstring token>\n<import token>\n<assignment token>\n\n\nclass FileThread(common.Thread):\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n\nclass FilePool(common.EventPool):\n\n def __init__(self, base=FileThread, count=10):\n common.EventPool.__init__(self, base=base, count=count)\n\n def open(self, path, mode='r', data=None):\n work = FILE_WORK, OPEN_ACTION, path, mode, data\n self.push(work)\n\n def close(self, file, data=None):\n work = FILE_WORK, CLOSE_ACTION, file, data\n self.push(work)\n\n def read(self, file, count=-1, data=None):\n work = FILE_WORK, READ_ACTION, file, count, data\n self.push(work)\n\n def write(self, file, buffer, data=None):\n work = FILE_WORK, WRITE_ACTION, file, buffer, data\n self.push(work)\n",
"<assignment token>\n<docstring token>\n<assignment token>\n<docstring token>\n<assignment token>\n<docstring token>\n<assignment token>\n<docstring token>\n<assignment token>\n<docstring token>\n<assignment token>\n<docstring token>\n<import token>\n<assignment token>\n<class token>\n\n\nclass FilePool(common.EventPool):\n\n def __init__(self, base=FileThread, count=10):\n common.EventPool.__init__(self, base=base, count=count)\n\n def open(self, path, mode='r', data=None):\n work = FILE_WORK, OPEN_ACTION, path, mode, data\n self.push(work)\n\n def close(self, file, data=None):\n work = FILE_WORK, CLOSE_ACTION, file, data\n self.push(work)\n\n def read(self, file, count=-1, data=None):\n work = FILE_WORK, READ_ACTION, file, count, data\n self.push(work)\n\n def write(self, file, buffer, data=None):\n work = FILE_WORK, WRITE_ACTION, file, buffer, data\n self.push(work)\n",
"<assignment token>\n<docstring token>\n<assignment token>\n<docstring token>\n<assignment token>\n<docstring token>\n<assignment token>\n<docstring token>\n<assignment token>\n<docstring token>\n<assignment token>\n<docstring token>\n<import token>\n<assignment token>\n<class token>\n\n\nclass FilePool(common.EventPool):\n\n def __init__(self, base=FileThread, count=10):\n common.EventPool.__init__(self, base=base, count=count)\n\n def open(self, path, mode='r', data=None):\n work = FILE_WORK, OPEN_ACTION, path, mode, data\n self.push(work)\n\n def close(self, file, data=None):\n work = FILE_WORK, CLOSE_ACTION, file, data\n self.push(work)\n <function token>\n\n def write(self, file, buffer, data=None):\n work = FILE_WORK, WRITE_ACTION, file, buffer, data\n self.push(work)\n",
"<assignment token>\n<docstring token>\n<assignment token>\n<docstring token>\n<assignment token>\n<docstring token>\n<assignment token>\n<docstring token>\n<assignment token>\n<docstring token>\n<assignment token>\n<docstring token>\n<import token>\n<assignment token>\n<class token>\n\n\nclass FilePool(common.EventPool):\n\n def __init__(self, base=FileThread, count=10):\n common.EventPool.__init__(self, base=base, count=count)\n\n def open(self, path, mode='r', data=None):\n work = FILE_WORK, OPEN_ACTION, path, mode, data\n self.push(work)\n <function token>\n <function token>\n\n def write(self, file, buffer, data=None):\n work = FILE_WORK, WRITE_ACTION, file, buffer, data\n self.push(work)\n",
"<assignment token>\n<docstring token>\n<assignment token>\n<docstring token>\n<assignment token>\n<docstring token>\n<assignment token>\n<docstring token>\n<assignment token>\n<docstring token>\n<assignment token>\n<docstring token>\n<import token>\n<assignment token>\n<class token>\n\n\nclass FilePool(common.EventPool):\n <function token>\n\n def open(self, path, mode='r', data=None):\n work = FILE_WORK, OPEN_ACTION, path, mode, data\n self.push(work)\n <function token>\n <function token>\n\n def write(self, file, buffer, data=None):\n work = FILE_WORK, WRITE_ACTION, file, buffer, data\n self.push(work)\n",
"<assignment token>\n<docstring token>\n<assignment token>\n<docstring token>\n<assignment token>\n<docstring token>\n<assignment token>\n<docstring token>\n<assignment token>\n<docstring token>\n<assignment token>\n<docstring token>\n<import token>\n<assignment token>\n<class token>\n\n\nclass FilePool(common.EventPool):\n <function token>\n <function token>\n <function token>\n <function token>\n\n def write(self, file, buffer, data=None):\n work = FILE_WORK, WRITE_ACTION, file, buffer, data\n self.push(work)\n",
"<assignment token>\n<docstring token>\n<assignment token>\n<docstring token>\n<assignment token>\n<docstring token>\n<assignment token>\n<docstring token>\n<assignment token>\n<docstring token>\n<assignment token>\n<docstring token>\n<import token>\n<assignment token>\n<class token>\n\n\nclass FilePool(common.EventPool):\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n",
"<assignment token>\n<docstring token>\n<assignment token>\n<docstring token>\n<assignment token>\n<docstring token>\n<assignment token>\n<docstring token>\n<assignment token>\n<docstring token>\n<assignment token>\n<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n"
] | false |
98,382 |
fd37252896904bd7cc9895af3cfb6868c22cf2a3
|
import numpy
import os
import librosa
if __name__ == '__main__':
features = 'features3D'
loadpath = 'D:/PythonProjects_Data/AVEC2017/'
savepath = 'D:/PythonProjects_Data/AVEC2017-OtherFeatures/Step1_%s/' % features
for foldname in os.listdir(loadpath):
if foldname.find('_P') == -1: continue
if os.path.exists(os.path.join(savepath, foldname)): continue
os.makedirs(os.path.join(savepath, foldname))
print('Treating', foldname)
transcriptData = numpy.genfromtxt(
fname=os.path.join(loadpath, foldname, '%s_TRANSCRIPT.csv' % foldname[0:foldname.find('_')]), dtype=str,
delimiter='\t')
originData = numpy.genfromtxt(
fname=os.path.join(loadpath, foldname, '%s_CLNF_%s.txt' % (foldname[0:foldname.find('_')], features)),
dtype=str, delimiter=',')
position = 1
for index in range(1, numpy.shape(transcriptData)[0]):
startPosition, endPosition = float(transcriptData[index][0]), float(transcriptData[index][1])
with open(os.path.join(savepath, foldname, '%s_%04d.csv' % (transcriptData[index][2], index)), 'w') as file:
if position >= numpy.shape(originData)[0]: break
while startPosition > float(originData[position][1]):
position += 1
if position >= numpy.shape(originData)[0]: break
if position >= numpy.shape(originData)[0]: break
while float(originData[position][1]) <= endPosition:
if originData[position][3] == 0: continue
for writeIndex in range(4, len(originData[position])):
if writeIndex != 4: file.write(',')
file.write(originData[position][writeIndex])
position += 1
file.write('\n')
if position >= numpy.shape(originData)[0]: break
if position >= numpy.shape(originData)[0]: break
# exit()
|
[
"import numpy\nimport os\nimport librosa\n\nif __name__ == '__main__':\n features = 'features3D'\n loadpath = 'D:/PythonProjects_Data/AVEC2017/'\n savepath = 'D:/PythonProjects_Data/AVEC2017-OtherFeatures/Step1_%s/' % features\n for foldname in os.listdir(loadpath):\n if foldname.find('_P') == -1: continue\n if os.path.exists(os.path.join(savepath, foldname)): continue\n os.makedirs(os.path.join(savepath, foldname))\n print('Treating', foldname)\n\n transcriptData = numpy.genfromtxt(\n fname=os.path.join(loadpath, foldname, '%s_TRANSCRIPT.csv' % foldname[0:foldname.find('_')]), dtype=str,\n delimiter='\\t')\n\n originData = numpy.genfromtxt(\n fname=os.path.join(loadpath, foldname, '%s_CLNF_%s.txt' % (foldname[0:foldname.find('_')], features)),\n dtype=str, delimiter=',')\n\n position = 1\n for index in range(1, numpy.shape(transcriptData)[0]):\n startPosition, endPosition = float(transcriptData[index][0]), float(transcriptData[index][1])\n\n with open(os.path.join(savepath, foldname, '%s_%04d.csv' % (transcriptData[index][2], index)), 'w') as file:\n if position >= numpy.shape(originData)[0]: break\n while startPosition > float(originData[position][1]):\n position += 1\n if position >= numpy.shape(originData)[0]: break\n if position >= numpy.shape(originData)[0]: break\n\n while float(originData[position][1]) <= endPosition:\n if originData[position][3] == 0: continue\n for writeIndex in range(4, len(originData[position])):\n if writeIndex != 4: file.write(',')\n file.write(originData[position][writeIndex])\n position += 1\n file.write('\\n')\n if position >= numpy.shape(originData)[0]: break\n if position >= numpy.shape(originData)[0]: break\n\n # exit()\n",
"import numpy\nimport os\nimport librosa\nif __name__ == '__main__':\n features = 'features3D'\n loadpath = 'D:/PythonProjects_Data/AVEC2017/'\n savepath = ('D:/PythonProjects_Data/AVEC2017-OtherFeatures/Step1_%s/' %\n features)\n for foldname in os.listdir(loadpath):\n if foldname.find('_P') == -1:\n continue\n if os.path.exists(os.path.join(savepath, foldname)):\n continue\n os.makedirs(os.path.join(savepath, foldname))\n print('Treating', foldname)\n transcriptData = numpy.genfromtxt(fname=os.path.join(loadpath,\n foldname, '%s_TRANSCRIPT.csv' % foldname[0:foldname.find('_')]),\n dtype=str, delimiter='\\t')\n originData = numpy.genfromtxt(fname=os.path.join(loadpath, foldname,\n '%s_CLNF_%s.txt' % (foldname[0:foldname.find('_')], features)),\n dtype=str, delimiter=',')\n position = 1\n for index in range(1, numpy.shape(transcriptData)[0]):\n startPosition, endPosition = float(transcriptData[index][0]\n ), float(transcriptData[index][1])\n with open(os.path.join(savepath, foldname, '%s_%04d.csv' % (\n transcriptData[index][2], index)), 'w') as file:\n if position >= numpy.shape(originData)[0]:\n break\n while startPosition > float(originData[position][1]):\n position += 1\n if position >= numpy.shape(originData)[0]:\n break\n if position >= numpy.shape(originData)[0]:\n break\n while float(originData[position][1]) <= endPosition:\n if originData[position][3] == 0:\n continue\n for writeIndex in range(4, len(originData[position])):\n if writeIndex != 4:\n file.write(',')\n file.write(originData[position][writeIndex])\n position += 1\n file.write('\\n')\n if position >= numpy.shape(originData)[0]:\n break\n if position >= numpy.shape(originData)[0]:\n break\n",
"<import token>\nif __name__ == '__main__':\n features = 'features3D'\n loadpath = 'D:/PythonProjects_Data/AVEC2017/'\n savepath = ('D:/PythonProjects_Data/AVEC2017-OtherFeatures/Step1_%s/' %\n features)\n for foldname in os.listdir(loadpath):\n if foldname.find('_P') == -1:\n continue\n if os.path.exists(os.path.join(savepath, foldname)):\n continue\n os.makedirs(os.path.join(savepath, foldname))\n print('Treating', foldname)\n transcriptData = numpy.genfromtxt(fname=os.path.join(loadpath,\n foldname, '%s_TRANSCRIPT.csv' % foldname[0:foldname.find('_')]),\n dtype=str, delimiter='\\t')\n originData = numpy.genfromtxt(fname=os.path.join(loadpath, foldname,\n '%s_CLNF_%s.txt' % (foldname[0:foldname.find('_')], features)),\n dtype=str, delimiter=',')\n position = 1\n for index in range(1, numpy.shape(transcriptData)[0]):\n startPosition, endPosition = float(transcriptData[index][0]\n ), float(transcriptData[index][1])\n with open(os.path.join(savepath, foldname, '%s_%04d.csv' % (\n transcriptData[index][2], index)), 'w') as file:\n if position >= numpy.shape(originData)[0]:\n break\n while startPosition > float(originData[position][1]):\n position += 1\n if position >= numpy.shape(originData)[0]:\n break\n if position >= numpy.shape(originData)[0]:\n break\n while float(originData[position][1]) <= endPosition:\n if originData[position][3] == 0:\n continue\n for writeIndex in range(4, len(originData[position])):\n if writeIndex != 4:\n file.write(',')\n file.write(originData[position][writeIndex])\n position += 1\n file.write('\\n')\n if position >= numpy.shape(originData)[0]:\n break\n if position >= numpy.shape(originData)[0]:\n break\n",
"<import token>\n<code token>\n"
] | false |
98,383 |
a8d784e7e5a75451a1c238ca54bf90d37f3d528c
|
import math
import matplotlib.pyplot as plt
import os
import pickle
import interval_arithmetic as d
from pprint import pprint
from sympy.parsing.sympy_parser import parse_expr
import sympy as sp
import os
from cusp import cusp_Ball_solver, evaluation_exp
import matplotlib.patches as mpatches
import csv
from scipy import spatial
import flint as ft
from mpl_toolkits.mplot3d import Axes3D
from mpl_toolkits.mplot3d.art3d import Poly3DCollection, Line3DCollection
import itertools
import timeit
import time
def ploting_boxes(boxes,uncer_boxes, var=[0,1], B=[[-20,20],[-20,20]],x=0.1,nodes=[], cusps=[],uncer_Solutions=[],Legend=False,color="green",variabel_name="x" ):
fig, ax = plt.subplots()
#plt.grid(True)
ax.set_xlim(B[0][0], B[0][1])
ax.set_ylim(B[1][0], B[1][1])
ax.set_xlabel(variabel_name+str(1))
ax.set_ylabel(variabel_name+str(2))
"""try:
ax.title(open("system.txt","r").read())
except:
pass"""
#textstr = open("system.txt","r").read()
#props = dict(boxstyle='round', facecolor='wheat', alpha=0.5)
#ax.text(0.05, 0.95, textstr, transform=ax.transAxes, fontsize=9,
# verticalalignment='top', bbox=props)
c=0
green_patch = mpatches.Patch(color=color, label='smooth part')
red_patch = mpatches.Patch(color='red', label='unknown part')
node_patch = mpatches.Patch(color='black', label='Certified nodes',fill=None)
cusp_patch = mpatches.Patch(color='blue', label='Projection of certified solution with t=0 ',fill=None)
if Legend==True:
plt.legend(handles=[green_patch,red_patch,node_patch,cusp_patch])
for box in boxes:
rectangle= plt.Rectangle((box[var[0]][0],box[var[1]][0]) , \
(box[var[0]][1]-box[var[0]][0]),(box[var[1]][1]-box[var[1]][0]),color=color)
plt.gca().add_patch(rectangle)
for box in uncer_boxes:
rectangle= plt.Rectangle((box[var[0]][0],box[var[1]][0]) , \
(box[var[0]][1]-box[var[0]][0]),(box[var[1]][1]-box[var[1]][0]), fc='r')
plt.gca().add_patch(rectangle)
for box in nodes:
rectangle= plt.Rectangle((box[0][0]-x,box[1][0]-x) ,\
2*x+box[0][1]-box[0][0],2*x+box[1][1]-box[1][0], fc='y',fill=None)
plt.gca().add_patch(rectangle)
for box in cusps:
rectangle= plt.Rectangle((box[0][0]-x,box[1][0]-x) ,\
2*x+box[0][1]-box[0][0],2*x+box[1][1]-box[1][0], fc='y',color="blue",fill=None)
plt.gca().add_patch(rectangle)
for box in uncer_Solutions:
rectangle= plt.Rectangle((box[0][0]-x,box[1][0]-x) ,\
2*x+box[0][1]-box[0][0],2*x+box[1][1]-box[1][0], fc='y',color="red",fill=None)
plt.gca().add_patch(rectangle)
plt.savefig("fig.jpg",dpi=1000)
plt.show()
def Ball_node_gen(equations,B_Ball,X):
P=open(equations,"r").readlines()
P=[Pi.replace('\n','') for Pi in P]
n=len(X)
V=""" Variables \n """
for i in range(n):
V += "x" +str(i+1) + " in " + str(B_Ball[i]) +" ; \n"
for i in range(n,2*n-2):
V += "r" +str(i-n+3) + " in " + str(B_Ball[i]) +" ; \n"
V += "t" + " in " + str(B_Ball[2*n-2]) +" ; \n"
V +="Constraints \n"
for Pi in P:
V += SDP_str(Pi,X)[0]
V += SDP_str(Pi,X)[1]
last_eq=""
for i in range(3,n):
last_eq += "r"+str(i)+"^2+"
last_eq += "r" +str(n)+"^2 -1=0;"
V += last_eq +"\n"
f= open("eq.txt","w+")
f.write(V)
f.write("end")
f.close()
def Ball_solver(equations,B_Ball,X): #the width condition needs to be added Do not suse this one
L=[B_Ball]
certified_boxes=[]
uncertified_boxes=[]
n=len(X)
while len(L) !=0:
solvability=1
if B_Ball[2*n-2][0] <= 0 <= B_Ball[2*n-2][1] and \
d.width([ d.ftconstructor(Bi[0],Bi[1]) for Bi in L[0] ] ) <0.1 :
Ball_cusp_gen(equations,B_Ball,X)
elif (B_Ball[2*n-2][0] > 0 or 0 > B_Ball[2*n-2][1] ) \
and d.width([ d.ftconstructor(Bi[0],Bi[1]) for Bi in L[0] ] ) <0.1:
Ball_node_gen(equations,B_Ball,X)
else:
children=cb.plane_subdivision(L[0])
L.remove(L[0])
L += children
solvability=0
if solvability==1:
ibex_output=cb.solving_with_ibex()
if ibex_output[0]== "Empty":
L.remove(L[0])
elif len(ibex_output[0]) !=0:
certified_boxes +=cb.computing_boxes(ibex_output[0])
L.remove(L[0])
elif len(ibex_output[1])!=0:
uncertified_boxes +=cb.computing_boxes(ibex_output[1])
L.remove(L[0])
else:
children=cb.plane_subdivision(L[0])
L.remove(L[0])
L += children
return [certified_boxes,uncertified_boxes]
def SDP_str(P,X):
n=len(X)
P_pluse=P[:]
P_minus=P[:]
for i in range(2,n):
P_pluse=P_pluse.replace("x"+str(i+1),"(x"+str(i+1) + "+ r"+str(i+1) +"*sqrt(t))")
P_minus=P_minus.replace("x"+str(i+1),"(x"+str(i+1) + "- r"+str(i+1) +"*sqrt(t))")
SP= "0.5*(" + P_pluse + "+" +P_minus+")=0; \n"
DP= "0.5*(" + P_pluse + "- (" +P_minus+") )/(sqrt(t))=0; \n"
return [SP,DP]
def Ball_generating_system(P,B_Ball,X,eps_min=0.001):
n=len(X)
V=""" Variables \n """
for i in range(n):
if B_Ball[i][0] != B_Ball[i][1]:
V += "x" +str(i+1) + " in " + str(B_Ball[i]) +" ; \n"
else:
V += "x" +str(i+1) + " in " + str([B_Ball[i][0]-eps_min, B_Ball[i][1]+eps_min]) +" ; \n"
for i in range(n,2*n-2):
V += "r" +str(i-n+3) + " in " + str(B_Ball[i]) +" ; \n"
V += "t" + " in " + str(B_Ball[2*n-2]) +" ; \n"
V +="Constraints \n"
for Pi in P:
V += SDP_str(Pi,X)[0]
V += SDP_str(Pi,X)[1]
last_eq=""
for i in range(3,n):
last_eq += "r"+str(i)+"^2+"
last_eq += "r" +str(n)+"^2 -1=0;"
V += last_eq +"\n"
f= open("eq.txt","w+")
f.write(V)
f.write("end")
f.close()
def intersting_boxes1(f,b):
pickle_in=open(f,"rb")
curve=pickle.load(pickle_in)
pickle_in.close()
intersting_boxes=[]
uncer_boxes=[]
for box in curve[0]:
if b[0][0] <= box[0][0] <= box[0][1] <=b[0][1] and \
b[1][0] <= box[1][0] <= box[1][1] <=b[1][1]:
intersting_boxes.append(box)
for box in curve[1]:
if b[0][0] <= box[0][0] <= box[0][1] <=b[0][1] and \
b[1][0] <= box[1][0] <= box[1][1] <=b[1][1]:
uncer_boxes.append(box)
return [intersting_boxes,uncer_boxes]
def intersting_boxes(curve,b):
cer_intersting_boxes=[]
uncer_intersting_boxes=[]
for box in curve[0]:
if b[0][0] <= box[0][0] <= box[0][1] <=b[0][1] and \
b[1][0] <= box[1][0] <= box[1][1] <=b[1][1]:
cer_intersting_boxes.append(box)
for box in curve[1]:
if b[0][0] <= box[0][0] <= box[0][1] <=b[0][1] and \
b[1][0] <= box[1][0] <= box[1][1] <=b[1][1]:
uncer_intersting_boxes.append(box)
return [cer_intersting_boxes,uncer_intersting_boxes]
def ibex_output(P,B,X):
os.system("ibexsolve --eps-max=0.1 -s eq.txt > output.txt")
g=open('output.txt','r')
result=g.readlines()
T=computing_boxes(result)
return T
def estimating_t1(components,upper_bound=200000): #it works only if len(components)
t1=upper_bound
t2=0
for box1 in components[0]:
for box2 in components[1]:
a=d.distance(box1,box2).lower()
b=d.distance(box1,box2).upper()
if t1 > a:
t1=a
if t2<b:
t2=b
t=d.ftconstructor(t1,t2)
t=0.25*d.power_interval(t,2)
return [float(t.lower()),float(t.upper())]
def estimating_t(components,upper_bound=19000.8): #it works only if len(components)==2
t1=upper_bound
t2=0
for box1 in components[0]:
for box2 in components[1]:
a=d.distance(box1[2:],box2[2:])
if t1 > a[0]:
t1=a[0]
if t2<a[1]:
t2=a[1]
t=d.ftconstructor(t1,t2)
t=0.25*d.power_interval(t,2)
return [float(t.lower()),float(t.upper())]
def boxes_compare(box1,box2):
flage=0
for i in range(len(box1)-1,-1,-1):
if box1[i][0] > box2[i][0]:
return 1
if box1[i][0] < box2[i][0]:
return -1
return 0
def boxes_sort(boxes):
sorted_boxes=boxes[:]
for i in range(len(boxes)-1):
for j in range(i+1,len(boxes)):
if boxes_compare(sorted_boxes[i],sorted_boxes[j]) ==1:
sorted_boxes[i], sorted_boxes[j] =sorted_boxes[j], sorted_boxes[i]
return sorted_boxes
def connected_compnants(boxes):
#ftboxes=[ [d.ftconstructor(boxi[0],boxi[1]) for boxi in box ] for box in boxes ]
ftboxes=boxes[:]
components=[[ftboxes[0]]]
for i in range(1,len(ftboxes)):
boxi_isused=0
for j in range(len(components)):
membership=0
for k in range(len(components[j])):
if d.boxes_intersection(ftboxes[i],components[j][k]) !=[] :
components[j].append(ftboxes[i])
membership=1
boxi_isused=1
break
if membership==1:
break
if boxi_isused==0:
components.append([ftboxes[i]])
unused=list(range(len(components)))
components1=components[:]
components2=[]
while len(components1) != len(components2) :
for i in unused:
for j in [j for j in list(range(i+1,len(components))) if j in unused ]:
intersection_exists=False
is_looping=True
for boxi in components[i]:
for boxj in components[j]:
if d.boxes_intersection(boxi,boxj)!=[]:
is_looping = False
intersection_exists=True
break
if is_looping==False:
break
if intersection_exists== True:
components[i] += components[j]
unused.remove(j)
components2=components1[:]
components1=[components[k] for k in unused ]
return components1
def planner_connected_compnants(boxes):
if len(boxes)==0:
return []
ftboxes=boxes[:]
#ftboxes=[ [d.ftconstructor(boxi[0],boxi[1]) for boxi in box ] for box in boxes ]
components=[[ftboxes[0]] ]
for i in range(1,len(ftboxes)):
boxi_isused=0
for j in range(len(components)):
membership=0
for k in range(len(components[j])):
if d.boxes_intersection(ftboxes[i][:2],components[j][k][:2]) !=[]: # and \
#d.boxes_intersection(ftboxes[i],components[j][k]) ==[]:
components[j].append(ftboxes[i])
membership=1
boxi_isused=1
break
if membership==1:
break
if boxi_isused==0:
components.append([ftboxes[i]])
unused=list(range(len(components)))
components1=components[:]
components2=[]
while len(components1) != len(components2) :
for i in unused:
for j in [j for j in list(range(i+1,len(components))) if j in unused ]:
intersection_exists=False
is_looping=True
for boxi in components[i]:
for boxj in components[j]:
if d.boxes_intersection(boxi[:2],boxj[:2])!=[] :#and \
#d.boxes_intersection(boxi[:2],boxj[:2]) != [] :
is_looping = False
intersection_exists=True
break
if is_looping==False:
break
if intersection_exists== True:
components[i] += components[j]
unused.remove(j)
components2=components1[:]
components1=[components[k] for k in unused ]
return components1
def estimating_yandr(components,upper_bound=100000):
r_bounds=[[upper_bound,0]]*(len(components[0][0])-2)
r_list=[]
y_list=[]
for box1 in components[0]:
for box2 in components[1]:
ft_box1= [d.ftconstructor(Bi[0],Bi[1]) for Bi in box1 ]
ft_box2= [d.ftconstructor(Bi[0],Bi[1]) for Bi in box2 ]
y_list.append([0.5*(q1+q2) for q1,q2 in zip(ft_box1[2:],ft_box2[2:])])
norm_q1q2=d.distance(box1[2:],box2[2:])
norm_q1q2=d.ftconstructor(norm_q1q2[0],norm_q1q2[1])
q1q2=[ft_box1[i]-ft_box2[i] for i in range(2,len(box1)) ]
r=[ ri/norm_q1q2 for ri in q1q2 ]
r_list.append(r)
r=[]
y=[]
for i in range(len(y_list[0])):
yi1=min([float(y[i].lower()) for y in y_list ])
yi2=max([float(y[i].upper()) for y in y_list ])
y.append([yi1,yi2])
for i in range(len(r_list[0])):
ri1=min([float(r[i].lower()) for r in r_list ])
ri2=max([float(r[i].upper()) for r in r_list ])
r.append([ri1,ri2])
return y+r
def detecting_nodes(boxes,B,f,X,eps): #boxes are list of cer and uncer curve
mixes_boxes= [[1,box ] for box in boxes[0] ] +[[0,box ] for box in boxes[1]] #putting flaggs for cer and uncer boxes
ftboxes=[ [box[0], [d.ftconstructor(boxi[0],boxi[1]) for boxi in box[1]] ] for box in mixes_boxes ]
nodes_lifting=[]
used=[]
P=[ Pi.replace("\n","") for Pi in open(f,"r").readlines() ]
for i in range(len(ftboxes)):
for j in range(i+1,len(ftboxes)):
Mariam_ft=d.boxes_intersection(ftboxes[i][1],ftboxes[j][1])
Mariam=[[float(Bi.lower()),float(Bi.upper()) ] for Bi in Mariam_ft]
if (Mariam ==[] and \
d.boxes_intersection(ftboxes[i][1][:2],ftboxes[j][1][:2])) or\
(Mariam != [] and enclosing_curve(f,Mariam,X,eps_max=0.1) ==[[],[]] ): #needs to work more
if i not in used:
used.append(i)
nodes_lifting.append(ftboxes[i])
if j not in used:
used.append(j)
nodes_lifting.append(ftboxes[j])
components= planner_connected_compnants(nodes_lifting)
cer_components=[]
uncer_components=[]
component_normal=[]
for component in components:
boxes_component=[box[1] for box in component]
component_normal =[ [[ float(Bi.lower()), float(Bi.upper()) ] for Bi in box[1] ] for box in component ]
if 0 not in [ box[0] for box in component] and eval_file_gen(f,component_normal,X) =="[]\n" :
cer_components.append(boxes_component)
else:
uncer_components.append(boxes_component)
return [cer_components,uncer_components]
def intersect_in_2D(class1,class2,monotonicity=1):
pl_intesected_pairs=[]
if monotonicity==1:
for i in range(len(class1)):
for j in range(len(class2)):
if d.boxes_intersection(class1[i][:2],class2[j][:2]) !=[] and d.boxes_intersection(class1[i],class2[j]) ==[] :
if [class2[j],class1[i]] not in pl_intesected_pairs:
pl_intesected_pairs.append([class1[i],class2[j]])
elif monotonicity==0:
for i in range(len(class1)):
for j in range(len(class2)):
if d.boxes_intersection(class1[i][:2],class2[j][:2]) !=[]:
if [class2[j],class1[i]] not in pl_intesected_pairs:
pl_intesected_pairs.append([class1[i],class2[j]])
elif monotonicity==2:
inters_indic=[]
for i in range(len(class1)):
inters_indic.append([])
for j in range(len(class2)):
if d.boxes_intersection(class1[i][:2],class2[j][:2]) !=[]:
inters_indic[i]= inters_indic[i] +[j]
for k in range(len(class1)):
if len(inters_indic[k])> 3:
for j in range(len(inters_indic[k])):
if [class2[j],class1[k]] not in pl_intesected_pairs:
pl_intesected_pairs.append([class1[k], class2[j]])
return pl_intesected_pairs
def solving_fornodes(equations,boxes,B,X,eps=0.1):
plane_components=detecting_nodes(boxes,B,equations,X,eps)#[0]
g=open(equations,'r')
P=[ Pi.replace("\n","") for Pi in g.readlines() ]
Ball_solutions=[]
for plane_component in plane_components:
x1=float(min([ai[0].lower() for ai in plane_component]))
x2=float(max([ai[0].upper() for ai in plane_component]))
y1=float(min([ai[1].lower() for ai in plane_component]))
y2=float(max([ai[1].upper() for ai in plane_component]))
components=connected_compnants(plane_component)
r=[ [float(ri[0]),float(ri[1])] for ri in estimating_r(components) ]
t=estimating_t(components)
t=[float(t[0]),float(t[1])]
B_Ball=[[x1,x2],[y1,y2]]+r +[t]
Ball_generating_system(P,B_Ball,X)
solutionsi=ibex_output(P,B_Ball,X)
Ball_solutions +=solutionsi
return Ball_solutions
def normal_subdivision(B):
ft_B=d.subdivide([d.ftconstructor(Bi[0],Bi[1]) for Bi in B[:]])
return [d.ft_normal(Bi) for Bi in ft_B]
def plane_subdivision(B):
ft_B2=d.subdivide([d.ftconstructor(Bi[0],Bi[1]) for Bi in B[:2]])
normal_B2=[d.ft_normal(Bi) for Bi in ft_B2]
return d.cartesian_product(normal_B2,[B[2:]])
def system_generator(f,B,X):
g = open(f, "r")
L = g.readlines()
g.close()
f = open("eq.txt", "w+")
f.write("Variables \n")
for i in range(len(X)):
f.write(str(X[i]) + " in " + str(B[i]) + " ; \n")
f.write("Constraints \n")
for Li in L:
f.write(Li.replace("\n", "") + "=0; \n")
f.write("end ")
f.close()
return f
def solving_with_ibex(eps=0.1):
uncer_content=[]
cer_content=[]
os.system("ibexsolve --eps-max="+ str(eps) +" -s eq.txt > output.txt")
g=open('output.txt','r')
result=g.read()
with open('output.txt') as f:
if "successful" in result:
cer_content = f.readlines()
elif "infeasible" not in result and "done! but some boxes" in result:
uncer_content = f.readlines()
elif "infeasible problem" in result:
uncer_content="Empty"
cer_content="Empty"
return [cer_content,uncer_content]
def computing_boxes():
if "infeasible" in open("output.txt","r").read():
return "Empty"
content=open("output.txt","r").readlines()
cer=[]; uncer=[]
i=0
Answer=[]
for fi in content:
try:
a=fi.index('(')
b=fi.index(')')
T=(fi[a:b+1]).replace('(','[')
T=(fi[a:b+1]).replace('(','[')
T=T.replace(')',']')
T=T.split(";")
E=[]
i=0
for Ti in T:
Ti= Ti.replace('[',"")
Ti= Ti.replace(']',"")
Ti=Ti.replace('<','')
Ti=Ti.replace('>','')
x=Ti.index(",")
a=float(Ti[:x])
b=float(Ti[x+1:])
E.append([])
E[i]=[a,b]
i+=1
if "solution n" in fi or "boundary n" in fi:
cer.append(E)
elif "unknown n" in fi:
uncer.append(E)
except ValueError:
pass
return [cer,uncer]
def enclosing_curve(system,B,X,eps_min=0.1,eps_max=0.1):
L=[B]
certified_boxes=[]
uncertified_boxes=[]
while len(L) !=0:
system_generator(system,L[0],X)
os.system("ibexsolve --eps-max="+ str(eps_max)+" --eps-min="+ str(eps_min) + " -s eq.txt > output.txt")
content=open("output.txt","r").readlines()
ibex_output=computing_boxes()
#ibex_output=solving_with_ibex(eps)
if ibex_output ==[[],[]] and max([Bi[1]-Bi[0] for Bi in L[0] ]) < eps_min :
uncertified_boxes.append(L[0])
L.remove(L[0]);
elif ibex_output ==[[],[]] :
children=plane_subdivision(L[0])
L.remove(L[0]);
L += children # print warning ################################################################""
elif ibex_output== "Empty":
L.remove(L[0])
else:
if len(ibex_output[0]) !=0:
certified_boxes += ibex_output[0]
if len(ibex_output[1])!=0:
uncertified_boxes += ibex_output[1]
L.remove(L[0])
return [certified_boxes,uncertified_boxes]
def loopsfree_checker(f,certified_boxes,uncer_boxes,P): #Assumption: no cusps
L=eval_file_gen(f,certified_boxes,X)
while L.replace('\n',"") != "[]":
L=L.replace('[','')
L=L.replace(']','')
L=L.replace('\n','')
L=L.split(",")
for i in L:
children=normal_subdivision(certified_boxes[int(i)])
certified_boxes.remove(certified_boxes[int(i)])
for child in children:
cer_children, uncer_children= enclosing_curve(f,child,X)
certified_boxes +=cer_children
uncer_boxes +=uncer_children
L = eval_file_gen(f,certified_boxes,X)
return L
def eval_file_gen(f,boxes,X,special_function=[]): #condition: len(boxes[0]) is even
functions=["sin","cos","tan","exp"]+special_function
if len(boxes[0])==0:
return []
n=len(boxes[0])
m=len(boxes)
g=open(f,'r')
P_str=g.readlines()
P_str= [Pi.replace('\n','') for Pi in P_str]
P_str= [Pi.replace('^','**') for Pi in P_str]
P_exp= [parse_expr(Pi) for Pi in P_str]
#computing jac and the minors
jac=sp.Matrix(P_str).jacobian(sp.Matrix(X))
minor1=jac[:,1:].det()
minor2=jac[:,[i for i in range(n) if i != 1] ].det()
fil=open("evaluation_file1.py","w")
fil.write("import flint as ft \n")
fil.write("import sympy as sp \n")
fil.write("import interval_arithmetic as d \n")
fil.write("boxes="+str(boxes)+"\n")
fil.write("ftboxes=[ [d.ftconstructor(Bi[0],Bi[1]) for Bi in B ] for B in boxes ] \n" )
fil.write("n=len(boxes[0])\n")
fil.write("m=len(boxes)\n")
fil.write("m1=[]\n")
fil.write("m2=[]\n")
minor1_str=str(minor1)
minor2_str=str(minor2)
for i in range(n):
minor1_str= minor1_str.replace("x"+str(i+1),"B["+str(i)+"]" )
minor2_str= minor2_str.replace("x"+str(i+1),"B["+str(i)+"]" )
for func in functions:
minor1_str=minor1_str.replace(func,"ft.arb."+func)
minor2_str=minor2_str.replace(func,"ft.arb."+func)
fil.write("for B in ftboxes: \n")
fil.write(" m1.append(ft.arb("+ minor1_str + ")) \n")
fil.write(" m2.append( ft.arb("+ minor2_str + ")) \n")
fil.write("innrer_loops=[i for i in range(m) if 0 in m1[i] and 0 in m2[i] ]\n")
fil.write("print(innrer_loops)\n")
fil.close()
t=os.popen("python3 evaluation_file1.py ").read()
return t
def boxes_classifier(system,boxes,X,special_function=[]):
if len(boxes[0])==0:
return [[],[],boxes[1]]
certified_boxes ,uncer_boxes =boxes
L=eval_file_gen(system,certified_boxes,X)
if L==[]:
return [[],[],uncer_boxes]
it=0
L=L.replace('[','')
L=L.replace(']','')
L=L.replace('\n','')
L=L.split(",")
if L !=[""]:
L=[int(li) for li in L]
return [ [certified_boxes[i] for i in range(len(certified_boxes)) if i not in L] ,\
[certified_boxes[i] for i in L ], \
uncer_boxes ]
else:
return [ [certified_boxes[i] for i in range(len(certified_boxes)) if i not in L] ,[], uncer_boxes ] #can be enhanced
def projection_checker(solutions):
if len(solutions)==0:
return [[],[]]
m=len(solutions[0])
n=int((m+1)/2)
intersect_in2d=[[]]*len(solutions)
for i in range(len(solutions)-1):
for j in range(i+1,len(solutions)):
if solutions[i]==solutions[j]:
continue
elif d.boxes_intersection(solutions[i][:2],solutions[j][:2]) !=[] and (\
(d.boxes_intersection(solutions[i][n:2*n-2],[[-Bi[1],-Bi[0]] for Bi in solutions[j][n:2*n-2]]) ==[] and \
d.boxes_intersection(solutions[i][n:2*n-2],[[Bi[0],Bi[1]] for Bi in solutions[j][n:2*n-2]]) ==[] ) \
or \
d.boxes_intersection(solutions[i][2:n]+[solutions[i][2*n-2]], solutions[j][2:n]+[solutions[j][2*n-2]]) ==[]) :
intersect_in2d[i] = intersect_in2d[i]+[ j]
accepted=[]
acc_ind=[]
unaccepted=[]
unacc_ind=[]
for i in range(len(solutions)):
if len(intersect_in2d[i]) ==0 and i not in unacc_ind+acc_ind:
accepted.append(solutions[i])
acc_ind.append(i)
continue
elif i not in unacc_ind+acc_ind:
unaccepted.append(solutions[i])
unacc_ind.append(i)
for k in intersect_in2d[i]:
if k not in unacc_ind:
unaccepted.append(solutions[k])
unacc_ind.append(k)
#pprint(sp.Matrix(unaccepted));input()
return [accepted, unaccepted]
def Ball_given_2nboxes(system,X, B1,B2, monotonicity_B1=1,monotonicity_B2=1):
B1_ft=[d.ftconstructor(Bi[0],Bi[1]) for Bi in B1]
B2_ft=[d.ftconstructor(Bi[0],Bi[1]) for Bi in B2]
P=[Pi.replace("\n","") for Pi in open(system,"r").readlines()]
sol="Empty"
if d.boxes_intersection(B1_ft, B2_ft) ==[] and monotonicity_B1== monotonicity_B2==1:
t=estimating_t([[B1_ft], [B2_ft]])
y_and_r=estimating_yandr([[B1_ft], [B2_ft]])
intersec_B1B2_in2d=d.boxes_intersection(B1_ft[:2],B2_ft[:2])
intersec_B1B2_in2d=[ [float(Bi.lower()),float(Bi.upper())] for Bi in intersec_B1B2_in2d ]
B_Ball=intersec_B1B2_in2d +y_and_r +[t]
Ball_node_gen(system,B_Ball,X)
os.system("ibexsolve --eps-max=0.1 -s eq.txt > output.txt")
sol=computing_boxes()
#if d.boxes_intersection(B1_ft, B2_ft) ==[]:
# pass
return sol
def all_pairs_oflist(L):
pairs=[]
for i in range(len(L)-1):
for j in range(i+1,len(L)):
pairs.append([L[i],L[j]])
return pairs
def checking_assumptions(curve_data): #the input of this function is the output of Ball_solver
if len(curve_data[0][1]) !=0 :
return 0
Ball_sols_ft=[[d.ftconstructor(Bi[0],Bi[1]) for Bi in B] for B in curve_data[1][0]]+[[d.ftconstructor(Bi[0],Bi[1]) for Bi in B] for B in curve_data[1][1]]
alph3=assum_alph3_checker(Ball_sols_ft)
if alph3==1 :
return 1
else:
return 0
def csv_saver(L,type_L="Ball"):
dic=[]
if type_L== "Ball" :
n=int((len(L[0])+1)/2)
for j in range(len(L)):
dic.append({})
for i in range(n):
dic[j]["x"+str(i+1)]=L[j][i]
for i in range(n,2*n-2):
dic[j]["r"+str(i+3-n)]=L[j][i]
dic[j]["t"]= L[j][2*n-2]
return dic
def dict2csv(dictlist, csvfile):
"""
Takes a list of dictionaries as input and outputs a CSV file.
"""
f = open(csvfile, 'wb')
fieldnames = dictlist[0].keys()
csvwriter = csv.DictWriter(f, delimiter=',', fieldnames=fieldnames)
csvwriter.writerow(dict((fn, fn) for fn in fieldnames))
for row in dictlist:
csvwriter.writerow(row)
fn.close()
def assum_alph3_checker(solutions):
comparing_list=[[]]*len(solutions)
for i in range(len(solutions)-1):
for j in range(i+1,len(solutions)):
if d.boxes_intersection(solutions[i][:2],solutions[j][:2]) !=[]:
comparing_list[i].append(j)
comparing_list[j].append(i)
matching=[len(T) for T in comparing_list]
if max(matching) <=2:
return 1
else:
return 0
def plotting_3D(boxes,Box,var=[0,1,2]):
ax = plt.figure().add_subplot(111, projection='3d')
ax.set_xlim(Box[0][0], Box[0][1])
ax.set_ylim(Box[1][0], Box[1][1])
ax.set_zlim(Box[2][0], Box[2][1])
ax.set_xlabel("x"+str(var[0]+1))
ax.set_ylabel("x"+str(var[1]+1))
ax.set_zlabel("x"+str(var[2]+1))
for box in boxes :
V=[[box[j][0] for j in range(3)] , [box[j][1] for j in range(3)]]
#ax.scatter3D(box[0], box[1], box[2])
points =list(itertools.product(*box))
faces=[[points[0],points[2],points[6],points[4]],
[points[0],points[2],points[3],points[1]],
[points[0],points[1],points[5],points[4]],
[points[2],points[3],points[7],points[6]],
[points[1],points[3],points[7],points[5]]]
ax.add_collection3d(Poly3DCollection(faces,
facecolors='green', linewidths=1,edgecolors='green', alpha=.25))
plt.show()
def enclosing_singularities(system,boxes,B,X,eps_max=0.1,eps_min=0.01): #there still computing Ball On the case where tow monotonic boxes intersect
combin=[]
ball=[]
start_combin=time.time()
n=len(B);
P=[Pi.replace("\n","") for Pi in open(system,"r").readlines()]
certified_boxes, uncertified_boxes= boxes
classes= boxes_classifier(system,boxes,X,special_function=[])
cer_Solutions=[]
uncer_Solutions=[]
H=[]
#############################################################################
#Solving Ball for B1 and B2 in R^n such that C is monotonic in B1 and B2
#######################################################################
#monotonic_pairs=intersect_in_2D(classes[0],classes[0])
#monotonic_componants=[ Bi[0] for Bi in monotonic_pairs ] +[ Bi[1] for Bi in monotonic_pairs ]
#Guillaume's suggestion:
mon_mid=[[0.5*(Bij[1]+Bij[0]) for Bij in Bi[:2] ] for Bi in classes[0] ]
mon_rad=[ max([0.5*(Bij[1]-Bij[0]) for Bij in Bi[:2] ]) for Bi in classes[0] ]
tree = spatial.KDTree(mon_mid)
intersting_boxes=[tree.query_ball_point(m,r=(math.sqrt(2))*r) for m,r in zip(mon_mid,mon_rad)]
#Ask Guillaume why this step is needed:
"""for i in range(len(ball)):
for j in ball[i]:
if i not in ball[j]:
ball[j].append(i)"""
intersting_boxes=[indi for indi in intersting_boxes if len(indi) >3 ]#and len(connected_compnants([classes[0][i] for i in indi])) >1 ]
discarded_components=[]
for i in range(len(intersting_boxes)-1):
for_i_stop=0
boxi_set=set(intersting_boxes[i])
for j in range(i+1,len(intersting_boxes)):
boxj_set=set(intersting_boxes[j])
if boxj_set.issubset(boxi_set):
discarded_components.append(j)
elif boxi_set < boxj_set:
discarded_components.append(i)
intersting_boxes=[intersting_boxes[i] for i in range(len(intersting_boxes)) \
if i not in discarded_components]
interesting_boxes_flattened =[]
for Box_ind in intersting_boxes :
for j in Box_ind:
if j not in interesting_boxes_flattened:
interesting_boxes_flattened.append(j) #use a flattening function in numpy
#ploting_boxes([classes[0][i] for i in interesting_boxes_flattened ],[])
plane_components= planner_connected_compnants([classes[0][i] for i in interesting_boxes_flattened ])
#pprint(plane_components[0]);input()
end_combin=time.time()
combin.append(end_combin-start_combin)
H=[]
for plane_component in plane_components:
if len(plane_component)>1:
start_combin=time.time()
components=connected_compnants(plane_component)
pairs_of_branches=all_pairs_oflist(components)
end_combin=time.time()
combin.append(end_combin-start_combin)
for pair_branches in pairs_of_branches:
start_ball=time.time()
all_boxes=pair_branches[0]+pair_branches[1]
uni=[]
for box in all_boxes:
uni = d.box_union(uni,box)
t=estimating_t(pair_branches); t1 = d.ftconstructor(t[0],t[1]); t=[float(t1.lower()),float(t1.upper())];
r=[ [float(ri[0]),float(ri[1])] for ri in estimating_yandr(pair_branches)]
B_Ball=uni[:2] +r +[t]
cusp_Ball_solver(P,B_Ball,X)
#planeappend(B_Ball)
#print(B_Ball[:3])
Ball_generating_system(P,B_Ball,X,eps_min)
os.system("ibexsolve --eps-max="+ str(eps_max)+" --eps-min="+ str(eps_min) + " -s eq.txt > output.txt")
#input("hi")
Solutions=computing_boxes()
if Solutions != "Empty" and Solutions != [[],[]] :
cer_Solutions += Solutions[0]
uncer_Solutions += Solutions[1]
if Solutions==[[],[]] :
if d.width(B_Ball[:2]) > eps_min:
#new_B=d.box_union(d.F_Ballminus(B_Ball),d.F_Ballplus(B_Ball))
new_B=B_Ball[:2]+B[2:n]
new_boxes=enclosing_curve(system,new_B,X,eps_max=0.1*eps_max)
resul=enclosing_singularities(system,new_boxes,new_B,X,eps_max=0.1*eps_max)
cer_Solutions+= resul[0]+resul[1]
uncer_Solutions += resul[2]
boxes[1] += new_boxes[1]
else:
uncer_Solutions.append(B_Ball)
end_ball=time.time()
ball.append(end_ball-start_ball)
#There still the case B1B2[0],B1B2[1] are not disjoint
########################################################################################################
#Solving Ball for potential_cusp, a box in R^n such that C is not monotonic
########################################################################################################
start_combin=time.time()
checked_boxes=[]
all_boxes=boxes[0]+boxes[1]
checked_boxes=[]
mon_mid_cusp=[[0.5*(Bij[1]+Bij[0]) for Bij in Bi[:2] ] for Bi in classes[1] ]
mon_rad_cusp=[ max([0.5*(Bij[1]-Bij[0]) for Bij in Bi[:2]]) for Bi in classes[1] ]
potential_cusps=[tree.query_ball_point(m,r=(math.sqrt(2)*(r+eps_max))) for m,r in zip(mon_mid_cusp,mon_rad_cusp)]
end_combin=time.time()
combin.append(end_combin-start_combin)
for cusp_indx in range(len(classes[1])):
start_combin=time.time()
intersecting_boxes=[all_boxes[i] for i in potential_cusps[cusp_indx]\
if d.boxes_intersection(all_boxes[i],classes[1][cusp_indx])!=[] ] #contains all boxes that intersect the considered potential_cusp
#for potential_cusp in classes[1]:
###finding cusps (or small loops) in potential_cusp####
#plane_intersecting_boxes= intersect_in_2D([potential_cusp],classes[0]+classes[1]+classes[2],monotonicity=0)
#intersecting_boxes= [pair_i[1] for pair_i in plane_intersecting_boxes \
# if d.boxes_intersection(pair_i[1], potential_cusp)!=[] ]
##########
H=[]
uni= classes[1][cusp_indx][:]
potential_cusp= classes[1][cusp_indx][:]
checked_boxes.append(potential_cusp)
for box in intersecting_boxes:
if box in checked_boxes:
continue
uni = d.box_union(uni,box)
checked_boxes.append(box)
end_combin=time.time()
combin.append(end_combin-start_combin)
#max_q1q2=d.distance(uni[2:],uni[2:])
#max_q1q2=d.ftconstructor(max_q1q2[0],max_q1q2[1])
#t=d.power_interval(max_q1q2,2)/4
#t=[float(t.lower()),float(t.upper())]
#if t[0]<0:
# t[0]=-0.1
start_ball=time.time()
t=estimating_t([[potential_cusp],[potential_cusp]])
"""if t[1]-t[0] < 1e-07:
t[0]=t[0]-0.5 * eps_min
t[1]=t[1]+0.5 * eps_min"""
B_Ball=uni +[[-1.01,1.01]]*(n-2)+[t]
H.append(B_Ball)
sol=cusp_Ball_solver(P,B_Ball,X)
if sol != "Empty" and sol != [[],[]]:
cer_Solutions += sol[0]
uncer_Solutions += sol[1]
if sol == [[],[]]:
uncer_Solutions.append(B_Ball)
end_ball=time.time()
ball.append(end_ball-start_ball)
####finding nodes that have the same projection with potential_cusp
start_combin=time.time()
non_intersecting_boxes=[all_boxes[i] for i in potential_cusps[cusp_indx]\
if d.boxes_intersection(all_boxes[i],classes[1][cusp_indx])==[] ] #contains all boxes that don't intersect the considered potential_cusp but in 2d
#non_intersecting_boxes= [pair_i[1] for pair_i in plane_intersecting_boxes \
# if d.boxes_intersection(pair_i[1], potential_cusp)==[] ]
end_combin=time.time()
combin.append(end_combin-start_combin)
for aligned in non_intersecting_boxes:
start_ball=time.time()
if aligned in checked_boxes:
continue
boxes_intersect_aligned=[B for B in non_intersecting_boxes if d.boxes_intersection(aligned,B) != [] ]
uni=aligned[:]
for boxi in boxes_intersect_aligned:
if boxi in checked_boxes:
continue
uni=d.box_union(uni,boxi)
checked_boxes.append(boxi)
t=estimating_t([[potential_cusp],[uni]])
"""if t[1]-t[0] < 1e-07:
t[0]=t[0]-0.5 * eps_min
t[1]=t[1]+0.5 * eps_min"""
r=[ [float(ri[0]),float(ri[1])] for ri in estimating_yandr([[potential_cusp],[uni]])]
B_Ball=potential_cusp[:2]+r +[t]
H.append(H)
Ball_generating_system(P,B_Ball,X)
os.system("ibexsolve --eps-max="+ str(eps_max)+" --eps-min="+ str(eps_min) + " -s eq.txt > output.txt")
Solutions=computing_boxes()
if Solutions != "Empty":
cer_Solutions += Solutions[0]
uncer_Solutions += Solutions[1]
elif Solutions == [[],[]]:
uncer_Solutions.append(B_Ball)
end_ball=time.time()
ball.append(end_ball-start_ball)
nodes=[]
cups_or_smallnodes=[]
start_combin=time.time()
checker=projection_checker(cer_Solutions)
uncer_Solutions= uncer_Solutions +checker[1]
cer_Solutions=[Bi for Bi in checker[0] if Bi[2*n-2][1] >= 0 ]
for solution in cer_Solutions :
if 0 >= solution[2*n-2][0] and 0 <= solution[2*n-2][1]:
cups_or_smallnodes.append(solution)
else:
nodes.append(solution)
end_combin=time.time()
combin.append(end_combin-start_combin)
print("KDtree ",sum(combin),"Ball ", sum(ball) )
return [nodes,cups_or_smallnodes, uncer_Solutions ]
System="system12.txt"
Box = [[-2, 2] , [-4, 4.5] , [-0.2, 43.9]]
Box = [[-1, 4], [-1, 4],[0,25],[-4.8, -1.4]]
#Box=[[0.65,0.85],[-0.3,0.1],[-0.2, 45]]#, [-4.8,-1.4]]
#Box=[[-10.1,10.1],[-10.1,10.1], [0,40.1]]
X=[sp.Symbol("x"+str(i)) for i in range(1,5)]
start_enc=time.time()
boxes =enclosing_curve(System,Box,X,eps_max=0.1,eps_min=0.0001)
end_enc=time.time()
print("enclosing_curve", end_enc-start_enc )
t1=time.time()
nodes,cups_or_smallnodes,uncer_Solutions=enclosing_singularities(System,boxes,Box,X,eps_max=0.1, eps_min=0.0001)
print(time.time()-t1)
print(len(boxes[0]),len(boxes[1]))
print(len(nodes),len(uncer_Solutions ))
e=[]
for i in range(len(nodes)-1):
for j in range(i+1,len(nodes)):
if d.boxes_intersection(nodes[i],nodes[j]) != []:
e.append(j)
print(len([nodes[i] for i in range(len(nodes)) if i not in e ]))
ploting_boxes(boxes[0],boxes[1] ,B=Box[:2], nodes = nodes,x=0.007, cusps= cups_or_smallnodes,uncer_Solutions=uncer_Solutions,color="green" ,Legend=False)
#plotting_3D(boxes[0],Box);input()
"""number_execution, total_time = timeit.Timer("boxes =enclosing_curve(System,Box,X,eps_max=0.1,eps_min=0.0000001)"\
, globals=globals()).autorange()
average_time = total_time / number_execution
print(average_time);
boxes =enclosing_curve(System,Box,X,eps_max=0.1,eps_min=0.0000001)
number_execution, total_time = timeit.Timer("nodes,cups_or_smallnodes,uncer_Solutions=enclosing_singularities(System,boxes,Box,X,eps_max=0.1, eps_min=0.00001)", globals=globals()).autorange()
average_time = total_time / number_execution
print(average_time);
#ploting_boxes(boxes[0],boxes[1] ,B=Box[:2], nodes = nodes,x=0.008, cusps= cups_or_smallnodes,uncer_Solutions=uncer_Solutions,color="green" ,Legend=True)"""
"""boxes =enclosing_curve(System,Box,X,eps=0.1)
number_execution, total_time = timeit.Timer("nodes, cups_or_smallnodes,uncer_Solutions=enclosing_singularities(System,boxes,Box,X, eps_min=0.000001);", globals=globals()).autorange()
average_time = total_time / number_execution
print(average_time);
nodes, cups_or_smallnodes,uncer_Solutions=enclosing_singularities(System,boxes,Box,X, eps_min=0.000001);
#nodes, cups_or_smallnodes,uncer_Solutions=enclosing_singularities(System,boxes,Box,X,eps_min=0.000001)"""
#plotting the singularities
#ploting_boxes(boxes[0],boxes[1] ,B=Box[:2], nodes = nodes,x=0.1, cusps= cups_or_smallnodes,uncer_Solutions=uncer_Solutions,color="green" ,Legend=True)
##################################
#Declaring parameters #######
##################################
"""System="system.txt"
Box=[[-5,15],[-15,15],[-3.14,3.14],[-3.14,3.14]]
X=[sp.Symbol("x"+str(i)) for i in range(1,5)]
##################################
#Applying the function #######
##################################
boxes =enclosing_curve(System,Box,X)
"""
|
[
"\nimport math\nimport matplotlib.pyplot as plt\nimport os\nimport pickle \nimport interval_arithmetic as d\n\nfrom pprint import pprint\nfrom sympy.parsing.sympy_parser import parse_expr\nimport sympy as sp \nimport os \nfrom cusp import cusp_Ball_solver, evaluation_exp\n\nimport matplotlib.patches as mpatches\nimport csv\nfrom scipy import spatial\nimport flint as ft\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom mpl_toolkits.mplot3d.art3d import Poly3DCollection, Line3DCollection\nimport itertools\nimport timeit\nimport time\n\n\n\ndef ploting_boxes(boxes,uncer_boxes, var=[0,1], B=[[-20,20],[-20,20]],x=0.1,nodes=[], cusps=[],uncer_Solutions=[],Legend=False,color=\"green\",variabel_name=\"x\" ):\n fig, ax = plt.subplots()\n #plt.grid(True)\n ax.set_xlim(B[0][0], B[0][1])\n ax.set_ylim(B[1][0], B[1][1])\n ax.set_xlabel(variabel_name+str(1))\n ax.set_ylabel(variabel_name+str(2))\n \"\"\"try:\n ax.title(open(\"system.txt\",\"r\").read())\n except:\n pass\"\"\"\n \n #textstr = open(\"system.txt\",\"r\").read()\n #props = dict(boxstyle='round', facecolor='wheat', alpha=0.5)\n #ax.text(0.05, 0.95, textstr, transform=ax.transAxes, fontsize=9,\n # verticalalignment='top', bbox=props)\n c=0\n green_patch = mpatches.Patch(color=color, label='smooth part') \n red_patch = mpatches.Patch(color='red', label='unknown part')\n node_patch = mpatches.Patch(color='black', label='Certified nodes',fill=None)\n cusp_patch = mpatches.Patch(color='blue', label='Projection of certified solution with t=0 ',fill=None)\n if Legend==True:\n plt.legend(handles=[green_patch,red_patch,node_patch,cusp_patch])\n for box in boxes:\n rectangle= plt.Rectangle((box[var[0]][0],box[var[1]][0]) , \\\n (box[var[0]][1]-box[var[0]][0]),(box[var[1]][1]-box[var[1]][0]),color=color)\n plt.gca().add_patch(rectangle)\n for box in uncer_boxes:\n rectangle= plt.Rectangle((box[var[0]][0],box[var[1]][0]) , \\\n (box[var[0]][1]-box[var[0]][0]),(box[var[1]][1]-box[var[1]][0]), fc='r')\n plt.gca().add_patch(rectangle)\n for box in nodes:\n rectangle= plt.Rectangle((box[0][0]-x,box[1][0]-x) ,\\\n 2*x+box[0][1]-box[0][0],2*x+box[1][1]-box[1][0], fc='y',fill=None)\n plt.gca().add_patch(rectangle) \n for box in cusps:\n rectangle= plt.Rectangle((box[0][0]-x,box[1][0]-x) ,\\\n 2*x+box[0][1]-box[0][0],2*x+box[1][1]-box[1][0], fc='y',color=\"blue\",fill=None)\n plt.gca().add_patch(rectangle) \n for box in uncer_Solutions:\n rectangle= plt.Rectangle((box[0][0]-x,box[1][0]-x) ,\\\n 2*x+box[0][1]-box[0][0],2*x+box[1][1]-box[1][0], fc='y',color=\"red\",fill=None)\n plt.gca().add_patch(rectangle) \n plt.savefig(\"fig.jpg\",dpi=1000) \n plt.show()\ndef Ball_node_gen(equations,B_Ball,X):\n P=open(equations,\"r\").readlines()\n P=[Pi.replace('\\n','') for Pi in P]\n n=len(X)\n V=\"\"\" Variables \\n \"\"\"\n for i in range(n):\n V += \"x\" +str(i+1) + \" in \" + str(B_Ball[i]) +\" ; \\n\"\n for i in range(n,2*n-2):\n V += \"r\" +str(i-n+3) + \" in \" + str(B_Ball[i]) +\" ; \\n\" \n V += \"t\" + \" in \" + str(B_Ball[2*n-2]) +\" ; \\n\" \n V +=\"Constraints \\n\" \n for Pi in P:\n V += SDP_str(Pi,X)[0]\n V += SDP_str(Pi,X)[1]\n last_eq=\"\"\n for i in range(3,n):\n last_eq += \"r\"+str(i)+\"^2+\"\n last_eq += \"r\" +str(n)+\"^2 -1=0;\" \n V += last_eq +\"\\n\"\n f= open(\"eq.txt\",\"w+\")\n f.write(V) \n f.write(\"end\")\n f.close() \ndef Ball_solver(equations,B_Ball,X): #the width condition needs to be added Do not suse this one \n\tL=[B_Ball]\n\tcertified_boxes=[]\n\tuncertified_boxes=[]\n\tn=len(X)\n\twhile len(L) !=0: \n\t\tsolvability=1\n\t\tif B_Ball[2*n-2][0] <= 0 <= B_Ball[2*n-2][1] and \\\n\t\td.width([ d.ftconstructor(Bi[0],Bi[1]) for Bi in L[0] ] ) <0.1 :\n\t\t\tBall_cusp_gen(equations,B_Ball,X)\n\t\telif (B_Ball[2*n-2][0] > 0 or 0 > B_Ball[2*n-2][1] ) \\\n\t\tand d.width([ d.ftconstructor(Bi[0],Bi[1]) for Bi in L[0] ] ) <0.1:\n\t\t\tBall_node_gen(equations,B_Ball,X)\n\t\telse:\n\t\t\tchildren=cb.plane_subdivision(L[0])\n\t\t\tL.remove(L[0])\n\t\t\tL += children\n\t\t\tsolvability=0\n\t\tif solvability==1:\n\t\t\tibex_output=cb.solving_with_ibex()\n\t\t\tif ibex_output[0]== \"Empty\":\n\t\t \n\t\t\t L.remove(L[0])\n\t\t\telif len(ibex_output[0]) !=0: \n\t\t \n\t\t\t certified_boxes +=cb.computing_boxes(ibex_output[0])\n\t\t\t L.remove(L[0])\n\t\t\telif len(ibex_output[1])!=0: \n\t\t \n\t\t\t uncertified_boxes +=cb.computing_boxes(ibex_output[1])\n\t\t\t L.remove(L[0])\n\t\t\telse: \n\t\t\t children=cb.plane_subdivision(L[0])\n\t\t\t L.remove(L[0])\n\t\t\t L += children\n\t\t\n\treturn [certified_boxes,uncertified_boxes]\t\t \ndef SDP_str(P,X):\n n=len(X)\n P_pluse=P[:]\n P_minus=P[:]\n for i in range(2,n):\n P_pluse=P_pluse.replace(\"x\"+str(i+1),\"(x\"+str(i+1) + \"+ r\"+str(i+1) +\"*sqrt(t))\")\n P_minus=P_minus.replace(\"x\"+str(i+1),\"(x\"+str(i+1) + \"- r\"+str(i+1) +\"*sqrt(t))\")\n SP= \"0.5*(\" + P_pluse + \"+\" +P_minus+\")=0; \\n\"\n DP= \"0.5*(\" + P_pluse + \"- (\" +P_minus+\") )/(sqrt(t))=0; \\n\"\n return [SP,DP]\ndef Ball_generating_system(P,B_Ball,X,eps_min=0.001):\n n=len(X)\n V=\"\"\" Variables \\n \"\"\"\n for i in range(n):\n if B_Ball[i][0] != B_Ball[i][1]:\n V += \"x\" +str(i+1) + \" in \" + str(B_Ball[i]) +\" ; \\n\"\n else: \n V += \"x\" +str(i+1) + \" in \" + str([B_Ball[i][0]-eps_min, B_Ball[i][1]+eps_min]) +\" ; \\n\"\n for i in range(n,2*n-2):\n V += \"r\" +str(i-n+3) + \" in \" + str(B_Ball[i]) +\" ; \\n\" \n V += \"t\" + \" in \" + str(B_Ball[2*n-2]) +\" ; \\n\" \n V +=\"Constraints \\n\" \n for Pi in P:\n V += SDP_str(Pi,X)[0]\n V += SDP_str(Pi,X)[1]\n\n last_eq=\"\"\n for i in range(3,n):\n last_eq += \"r\"+str(i)+\"^2+\"\n last_eq += \"r\" +str(n)+\"^2 -1=0;\" \n\n V += last_eq +\"\\n\"\n\n f= open(\"eq.txt\",\"w+\")\n f.write(V) \n f.write(\"end\")\n f.close()\ndef intersting_boxes1(f,b):\n pickle_in=open(f,\"rb\")\n curve=pickle.load(pickle_in)\n pickle_in.close()\n intersting_boxes=[]\n uncer_boxes=[]\n for box in curve[0]:\n if b[0][0] <= box[0][0] <= box[0][1] <=b[0][1] and \\\n b[1][0] <= box[1][0] <= box[1][1] <=b[1][1]:\n intersting_boxes.append(box)\n for box in curve[1]:\n if b[0][0] <= box[0][0] <= box[0][1] <=b[0][1] and \\\n b[1][0] <= box[1][0] <= box[1][1] <=b[1][1]:\n uncer_boxes.append(box)\n return [intersting_boxes,uncer_boxes] \ndef intersting_boxes(curve,b):\n cer_intersting_boxes=[]\n uncer_intersting_boxes=[]\n for box in curve[0]:\n if b[0][0] <= box[0][0] <= box[0][1] <=b[0][1] and \\\n b[1][0] <= box[1][0] <= box[1][1] <=b[1][1]:\n cer_intersting_boxes.append(box)\n for box in curve[1]:\n if b[0][0] <= box[0][0] <= box[0][1] <=b[0][1] and \\\n b[1][0] <= box[1][0] <= box[1][1] <=b[1][1]:\n uncer_intersting_boxes.append(box)\n return [cer_intersting_boxes,uncer_intersting_boxes] \ndef ibex_output(P,B,X):\n os.system(\"ibexsolve --eps-max=0.1 -s eq.txt > output.txt\")\n g=open('output.txt','r')\n result=g.readlines()\n T=computing_boxes(result)\n\n return T \ndef estimating_t1(components,upper_bound=200000): #it works only if len(components)\n t1=upper_bound\n t2=0\n for box1 in components[0]:\n for box2 in components[1]:\n a=d.distance(box1,box2).lower()\n b=d.distance(box1,box2).upper()\n if t1 > a:\n t1=a \n if t2<b:\n t2=b \n t=d.ftconstructor(t1,t2)\n t=0.25*d.power_interval(t,2) \n\n return [float(t.lower()),float(t.upper())] \ndef estimating_t(components,upper_bound=19000.8): #it works only if len(components)==2\n t1=upper_bound\n t2=0\n for box1 in components[0]:\n for box2 in components[1]:\n a=d.distance(box1[2:],box2[2:])\n if t1 > a[0]:\n t1=a[0]\n if t2<a[1]:\n t2=a[1] \n t=d.ftconstructor(t1,t2)\n t=0.25*d.power_interval(t,2) \n return [float(t.lower()),float(t.upper())] \n\ndef boxes_compare(box1,box2):\n flage=0\n for i in range(len(box1)-1,-1,-1):\n\n if box1[i][0] > box2[i][0]: \n return 1\n if box1[i][0] < box2[i][0]: \n return -1\n return 0 \ndef boxes_sort(boxes):\n sorted_boxes=boxes[:]\n for i in range(len(boxes)-1):\n for j in range(i+1,len(boxes)):\n if boxes_compare(sorted_boxes[i],sorted_boxes[j]) ==1:\n sorted_boxes[i], sorted_boxes[j] =sorted_boxes[j], sorted_boxes[i]\n return sorted_boxes \ndef connected_compnants(boxes):\n #ftboxes=[ [d.ftconstructor(boxi[0],boxi[1]) for boxi in box ] for box in boxes ]\n ftboxes=boxes[:]\n components=[[ftboxes[0]]]\n for i in range(1,len(ftboxes)):\n boxi_isused=0\n for j in range(len(components)):\n membership=0\n for k in range(len(components[j])): \n if d.boxes_intersection(ftboxes[i],components[j][k]) !=[] :\n components[j].append(ftboxes[i])\n membership=1\n boxi_isused=1\n break\n if membership==1:\n break \n if boxi_isused==0:\n components.append([ftboxes[i]])\n unused=list(range(len(components)))\n components1=components[:]\n components2=[]\n while len(components1) != len(components2) : \n for i in unused:\n for j in [j for j in list(range(i+1,len(components))) if j in unused ]:\n intersection_exists=False\n is_looping=True\n for boxi in components[i]:\n for boxj in components[j]:\n if d.boxes_intersection(boxi,boxj)!=[]:\n \n is_looping = False\n intersection_exists=True\n break\n if is_looping==False:\n break\n if intersection_exists== True:\n components[i] += components[j]\n unused.remove(j)\n\n components2=components1[:]\n components1=[components[k] for k in unused ]\n \n return components1 \n\ndef planner_connected_compnants(boxes): \n if len(boxes)==0:\n return []\n ftboxes=boxes[:]\n #ftboxes=[ [d.ftconstructor(boxi[0],boxi[1]) for boxi in box ] for box in boxes ]\n components=[[ftboxes[0]] ]\n for i in range(1,len(ftboxes)):\n boxi_isused=0\n for j in range(len(components)):\n membership=0\n for k in range(len(components[j])): \n if d.boxes_intersection(ftboxes[i][:2],components[j][k][:2]) !=[]: # and \\\n #d.boxes_intersection(ftboxes[i],components[j][k]) ==[]:\n components[j].append(ftboxes[i])\n membership=1\n boxi_isused=1\n break \n if membership==1:\n break \n if boxi_isused==0:\n components.append([ftboxes[i]])\n \n unused=list(range(len(components)))\n components1=components[:]\n components2=[]\n while len(components1) != len(components2) :\n for i in unused:\n for j in [j for j in list(range(i+1,len(components))) if j in unused ]:\n intersection_exists=False\n is_looping=True\n for boxi in components[i]:\n for boxj in components[j]:\n if d.boxes_intersection(boxi[:2],boxj[:2])!=[] :#and \\\n #d.boxes_intersection(boxi[:2],boxj[:2]) != [] :\n is_looping = False\n intersection_exists=True\n break\n if is_looping==False:\n break\n if intersection_exists== True:\n components[i] += components[j]\n unused.remove(j)\n components2=components1[:]\n components1=[components[k] for k in unused ] \n \n return components1 \ndef estimating_yandr(components,upper_bound=100000):\n r_bounds=[[upper_bound,0]]*(len(components[0][0])-2)\n r_list=[]\n y_list=[]\n for box1 in components[0]:\n for box2 in components[1]:\n ft_box1= [d.ftconstructor(Bi[0],Bi[1]) for Bi in box1 ]\n ft_box2= [d.ftconstructor(Bi[0],Bi[1]) for Bi in box2 ]\n \n y_list.append([0.5*(q1+q2) for q1,q2 in zip(ft_box1[2:],ft_box2[2:])])\n norm_q1q2=d.distance(box1[2:],box2[2:])\n norm_q1q2=d.ftconstructor(norm_q1q2[0],norm_q1q2[1])\n q1q2=[ft_box1[i]-ft_box2[i] for i in range(2,len(box1)) ]\n \n r=[ ri/norm_q1q2 for ri in q1q2 ]\n r_list.append(r)\n r=[]\n y=[]\n for i in range(len(y_list[0])):\n yi1=min([float(y[i].lower()) for y in y_list ])\n yi2=max([float(y[i].upper()) for y in y_list ])\n y.append([yi1,yi2])\n for i in range(len(r_list[0])):\n ri1=min([float(r[i].lower()) for r in r_list ])\n ri2=max([float(r[i].upper()) for r in r_list ])\n r.append([ri1,ri2]) \n\n return y+r \ndef detecting_nodes(boxes,B,f,X,eps): #boxes are list of cer and uncer curve\n mixes_boxes= [[1,box ] for box in boxes[0] ] +[[0,box ] for box in boxes[1]] #putting flaggs for cer and uncer boxes\n ftboxes=[ [box[0], [d.ftconstructor(boxi[0],boxi[1]) for boxi in box[1]] ] for box in mixes_boxes ] \n nodes_lifting=[]\n used=[]\n P=[ Pi.replace(\"\\n\",\"\") for Pi in open(f,\"r\").readlines() ]\n for i in range(len(ftboxes)):\n for j in range(i+1,len(ftboxes)):\n Mariam_ft=d.boxes_intersection(ftboxes[i][1],ftboxes[j][1])\n Mariam=[[float(Bi.lower()),float(Bi.upper()) ] for Bi in Mariam_ft]\n if (Mariam ==[] and \\\n d.boxes_intersection(ftboxes[i][1][:2],ftboxes[j][1][:2])) or\\\n (Mariam != [] and enclosing_curve(f,Mariam,X,eps_max=0.1) ==[[],[]] ): #needs to work more\n if i not in used:\n used.append(i)\n nodes_lifting.append(ftboxes[i])\n if j not in used:\n used.append(j)\n nodes_lifting.append(ftboxes[j])\n\n components= planner_connected_compnants(nodes_lifting)\n cer_components=[]\n uncer_components=[]\n component_normal=[]\n for component in components:\n boxes_component=[box[1] for box in component]\n component_normal =[ [[ float(Bi.lower()), float(Bi.upper()) ] for Bi in box[1] ] for box in component ]\n if 0 not in [ box[0] for box in component] and eval_file_gen(f,component_normal,X) ==\"[]\\n\" :\n cer_components.append(boxes_component)\n else: \n uncer_components.append(boxes_component)\n return [cer_components,uncer_components] \ndef intersect_in_2D(class1,class2,monotonicity=1):\n pl_intesected_pairs=[]\n if monotonicity==1:\n for i in range(len(class1)):\n for j in range(len(class2)):\n if d.boxes_intersection(class1[i][:2],class2[j][:2]) !=[] and d.boxes_intersection(class1[i],class2[j]) ==[] :\n if [class2[j],class1[i]] not in pl_intesected_pairs:\n pl_intesected_pairs.append([class1[i],class2[j]])\n elif monotonicity==0: \n for i in range(len(class1)):\n for j in range(len(class2)):\n if d.boxes_intersection(class1[i][:2],class2[j][:2]) !=[]:\n if [class2[j],class1[i]] not in pl_intesected_pairs:\n pl_intesected_pairs.append([class1[i],class2[j]])\n elif monotonicity==2: \n inters_indic=[]\n for i in range(len(class1)):\n inters_indic.append([])\n for j in range(len(class2)):\n if d.boxes_intersection(class1[i][:2],class2[j][:2]) !=[]:\n inters_indic[i]= inters_indic[i] +[j] \n for k in range(len(class1)):\n if len(inters_indic[k])> 3:\n for j in range(len(inters_indic[k])):\n if [class2[j],class1[k]] not in pl_intesected_pairs:\n pl_intesected_pairs.append([class1[k], class2[j]])\n \n \n return pl_intesected_pairs \ndef solving_fornodes(equations,boxes,B,X,eps=0.1):\n plane_components=detecting_nodes(boxes,B,equations,X,eps)#[0]\n g=open(equations,'r')\n P=[ Pi.replace(\"\\n\",\"\") for Pi in g.readlines() ]\n Ball_solutions=[]\n for plane_component in plane_components:\n x1=float(min([ai[0].lower() for ai in plane_component]))\n x2=float(max([ai[0].upper() for ai in plane_component]))\n y1=float(min([ai[1].lower() for ai in plane_component]))\n y2=float(max([ai[1].upper() for ai in plane_component]))\n components=connected_compnants(plane_component)\n r=[ [float(ri[0]),float(ri[1])] for ri in estimating_r(components) ]\n t=estimating_t(components)\n t=[float(t[0]),float(t[1])]\n B_Ball=[[x1,x2],[y1,y2]]+r +[t]\n Ball_generating_system(P,B_Ball,X)\n solutionsi=ibex_output(P,B_Ball,X)\n Ball_solutions +=solutionsi\n return Ball_solutions\ndef normal_subdivision(B):\n\tft_B=d.subdivide([d.ftconstructor(Bi[0],Bi[1]) for Bi in B[:]])\n\treturn [d.ft_normal(Bi) for Bi in ft_B]\ndef plane_subdivision(B):\n\t\n\tft_B2=d.subdivide([d.ftconstructor(Bi[0],Bi[1]) for Bi in B[:2]])\n\tnormal_B2=[d.ft_normal(Bi) for Bi in ft_B2]\n\treturn d.cartesian_product(normal_B2,[B[2:]])\ndef system_generator(f,B,X):\n g = open(f, \"r\")\n L = g.readlines()\n g.close()\n f = open(\"eq.txt\", \"w+\")\n f.write(\"Variables \\n\")\n for i in range(len(X)):\n f.write(str(X[i]) + \" in \" + str(B[i]) + \" ; \\n\")\n f.write(\"Constraints \\n\")\n for Li in L:\n f.write(Li.replace(\"\\n\", \"\") + \"=0; \\n\")\n f.write(\"end \")\n f.close()\n\n return f\ndef solving_with_ibex(eps=0.1):\n\tuncer_content=[]\n\tcer_content=[]\n\tos.system(\"ibexsolve --eps-max=\"+ str(eps) +\" -s eq.txt > output.txt\")\n\tg=open('output.txt','r')\n\tresult=g.read()\n\twith open('output.txt') as f:\n\t\tif \"successful\" in result:\n\t\t\tcer_content = f.readlines()\n\t\telif \"infeasible\" not in result and \"done! but some boxes\" in result:\n\t\t\tuncer_content = f.readlines()\n\t\telif \"infeasible problem\" in result:\n\t\t\tuncer_content=\"Empty\"\n\t\t\tcer_content=\"Empty\"\n\treturn [cer_content,uncer_content]\t\t\t\ndef computing_boxes():\n if \"infeasible\" in open(\"output.txt\",\"r\").read():\n return \"Empty\"\n content=open(\"output.txt\",\"r\").readlines()\n cer=[]; uncer=[]\n i=0\n Answer=[]\n for fi in content:\n try:\n a=fi.index('(')\n b=fi.index(')')\n T=(fi[a:b+1]).replace('(','[')\n T=(fi[a:b+1]).replace('(','[')\n T=T.replace(')',']')\n T=T.split(\";\")\n E=[]\n i=0\n for Ti in T:\n Ti= Ti.replace('[',\"\")\n Ti= Ti.replace(']',\"\")\n Ti=Ti.replace('<','')\n Ti=Ti.replace('>','')\n x=Ti.index(\",\")\n a=float(Ti[:x])\n b=float(Ti[x+1:])\n E.append([])\n E[i]=[a,b]\n i+=1\n if \"solution n\" in fi or \"boundary n\" in fi:\n cer.append(E)\n elif \"unknown n\" in fi:\n uncer.append(E)\n except ValueError:\n pass \n return [cer,uncer] \ndef enclosing_curve(system,B,X,eps_min=0.1,eps_max=0.1): \n L=[B]\n certified_boxes=[]\n uncertified_boxes=[]\n while len(L) !=0: \n system_generator(system,L[0],X)\n os.system(\"ibexsolve --eps-max=\"+ str(eps_max)+\" --eps-min=\"+ str(eps_min) + \" -s eq.txt > output.txt\")\n content=open(\"output.txt\",\"r\").readlines()\n \n ibex_output=computing_boxes()\n #ibex_output=solving_with_ibex(eps)\n if ibex_output ==[[],[]] and max([Bi[1]-Bi[0] for Bi in L[0] ]) < eps_min : \n uncertified_boxes.append(L[0])\n L.remove(L[0]);\n\n elif ibex_output ==[[],[]] :\n children=plane_subdivision(L[0])\n L.remove(L[0]);\n L += children # print warning ################################################################\"\"\n\n elif ibex_output== \"Empty\":\n L.remove(L[0])\n\n else:\n\n if len(ibex_output[0]) !=0:\n certified_boxes += ibex_output[0]\n if len(ibex_output[1])!=0: \n uncertified_boxes += ibex_output[1]\n L.remove(L[0]) \n return [certified_boxes,uncertified_boxes] \ndef loopsfree_checker(f,certified_boxes,uncer_boxes,P): #Assumption: no cusps\n\tL=eval_file_gen(f,certified_boxes,X)\n\twhile L.replace('\\n',\"\") != \"[]\":\n\t\tL=L.replace('[','')\n\t\tL=L.replace(']','')\n\t\tL=L.replace('\\n','')\n\t\tL=L.split(\",\")\n\t\tfor i in L:\n\t\t\tchildren=normal_subdivision(certified_boxes[int(i)])\n\t\t\tcertified_boxes.remove(certified_boxes[int(i)])\n\t\t\tfor child in children:\n\t\t\t\tcer_children, uncer_children= enclosing_curve(f,child,X)\n\t\t\t\tcertified_boxes +=cer_children\n\t\t\t\tuncer_boxes +=uncer_children\n\t\tL = eval_file_gen(f,certified_boxes,X)\n\treturn L\t \ndef eval_file_gen(f,boxes,X,special_function=[]): #condition: len(boxes[0]) is even\n functions=[\"sin\",\"cos\",\"tan\",\"exp\"]+special_function\n if len(boxes[0])==0:\n return []\n n=len(boxes[0])\n m=len(boxes)\n g=open(f,'r')\n P_str=g.readlines()\n P_str= [Pi.replace('\\n','') for Pi in P_str]\n P_str= [Pi.replace('^','**') for Pi in P_str]\n P_exp= [parse_expr(Pi) for Pi in P_str]\n #computing jac and the minors\n jac=sp.Matrix(P_str).jacobian(sp.Matrix(X))\n minor1=jac[:,1:].det()\n minor2=jac[:,[i for i in range(n) if i != 1] ].det()\n fil=open(\"evaluation_file1.py\",\"w\")\n fil.write(\"import flint as ft \\n\")\n fil.write(\"import sympy as sp \\n\")\n fil.write(\"import interval_arithmetic as d \\n\")\n fil.write(\"boxes=\"+str(boxes)+\"\\n\")\n fil.write(\"ftboxes=[ [d.ftconstructor(Bi[0],Bi[1]) for Bi in B ] for B in boxes ] \\n\" )\n fil.write(\"n=len(boxes[0])\\n\")\n fil.write(\"m=len(boxes)\\n\")\n fil.write(\"m1=[]\\n\")\n fil.write(\"m2=[]\\n\")\n minor1_str=str(minor1)\n minor2_str=str(minor2)\n for i in range(n):\n minor1_str= minor1_str.replace(\"x\"+str(i+1),\"B[\"+str(i)+\"]\" )\n minor2_str= minor2_str.replace(\"x\"+str(i+1),\"B[\"+str(i)+\"]\" )\n for func in functions:\n minor1_str=minor1_str.replace(func,\"ft.arb.\"+func)\n minor2_str=minor2_str.replace(func,\"ft.arb.\"+func)\n fil.write(\"for B in ftboxes: \\n\")\n fil.write(\" m1.append(ft.arb(\"+ minor1_str + \")) \\n\")\n fil.write(\" m2.append( ft.arb(\"+ minor2_str + \")) \\n\") \n fil.write(\"innrer_loops=[i for i in range(m) if 0 in m1[i] and 0 in m2[i] ]\\n\")\n fil.write(\"print(innrer_loops)\\n\")\n fil.close()\n t=os.popen(\"python3 evaluation_file1.py \").read()\n return t\ndef boxes_classifier(system,boxes,X,special_function=[]):\n if len(boxes[0])==0:\n return [[],[],boxes[1]]\n certified_boxes ,uncer_boxes =boxes\n L=eval_file_gen(system,certified_boxes,X)\n if L==[]:\n return [[],[],uncer_boxes]\n it=0\n L=L.replace('[','')\n L=L.replace(']','')\n L=L.replace('\\n','')\n L=L.split(\",\")\n if L !=[\"\"]:\n L=[int(li) for li in L]\n return [ [certified_boxes[i] for i in range(len(certified_boxes)) if i not in L] ,\\\n [certified_boxes[i] for i in L ], \\\n uncer_boxes ]\n else:\n return [ [certified_boxes[i] for i in range(len(certified_boxes)) if i not in L] ,[], uncer_boxes ] #can be enhanced\ndef projection_checker(solutions):\n if len(solutions)==0:\n return [[],[]]\n m=len(solutions[0])\n n=int((m+1)/2)\n intersect_in2d=[[]]*len(solutions)\n for i in range(len(solutions)-1):\n for j in range(i+1,len(solutions)):\n if solutions[i]==solutions[j]:\n continue\n elif d.boxes_intersection(solutions[i][:2],solutions[j][:2]) !=[] and (\\\n (d.boxes_intersection(solutions[i][n:2*n-2],[[-Bi[1],-Bi[0]] for Bi in solutions[j][n:2*n-2]]) ==[] and \\\n d.boxes_intersection(solutions[i][n:2*n-2],[[Bi[0],Bi[1]] for Bi in solutions[j][n:2*n-2]]) ==[] ) \\\n or \\\n d.boxes_intersection(solutions[i][2:n]+[solutions[i][2*n-2]], solutions[j][2:n]+[solutions[j][2*n-2]]) ==[]) : \n intersect_in2d[i] = intersect_in2d[i]+[ j]\n\n accepted=[]\n acc_ind=[]\n unaccepted=[]\n unacc_ind=[]\n for i in range(len(solutions)):\n\n if len(intersect_in2d[i]) ==0 and i not in unacc_ind+acc_ind:\n accepted.append(solutions[i])\n acc_ind.append(i)\n continue\n elif i not in unacc_ind+acc_ind:\n unaccepted.append(solutions[i])\n unacc_ind.append(i)\n for k in intersect_in2d[i]:\n if k not in unacc_ind: \n unaccepted.append(solutions[k]) \n unacc_ind.append(k) \n #pprint(sp.Matrix(unaccepted));input()\n return [accepted, unaccepted] \t\t\ndef Ball_given_2nboxes(system,X, B1,B2, monotonicity_B1=1,monotonicity_B2=1):\n B1_ft=[d.ftconstructor(Bi[0],Bi[1]) for Bi in B1]\n B2_ft=[d.ftconstructor(Bi[0],Bi[1]) for Bi in B2]\n P=[Pi.replace(\"\\n\",\"\") for Pi in open(system,\"r\").readlines()]\n sol=\"Empty\"\n if d.boxes_intersection(B1_ft, B2_ft) ==[] and monotonicity_B1== monotonicity_B2==1:\n t=estimating_t([[B1_ft], [B2_ft]])\n y_and_r=estimating_yandr([[B1_ft], [B2_ft]])\n intersec_B1B2_in2d=d.boxes_intersection(B1_ft[:2],B2_ft[:2])\n intersec_B1B2_in2d=[ [float(Bi.lower()),float(Bi.upper())] for Bi in intersec_B1B2_in2d ]\n B_Ball=intersec_B1B2_in2d +y_and_r +[t]\n Ball_node_gen(system,B_Ball,X)\n os.system(\"ibexsolve --eps-max=0.1 -s eq.txt > output.txt\")\n sol=computing_boxes()\n #if d.boxes_intersection(B1_ft, B2_ft) ==[]:\n # pass\n return sol \ndef all_pairs_oflist(L):\n pairs=[]\n for i in range(len(L)-1):\n for j in range(i+1,len(L)):\n pairs.append([L[i],L[j]])\n return pairs \ndef checking_assumptions(curve_data): #the input of this function is the output of Ball_solver\n if len(curve_data[0][1]) !=0 :\n return 0\n Ball_sols_ft=[[d.ftconstructor(Bi[0],Bi[1]) for Bi in B] for B in curve_data[1][0]]+[[d.ftconstructor(Bi[0],Bi[1]) for Bi in B] for B in curve_data[1][1]]\n alph3=assum_alph3_checker(Ball_sols_ft)\n if alph3==1 :\n return 1\n else:\n return 0\ndef csv_saver(L,type_L=\"Ball\"):\n dic=[]\n if type_L== \"Ball\" :\n n=int((len(L[0])+1)/2)\n for j in range(len(L)):\n dic.append({})\n for i in range(n):\n dic[j][\"x\"+str(i+1)]=L[j][i]\n for i in range(n,2*n-2):\n dic[j][\"r\"+str(i+3-n)]=L[j][i]\n dic[j][\"t\"]= L[j][2*n-2]\n return dic \ndef dict2csv(dictlist, csvfile):\n \"\"\"\n Takes a list of dictionaries as input and outputs a CSV file.\n \"\"\"\n f = open(csvfile, 'wb')\n\n fieldnames = dictlist[0].keys()\n\n csvwriter = csv.DictWriter(f, delimiter=',', fieldnames=fieldnames)\n csvwriter.writerow(dict((fn, fn) for fn in fieldnames))\n for row in dictlist:\n csvwriter.writerow(row)\n fn.close() \ndef assum_alph3_checker(solutions):\n comparing_list=[[]]*len(solutions)\n for i in range(len(solutions)-1):\n for j in range(i+1,len(solutions)):\n if d.boxes_intersection(solutions[i][:2],solutions[j][:2]) !=[]:\n comparing_list[i].append(j)\n comparing_list[j].append(i)\n matching=[len(T) for T in comparing_list]\n if max(matching) <=2:\n return 1\n else:\n return 0\n\ndef plotting_3D(boxes,Box,var=[0,1,2]):\n ax = plt.figure().add_subplot(111, projection='3d')\n ax.set_xlim(Box[0][0], Box[0][1])\n ax.set_ylim(Box[1][0], Box[1][1])\n ax.set_zlim(Box[2][0], Box[2][1])\n ax.set_xlabel(\"x\"+str(var[0]+1))\n ax.set_ylabel(\"x\"+str(var[1]+1))\n ax.set_zlabel(\"x\"+str(var[2]+1))\n for box in boxes : \n V=[[box[j][0] for j in range(3)] , [box[j][1] for j in range(3)]]\n #ax.scatter3D(box[0], box[1], box[2])\n points =list(itertools.product(*box))\n faces=[[points[0],points[2],points[6],points[4]],\n [points[0],points[2],points[3],points[1]],\n [points[0],points[1],points[5],points[4]], \n [points[2],points[3],points[7],points[6]], \n [points[1],points[3],points[7],points[5]]]\n ax.add_collection3d(Poly3DCollection(faces, \n facecolors='green', linewidths=1,edgecolors='green', alpha=.25))\n\n plt.show()\ndef enclosing_singularities(system,boxes,B,X,eps_max=0.1,eps_min=0.01): #there still computing Ball On the case where tow monotonic boxes intersect\n combin=[]\n ball=[]\n start_combin=time.time()\n n=len(B);\n P=[Pi.replace(\"\\n\",\"\") for Pi in open(system,\"r\").readlines()]\n certified_boxes, uncertified_boxes= boxes\n classes= boxes_classifier(system,boxes,X,special_function=[])\n cer_Solutions=[]\n uncer_Solutions=[]\n H=[]\n #############################################################################\n #Solving Ball for B1 and B2 in R^n such that C is monotonic in B1 and B2\n #######################################################################\n #monotonic_pairs=intersect_in_2D(classes[0],classes[0])\n #monotonic_componants=[ Bi[0] for Bi in monotonic_pairs ] +[ Bi[1] for Bi in monotonic_pairs ]\n #Guillaume's suggestion:\n mon_mid=[[0.5*(Bij[1]+Bij[0]) for Bij in Bi[:2] ] for Bi in classes[0] ]\n mon_rad=[ max([0.5*(Bij[1]-Bij[0]) for Bij in Bi[:2] ]) for Bi in classes[0] ]\n tree = spatial.KDTree(mon_mid)\n intersting_boxes=[tree.query_ball_point(m,r=(math.sqrt(2))*r) for m,r in zip(mon_mid,mon_rad)] \n #Ask Guillaume why this step is needed:\n \"\"\"for i in range(len(ball)): \n for j in ball[i]:\n if i not in ball[j]:\n ball[j].append(i)\"\"\"\n\n intersting_boxes=[indi for indi in intersting_boxes if len(indi) >3 ]#and len(connected_compnants([classes[0][i] for i in indi])) >1 ]\n discarded_components=[]\n for i in range(len(intersting_boxes)-1):\n for_i_stop=0\n boxi_set=set(intersting_boxes[i])\n for j in range(i+1,len(intersting_boxes)):\n boxj_set=set(intersting_boxes[j])\n if boxj_set.issubset(boxi_set):\n discarded_components.append(j)\n elif boxi_set < boxj_set:\n discarded_components.append(i)\n intersting_boxes=[intersting_boxes[i] for i in range(len(intersting_boxes)) \\\n if i not in discarded_components] \n\n interesting_boxes_flattened =[]\n for Box_ind in intersting_boxes :\n for j in Box_ind:\n if j not in interesting_boxes_flattened:\n interesting_boxes_flattened.append(j) #use a flattening function in numpy \n\n #ploting_boxes([classes[0][i] for i in interesting_boxes_flattened ],[])\n \n\n plane_components= planner_connected_compnants([classes[0][i] for i in interesting_boxes_flattened ])\n #pprint(plane_components[0]);input()\n end_combin=time.time()\n combin.append(end_combin-start_combin)\n H=[]\n for plane_component in plane_components: \n if len(plane_component)>1:\n start_combin=time.time()\n components=connected_compnants(plane_component)\n pairs_of_branches=all_pairs_oflist(components)\n end_combin=time.time()\n combin.append(end_combin-start_combin)\n for pair_branches in pairs_of_branches:\n start_ball=time.time()\n all_boxes=pair_branches[0]+pair_branches[1]\n uni=[]\n for box in all_boxes:\n uni = d.box_union(uni,box)\n t=estimating_t(pair_branches); t1 = d.ftconstructor(t[0],t[1]); t=[float(t1.lower()),float(t1.upper())];\n r=[ [float(ri[0]),float(ri[1])] for ri in estimating_yandr(pair_branches)]\n B_Ball=uni[:2] +r +[t] \n cusp_Ball_solver(P,B_Ball,X)\n\n #planeappend(B_Ball) \n #print(B_Ball[:3])\n Ball_generating_system(P,B_Ball,X,eps_min)\n\n os.system(\"ibexsolve --eps-max=\"+ str(eps_max)+\" --eps-min=\"+ str(eps_min) + \" -s eq.txt > output.txt\")\n #input(\"hi\")\n Solutions=computing_boxes()\n if Solutions != \"Empty\" and Solutions != [[],[]] :\n cer_Solutions += Solutions[0]\n uncer_Solutions += Solutions[1]\n if Solutions==[[],[]] :\n if d.width(B_Ball[:2]) > eps_min:\n #new_B=d.box_union(d.F_Ballminus(B_Ball),d.F_Ballplus(B_Ball))\n new_B=B_Ball[:2]+B[2:n]\n new_boxes=enclosing_curve(system,new_B,X,eps_max=0.1*eps_max)\n resul=enclosing_singularities(system,new_boxes,new_B,X,eps_max=0.1*eps_max)\n \n\n cer_Solutions+= resul[0]+resul[1] \n uncer_Solutions += resul[2]\n boxes[1] += new_boxes[1]\n else: \n uncer_Solutions.append(B_Ball)\n end_ball=time.time()\n ball.append(end_ball-start_ball) \n #There still the case B1B2[0],B1B2[1] are not disjoint \n ########################################################################################################\n #Solving Ball for potential_cusp, a box in R^n such that C is not monotonic \n ########################################################################################################\n start_combin=time.time()\n checked_boxes=[]\n all_boxes=boxes[0]+boxes[1]\n checked_boxes=[]\n mon_mid_cusp=[[0.5*(Bij[1]+Bij[0]) for Bij in Bi[:2] ] for Bi in classes[1] ]\n mon_rad_cusp=[ max([0.5*(Bij[1]-Bij[0]) for Bij in Bi[:2]]) for Bi in classes[1] ]\n potential_cusps=[tree.query_ball_point(m,r=(math.sqrt(2)*(r+eps_max))) for m,r in zip(mon_mid_cusp,mon_rad_cusp)]\n end_combin=time.time()\n combin.append(end_combin-start_combin)\n for cusp_indx in range(len(classes[1])):\n start_combin=time.time()\n intersecting_boxes=[all_boxes[i] for i in potential_cusps[cusp_indx]\\\n if d.boxes_intersection(all_boxes[i],classes[1][cusp_indx])!=[] ] #contains all boxes that intersect the considered potential_cusp \n \n #for potential_cusp in classes[1]:\n ###finding cusps (or small loops) in potential_cusp####\n \n #plane_intersecting_boxes= intersect_in_2D([potential_cusp],classes[0]+classes[1]+classes[2],monotonicity=0)\n #intersecting_boxes= [pair_i[1] for pair_i in plane_intersecting_boxes \\\n # if d.boxes_intersection(pair_i[1], potential_cusp)!=[] ] \n \n ##########\n \n H=[]\n uni= classes[1][cusp_indx][:]\n potential_cusp= classes[1][cusp_indx][:]\n checked_boxes.append(potential_cusp)\n for box in intersecting_boxes:\n if box in checked_boxes:\n continue\n uni = d.box_union(uni,box)\n checked_boxes.append(box)\n end_combin=time.time()\n combin.append(end_combin-start_combin) \n #max_q1q2=d.distance(uni[2:],uni[2:])\n #max_q1q2=d.ftconstructor(max_q1q2[0],max_q1q2[1])\n #t=d.power_interval(max_q1q2,2)/4\n #t=[float(t.lower()),float(t.upper())]\n #if t[0]<0:\n # t[0]=-0.1\n start_ball=time.time()\n t=estimating_t([[potential_cusp],[potential_cusp]])\n \"\"\"if t[1]-t[0] < 1e-07:\n t[0]=t[0]-0.5 * eps_min\n t[1]=t[1]+0.5 * eps_min\"\"\"\n B_Ball=uni +[[-1.01,1.01]]*(n-2)+[t]\n H.append(B_Ball)\n \n sol=cusp_Ball_solver(P,B_Ball,X)\n if sol != \"Empty\" and sol != [[],[]]:\n cer_Solutions += sol[0]\n uncer_Solutions += sol[1]\n if sol == [[],[]]:\n uncer_Solutions.append(B_Ball) \n end_ball=time.time() \n ball.append(end_ball-start_ball) \n ####finding nodes that have the same projection with potential_cusp\n start_combin=time.time()\n non_intersecting_boxes=[all_boxes[i] for i in potential_cusps[cusp_indx]\\\n if d.boxes_intersection(all_boxes[i],classes[1][cusp_indx])==[] ] #contains all boxes that don't intersect the considered potential_cusp but in 2d\n #non_intersecting_boxes= [pair_i[1] for pair_i in plane_intersecting_boxes \\\n # if d.boxes_intersection(pair_i[1], potential_cusp)==[] ] \n end_combin=time.time()\n combin.append(end_combin-start_combin)\n for aligned in non_intersecting_boxes:\n start_ball=time.time()\n if aligned in checked_boxes:\n continue\n boxes_intersect_aligned=[B for B in non_intersecting_boxes if d.boxes_intersection(aligned,B) != [] ]\n uni=aligned[:]\n for boxi in boxes_intersect_aligned:\n if boxi in checked_boxes:\n continue\n uni=d.box_union(uni,boxi)\n checked_boxes.append(boxi)\n t=estimating_t([[potential_cusp],[uni]])\n \"\"\"if t[1]-t[0] < 1e-07:\n t[0]=t[0]-0.5 * eps_min\n t[1]=t[1]+0.5 * eps_min\"\"\"\n r=[ [float(ri[0]),float(ri[1])] for ri in estimating_yandr([[potential_cusp],[uni]])]\n B_Ball=potential_cusp[:2]+r +[t] \n H.append(H) \n Ball_generating_system(P,B_Ball,X)\n os.system(\"ibexsolve --eps-max=\"+ str(eps_max)+\" --eps-min=\"+ str(eps_min) + \" -s eq.txt > output.txt\")\n Solutions=computing_boxes()\n if Solutions != \"Empty\":\n cer_Solutions += Solutions[0]\n uncer_Solutions += Solutions[1] \n elif Solutions == [[],[]]:\n uncer_Solutions.append(B_Ball) \n end_ball=time.time()\n ball.append(end_ball-start_ball) \n nodes=[]\n cups_or_smallnodes=[]\n start_combin=time.time()\n checker=projection_checker(cer_Solutions)\n uncer_Solutions= uncer_Solutions +checker[1]\n cer_Solutions=[Bi for Bi in checker[0] if Bi[2*n-2][1] >= 0 ] \n for solution in cer_Solutions :\n if 0 >= solution[2*n-2][0] and 0 <= solution[2*n-2][1]:\n cups_or_smallnodes.append(solution)\n else:\n nodes.append(solution) \n end_combin=time.time()\n combin.append(end_combin-start_combin)\n print(\"KDtree \",sum(combin),\"Ball \", sum(ball) ) \n return [nodes,cups_or_smallnodes, uncer_Solutions ] \n\n\n\n\nSystem=\"system12.txt\" \nBox = [[-2, 2] , [-4, 4.5] , [-0.2, 43.9]]\nBox = [[-1, 4], [-1, 4],[0,25],[-4.8, -1.4]]\n#Box=[[0.65,0.85],[-0.3,0.1],[-0.2, 45]]#, [-4.8,-1.4]] \n#Box=[[-10.1,10.1],[-10.1,10.1], [0,40.1]] \n\nX=[sp.Symbol(\"x\"+str(i)) for i in range(1,5)]\nstart_enc=time.time()\n\nboxes =enclosing_curve(System,Box,X,eps_max=0.1,eps_min=0.0001)\nend_enc=time.time()\nprint(\"enclosing_curve\", end_enc-start_enc )\nt1=time.time()\nnodes,cups_or_smallnodes,uncer_Solutions=enclosing_singularities(System,boxes,Box,X,eps_max=0.1, eps_min=0.0001)\nprint(time.time()-t1)\nprint(len(boxes[0]),len(boxes[1]))\nprint(len(nodes),len(uncer_Solutions ))\ne=[]\nfor i in range(len(nodes)-1):\n for j in range(i+1,len(nodes)):\n if d.boxes_intersection(nodes[i],nodes[j]) != []:\n e.append(j)\nprint(len([nodes[i] for i in range(len(nodes)) if i not in e ]))\nploting_boxes(boxes[0],boxes[1] ,B=Box[:2], nodes = nodes,x=0.007, cusps= cups_or_smallnodes,uncer_Solutions=uncer_Solutions,color=\"green\" ,Legend=False)\n\n#plotting_3D(boxes[0],Box);input()\n\"\"\"number_execution, total_time = timeit.Timer(\"boxes =enclosing_curve(System,Box,X,eps_max=0.1,eps_min=0.0000001)\"\\\n , globals=globals()).autorange()\naverage_time = total_time / number_execution\nprint(average_time);\nboxes =enclosing_curve(System,Box,X,eps_max=0.1,eps_min=0.0000001)\nnumber_execution, total_time = timeit.Timer(\"nodes,cups_or_smallnodes,uncer_Solutions=enclosing_singularities(System,boxes,Box,X,eps_max=0.1, eps_min=0.00001)\", globals=globals()).autorange()\naverage_time = total_time / number_execution\nprint(average_time);\n#ploting_boxes(boxes[0],boxes[1] ,B=Box[:2], nodes = nodes,x=0.008, cusps= cups_or_smallnodes,uncer_Solutions=uncer_Solutions,color=\"green\" ,Legend=True)\"\"\"\n\"\"\"boxes =enclosing_curve(System,Box,X,eps=0.1)\nnumber_execution, total_time = timeit.Timer(\"nodes, cups_or_smallnodes,uncer_Solutions=enclosing_singularities(System,boxes,Box,X, eps_min=0.000001);\", globals=globals()).autorange()\naverage_time = total_time / number_execution\nprint(average_time);\nnodes, cups_or_smallnodes,uncer_Solutions=enclosing_singularities(System,boxes,Box,X, eps_min=0.000001);\n\n#nodes, cups_or_smallnodes,uncer_Solutions=enclosing_singularities(System,boxes,Box,X,eps_min=0.000001)\"\"\"\n\n#plotting the singularities\n#ploting_boxes(boxes[0],boxes[1] ,B=Box[:2], nodes = nodes,x=0.1, cusps= cups_or_smallnodes,uncer_Solutions=uncer_Solutions,color=\"green\" ,Legend=True)\n\n\n##################################\n#Declaring parameters #######\n##################################\n\"\"\"System=\"system.txt\" \nBox=[[-5,15],[-15,15],[-3.14,3.14],[-3.14,3.14]]\nX=[sp.Symbol(\"x\"+str(i)) for i in range(1,5)]\n##################################\n#Applying the function #######\n##################################\nboxes =enclosing_curve(System,Box,X)\n\"\"\"\n",
"import math\nimport matplotlib.pyplot as plt\nimport os\nimport pickle\nimport interval_arithmetic as d\nfrom pprint import pprint\nfrom sympy.parsing.sympy_parser import parse_expr\nimport sympy as sp\nimport os\nfrom cusp import cusp_Ball_solver, evaluation_exp\nimport matplotlib.patches as mpatches\nimport csv\nfrom scipy import spatial\nimport flint as ft\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom mpl_toolkits.mplot3d.art3d import Poly3DCollection, Line3DCollection\nimport itertools\nimport timeit\nimport time\n\n\ndef ploting_boxes(boxes, uncer_boxes, var=[0, 1], B=[[-20, 20], [-20, 20]],\n x=0.1, nodes=[], cusps=[], uncer_Solutions=[], Legend=False, color=\n 'green', variabel_name='x'):\n fig, ax = plt.subplots()\n ax.set_xlim(B[0][0], B[0][1])\n ax.set_ylim(B[1][0], B[1][1])\n ax.set_xlabel(variabel_name + str(1))\n ax.set_ylabel(variabel_name + str(2))\n \"\"\"try:\n ax.title(open(\"system.txt\",\"r\").read())\n except:\n pass\"\"\"\n c = 0\n green_patch = mpatches.Patch(color=color, label='smooth part')\n red_patch = mpatches.Patch(color='red', label='unknown part')\n node_patch = mpatches.Patch(color='black', label='Certified nodes',\n fill=None)\n cusp_patch = mpatches.Patch(color='blue', label=\n 'Projection of certified solution with t=0 ', fill=None)\n if Legend == True:\n plt.legend(handles=[green_patch, red_patch, node_patch, cusp_patch])\n for box in boxes:\n rectangle = plt.Rectangle((box[var[0]][0], box[var[1]][0]), box[var\n [0]][1] - box[var[0]][0], box[var[1]][1] - box[var[1]][0],\n color=color)\n plt.gca().add_patch(rectangle)\n for box in uncer_boxes:\n rectangle = plt.Rectangle((box[var[0]][0], box[var[1]][0]), box[var\n [0]][1] - box[var[0]][0], box[var[1]][1] - box[var[1]][0], fc='r')\n plt.gca().add_patch(rectangle)\n for box in nodes:\n rectangle = plt.Rectangle((box[0][0] - x, box[1][0] - x), 2 * x +\n box[0][1] - box[0][0], 2 * x + box[1][1] - box[1][0], fc='y',\n fill=None)\n plt.gca().add_patch(rectangle)\n for box in cusps:\n rectangle = plt.Rectangle((box[0][0] - x, box[1][0] - x), 2 * x +\n box[0][1] - box[0][0], 2 * x + box[1][1] - box[1][0], fc='y',\n color='blue', fill=None)\n plt.gca().add_patch(rectangle)\n for box in uncer_Solutions:\n rectangle = plt.Rectangle((box[0][0] - x, box[1][0] - x), 2 * x +\n box[0][1] - box[0][0], 2 * x + box[1][1] - box[1][0], fc='y',\n color='red', fill=None)\n plt.gca().add_patch(rectangle)\n plt.savefig('fig.jpg', dpi=1000)\n plt.show()\n\n\ndef Ball_node_gen(equations, B_Ball, X):\n P = open(equations, 'r').readlines()\n P = [Pi.replace('\\n', '') for Pi in P]\n n = len(X)\n V = ' Variables \\n '\n for i in range(n):\n V += 'x' + str(i + 1) + ' in ' + str(B_Ball[i]) + ' ; \\n'\n for i in range(n, 2 * n - 2):\n V += 'r' + str(i - n + 3) + ' in ' + str(B_Ball[i]) + ' ; \\n'\n V += 't' + ' in ' + str(B_Ball[2 * n - 2]) + ' ; \\n'\n V += 'Constraints \\n'\n for Pi in P:\n V += SDP_str(Pi, X)[0]\n V += SDP_str(Pi, X)[1]\n last_eq = ''\n for i in range(3, n):\n last_eq += 'r' + str(i) + '^2+'\n last_eq += 'r' + str(n) + '^2 -1=0;'\n V += last_eq + '\\n'\n f = open('eq.txt', 'w+')\n f.write(V)\n f.write('end')\n f.close()\n\n\ndef Ball_solver(equations, B_Ball, X):\n L = [B_Ball]\n certified_boxes = []\n uncertified_boxes = []\n n = len(X)\n while len(L) != 0:\n solvability = 1\n if B_Ball[2 * n - 2][0] <= 0 <= B_Ball[2 * n - 2][1] and d.width([d\n .ftconstructor(Bi[0], Bi[1]) for Bi in L[0]]) < 0.1:\n Ball_cusp_gen(equations, B_Ball, X)\n elif (B_Ball[2 * n - 2][0] > 0 or 0 > B_Ball[2 * n - 2][1]\n ) and d.width([d.ftconstructor(Bi[0], Bi[1]) for Bi in L[0]]\n ) < 0.1:\n Ball_node_gen(equations, B_Ball, X)\n else:\n children = cb.plane_subdivision(L[0])\n L.remove(L[0])\n L += children\n solvability = 0\n if solvability == 1:\n ibex_output = cb.solving_with_ibex()\n if ibex_output[0] == 'Empty':\n L.remove(L[0])\n elif len(ibex_output[0]) != 0:\n certified_boxes += cb.computing_boxes(ibex_output[0])\n L.remove(L[0])\n elif len(ibex_output[1]) != 0:\n uncertified_boxes += cb.computing_boxes(ibex_output[1])\n L.remove(L[0])\n else:\n children = cb.plane_subdivision(L[0])\n L.remove(L[0])\n L += children\n return [certified_boxes, uncertified_boxes]\n\n\ndef SDP_str(P, X):\n n = len(X)\n P_pluse = P[:]\n P_minus = P[:]\n for i in range(2, n):\n P_pluse = P_pluse.replace('x' + str(i + 1), '(x' + str(i + 1) +\n '+ r' + str(i + 1) + '*sqrt(t))')\n P_minus = P_minus.replace('x' + str(i + 1), '(x' + str(i + 1) +\n '- r' + str(i + 1) + '*sqrt(t))')\n SP = '0.5*(' + P_pluse + '+' + P_minus + ')=0; \\n'\n DP = '0.5*(' + P_pluse + '- (' + P_minus + ') )/(sqrt(t))=0; \\n'\n return [SP, DP]\n\n\ndef Ball_generating_system(P, B_Ball, X, eps_min=0.001):\n n = len(X)\n V = ' Variables \\n '\n for i in range(n):\n if B_Ball[i][0] != B_Ball[i][1]:\n V += 'x' + str(i + 1) + ' in ' + str(B_Ball[i]) + ' ; \\n'\n else:\n V += 'x' + str(i + 1) + ' in ' + str([B_Ball[i][0] - eps_min, \n B_Ball[i][1] + eps_min]) + ' ; \\n'\n for i in range(n, 2 * n - 2):\n V += 'r' + str(i - n + 3) + ' in ' + str(B_Ball[i]) + ' ; \\n'\n V += 't' + ' in ' + str(B_Ball[2 * n - 2]) + ' ; \\n'\n V += 'Constraints \\n'\n for Pi in P:\n V += SDP_str(Pi, X)[0]\n V += SDP_str(Pi, X)[1]\n last_eq = ''\n for i in range(3, n):\n last_eq += 'r' + str(i) + '^2+'\n last_eq += 'r' + str(n) + '^2 -1=0;'\n V += last_eq + '\\n'\n f = open('eq.txt', 'w+')\n f.write(V)\n f.write('end')\n f.close()\n\n\ndef intersting_boxes1(f, b):\n pickle_in = open(f, 'rb')\n curve = pickle.load(pickle_in)\n pickle_in.close()\n intersting_boxes = []\n uncer_boxes = []\n for box in curve[0]:\n if b[0][0] <= box[0][0] <= box[0][1] <= b[0][1] and b[1][0] <= box[1][0\n ] <= box[1][1] <= b[1][1]:\n intersting_boxes.append(box)\n for box in curve[1]:\n if b[0][0] <= box[0][0] <= box[0][1] <= b[0][1] and b[1][0] <= box[1][0\n ] <= box[1][1] <= b[1][1]:\n uncer_boxes.append(box)\n return [intersting_boxes, uncer_boxes]\n\n\ndef intersting_boxes(curve, b):\n cer_intersting_boxes = []\n uncer_intersting_boxes = []\n for box in curve[0]:\n if b[0][0] <= box[0][0] <= box[0][1] <= b[0][1] and b[1][0] <= box[1][0\n ] <= box[1][1] <= b[1][1]:\n cer_intersting_boxes.append(box)\n for box in curve[1]:\n if b[0][0] <= box[0][0] <= box[0][1] <= b[0][1] and b[1][0] <= box[1][0\n ] <= box[1][1] <= b[1][1]:\n uncer_intersting_boxes.append(box)\n return [cer_intersting_boxes, uncer_intersting_boxes]\n\n\ndef ibex_output(P, B, X):\n os.system('ibexsolve --eps-max=0.1 -s eq.txt > output.txt')\n g = open('output.txt', 'r')\n result = g.readlines()\n T = computing_boxes(result)\n return T\n\n\ndef estimating_t1(components, upper_bound=200000):\n t1 = upper_bound\n t2 = 0\n for box1 in components[0]:\n for box2 in components[1]:\n a = d.distance(box1, box2).lower()\n b = d.distance(box1, box2).upper()\n if t1 > a:\n t1 = a\n if t2 < b:\n t2 = b\n t = d.ftconstructor(t1, t2)\n t = 0.25 * d.power_interval(t, 2)\n return [float(t.lower()), float(t.upper())]\n\n\ndef estimating_t(components, upper_bound=19000.8):\n t1 = upper_bound\n t2 = 0\n for box1 in components[0]:\n for box2 in components[1]:\n a = d.distance(box1[2:], box2[2:])\n if t1 > a[0]:\n t1 = a[0]\n if t2 < a[1]:\n t2 = a[1]\n t = d.ftconstructor(t1, t2)\n t = 0.25 * d.power_interval(t, 2)\n return [float(t.lower()), float(t.upper())]\n\n\ndef boxes_compare(box1, box2):\n flage = 0\n for i in range(len(box1) - 1, -1, -1):\n if box1[i][0] > box2[i][0]:\n return 1\n if box1[i][0] < box2[i][0]:\n return -1\n return 0\n\n\ndef boxes_sort(boxes):\n sorted_boxes = boxes[:]\n for i in range(len(boxes) - 1):\n for j in range(i + 1, len(boxes)):\n if boxes_compare(sorted_boxes[i], sorted_boxes[j]) == 1:\n sorted_boxes[i], sorted_boxes[j] = sorted_boxes[j\n ], sorted_boxes[i]\n return sorted_boxes\n\n\ndef connected_compnants(boxes):\n ftboxes = boxes[:]\n components = [[ftboxes[0]]]\n for i in range(1, len(ftboxes)):\n boxi_isused = 0\n for j in range(len(components)):\n membership = 0\n for k in range(len(components[j])):\n if d.boxes_intersection(ftboxes[i], components[j][k]) != []:\n components[j].append(ftboxes[i])\n membership = 1\n boxi_isused = 1\n break\n if membership == 1:\n break\n if boxi_isused == 0:\n components.append([ftboxes[i]])\n unused = list(range(len(components)))\n components1 = components[:]\n components2 = []\n while len(components1) != len(components2):\n for i in unused:\n for j in [j for j in list(range(i + 1, len(components))) if j in\n unused]:\n intersection_exists = False\n is_looping = True\n for boxi in components[i]:\n for boxj in components[j]:\n if d.boxes_intersection(boxi, boxj) != []:\n is_looping = False\n intersection_exists = True\n break\n if is_looping == False:\n break\n if intersection_exists == True:\n components[i] += components[j]\n unused.remove(j)\n components2 = components1[:]\n components1 = [components[k] for k in unused]\n return components1\n\n\ndef planner_connected_compnants(boxes):\n if len(boxes) == 0:\n return []\n ftboxes = boxes[:]\n components = [[ftboxes[0]]]\n for i in range(1, len(ftboxes)):\n boxi_isused = 0\n for j in range(len(components)):\n membership = 0\n for k in range(len(components[j])):\n if d.boxes_intersection(ftboxes[i][:2], components[j][k][:2]\n ) != []:\n components[j].append(ftboxes[i])\n membership = 1\n boxi_isused = 1\n break\n if membership == 1:\n break\n if boxi_isused == 0:\n components.append([ftboxes[i]])\n unused = list(range(len(components)))\n components1 = components[:]\n components2 = []\n while len(components1) != len(components2):\n for i in unused:\n for j in [j for j in list(range(i + 1, len(components))) if j in\n unused]:\n intersection_exists = False\n is_looping = True\n for boxi in components[i]:\n for boxj in components[j]:\n if d.boxes_intersection(boxi[:2], boxj[:2]) != []:\n is_looping = False\n intersection_exists = True\n break\n if is_looping == False:\n break\n if intersection_exists == True:\n components[i] += components[j]\n unused.remove(j)\n components2 = components1[:]\n components1 = [components[k] for k in unused]\n return components1\n\n\ndef estimating_yandr(components, upper_bound=100000):\n r_bounds = [[upper_bound, 0]] * (len(components[0][0]) - 2)\n r_list = []\n y_list = []\n for box1 in components[0]:\n for box2 in components[1]:\n ft_box1 = [d.ftconstructor(Bi[0], Bi[1]) for Bi in box1]\n ft_box2 = [d.ftconstructor(Bi[0], Bi[1]) for Bi in box2]\n y_list.append([(0.5 * (q1 + q2)) for q1, q2 in zip(ft_box1[2:],\n ft_box2[2:])])\n norm_q1q2 = d.distance(box1[2:], box2[2:])\n norm_q1q2 = d.ftconstructor(norm_q1q2[0], norm_q1q2[1])\n q1q2 = [(ft_box1[i] - ft_box2[i]) for i in range(2, len(box1))]\n r = [(ri / norm_q1q2) for ri in q1q2]\n r_list.append(r)\n r = []\n y = []\n for i in range(len(y_list[0])):\n yi1 = min([float(y[i].lower()) for y in y_list])\n yi2 = max([float(y[i].upper()) for y in y_list])\n y.append([yi1, yi2])\n for i in range(len(r_list[0])):\n ri1 = min([float(r[i].lower()) for r in r_list])\n ri2 = max([float(r[i].upper()) for r in r_list])\n r.append([ri1, ri2])\n return y + r\n\n\ndef detecting_nodes(boxes, B, f, X, eps):\n mixes_boxes = [[1, box] for box in boxes[0]] + [[0, box] for box in\n boxes[1]]\n ftboxes = [[box[0], [d.ftconstructor(boxi[0], boxi[1]) for boxi in box[\n 1]]] for box in mixes_boxes]\n nodes_lifting = []\n used = []\n P = [Pi.replace('\\n', '') for Pi in open(f, 'r').readlines()]\n for i in range(len(ftboxes)):\n for j in range(i + 1, len(ftboxes)):\n Mariam_ft = d.boxes_intersection(ftboxes[i][1], ftboxes[j][1])\n Mariam = [[float(Bi.lower()), float(Bi.upper())] for Bi in\n Mariam_ft]\n if Mariam == [] and d.boxes_intersection(ftboxes[i][1][:2],\n ftboxes[j][1][:2]) or Mariam != [] and enclosing_curve(f,\n Mariam, X, eps_max=0.1) == [[], []]:\n if i not in used:\n used.append(i)\n nodes_lifting.append(ftboxes[i])\n if j not in used:\n used.append(j)\n nodes_lifting.append(ftboxes[j])\n components = planner_connected_compnants(nodes_lifting)\n cer_components = []\n uncer_components = []\n component_normal = []\n for component in components:\n boxes_component = [box[1] for box in component]\n component_normal = [[[float(Bi.lower()), float(Bi.upper())] for Bi in\n box[1]] for box in component]\n if 0 not in [box[0] for box in component] and eval_file_gen(f,\n component_normal, X) == '[]\\n':\n cer_components.append(boxes_component)\n else:\n uncer_components.append(boxes_component)\n return [cer_components, uncer_components]\n\n\ndef intersect_in_2D(class1, class2, monotonicity=1):\n pl_intesected_pairs = []\n if monotonicity == 1:\n for i in range(len(class1)):\n for j in range(len(class2)):\n if d.boxes_intersection(class1[i][:2], class2[j][:2]) != [\n ] and d.boxes_intersection(class1[i], class2[j]) == []:\n if [class2[j], class1[i]] not in pl_intesected_pairs:\n pl_intesected_pairs.append([class1[i], class2[j]])\n elif monotonicity == 0:\n for i in range(len(class1)):\n for j in range(len(class2)):\n if d.boxes_intersection(class1[i][:2], class2[j][:2]) != []:\n if [class2[j], class1[i]] not in pl_intesected_pairs:\n pl_intesected_pairs.append([class1[i], class2[j]])\n elif monotonicity == 2:\n inters_indic = []\n for i in range(len(class1)):\n inters_indic.append([])\n for j in range(len(class2)):\n if d.boxes_intersection(class1[i][:2], class2[j][:2]) != []:\n inters_indic[i] = inters_indic[i] + [j]\n for k in range(len(class1)):\n if len(inters_indic[k]) > 3:\n for j in range(len(inters_indic[k])):\n if [class2[j], class1[k]] not in pl_intesected_pairs:\n pl_intesected_pairs.append([class1[k], class2[j]])\n return pl_intesected_pairs\n\n\ndef solving_fornodes(equations, boxes, B, X, eps=0.1):\n plane_components = detecting_nodes(boxes, B, equations, X, eps)\n g = open(equations, 'r')\n P = [Pi.replace('\\n', '') for Pi in g.readlines()]\n Ball_solutions = []\n for plane_component in plane_components:\n x1 = float(min([ai[0].lower() for ai in plane_component]))\n x2 = float(max([ai[0].upper() for ai in plane_component]))\n y1 = float(min([ai[1].lower() for ai in plane_component]))\n y2 = float(max([ai[1].upper() for ai in plane_component]))\n components = connected_compnants(plane_component)\n r = [[float(ri[0]), float(ri[1])] for ri in estimating_r(components)]\n t = estimating_t(components)\n t = [float(t[0]), float(t[1])]\n B_Ball = [[x1, x2], [y1, y2]] + r + [t]\n Ball_generating_system(P, B_Ball, X)\n solutionsi = ibex_output(P, B_Ball, X)\n Ball_solutions += solutionsi\n return Ball_solutions\n\n\ndef normal_subdivision(B):\n ft_B = d.subdivide([d.ftconstructor(Bi[0], Bi[1]) for Bi in B[:]])\n return [d.ft_normal(Bi) for Bi in ft_B]\n\n\ndef plane_subdivision(B):\n ft_B2 = d.subdivide([d.ftconstructor(Bi[0], Bi[1]) for Bi in B[:2]])\n normal_B2 = [d.ft_normal(Bi) for Bi in ft_B2]\n return d.cartesian_product(normal_B2, [B[2:]])\n\n\ndef system_generator(f, B, X):\n g = open(f, 'r')\n L = g.readlines()\n g.close()\n f = open('eq.txt', 'w+')\n f.write('Variables \\n')\n for i in range(len(X)):\n f.write(str(X[i]) + ' in ' + str(B[i]) + ' ; \\n')\n f.write('Constraints \\n')\n for Li in L:\n f.write(Li.replace('\\n', '') + '=0; \\n')\n f.write('end ')\n f.close()\n return f\n\n\ndef solving_with_ibex(eps=0.1):\n uncer_content = []\n cer_content = []\n os.system('ibexsolve --eps-max=' + str(eps) + ' -s eq.txt > output.txt'\n )\n g = open('output.txt', 'r')\n result = g.read()\n with open('output.txt') as f:\n if 'successful' in result:\n cer_content = f.readlines()\n elif 'infeasible' not in result and 'done! but some boxes' in result:\n uncer_content = f.readlines()\n elif 'infeasible problem' in result:\n uncer_content = 'Empty'\n cer_content = 'Empty'\n return [cer_content, uncer_content]\n\n\ndef computing_boxes():\n if 'infeasible' in open('output.txt', 'r').read():\n return 'Empty'\n content = open('output.txt', 'r').readlines()\n cer = []\n uncer = []\n i = 0\n Answer = []\n for fi in content:\n try:\n a = fi.index('(')\n b = fi.index(')')\n T = fi[a:b + 1].replace('(', '[')\n T = fi[a:b + 1].replace('(', '[')\n T = T.replace(')', ']')\n T = T.split(';')\n E = []\n i = 0\n for Ti in T:\n Ti = Ti.replace('[', '')\n Ti = Ti.replace(']', '')\n Ti = Ti.replace('<', '')\n Ti = Ti.replace('>', '')\n x = Ti.index(',')\n a = float(Ti[:x])\n b = float(Ti[x + 1:])\n E.append([])\n E[i] = [a, b]\n i += 1\n if 'solution n' in fi or 'boundary n' in fi:\n cer.append(E)\n elif 'unknown n' in fi:\n uncer.append(E)\n except ValueError:\n pass\n return [cer, uncer]\n\n\ndef enclosing_curve(system, B, X, eps_min=0.1, eps_max=0.1):\n L = [B]\n certified_boxes = []\n uncertified_boxes = []\n while len(L) != 0:\n system_generator(system, L[0], X)\n os.system('ibexsolve --eps-max=' + str(eps_max) + ' --eps-min=' +\n str(eps_min) + ' -s eq.txt > output.txt')\n content = open('output.txt', 'r').readlines()\n ibex_output = computing_boxes()\n if ibex_output == [[], []] and max([(Bi[1] - Bi[0]) for Bi in L[0]]\n ) < eps_min:\n uncertified_boxes.append(L[0])\n L.remove(L[0])\n elif ibex_output == [[], []]:\n children = plane_subdivision(L[0])\n L.remove(L[0])\n L += children\n elif ibex_output == 'Empty':\n L.remove(L[0])\n else:\n if len(ibex_output[0]) != 0:\n certified_boxes += ibex_output[0]\n if len(ibex_output[1]) != 0:\n uncertified_boxes += ibex_output[1]\n L.remove(L[0])\n return [certified_boxes, uncertified_boxes]\n\n\ndef loopsfree_checker(f, certified_boxes, uncer_boxes, P):\n L = eval_file_gen(f, certified_boxes, X)\n while L.replace('\\n', '') != '[]':\n L = L.replace('[', '')\n L = L.replace(']', '')\n L = L.replace('\\n', '')\n L = L.split(',')\n for i in L:\n children = normal_subdivision(certified_boxes[int(i)])\n certified_boxes.remove(certified_boxes[int(i)])\n for child in children:\n cer_children, uncer_children = enclosing_curve(f, child, X)\n certified_boxes += cer_children\n uncer_boxes += uncer_children\n L = eval_file_gen(f, certified_boxes, X)\n return L\n\n\ndef eval_file_gen(f, boxes, X, special_function=[]):\n functions = ['sin', 'cos', 'tan', 'exp'] + special_function\n if len(boxes[0]) == 0:\n return []\n n = len(boxes[0])\n m = len(boxes)\n g = open(f, 'r')\n P_str = g.readlines()\n P_str = [Pi.replace('\\n', '') for Pi in P_str]\n P_str = [Pi.replace('^', '**') for Pi in P_str]\n P_exp = [parse_expr(Pi) for Pi in P_str]\n jac = sp.Matrix(P_str).jacobian(sp.Matrix(X))\n minor1 = jac[:, 1:].det()\n minor2 = jac[:, [i for i in range(n) if i != 1]].det()\n fil = open('evaluation_file1.py', 'w')\n fil.write('import flint as ft \\n')\n fil.write('import sympy as sp \\n')\n fil.write('import interval_arithmetic as d \\n')\n fil.write('boxes=' + str(boxes) + '\\n')\n fil.write(\n 'ftboxes=[ [d.ftconstructor(Bi[0],Bi[1]) for Bi in B ] for B in boxes ] \\n'\n )\n fil.write('n=len(boxes[0])\\n')\n fil.write('m=len(boxes)\\n')\n fil.write('m1=[]\\n')\n fil.write('m2=[]\\n')\n minor1_str = str(minor1)\n minor2_str = str(minor2)\n for i in range(n):\n minor1_str = minor1_str.replace('x' + str(i + 1), 'B[' + str(i) + ']')\n minor2_str = minor2_str.replace('x' + str(i + 1), 'B[' + str(i) + ']')\n for func in functions:\n minor1_str = minor1_str.replace(func, 'ft.arb.' + func)\n minor2_str = minor2_str.replace(func, 'ft.arb.' + func)\n fil.write('for B in ftboxes: \\n')\n fil.write(' m1.append(ft.arb(' + minor1_str + ')) \\n')\n fil.write(' m2.append( ft.arb(' + minor2_str + ')) \\n')\n fil.write(\n 'innrer_loops=[i for i in range(m) if 0 in m1[i] and 0 in m2[i] ]\\n')\n fil.write('print(innrer_loops)\\n')\n fil.close()\n t = os.popen('python3 evaluation_file1.py ').read()\n return t\n\n\ndef boxes_classifier(system, boxes, X, special_function=[]):\n if len(boxes[0]) == 0:\n return [[], [], boxes[1]]\n certified_boxes, uncer_boxes = boxes\n L = eval_file_gen(system, certified_boxes, X)\n if L == []:\n return [[], [], uncer_boxes]\n it = 0\n L = L.replace('[', '')\n L = L.replace(']', '')\n L = L.replace('\\n', '')\n L = L.split(',')\n if L != ['']:\n L = [int(li) for li in L]\n return [[certified_boxes[i] for i in range(len(certified_boxes)) if\n i not in L], [certified_boxes[i] for i in L], uncer_boxes]\n else:\n return [[certified_boxes[i] for i in range(len(certified_boxes)) if\n i not in L], [], uncer_boxes]\n\n\ndef projection_checker(solutions):\n if len(solutions) == 0:\n return [[], []]\n m = len(solutions[0])\n n = int((m + 1) / 2)\n intersect_in2d = [[]] * len(solutions)\n for i in range(len(solutions) - 1):\n for j in range(i + 1, len(solutions)):\n if solutions[i] == solutions[j]:\n continue\n elif d.boxes_intersection(solutions[i][:2], solutions[j][:2]) != [\n ] and (d.boxes_intersection(solutions[i][n:2 * n - 2], [[-\n Bi[1], -Bi[0]] for Bi in solutions[j][n:2 * n - 2]]) == [] and\n d.boxes_intersection(solutions[i][n:2 * n - 2], [[Bi[0], Bi\n [1]] for Bi in solutions[j][n:2 * n - 2]]) == [] or d.\n boxes_intersection(solutions[i][2:n] + [solutions[i][2 * n -\n 2]], solutions[j][2:n] + [solutions[j][2 * n - 2]]) == []):\n intersect_in2d[i] = intersect_in2d[i] + [j]\n accepted = []\n acc_ind = []\n unaccepted = []\n unacc_ind = []\n for i in range(len(solutions)):\n if len(intersect_in2d[i]) == 0 and i not in unacc_ind + acc_ind:\n accepted.append(solutions[i])\n acc_ind.append(i)\n continue\n elif i not in unacc_ind + acc_ind:\n unaccepted.append(solutions[i])\n unacc_ind.append(i)\n for k in intersect_in2d[i]:\n if k not in unacc_ind:\n unaccepted.append(solutions[k])\n unacc_ind.append(k)\n return [accepted, unaccepted]\n\n\ndef Ball_given_2nboxes(system, X, B1, B2, monotonicity_B1=1, monotonicity_B2=1\n ):\n B1_ft = [d.ftconstructor(Bi[0], Bi[1]) for Bi in B1]\n B2_ft = [d.ftconstructor(Bi[0], Bi[1]) for Bi in B2]\n P = [Pi.replace('\\n', '') for Pi in open(system, 'r').readlines()]\n sol = 'Empty'\n if d.boxes_intersection(B1_ft, B2_ft) == [\n ] and monotonicity_B1 == monotonicity_B2 == 1:\n t = estimating_t([[B1_ft], [B2_ft]])\n y_and_r = estimating_yandr([[B1_ft], [B2_ft]])\n intersec_B1B2_in2d = d.boxes_intersection(B1_ft[:2], B2_ft[:2])\n intersec_B1B2_in2d = [[float(Bi.lower()), float(Bi.upper())] for Bi in\n intersec_B1B2_in2d]\n B_Ball = intersec_B1B2_in2d + y_and_r + [t]\n Ball_node_gen(system, B_Ball, X)\n os.system('ibexsolve --eps-max=0.1 -s eq.txt > output.txt')\n sol = computing_boxes()\n return sol\n\n\ndef all_pairs_oflist(L):\n pairs = []\n for i in range(len(L) - 1):\n for j in range(i + 1, len(L)):\n pairs.append([L[i], L[j]])\n return pairs\n\n\ndef checking_assumptions(curve_data):\n if len(curve_data[0][1]) != 0:\n return 0\n Ball_sols_ft = [[d.ftconstructor(Bi[0], Bi[1]) for Bi in B] for B in\n curve_data[1][0]] + [[d.ftconstructor(Bi[0], Bi[1]) for Bi in B] for\n B in curve_data[1][1]]\n alph3 = assum_alph3_checker(Ball_sols_ft)\n if alph3 == 1:\n return 1\n else:\n return 0\n\n\ndef csv_saver(L, type_L='Ball'):\n dic = []\n if type_L == 'Ball':\n n = int((len(L[0]) + 1) / 2)\n for j in range(len(L)):\n dic.append({})\n for i in range(n):\n dic[j]['x' + str(i + 1)] = L[j][i]\n for i in range(n, 2 * n - 2):\n dic[j]['r' + str(i + 3 - n)] = L[j][i]\n dic[j]['t'] = L[j][2 * n - 2]\n return dic\n\n\ndef dict2csv(dictlist, csvfile):\n \"\"\"\n Takes a list of dictionaries as input and outputs a CSV file.\n \"\"\"\n f = open(csvfile, 'wb')\n fieldnames = dictlist[0].keys()\n csvwriter = csv.DictWriter(f, delimiter=',', fieldnames=fieldnames)\n csvwriter.writerow(dict((fn, fn) for fn in fieldnames))\n for row in dictlist:\n csvwriter.writerow(row)\n fn.close()\n\n\ndef assum_alph3_checker(solutions):\n comparing_list = [[]] * len(solutions)\n for i in range(len(solutions) - 1):\n for j in range(i + 1, len(solutions)):\n if d.boxes_intersection(solutions[i][:2], solutions[j][:2]) != []:\n comparing_list[i].append(j)\n comparing_list[j].append(i)\n matching = [len(T) for T in comparing_list]\n if max(matching) <= 2:\n return 1\n else:\n return 0\n\n\ndef plotting_3D(boxes, Box, var=[0, 1, 2]):\n ax = plt.figure().add_subplot(111, projection='3d')\n ax.set_xlim(Box[0][0], Box[0][1])\n ax.set_ylim(Box[1][0], Box[1][1])\n ax.set_zlim(Box[2][0], Box[2][1])\n ax.set_xlabel('x' + str(var[0] + 1))\n ax.set_ylabel('x' + str(var[1] + 1))\n ax.set_zlabel('x' + str(var[2] + 1))\n for box in boxes:\n V = [[box[j][0] for j in range(3)], [box[j][1] for j in range(3)]]\n points = list(itertools.product(*box))\n faces = [[points[0], points[2], points[6], points[4]], [points[0],\n points[2], points[3], points[1]], [points[0], points[1], points\n [5], points[4]], [points[2], points[3], points[7], points[6]],\n [points[1], points[3], points[7], points[5]]]\n ax.add_collection3d(Poly3DCollection(faces, facecolors='green',\n linewidths=1, edgecolors='green', alpha=0.25))\n plt.show()\n\n\ndef enclosing_singularities(system, boxes, B, X, eps_max=0.1, eps_min=0.01):\n combin = []\n ball = []\n start_combin = time.time()\n n = len(B)\n P = [Pi.replace('\\n', '') for Pi in open(system, 'r').readlines()]\n certified_boxes, uncertified_boxes = boxes\n classes = boxes_classifier(system, boxes, X, special_function=[])\n cer_Solutions = []\n uncer_Solutions = []\n H = []\n mon_mid = [[(0.5 * (Bij[1] + Bij[0])) for Bij in Bi[:2]] for Bi in\n classes[0]]\n mon_rad = [max([(0.5 * (Bij[1] - Bij[0])) for Bij in Bi[:2]]) for Bi in\n classes[0]]\n tree = spatial.KDTree(mon_mid)\n intersting_boxes = [tree.query_ball_point(m, r=math.sqrt(2) * r) for m,\n r in zip(mon_mid, mon_rad)]\n \"\"\"for i in range(len(ball)): \n for j in ball[i]:\n if i not in ball[j]:\n ball[j].append(i)\"\"\"\n intersting_boxes = [indi for indi in intersting_boxes if len(indi) > 3]\n discarded_components = []\n for i in range(len(intersting_boxes) - 1):\n for_i_stop = 0\n boxi_set = set(intersting_boxes[i])\n for j in range(i + 1, len(intersting_boxes)):\n boxj_set = set(intersting_boxes[j])\n if boxj_set.issubset(boxi_set):\n discarded_components.append(j)\n elif boxi_set < boxj_set:\n discarded_components.append(i)\n intersting_boxes = [intersting_boxes[i] for i in range(len(\n intersting_boxes)) if i not in discarded_components]\n interesting_boxes_flattened = []\n for Box_ind in intersting_boxes:\n for j in Box_ind:\n if j not in interesting_boxes_flattened:\n interesting_boxes_flattened.append(j)\n plane_components = planner_connected_compnants([classes[0][i] for i in\n interesting_boxes_flattened])\n end_combin = time.time()\n combin.append(end_combin - start_combin)\n H = []\n for plane_component in plane_components:\n if len(plane_component) > 1:\n start_combin = time.time()\n components = connected_compnants(plane_component)\n pairs_of_branches = all_pairs_oflist(components)\n end_combin = time.time()\n combin.append(end_combin - start_combin)\n for pair_branches in pairs_of_branches:\n start_ball = time.time()\n all_boxes = pair_branches[0] + pair_branches[1]\n uni = []\n for box in all_boxes:\n uni = d.box_union(uni, box)\n t = estimating_t(pair_branches)\n t1 = d.ftconstructor(t[0], t[1])\n t = [float(t1.lower()), float(t1.upper())]\n r = [[float(ri[0]), float(ri[1])] for ri in\n estimating_yandr(pair_branches)]\n B_Ball = uni[:2] + r + [t]\n cusp_Ball_solver(P, B_Ball, X)\n Ball_generating_system(P, B_Ball, X, eps_min)\n os.system('ibexsolve --eps-max=' + str(eps_max) +\n ' --eps-min=' + str(eps_min) + ' -s eq.txt > output.txt'\n )\n Solutions = computing_boxes()\n if Solutions != 'Empty' and Solutions != [[], []]:\n cer_Solutions += Solutions[0]\n uncer_Solutions += Solutions[1]\n if Solutions == [[], []]:\n if d.width(B_Ball[:2]) > eps_min:\n new_B = B_Ball[:2] + B[2:n]\n new_boxes = enclosing_curve(system, new_B, X,\n eps_max=0.1 * eps_max)\n resul = enclosing_singularities(system, new_boxes,\n new_B, X, eps_max=0.1 * eps_max)\n cer_Solutions += resul[0] + resul[1]\n uncer_Solutions += resul[2]\n boxes[1] += new_boxes[1]\n else:\n uncer_Solutions.append(B_Ball)\n end_ball = time.time()\n ball.append(end_ball - start_ball)\n start_combin = time.time()\n checked_boxes = []\n all_boxes = boxes[0] + boxes[1]\n checked_boxes = []\n mon_mid_cusp = [[(0.5 * (Bij[1] + Bij[0])) for Bij in Bi[:2]] for Bi in\n classes[1]]\n mon_rad_cusp = [max([(0.5 * (Bij[1] - Bij[0])) for Bij in Bi[:2]]) for\n Bi in classes[1]]\n potential_cusps = [tree.query_ball_point(m, r=math.sqrt(2) * (r +\n eps_max)) for m, r in zip(mon_mid_cusp, mon_rad_cusp)]\n end_combin = time.time()\n combin.append(end_combin - start_combin)\n for cusp_indx in range(len(classes[1])):\n start_combin = time.time()\n intersecting_boxes = [all_boxes[i] for i in potential_cusps[\n cusp_indx] if d.boxes_intersection(all_boxes[i], classes[1][\n cusp_indx]) != []]\n H = []\n uni = classes[1][cusp_indx][:]\n potential_cusp = classes[1][cusp_indx][:]\n checked_boxes.append(potential_cusp)\n for box in intersecting_boxes:\n if box in checked_boxes:\n continue\n uni = d.box_union(uni, box)\n checked_boxes.append(box)\n end_combin = time.time()\n combin.append(end_combin - start_combin)\n start_ball = time.time()\n t = estimating_t([[potential_cusp], [potential_cusp]])\n \"\"\"if t[1]-t[0] < 1e-07:\n t[0]=t[0]-0.5 * eps_min\n t[1]=t[1]+0.5 * eps_min\"\"\"\n B_Ball = uni + [[-1.01, 1.01]] * (n - 2) + [t]\n H.append(B_Ball)\n sol = cusp_Ball_solver(P, B_Ball, X)\n if sol != 'Empty' and sol != [[], []]:\n cer_Solutions += sol[0]\n uncer_Solutions += sol[1]\n if sol == [[], []]:\n uncer_Solutions.append(B_Ball)\n end_ball = time.time()\n ball.append(end_ball - start_ball)\n start_combin = time.time()\n non_intersecting_boxes = [all_boxes[i] for i in potential_cusps[\n cusp_indx] if d.boxes_intersection(all_boxes[i], classes[1][\n cusp_indx]) == []]\n end_combin = time.time()\n combin.append(end_combin - start_combin)\n for aligned in non_intersecting_boxes:\n start_ball = time.time()\n if aligned in checked_boxes:\n continue\n boxes_intersect_aligned = [B for B in non_intersecting_boxes if\n d.boxes_intersection(aligned, B) != []]\n uni = aligned[:]\n for boxi in boxes_intersect_aligned:\n if boxi in checked_boxes:\n continue\n uni = d.box_union(uni, boxi)\n checked_boxes.append(boxi)\n t = estimating_t([[potential_cusp], [uni]])\n \"\"\"if t[1]-t[0] < 1e-07:\n t[0]=t[0]-0.5 * eps_min\n t[1]=t[1]+0.5 * eps_min\"\"\"\n r = [[float(ri[0]), float(ri[1])] for ri in estimating_yandr([[\n potential_cusp], [uni]])]\n B_Ball = potential_cusp[:2] + r + [t]\n H.append(H)\n Ball_generating_system(P, B_Ball, X)\n os.system('ibexsolve --eps-max=' + str(eps_max) +\n ' --eps-min=' + str(eps_min) + ' -s eq.txt > output.txt')\n Solutions = computing_boxes()\n if Solutions != 'Empty':\n cer_Solutions += Solutions[0]\n uncer_Solutions += Solutions[1]\n elif Solutions == [[], []]:\n uncer_Solutions.append(B_Ball)\n end_ball = time.time()\n ball.append(end_ball - start_ball)\n nodes = []\n cups_or_smallnodes = []\n start_combin = time.time()\n checker = projection_checker(cer_Solutions)\n uncer_Solutions = uncer_Solutions + checker[1]\n cer_Solutions = [Bi for Bi in checker[0] if Bi[2 * n - 2][1] >= 0]\n for solution in cer_Solutions:\n if 0 >= solution[2 * n - 2][0] and 0 <= solution[2 * n - 2][1]:\n cups_or_smallnodes.append(solution)\n else:\n nodes.append(solution)\n end_combin = time.time()\n combin.append(end_combin - start_combin)\n print('KDtree ', sum(combin), 'Ball ', sum(ball))\n return [nodes, cups_or_smallnodes, uncer_Solutions]\n\n\nSystem = 'system12.txt'\nBox = [[-2, 2], [-4, 4.5], [-0.2, 43.9]]\nBox = [[-1, 4], [-1, 4], [0, 25], [-4.8, -1.4]]\nX = [sp.Symbol('x' + str(i)) for i in range(1, 5)]\nstart_enc = time.time()\nboxes = enclosing_curve(System, Box, X, eps_max=0.1, eps_min=0.0001)\nend_enc = time.time()\nprint('enclosing_curve', end_enc - start_enc)\nt1 = time.time()\nnodes, cups_or_smallnodes, uncer_Solutions = enclosing_singularities(System,\n boxes, Box, X, eps_max=0.1, eps_min=0.0001)\nprint(time.time() - t1)\nprint(len(boxes[0]), len(boxes[1]))\nprint(len(nodes), len(uncer_Solutions))\ne = []\nfor i in range(len(nodes) - 1):\n for j in range(i + 1, len(nodes)):\n if d.boxes_intersection(nodes[i], nodes[j]) != []:\n e.append(j)\nprint(len([nodes[i] for i in range(len(nodes)) if i not in e]))\nploting_boxes(boxes[0], boxes[1], B=Box[:2], nodes=nodes, x=0.007, cusps=\n cups_or_smallnodes, uncer_Solutions=uncer_Solutions, color='green',\n Legend=False)\n<docstring token>\n",
"<import token>\n\n\ndef ploting_boxes(boxes, uncer_boxes, var=[0, 1], B=[[-20, 20], [-20, 20]],\n x=0.1, nodes=[], cusps=[], uncer_Solutions=[], Legend=False, color=\n 'green', variabel_name='x'):\n fig, ax = plt.subplots()\n ax.set_xlim(B[0][0], B[0][1])\n ax.set_ylim(B[1][0], B[1][1])\n ax.set_xlabel(variabel_name + str(1))\n ax.set_ylabel(variabel_name + str(2))\n \"\"\"try:\n ax.title(open(\"system.txt\",\"r\").read())\n except:\n pass\"\"\"\n c = 0\n green_patch = mpatches.Patch(color=color, label='smooth part')\n red_patch = mpatches.Patch(color='red', label='unknown part')\n node_patch = mpatches.Patch(color='black', label='Certified nodes',\n fill=None)\n cusp_patch = mpatches.Patch(color='blue', label=\n 'Projection of certified solution with t=0 ', fill=None)\n if Legend == True:\n plt.legend(handles=[green_patch, red_patch, node_patch, cusp_patch])\n for box in boxes:\n rectangle = plt.Rectangle((box[var[0]][0], box[var[1]][0]), box[var\n [0]][1] - box[var[0]][0], box[var[1]][1] - box[var[1]][0],\n color=color)\n plt.gca().add_patch(rectangle)\n for box in uncer_boxes:\n rectangle = plt.Rectangle((box[var[0]][0], box[var[1]][0]), box[var\n [0]][1] - box[var[0]][0], box[var[1]][1] - box[var[1]][0], fc='r')\n plt.gca().add_patch(rectangle)\n for box in nodes:\n rectangle = plt.Rectangle((box[0][0] - x, box[1][0] - x), 2 * x +\n box[0][1] - box[0][0], 2 * x + box[1][1] - box[1][0], fc='y',\n fill=None)\n plt.gca().add_patch(rectangle)\n for box in cusps:\n rectangle = plt.Rectangle((box[0][0] - x, box[1][0] - x), 2 * x +\n box[0][1] - box[0][0], 2 * x + box[1][1] - box[1][0], fc='y',\n color='blue', fill=None)\n plt.gca().add_patch(rectangle)\n for box in uncer_Solutions:\n rectangle = plt.Rectangle((box[0][0] - x, box[1][0] - x), 2 * x +\n box[0][1] - box[0][0], 2 * x + box[1][1] - box[1][0], fc='y',\n color='red', fill=None)\n plt.gca().add_patch(rectangle)\n plt.savefig('fig.jpg', dpi=1000)\n plt.show()\n\n\ndef Ball_node_gen(equations, B_Ball, X):\n P = open(equations, 'r').readlines()\n P = [Pi.replace('\\n', '') for Pi in P]\n n = len(X)\n V = ' Variables \\n '\n for i in range(n):\n V += 'x' + str(i + 1) + ' in ' + str(B_Ball[i]) + ' ; \\n'\n for i in range(n, 2 * n - 2):\n V += 'r' + str(i - n + 3) + ' in ' + str(B_Ball[i]) + ' ; \\n'\n V += 't' + ' in ' + str(B_Ball[2 * n - 2]) + ' ; \\n'\n V += 'Constraints \\n'\n for Pi in P:\n V += SDP_str(Pi, X)[0]\n V += SDP_str(Pi, X)[1]\n last_eq = ''\n for i in range(3, n):\n last_eq += 'r' + str(i) + '^2+'\n last_eq += 'r' + str(n) + '^2 -1=0;'\n V += last_eq + '\\n'\n f = open('eq.txt', 'w+')\n f.write(V)\n f.write('end')\n f.close()\n\n\ndef Ball_solver(equations, B_Ball, X):\n L = [B_Ball]\n certified_boxes = []\n uncertified_boxes = []\n n = len(X)\n while len(L) != 0:\n solvability = 1\n if B_Ball[2 * n - 2][0] <= 0 <= B_Ball[2 * n - 2][1] and d.width([d\n .ftconstructor(Bi[0], Bi[1]) for Bi in L[0]]) < 0.1:\n Ball_cusp_gen(equations, B_Ball, X)\n elif (B_Ball[2 * n - 2][0] > 0 or 0 > B_Ball[2 * n - 2][1]\n ) and d.width([d.ftconstructor(Bi[0], Bi[1]) for Bi in L[0]]\n ) < 0.1:\n Ball_node_gen(equations, B_Ball, X)\n else:\n children = cb.plane_subdivision(L[0])\n L.remove(L[0])\n L += children\n solvability = 0\n if solvability == 1:\n ibex_output = cb.solving_with_ibex()\n if ibex_output[0] == 'Empty':\n L.remove(L[0])\n elif len(ibex_output[0]) != 0:\n certified_boxes += cb.computing_boxes(ibex_output[0])\n L.remove(L[0])\n elif len(ibex_output[1]) != 0:\n uncertified_boxes += cb.computing_boxes(ibex_output[1])\n L.remove(L[0])\n else:\n children = cb.plane_subdivision(L[0])\n L.remove(L[0])\n L += children\n return [certified_boxes, uncertified_boxes]\n\n\ndef SDP_str(P, X):\n n = len(X)\n P_pluse = P[:]\n P_minus = P[:]\n for i in range(2, n):\n P_pluse = P_pluse.replace('x' + str(i + 1), '(x' + str(i + 1) +\n '+ r' + str(i + 1) + '*sqrt(t))')\n P_minus = P_minus.replace('x' + str(i + 1), '(x' + str(i + 1) +\n '- r' + str(i + 1) + '*sqrt(t))')\n SP = '0.5*(' + P_pluse + '+' + P_minus + ')=0; \\n'\n DP = '0.5*(' + P_pluse + '- (' + P_minus + ') )/(sqrt(t))=0; \\n'\n return [SP, DP]\n\n\ndef Ball_generating_system(P, B_Ball, X, eps_min=0.001):\n n = len(X)\n V = ' Variables \\n '\n for i in range(n):\n if B_Ball[i][0] != B_Ball[i][1]:\n V += 'x' + str(i + 1) + ' in ' + str(B_Ball[i]) + ' ; \\n'\n else:\n V += 'x' + str(i + 1) + ' in ' + str([B_Ball[i][0] - eps_min, \n B_Ball[i][1] + eps_min]) + ' ; \\n'\n for i in range(n, 2 * n - 2):\n V += 'r' + str(i - n + 3) + ' in ' + str(B_Ball[i]) + ' ; \\n'\n V += 't' + ' in ' + str(B_Ball[2 * n - 2]) + ' ; \\n'\n V += 'Constraints \\n'\n for Pi in P:\n V += SDP_str(Pi, X)[0]\n V += SDP_str(Pi, X)[1]\n last_eq = ''\n for i in range(3, n):\n last_eq += 'r' + str(i) + '^2+'\n last_eq += 'r' + str(n) + '^2 -1=0;'\n V += last_eq + '\\n'\n f = open('eq.txt', 'w+')\n f.write(V)\n f.write('end')\n f.close()\n\n\ndef intersting_boxes1(f, b):\n pickle_in = open(f, 'rb')\n curve = pickle.load(pickle_in)\n pickle_in.close()\n intersting_boxes = []\n uncer_boxes = []\n for box in curve[0]:\n if b[0][0] <= box[0][0] <= box[0][1] <= b[0][1] and b[1][0] <= box[1][0\n ] <= box[1][1] <= b[1][1]:\n intersting_boxes.append(box)\n for box in curve[1]:\n if b[0][0] <= box[0][0] <= box[0][1] <= b[0][1] and b[1][0] <= box[1][0\n ] <= box[1][1] <= b[1][1]:\n uncer_boxes.append(box)\n return [intersting_boxes, uncer_boxes]\n\n\ndef intersting_boxes(curve, b):\n cer_intersting_boxes = []\n uncer_intersting_boxes = []\n for box in curve[0]:\n if b[0][0] <= box[0][0] <= box[0][1] <= b[0][1] and b[1][0] <= box[1][0\n ] <= box[1][1] <= b[1][1]:\n cer_intersting_boxes.append(box)\n for box in curve[1]:\n if b[0][0] <= box[0][0] <= box[0][1] <= b[0][1] and b[1][0] <= box[1][0\n ] <= box[1][1] <= b[1][1]:\n uncer_intersting_boxes.append(box)\n return [cer_intersting_boxes, uncer_intersting_boxes]\n\n\ndef ibex_output(P, B, X):\n os.system('ibexsolve --eps-max=0.1 -s eq.txt > output.txt')\n g = open('output.txt', 'r')\n result = g.readlines()\n T = computing_boxes(result)\n return T\n\n\ndef estimating_t1(components, upper_bound=200000):\n t1 = upper_bound\n t2 = 0\n for box1 in components[0]:\n for box2 in components[1]:\n a = d.distance(box1, box2).lower()\n b = d.distance(box1, box2).upper()\n if t1 > a:\n t1 = a\n if t2 < b:\n t2 = b\n t = d.ftconstructor(t1, t2)\n t = 0.25 * d.power_interval(t, 2)\n return [float(t.lower()), float(t.upper())]\n\n\ndef estimating_t(components, upper_bound=19000.8):\n t1 = upper_bound\n t2 = 0\n for box1 in components[0]:\n for box2 in components[1]:\n a = d.distance(box1[2:], box2[2:])\n if t1 > a[0]:\n t1 = a[0]\n if t2 < a[1]:\n t2 = a[1]\n t = d.ftconstructor(t1, t2)\n t = 0.25 * d.power_interval(t, 2)\n return [float(t.lower()), float(t.upper())]\n\n\ndef boxes_compare(box1, box2):\n flage = 0\n for i in range(len(box1) - 1, -1, -1):\n if box1[i][0] > box2[i][0]:\n return 1\n if box1[i][0] < box2[i][0]:\n return -1\n return 0\n\n\ndef boxes_sort(boxes):\n sorted_boxes = boxes[:]\n for i in range(len(boxes) - 1):\n for j in range(i + 1, len(boxes)):\n if boxes_compare(sorted_boxes[i], sorted_boxes[j]) == 1:\n sorted_boxes[i], sorted_boxes[j] = sorted_boxes[j\n ], sorted_boxes[i]\n return sorted_boxes\n\n\ndef connected_compnants(boxes):\n ftboxes = boxes[:]\n components = [[ftboxes[0]]]\n for i in range(1, len(ftboxes)):\n boxi_isused = 0\n for j in range(len(components)):\n membership = 0\n for k in range(len(components[j])):\n if d.boxes_intersection(ftboxes[i], components[j][k]) != []:\n components[j].append(ftboxes[i])\n membership = 1\n boxi_isused = 1\n break\n if membership == 1:\n break\n if boxi_isused == 0:\n components.append([ftboxes[i]])\n unused = list(range(len(components)))\n components1 = components[:]\n components2 = []\n while len(components1) != len(components2):\n for i in unused:\n for j in [j for j in list(range(i + 1, len(components))) if j in\n unused]:\n intersection_exists = False\n is_looping = True\n for boxi in components[i]:\n for boxj in components[j]:\n if d.boxes_intersection(boxi, boxj) != []:\n is_looping = False\n intersection_exists = True\n break\n if is_looping == False:\n break\n if intersection_exists == True:\n components[i] += components[j]\n unused.remove(j)\n components2 = components1[:]\n components1 = [components[k] for k in unused]\n return components1\n\n\ndef planner_connected_compnants(boxes):\n if len(boxes) == 0:\n return []\n ftboxes = boxes[:]\n components = [[ftboxes[0]]]\n for i in range(1, len(ftboxes)):\n boxi_isused = 0\n for j in range(len(components)):\n membership = 0\n for k in range(len(components[j])):\n if d.boxes_intersection(ftboxes[i][:2], components[j][k][:2]\n ) != []:\n components[j].append(ftboxes[i])\n membership = 1\n boxi_isused = 1\n break\n if membership == 1:\n break\n if boxi_isused == 0:\n components.append([ftboxes[i]])\n unused = list(range(len(components)))\n components1 = components[:]\n components2 = []\n while len(components1) != len(components2):\n for i in unused:\n for j in [j for j in list(range(i + 1, len(components))) if j in\n unused]:\n intersection_exists = False\n is_looping = True\n for boxi in components[i]:\n for boxj in components[j]:\n if d.boxes_intersection(boxi[:2], boxj[:2]) != []:\n is_looping = False\n intersection_exists = True\n break\n if is_looping == False:\n break\n if intersection_exists == True:\n components[i] += components[j]\n unused.remove(j)\n components2 = components1[:]\n components1 = [components[k] for k in unused]\n return components1\n\n\ndef estimating_yandr(components, upper_bound=100000):\n r_bounds = [[upper_bound, 0]] * (len(components[0][0]) - 2)\n r_list = []\n y_list = []\n for box1 in components[0]:\n for box2 in components[1]:\n ft_box1 = [d.ftconstructor(Bi[0], Bi[1]) for Bi in box1]\n ft_box2 = [d.ftconstructor(Bi[0], Bi[1]) for Bi in box2]\n y_list.append([(0.5 * (q1 + q2)) for q1, q2 in zip(ft_box1[2:],\n ft_box2[2:])])\n norm_q1q2 = d.distance(box1[2:], box2[2:])\n norm_q1q2 = d.ftconstructor(norm_q1q2[0], norm_q1q2[1])\n q1q2 = [(ft_box1[i] - ft_box2[i]) for i in range(2, len(box1))]\n r = [(ri / norm_q1q2) for ri in q1q2]\n r_list.append(r)\n r = []\n y = []\n for i in range(len(y_list[0])):\n yi1 = min([float(y[i].lower()) for y in y_list])\n yi2 = max([float(y[i].upper()) for y in y_list])\n y.append([yi1, yi2])\n for i in range(len(r_list[0])):\n ri1 = min([float(r[i].lower()) for r in r_list])\n ri2 = max([float(r[i].upper()) for r in r_list])\n r.append([ri1, ri2])\n return y + r\n\n\ndef detecting_nodes(boxes, B, f, X, eps):\n mixes_boxes = [[1, box] for box in boxes[0]] + [[0, box] for box in\n boxes[1]]\n ftboxes = [[box[0], [d.ftconstructor(boxi[0], boxi[1]) for boxi in box[\n 1]]] for box in mixes_boxes]\n nodes_lifting = []\n used = []\n P = [Pi.replace('\\n', '') for Pi in open(f, 'r').readlines()]\n for i in range(len(ftboxes)):\n for j in range(i + 1, len(ftboxes)):\n Mariam_ft = d.boxes_intersection(ftboxes[i][1], ftboxes[j][1])\n Mariam = [[float(Bi.lower()), float(Bi.upper())] for Bi in\n Mariam_ft]\n if Mariam == [] and d.boxes_intersection(ftboxes[i][1][:2],\n ftboxes[j][1][:2]) or Mariam != [] and enclosing_curve(f,\n Mariam, X, eps_max=0.1) == [[], []]:\n if i not in used:\n used.append(i)\n nodes_lifting.append(ftboxes[i])\n if j not in used:\n used.append(j)\n nodes_lifting.append(ftboxes[j])\n components = planner_connected_compnants(nodes_lifting)\n cer_components = []\n uncer_components = []\n component_normal = []\n for component in components:\n boxes_component = [box[1] for box in component]\n component_normal = [[[float(Bi.lower()), float(Bi.upper())] for Bi in\n box[1]] for box in component]\n if 0 not in [box[0] for box in component] and eval_file_gen(f,\n component_normal, X) == '[]\\n':\n cer_components.append(boxes_component)\n else:\n uncer_components.append(boxes_component)\n return [cer_components, uncer_components]\n\n\ndef intersect_in_2D(class1, class2, monotonicity=1):\n pl_intesected_pairs = []\n if monotonicity == 1:\n for i in range(len(class1)):\n for j in range(len(class2)):\n if d.boxes_intersection(class1[i][:2], class2[j][:2]) != [\n ] and d.boxes_intersection(class1[i], class2[j]) == []:\n if [class2[j], class1[i]] not in pl_intesected_pairs:\n pl_intesected_pairs.append([class1[i], class2[j]])\n elif monotonicity == 0:\n for i in range(len(class1)):\n for j in range(len(class2)):\n if d.boxes_intersection(class1[i][:2], class2[j][:2]) != []:\n if [class2[j], class1[i]] not in pl_intesected_pairs:\n pl_intesected_pairs.append([class1[i], class2[j]])\n elif monotonicity == 2:\n inters_indic = []\n for i in range(len(class1)):\n inters_indic.append([])\n for j in range(len(class2)):\n if d.boxes_intersection(class1[i][:2], class2[j][:2]) != []:\n inters_indic[i] = inters_indic[i] + [j]\n for k in range(len(class1)):\n if len(inters_indic[k]) > 3:\n for j in range(len(inters_indic[k])):\n if [class2[j], class1[k]] not in pl_intesected_pairs:\n pl_intesected_pairs.append([class1[k], class2[j]])\n return pl_intesected_pairs\n\n\ndef solving_fornodes(equations, boxes, B, X, eps=0.1):\n plane_components = detecting_nodes(boxes, B, equations, X, eps)\n g = open(equations, 'r')\n P = [Pi.replace('\\n', '') for Pi in g.readlines()]\n Ball_solutions = []\n for plane_component in plane_components:\n x1 = float(min([ai[0].lower() for ai in plane_component]))\n x2 = float(max([ai[0].upper() for ai in plane_component]))\n y1 = float(min([ai[1].lower() for ai in plane_component]))\n y2 = float(max([ai[1].upper() for ai in plane_component]))\n components = connected_compnants(plane_component)\n r = [[float(ri[0]), float(ri[1])] for ri in estimating_r(components)]\n t = estimating_t(components)\n t = [float(t[0]), float(t[1])]\n B_Ball = [[x1, x2], [y1, y2]] + r + [t]\n Ball_generating_system(P, B_Ball, X)\n solutionsi = ibex_output(P, B_Ball, X)\n Ball_solutions += solutionsi\n return Ball_solutions\n\n\ndef normal_subdivision(B):\n ft_B = d.subdivide([d.ftconstructor(Bi[0], Bi[1]) for Bi in B[:]])\n return [d.ft_normal(Bi) for Bi in ft_B]\n\n\ndef plane_subdivision(B):\n ft_B2 = d.subdivide([d.ftconstructor(Bi[0], Bi[1]) for Bi in B[:2]])\n normal_B2 = [d.ft_normal(Bi) for Bi in ft_B2]\n return d.cartesian_product(normal_B2, [B[2:]])\n\n\ndef system_generator(f, B, X):\n g = open(f, 'r')\n L = g.readlines()\n g.close()\n f = open('eq.txt', 'w+')\n f.write('Variables \\n')\n for i in range(len(X)):\n f.write(str(X[i]) + ' in ' + str(B[i]) + ' ; \\n')\n f.write('Constraints \\n')\n for Li in L:\n f.write(Li.replace('\\n', '') + '=0; \\n')\n f.write('end ')\n f.close()\n return f\n\n\ndef solving_with_ibex(eps=0.1):\n uncer_content = []\n cer_content = []\n os.system('ibexsolve --eps-max=' + str(eps) + ' -s eq.txt > output.txt'\n )\n g = open('output.txt', 'r')\n result = g.read()\n with open('output.txt') as f:\n if 'successful' in result:\n cer_content = f.readlines()\n elif 'infeasible' not in result and 'done! but some boxes' in result:\n uncer_content = f.readlines()\n elif 'infeasible problem' in result:\n uncer_content = 'Empty'\n cer_content = 'Empty'\n return [cer_content, uncer_content]\n\n\ndef computing_boxes():\n if 'infeasible' in open('output.txt', 'r').read():\n return 'Empty'\n content = open('output.txt', 'r').readlines()\n cer = []\n uncer = []\n i = 0\n Answer = []\n for fi in content:\n try:\n a = fi.index('(')\n b = fi.index(')')\n T = fi[a:b + 1].replace('(', '[')\n T = fi[a:b + 1].replace('(', '[')\n T = T.replace(')', ']')\n T = T.split(';')\n E = []\n i = 0\n for Ti in T:\n Ti = Ti.replace('[', '')\n Ti = Ti.replace(']', '')\n Ti = Ti.replace('<', '')\n Ti = Ti.replace('>', '')\n x = Ti.index(',')\n a = float(Ti[:x])\n b = float(Ti[x + 1:])\n E.append([])\n E[i] = [a, b]\n i += 1\n if 'solution n' in fi or 'boundary n' in fi:\n cer.append(E)\n elif 'unknown n' in fi:\n uncer.append(E)\n except ValueError:\n pass\n return [cer, uncer]\n\n\ndef enclosing_curve(system, B, X, eps_min=0.1, eps_max=0.1):\n L = [B]\n certified_boxes = []\n uncertified_boxes = []\n while len(L) != 0:\n system_generator(system, L[0], X)\n os.system('ibexsolve --eps-max=' + str(eps_max) + ' --eps-min=' +\n str(eps_min) + ' -s eq.txt > output.txt')\n content = open('output.txt', 'r').readlines()\n ibex_output = computing_boxes()\n if ibex_output == [[], []] and max([(Bi[1] - Bi[0]) for Bi in L[0]]\n ) < eps_min:\n uncertified_boxes.append(L[0])\n L.remove(L[0])\n elif ibex_output == [[], []]:\n children = plane_subdivision(L[0])\n L.remove(L[0])\n L += children\n elif ibex_output == 'Empty':\n L.remove(L[0])\n else:\n if len(ibex_output[0]) != 0:\n certified_boxes += ibex_output[0]\n if len(ibex_output[1]) != 0:\n uncertified_boxes += ibex_output[1]\n L.remove(L[0])\n return [certified_boxes, uncertified_boxes]\n\n\ndef loopsfree_checker(f, certified_boxes, uncer_boxes, P):\n L = eval_file_gen(f, certified_boxes, X)\n while L.replace('\\n', '') != '[]':\n L = L.replace('[', '')\n L = L.replace(']', '')\n L = L.replace('\\n', '')\n L = L.split(',')\n for i in L:\n children = normal_subdivision(certified_boxes[int(i)])\n certified_boxes.remove(certified_boxes[int(i)])\n for child in children:\n cer_children, uncer_children = enclosing_curve(f, child, X)\n certified_boxes += cer_children\n uncer_boxes += uncer_children\n L = eval_file_gen(f, certified_boxes, X)\n return L\n\n\ndef eval_file_gen(f, boxes, X, special_function=[]):\n functions = ['sin', 'cos', 'tan', 'exp'] + special_function\n if len(boxes[0]) == 0:\n return []\n n = len(boxes[0])\n m = len(boxes)\n g = open(f, 'r')\n P_str = g.readlines()\n P_str = [Pi.replace('\\n', '') for Pi in P_str]\n P_str = [Pi.replace('^', '**') for Pi in P_str]\n P_exp = [parse_expr(Pi) for Pi in P_str]\n jac = sp.Matrix(P_str).jacobian(sp.Matrix(X))\n minor1 = jac[:, 1:].det()\n minor2 = jac[:, [i for i in range(n) if i != 1]].det()\n fil = open('evaluation_file1.py', 'w')\n fil.write('import flint as ft \\n')\n fil.write('import sympy as sp \\n')\n fil.write('import interval_arithmetic as d \\n')\n fil.write('boxes=' + str(boxes) + '\\n')\n fil.write(\n 'ftboxes=[ [d.ftconstructor(Bi[0],Bi[1]) for Bi in B ] for B in boxes ] \\n'\n )\n fil.write('n=len(boxes[0])\\n')\n fil.write('m=len(boxes)\\n')\n fil.write('m1=[]\\n')\n fil.write('m2=[]\\n')\n minor1_str = str(minor1)\n minor2_str = str(minor2)\n for i in range(n):\n minor1_str = minor1_str.replace('x' + str(i + 1), 'B[' + str(i) + ']')\n minor2_str = minor2_str.replace('x' + str(i + 1), 'B[' + str(i) + ']')\n for func in functions:\n minor1_str = minor1_str.replace(func, 'ft.arb.' + func)\n minor2_str = minor2_str.replace(func, 'ft.arb.' + func)\n fil.write('for B in ftboxes: \\n')\n fil.write(' m1.append(ft.arb(' + minor1_str + ')) \\n')\n fil.write(' m2.append( ft.arb(' + minor2_str + ')) \\n')\n fil.write(\n 'innrer_loops=[i for i in range(m) if 0 in m1[i] and 0 in m2[i] ]\\n')\n fil.write('print(innrer_loops)\\n')\n fil.close()\n t = os.popen('python3 evaluation_file1.py ').read()\n return t\n\n\ndef boxes_classifier(system, boxes, X, special_function=[]):\n if len(boxes[0]) == 0:\n return [[], [], boxes[1]]\n certified_boxes, uncer_boxes = boxes\n L = eval_file_gen(system, certified_boxes, X)\n if L == []:\n return [[], [], uncer_boxes]\n it = 0\n L = L.replace('[', '')\n L = L.replace(']', '')\n L = L.replace('\\n', '')\n L = L.split(',')\n if L != ['']:\n L = [int(li) for li in L]\n return [[certified_boxes[i] for i in range(len(certified_boxes)) if\n i not in L], [certified_boxes[i] for i in L], uncer_boxes]\n else:\n return [[certified_boxes[i] for i in range(len(certified_boxes)) if\n i not in L], [], uncer_boxes]\n\n\ndef projection_checker(solutions):\n if len(solutions) == 0:\n return [[], []]\n m = len(solutions[0])\n n = int((m + 1) / 2)\n intersect_in2d = [[]] * len(solutions)\n for i in range(len(solutions) - 1):\n for j in range(i + 1, len(solutions)):\n if solutions[i] == solutions[j]:\n continue\n elif d.boxes_intersection(solutions[i][:2], solutions[j][:2]) != [\n ] and (d.boxes_intersection(solutions[i][n:2 * n - 2], [[-\n Bi[1], -Bi[0]] for Bi in solutions[j][n:2 * n - 2]]) == [] and\n d.boxes_intersection(solutions[i][n:2 * n - 2], [[Bi[0], Bi\n [1]] for Bi in solutions[j][n:2 * n - 2]]) == [] or d.\n boxes_intersection(solutions[i][2:n] + [solutions[i][2 * n -\n 2]], solutions[j][2:n] + [solutions[j][2 * n - 2]]) == []):\n intersect_in2d[i] = intersect_in2d[i] + [j]\n accepted = []\n acc_ind = []\n unaccepted = []\n unacc_ind = []\n for i in range(len(solutions)):\n if len(intersect_in2d[i]) == 0 and i not in unacc_ind + acc_ind:\n accepted.append(solutions[i])\n acc_ind.append(i)\n continue\n elif i not in unacc_ind + acc_ind:\n unaccepted.append(solutions[i])\n unacc_ind.append(i)\n for k in intersect_in2d[i]:\n if k not in unacc_ind:\n unaccepted.append(solutions[k])\n unacc_ind.append(k)\n return [accepted, unaccepted]\n\n\ndef Ball_given_2nboxes(system, X, B1, B2, monotonicity_B1=1, monotonicity_B2=1\n ):\n B1_ft = [d.ftconstructor(Bi[0], Bi[1]) for Bi in B1]\n B2_ft = [d.ftconstructor(Bi[0], Bi[1]) for Bi in B2]\n P = [Pi.replace('\\n', '') for Pi in open(system, 'r').readlines()]\n sol = 'Empty'\n if d.boxes_intersection(B1_ft, B2_ft) == [\n ] and monotonicity_B1 == monotonicity_B2 == 1:\n t = estimating_t([[B1_ft], [B2_ft]])\n y_and_r = estimating_yandr([[B1_ft], [B2_ft]])\n intersec_B1B2_in2d = d.boxes_intersection(B1_ft[:2], B2_ft[:2])\n intersec_B1B2_in2d = [[float(Bi.lower()), float(Bi.upper())] for Bi in\n intersec_B1B2_in2d]\n B_Ball = intersec_B1B2_in2d + y_and_r + [t]\n Ball_node_gen(system, B_Ball, X)\n os.system('ibexsolve --eps-max=0.1 -s eq.txt > output.txt')\n sol = computing_boxes()\n return sol\n\n\ndef all_pairs_oflist(L):\n pairs = []\n for i in range(len(L) - 1):\n for j in range(i + 1, len(L)):\n pairs.append([L[i], L[j]])\n return pairs\n\n\ndef checking_assumptions(curve_data):\n if len(curve_data[0][1]) != 0:\n return 0\n Ball_sols_ft = [[d.ftconstructor(Bi[0], Bi[1]) for Bi in B] for B in\n curve_data[1][0]] + [[d.ftconstructor(Bi[0], Bi[1]) for Bi in B] for\n B in curve_data[1][1]]\n alph3 = assum_alph3_checker(Ball_sols_ft)\n if alph3 == 1:\n return 1\n else:\n return 0\n\n\ndef csv_saver(L, type_L='Ball'):\n dic = []\n if type_L == 'Ball':\n n = int((len(L[0]) + 1) / 2)\n for j in range(len(L)):\n dic.append({})\n for i in range(n):\n dic[j]['x' + str(i + 1)] = L[j][i]\n for i in range(n, 2 * n - 2):\n dic[j]['r' + str(i + 3 - n)] = L[j][i]\n dic[j]['t'] = L[j][2 * n - 2]\n return dic\n\n\ndef dict2csv(dictlist, csvfile):\n \"\"\"\n Takes a list of dictionaries as input and outputs a CSV file.\n \"\"\"\n f = open(csvfile, 'wb')\n fieldnames = dictlist[0].keys()\n csvwriter = csv.DictWriter(f, delimiter=',', fieldnames=fieldnames)\n csvwriter.writerow(dict((fn, fn) for fn in fieldnames))\n for row in dictlist:\n csvwriter.writerow(row)\n fn.close()\n\n\ndef assum_alph3_checker(solutions):\n comparing_list = [[]] * len(solutions)\n for i in range(len(solutions) - 1):\n for j in range(i + 1, len(solutions)):\n if d.boxes_intersection(solutions[i][:2], solutions[j][:2]) != []:\n comparing_list[i].append(j)\n comparing_list[j].append(i)\n matching = [len(T) for T in comparing_list]\n if max(matching) <= 2:\n return 1\n else:\n return 0\n\n\ndef plotting_3D(boxes, Box, var=[0, 1, 2]):\n ax = plt.figure().add_subplot(111, projection='3d')\n ax.set_xlim(Box[0][0], Box[0][1])\n ax.set_ylim(Box[1][0], Box[1][1])\n ax.set_zlim(Box[2][0], Box[2][1])\n ax.set_xlabel('x' + str(var[0] + 1))\n ax.set_ylabel('x' + str(var[1] + 1))\n ax.set_zlabel('x' + str(var[2] + 1))\n for box in boxes:\n V = [[box[j][0] for j in range(3)], [box[j][1] for j in range(3)]]\n points = list(itertools.product(*box))\n faces = [[points[0], points[2], points[6], points[4]], [points[0],\n points[2], points[3], points[1]], [points[0], points[1], points\n [5], points[4]], [points[2], points[3], points[7], points[6]],\n [points[1], points[3], points[7], points[5]]]\n ax.add_collection3d(Poly3DCollection(faces, facecolors='green',\n linewidths=1, edgecolors='green', alpha=0.25))\n plt.show()\n\n\ndef enclosing_singularities(system, boxes, B, X, eps_max=0.1, eps_min=0.01):\n combin = []\n ball = []\n start_combin = time.time()\n n = len(B)\n P = [Pi.replace('\\n', '') for Pi in open(system, 'r').readlines()]\n certified_boxes, uncertified_boxes = boxes\n classes = boxes_classifier(system, boxes, X, special_function=[])\n cer_Solutions = []\n uncer_Solutions = []\n H = []\n mon_mid = [[(0.5 * (Bij[1] + Bij[0])) for Bij in Bi[:2]] for Bi in\n classes[0]]\n mon_rad = [max([(0.5 * (Bij[1] - Bij[0])) for Bij in Bi[:2]]) for Bi in\n classes[0]]\n tree = spatial.KDTree(mon_mid)\n intersting_boxes = [tree.query_ball_point(m, r=math.sqrt(2) * r) for m,\n r in zip(mon_mid, mon_rad)]\n \"\"\"for i in range(len(ball)): \n for j in ball[i]:\n if i not in ball[j]:\n ball[j].append(i)\"\"\"\n intersting_boxes = [indi for indi in intersting_boxes if len(indi) > 3]\n discarded_components = []\n for i in range(len(intersting_boxes) - 1):\n for_i_stop = 0\n boxi_set = set(intersting_boxes[i])\n for j in range(i + 1, len(intersting_boxes)):\n boxj_set = set(intersting_boxes[j])\n if boxj_set.issubset(boxi_set):\n discarded_components.append(j)\n elif boxi_set < boxj_set:\n discarded_components.append(i)\n intersting_boxes = [intersting_boxes[i] for i in range(len(\n intersting_boxes)) if i not in discarded_components]\n interesting_boxes_flattened = []\n for Box_ind in intersting_boxes:\n for j in Box_ind:\n if j not in interesting_boxes_flattened:\n interesting_boxes_flattened.append(j)\n plane_components = planner_connected_compnants([classes[0][i] for i in\n interesting_boxes_flattened])\n end_combin = time.time()\n combin.append(end_combin - start_combin)\n H = []\n for plane_component in plane_components:\n if len(plane_component) > 1:\n start_combin = time.time()\n components = connected_compnants(plane_component)\n pairs_of_branches = all_pairs_oflist(components)\n end_combin = time.time()\n combin.append(end_combin - start_combin)\n for pair_branches in pairs_of_branches:\n start_ball = time.time()\n all_boxes = pair_branches[0] + pair_branches[1]\n uni = []\n for box in all_boxes:\n uni = d.box_union(uni, box)\n t = estimating_t(pair_branches)\n t1 = d.ftconstructor(t[0], t[1])\n t = [float(t1.lower()), float(t1.upper())]\n r = [[float(ri[0]), float(ri[1])] for ri in\n estimating_yandr(pair_branches)]\n B_Ball = uni[:2] + r + [t]\n cusp_Ball_solver(P, B_Ball, X)\n Ball_generating_system(P, B_Ball, X, eps_min)\n os.system('ibexsolve --eps-max=' + str(eps_max) +\n ' --eps-min=' + str(eps_min) + ' -s eq.txt > output.txt'\n )\n Solutions = computing_boxes()\n if Solutions != 'Empty' and Solutions != [[], []]:\n cer_Solutions += Solutions[0]\n uncer_Solutions += Solutions[1]\n if Solutions == [[], []]:\n if d.width(B_Ball[:2]) > eps_min:\n new_B = B_Ball[:2] + B[2:n]\n new_boxes = enclosing_curve(system, new_B, X,\n eps_max=0.1 * eps_max)\n resul = enclosing_singularities(system, new_boxes,\n new_B, X, eps_max=0.1 * eps_max)\n cer_Solutions += resul[0] + resul[1]\n uncer_Solutions += resul[2]\n boxes[1] += new_boxes[1]\n else:\n uncer_Solutions.append(B_Ball)\n end_ball = time.time()\n ball.append(end_ball - start_ball)\n start_combin = time.time()\n checked_boxes = []\n all_boxes = boxes[0] + boxes[1]\n checked_boxes = []\n mon_mid_cusp = [[(0.5 * (Bij[1] + Bij[0])) for Bij in Bi[:2]] for Bi in\n classes[1]]\n mon_rad_cusp = [max([(0.5 * (Bij[1] - Bij[0])) for Bij in Bi[:2]]) for\n Bi in classes[1]]\n potential_cusps = [tree.query_ball_point(m, r=math.sqrt(2) * (r +\n eps_max)) for m, r in zip(mon_mid_cusp, mon_rad_cusp)]\n end_combin = time.time()\n combin.append(end_combin - start_combin)\n for cusp_indx in range(len(classes[1])):\n start_combin = time.time()\n intersecting_boxes = [all_boxes[i] for i in potential_cusps[\n cusp_indx] if d.boxes_intersection(all_boxes[i], classes[1][\n cusp_indx]) != []]\n H = []\n uni = classes[1][cusp_indx][:]\n potential_cusp = classes[1][cusp_indx][:]\n checked_boxes.append(potential_cusp)\n for box in intersecting_boxes:\n if box in checked_boxes:\n continue\n uni = d.box_union(uni, box)\n checked_boxes.append(box)\n end_combin = time.time()\n combin.append(end_combin - start_combin)\n start_ball = time.time()\n t = estimating_t([[potential_cusp], [potential_cusp]])\n \"\"\"if t[1]-t[0] < 1e-07:\n t[0]=t[0]-0.5 * eps_min\n t[1]=t[1]+0.5 * eps_min\"\"\"\n B_Ball = uni + [[-1.01, 1.01]] * (n - 2) + [t]\n H.append(B_Ball)\n sol = cusp_Ball_solver(P, B_Ball, X)\n if sol != 'Empty' and sol != [[], []]:\n cer_Solutions += sol[0]\n uncer_Solutions += sol[1]\n if sol == [[], []]:\n uncer_Solutions.append(B_Ball)\n end_ball = time.time()\n ball.append(end_ball - start_ball)\n start_combin = time.time()\n non_intersecting_boxes = [all_boxes[i] for i in potential_cusps[\n cusp_indx] if d.boxes_intersection(all_boxes[i], classes[1][\n cusp_indx]) == []]\n end_combin = time.time()\n combin.append(end_combin - start_combin)\n for aligned in non_intersecting_boxes:\n start_ball = time.time()\n if aligned in checked_boxes:\n continue\n boxes_intersect_aligned = [B for B in non_intersecting_boxes if\n d.boxes_intersection(aligned, B) != []]\n uni = aligned[:]\n for boxi in boxes_intersect_aligned:\n if boxi in checked_boxes:\n continue\n uni = d.box_union(uni, boxi)\n checked_boxes.append(boxi)\n t = estimating_t([[potential_cusp], [uni]])\n \"\"\"if t[1]-t[0] < 1e-07:\n t[0]=t[0]-0.5 * eps_min\n t[1]=t[1]+0.5 * eps_min\"\"\"\n r = [[float(ri[0]), float(ri[1])] for ri in estimating_yandr([[\n potential_cusp], [uni]])]\n B_Ball = potential_cusp[:2] + r + [t]\n H.append(H)\n Ball_generating_system(P, B_Ball, X)\n os.system('ibexsolve --eps-max=' + str(eps_max) +\n ' --eps-min=' + str(eps_min) + ' -s eq.txt > output.txt')\n Solutions = computing_boxes()\n if Solutions != 'Empty':\n cer_Solutions += Solutions[0]\n uncer_Solutions += Solutions[1]\n elif Solutions == [[], []]:\n uncer_Solutions.append(B_Ball)\n end_ball = time.time()\n ball.append(end_ball - start_ball)\n nodes = []\n cups_or_smallnodes = []\n start_combin = time.time()\n checker = projection_checker(cer_Solutions)\n uncer_Solutions = uncer_Solutions + checker[1]\n cer_Solutions = [Bi for Bi in checker[0] if Bi[2 * n - 2][1] >= 0]\n for solution in cer_Solutions:\n if 0 >= solution[2 * n - 2][0] and 0 <= solution[2 * n - 2][1]:\n cups_or_smallnodes.append(solution)\n else:\n nodes.append(solution)\n end_combin = time.time()\n combin.append(end_combin - start_combin)\n print('KDtree ', sum(combin), 'Ball ', sum(ball))\n return [nodes, cups_or_smallnodes, uncer_Solutions]\n\n\nSystem = 'system12.txt'\nBox = [[-2, 2], [-4, 4.5], [-0.2, 43.9]]\nBox = [[-1, 4], [-1, 4], [0, 25], [-4.8, -1.4]]\nX = [sp.Symbol('x' + str(i)) for i in range(1, 5)]\nstart_enc = time.time()\nboxes = enclosing_curve(System, Box, X, eps_max=0.1, eps_min=0.0001)\nend_enc = time.time()\nprint('enclosing_curve', end_enc - start_enc)\nt1 = time.time()\nnodes, cups_or_smallnodes, uncer_Solutions = enclosing_singularities(System,\n boxes, Box, X, eps_max=0.1, eps_min=0.0001)\nprint(time.time() - t1)\nprint(len(boxes[0]), len(boxes[1]))\nprint(len(nodes), len(uncer_Solutions))\ne = []\nfor i in range(len(nodes) - 1):\n for j in range(i + 1, len(nodes)):\n if d.boxes_intersection(nodes[i], nodes[j]) != []:\n e.append(j)\nprint(len([nodes[i] for i in range(len(nodes)) if i not in e]))\nploting_boxes(boxes[0], boxes[1], B=Box[:2], nodes=nodes, x=0.007, cusps=\n cups_or_smallnodes, uncer_Solutions=uncer_Solutions, color='green',\n Legend=False)\n<docstring token>\n",
"<import token>\n\n\ndef ploting_boxes(boxes, uncer_boxes, var=[0, 1], B=[[-20, 20], [-20, 20]],\n x=0.1, nodes=[], cusps=[], uncer_Solutions=[], Legend=False, color=\n 'green', variabel_name='x'):\n fig, ax = plt.subplots()\n ax.set_xlim(B[0][0], B[0][1])\n ax.set_ylim(B[1][0], B[1][1])\n ax.set_xlabel(variabel_name + str(1))\n ax.set_ylabel(variabel_name + str(2))\n \"\"\"try:\n ax.title(open(\"system.txt\",\"r\").read())\n except:\n pass\"\"\"\n c = 0\n green_patch = mpatches.Patch(color=color, label='smooth part')\n red_patch = mpatches.Patch(color='red', label='unknown part')\n node_patch = mpatches.Patch(color='black', label='Certified nodes',\n fill=None)\n cusp_patch = mpatches.Patch(color='blue', label=\n 'Projection of certified solution with t=0 ', fill=None)\n if Legend == True:\n plt.legend(handles=[green_patch, red_patch, node_patch, cusp_patch])\n for box in boxes:\n rectangle = plt.Rectangle((box[var[0]][0], box[var[1]][0]), box[var\n [0]][1] - box[var[0]][0], box[var[1]][1] - box[var[1]][0],\n color=color)\n plt.gca().add_patch(rectangle)\n for box in uncer_boxes:\n rectangle = plt.Rectangle((box[var[0]][0], box[var[1]][0]), box[var\n [0]][1] - box[var[0]][0], box[var[1]][1] - box[var[1]][0], fc='r')\n plt.gca().add_patch(rectangle)\n for box in nodes:\n rectangle = plt.Rectangle((box[0][0] - x, box[1][0] - x), 2 * x +\n box[0][1] - box[0][0], 2 * x + box[1][1] - box[1][0], fc='y',\n fill=None)\n plt.gca().add_patch(rectangle)\n for box in cusps:\n rectangle = plt.Rectangle((box[0][0] - x, box[1][0] - x), 2 * x +\n box[0][1] - box[0][0], 2 * x + box[1][1] - box[1][0], fc='y',\n color='blue', fill=None)\n plt.gca().add_patch(rectangle)\n for box in uncer_Solutions:\n rectangle = plt.Rectangle((box[0][0] - x, box[1][0] - x), 2 * x +\n box[0][1] - box[0][0], 2 * x + box[1][1] - box[1][0], fc='y',\n color='red', fill=None)\n plt.gca().add_patch(rectangle)\n plt.savefig('fig.jpg', dpi=1000)\n plt.show()\n\n\ndef Ball_node_gen(equations, B_Ball, X):\n P = open(equations, 'r').readlines()\n P = [Pi.replace('\\n', '') for Pi in P]\n n = len(X)\n V = ' Variables \\n '\n for i in range(n):\n V += 'x' + str(i + 1) + ' in ' + str(B_Ball[i]) + ' ; \\n'\n for i in range(n, 2 * n - 2):\n V += 'r' + str(i - n + 3) + ' in ' + str(B_Ball[i]) + ' ; \\n'\n V += 't' + ' in ' + str(B_Ball[2 * n - 2]) + ' ; \\n'\n V += 'Constraints \\n'\n for Pi in P:\n V += SDP_str(Pi, X)[0]\n V += SDP_str(Pi, X)[1]\n last_eq = ''\n for i in range(3, n):\n last_eq += 'r' + str(i) + '^2+'\n last_eq += 'r' + str(n) + '^2 -1=0;'\n V += last_eq + '\\n'\n f = open('eq.txt', 'w+')\n f.write(V)\n f.write('end')\n f.close()\n\n\ndef Ball_solver(equations, B_Ball, X):\n L = [B_Ball]\n certified_boxes = []\n uncertified_boxes = []\n n = len(X)\n while len(L) != 0:\n solvability = 1\n if B_Ball[2 * n - 2][0] <= 0 <= B_Ball[2 * n - 2][1] and d.width([d\n .ftconstructor(Bi[0], Bi[1]) for Bi in L[0]]) < 0.1:\n Ball_cusp_gen(equations, B_Ball, X)\n elif (B_Ball[2 * n - 2][0] > 0 or 0 > B_Ball[2 * n - 2][1]\n ) and d.width([d.ftconstructor(Bi[0], Bi[1]) for Bi in L[0]]\n ) < 0.1:\n Ball_node_gen(equations, B_Ball, X)\n else:\n children = cb.plane_subdivision(L[0])\n L.remove(L[0])\n L += children\n solvability = 0\n if solvability == 1:\n ibex_output = cb.solving_with_ibex()\n if ibex_output[0] == 'Empty':\n L.remove(L[0])\n elif len(ibex_output[0]) != 0:\n certified_boxes += cb.computing_boxes(ibex_output[0])\n L.remove(L[0])\n elif len(ibex_output[1]) != 0:\n uncertified_boxes += cb.computing_boxes(ibex_output[1])\n L.remove(L[0])\n else:\n children = cb.plane_subdivision(L[0])\n L.remove(L[0])\n L += children\n return [certified_boxes, uncertified_boxes]\n\n\ndef SDP_str(P, X):\n n = len(X)\n P_pluse = P[:]\n P_minus = P[:]\n for i in range(2, n):\n P_pluse = P_pluse.replace('x' + str(i + 1), '(x' + str(i + 1) +\n '+ r' + str(i + 1) + '*sqrt(t))')\n P_minus = P_minus.replace('x' + str(i + 1), '(x' + str(i + 1) +\n '- r' + str(i + 1) + '*sqrt(t))')\n SP = '0.5*(' + P_pluse + '+' + P_minus + ')=0; \\n'\n DP = '0.5*(' + P_pluse + '- (' + P_minus + ') )/(sqrt(t))=0; \\n'\n return [SP, DP]\n\n\ndef Ball_generating_system(P, B_Ball, X, eps_min=0.001):\n n = len(X)\n V = ' Variables \\n '\n for i in range(n):\n if B_Ball[i][0] != B_Ball[i][1]:\n V += 'x' + str(i + 1) + ' in ' + str(B_Ball[i]) + ' ; \\n'\n else:\n V += 'x' + str(i + 1) + ' in ' + str([B_Ball[i][0] - eps_min, \n B_Ball[i][1] + eps_min]) + ' ; \\n'\n for i in range(n, 2 * n - 2):\n V += 'r' + str(i - n + 3) + ' in ' + str(B_Ball[i]) + ' ; \\n'\n V += 't' + ' in ' + str(B_Ball[2 * n - 2]) + ' ; \\n'\n V += 'Constraints \\n'\n for Pi in P:\n V += SDP_str(Pi, X)[0]\n V += SDP_str(Pi, X)[1]\n last_eq = ''\n for i in range(3, n):\n last_eq += 'r' + str(i) + '^2+'\n last_eq += 'r' + str(n) + '^2 -1=0;'\n V += last_eq + '\\n'\n f = open('eq.txt', 'w+')\n f.write(V)\n f.write('end')\n f.close()\n\n\ndef intersting_boxes1(f, b):\n pickle_in = open(f, 'rb')\n curve = pickle.load(pickle_in)\n pickle_in.close()\n intersting_boxes = []\n uncer_boxes = []\n for box in curve[0]:\n if b[0][0] <= box[0][0] <= box[0][1] <= b[0][1] and b[1][0] <= box[1][0\n ] <= box[1][1] <= b[1][1]:\n intersting_boxes.append(box)\n for box in curve[1]:\n if b[0][0] <= box[0][0] <= box[0][1] <= b[0][1] and b[1][0] <= box[1][0\n ] <= box[1][1] <= b[1][1]:\n uncer_boxes.append(box)\n return [intersting_boxes, uncer_boxes]\n\n\ndef intersting_boxes(curve, b):\n cer_intersting_boxes = []\n uncer_intersting_boxes = []\n for box in curve[0]:\n if b[0][0] <= box[0][0] <= box[0][1] <= b[0][1] and b[1][0] <= box[1][0\n ] <= box[1][1] <= b[1][1]:\n cer_intersting_boxes.append(box)\n for box in curve[1]:\n if b[0][0] <= box[0][0] <= box[0][1] <= b[0][1] and b[1][0] <= box[1][0\n ] <= box[1][1] <= b[1][1]:\n uncer_intersting_boxes.append(box)\n return [cer_intersting_boxes, uncer_intersting_boxes]\n\n\ndef ibex_output(P, B, X):\n os.system('ibexsolve --eps-max=0.1 -s eq.txt > output.txt')\n g = open('output.txt', 'r')\n result = g.readlines()\n T = computing_boxes(result)\n return T\n\n\ndef estimating_t1(components, upper_bound=200000):\n t1 = upper_bound\n t2 = 0\n for box1 in components[0]:\n for box2 in components[1]:\n a = d.distance(box1, box2).lower()\n b = d.distance(box1, box2).upper()\n if t1 > a:\n t1 = a\n if t2 < b:\n t2 = b\n t = d.ftconstructor(t1, t2)\n t = 0.25 * d.power_interval(t, 2)\n return [float(t.lower()), float(t.upper())]\n\n\ndef estimating_t(components, upper_bound=19000.8):\n t1 = upper_bound\n t2 = 0\n for box1 in components[0]:\n for box2 in components[1]:\n a = d.distance(box1[2:], box2[2:])\n if t1 > a[0]:\n t1 = a[0]\n if t2 < a[1]:\n t2 = a[1]\n t = d.ftconstructor(t1, t2)\n t = 0.25 * d.power_interval(t, 2)\n return [float(t.lower()), float(t.upper())]\n\n\ndef boxes_compare(box1, box2):\n flage = 0\n for i in range(len(box1) - 1, -1, -1):\n if box1[i][0] > box2[i][0]:\n return 1\n if box1[i][0] < box2[i][0]:\n return -1\n return 0\n\n\ndef boxes_sort(boxes):\n sorted_boxes = boxes[:]\n for i in range(len(boxes) - 1):\n for j in range(i + 1, len(boxes)):\n if boxes_compare(sorted_boxes[i], sorted_boxes[j]) == 1:\n sorted_boxes[i], sorted_boxes[j] = sorted_boxes[j\n ], sorted_boxes[i]\n return sorted_boxes\n\n\ndef connected_compnants(boxes):\n ftboxes = boxes[:]\n components = [[ftboxes[0]]]\n for i in range(1, len(ftboxes)):\n boxi_isused = 0\n for j in range(len(components)):\n membership = 0\n for k in range(len(components[j])):\n if d.boxes_intersection(ftboxes[i], components[j][k]) != []:\n components[j].append(ftboxes[i])\n membership = 1\n boxi_isused = 1\n break\n if membership == 1:\n break\n if boxi_isused == 0:\n components.append([ftboxes[i]])\n unused = list(range(len(components)))\n components1 = components[:]\n components2 = []\n while len(components1) != len(components2):\n for i in unused:\n for j in [j for j in list(range(i + 1, len(components))) if j in\n unused]:\n intersection_exists = False\n is_looping = True\n for boxi in components[i]:\n for boxj in components[j]:\n if d.boxes_intersection(boxi, boxj) != []:\n is_looping = False\n intersection_exists = True\n break\n if is_looping == False:\n break\n if intersection_exists == True:\n components[i] += components[j]\n unused.remove(j)\n components2 = components1[:]\n components1 = [components[k] for k in unused]\n return components1\n\n\ndef planner_connected_compnants(boxes):\n if len(boxes) == 0:\n return []\n ftboxes = boxes[:]\n components = [[ftboxes[0]]]\n for i in range(1, len(ftboxes)):\n boxi_isused = 0\n for j in range(len(components)):\n membership = 0\n for k in range(len(components[j])):\n if d.boxes_intersection(ftboxes[i][:2], components[j][k][:2]\n ) != []:\n components[j].append(ftboxes[i])\n membership = 1\n boxi_isused = 1\n break\n if membership == 1:\n break\n if boxi_isused == 0:\n components.append([ftboxes[i]])\n unused = list(range(len(components)))\n components1 = components[:]\n components2 = []\n while len(components1) != len(components2):\n for i in unused:\n for j in [j for j in list(range(i + 1, len(components))) if j in\n unused]:\n intersection_exists = False\n is_looping = True\n for boxi in components[i]:\n for boxj in components[j]:\n if d.boxes_intersection(boxi[:2], boxj[:2]) != []:\n is_looping = False\n intersection_exists = True\n break\n if is_looping == False:\n break\n if intersection_exists == True:\n components[i] += components[j]\n unused.remove(j)\n components2 = components1[:]\n components1 = [components[k] for k in unused]\n return components1\n\n\ndef estimating_yandr(components, upper_bound=100000):\n r_bounds = [[upper_bound, 0]] * (len(components[0][0]) - 2)\n r_list = []\n y_list = []\n for box1 in components[0]:\n for box2 in components[1]:\n ft_box1 = [d.ftconstructor(Bi[0], Bi[1]) for Bi in box1]\n ft_box2 = [d.ftconstructor(Bi[0], Bi[1]) for Bi in box2]\n y_list.append([(0.5 * (q1 + q2)) for q1, q2 in zip(ft_box1[2:],\n ft_box2[2:])])\n norm_q1q2 = d.distance(box1[2:], box2[2:])\n norm_q1q2 = d.ftconstructor(norm_q1q2[0], norm_q1q2[1])\n q1q2 = [(ft_box1[i] - ft_box2[i]) for i in range(2, len(box1))]\n r = [(ri / norm_q1q2) for ri in q1q2]\n r_list.append(r)\n r = []\n y = []\n for i in range(len(y_list[0])):\n yi1 = min([float(y[i].lower()) for y in y_list])\n yi2 = max([float(y[i].upper()) for y in y_list])\n y.append([yi1, yi2])\n for i in range(len(r_list[0])):\n ri1 = min([float(r[i].lower()) for r in r_list])\n ri2 = max([float(r[i].upper()) for r in r_list])\n r.append([ri1, ri2])\n return y + r\n\n\ndef detecting_nodes(boxes, B, f, X, eps):\n mixes_boxes = [[1, box] for box in boxes[0]] + [[0, box] for box in\n boxes[1]]\n ftboxes = [[box[0], [d.ftconstructor(boxi[0], boxi[1]) for boxi in box[\n 1]]] for box in mixes_boxes]\n nodes_lifting = []\n used = []\n P = [Pi.replace('\\n', '') for Pi in open(f, 'r').readlines()]\n for i in range(len(ftboxes)):\n for j in range(i + 1, len(ftboxes)):\n Mariam_ft = d.boxes_intersection(ftboxes[i][1], ftboxes[j][1])\n Mariam = [[float(Bi.lower()), float(Bi.upper())] for Bi in\n Mariam_ft]\n if Mariam == [] and d.boxes_intersection(ftboxes[i][1][:2],\n ftboxes[j][1][:2]) or Mariam != [] and enclosing_curve(f,\n Mariam, X, eps_max=0.1) == [[], []]:\n if i not in used:\n used.append(i)\n nodes_lifting.append(ftboxes[i])\n if j not in used:\n used.append(j)\n nodes_lifting.append(ftboxes[j])\n components = planner_connected_compnants(nodes_lifting)\n cer_components = []\n uncer_components = []\n component_normal = []\n for component in components:\n boxes_component = [box[1] for box in component]\n component_normal = [[[float(Bi.lower()), float(Bi.upper())] for Bi in\n box[1]] for box in component]\n if 0 not in [box[0] for box in component] and eval_file_gen(f,\n component_normal, X) == '[]\\n':\n cer_components.append(boxes_component)\n else:\n uncer_components.append(boxes_component)\n return [cer_components, uncer_components]\n\n\ndef intersect_in_2D(class1, class2, monotonicity=1):\n pl_intesected_pairs = []\n if monotonicity == 1:\n for i in range(len(class1)):\n for j in range(len(class2)):\n if d.boxes_intersection(class1[i][:2], class2[j][:2]) != [\n ] and d.boxes_intersection(class1[i], class2[j]) == []:\n if [class2[j], class1[i]] not in pl_intesected_pairs:\n pl_intesected_pairs.append([class1[i], class2[j]])\n elif monotonicity == 0:\n for i in range(len(class1)):\n for j in range(len(class2)):\n if d.boxes_intersection(class1[i][:2], class2[j][:2]) != []:\n if [class2[j], class1[i]] not in pl_intesected_pairs:\n pl_intesected_pairs.append([class1[i], class2[j]])\n elif monotonicity == 2:\n inters_indic = []\n for i in range(len(class1)):\n inters_indic.append([])\n for j in range(len(class2)):\n if d.boxes_intersection(class1[i][:2], class2[j][:2]) != []:\n inters_indic[i] = inters_indic[i] + [j]\n for k in range(len(class1)):\n if len(inters_indic[k]) > 3:\n for j in range(len(inters_indic[k])):\n if [class2[j], class1[k]] not in pl_intesected_pairs:\n pl_intesected_pairs.append([class1[k], class2[j]])\n return pl_intesected_pairs\n\n\ndef solving_fornodes(equations, boxes, B, X, eps=0.1):\n plane_components = detecting_nodes(boxes, B, equations, X, eps)\n g = open(equations, 'r')\n P = [Pi.replace('\\n', '') for Pi in g.readlines()]\n Ball_solutions = []\n for plane_component in plane_components:\n x1 = float(min([ai[0].lower() for ai in plane_component]))\n x2 = float(max([ai[0].upper() for ai in plane_component]))\n y1 = float(min([ai[1].lower() for ai in plane_component]))\n y2 = float(max([ai[1].upper() for ai in plane_component]))\n components = connected_compnants(plane_component)\n r = [[float(ri[0]), float(ri[1])] for ri in estimating_r(components)]\n t = estimating_t(components)\n t = [float(t[0]), float(t[1])]\n B_Ball = [[x1, x2], [y1, y2]] + r + [t]\n Ball_generating_system(P, B_Ball, X)\n solutionsi = ibex_output(P, B_Ball, X)\n Ball_solutions += solutionsi\n return Ball_solutions\n\n\ndef normal_subdivision(B):\n ft_B = d.subdivide([d.ftconstructor(Bi[0], Bi[1]) for Bi in B[:]])\n return [d.ft_normal(Bi) for Bi in ft_B]\n\n\ndef plane_subdivision(B):\n ft_B2 = d.subdivide([d.ftconstructor(Bi[0], Bi[1]) for Bi in B[:2]])\n normal_B2 = [d.ft_normal(Bi) for Bi in ft_B2]\n return d.cartesian_product(normal_B2, [B[2:]])\n\n\ndef system_generator(f, B, X):\n g = open(f, 'r')\n L = g.readlines()\n g.close()\n f = open('eq.txt', 'w+')\n f.write('Variables \\n')\n for i in range(len(X)):\n f.write(str(X[i]) + ' in ' + str(B[i]) + ' ; \\n')\n f.write('Constraints \\n')\n for Li in L:\n f.write(Li.replace('\\n', '') + '=0; \\n')\n f.write('end ')\n f.close()\n return f\n\n\ndef solving_with_ibex(eps=0.1):\n uncer_content = []\n cer_content = []\n os.system('ibexsolve --eps-max=' + str(eps) + ' -s eq.txt > output.txt'\n )\n g = open('output.txt', 'r')\n result = g.read()\n with open('output.txt') as f:\n if 'successful' in result:\n cer_content = f.readlines()\n elif 'infeasible' not in result and 'done! but some boxes' in result:\n uncer_content = f.readlines()\n elif 'infeasible problem' in result:\n uncer_content = 'Empty'\n cer_content = 'Empty'\n return [cer_content, uncer_content]\n\n\ndef computing_boxes():\n if 'infeasible' in open('output.txt', 'r').read():\n return 'Empty'\n content = open('output.txt', 'r').readlines()\n cer = []\n uncer = []\n i = 0\n Answer = []\n for fi in content:\n try:\n a = fi.index('(')\n b = fi.index(')')\n T = fi[a:b + 1].replace('(', '[')\n T = fi[a:b + 1].replace('(', '[')\n T = T.replace(')', ']')\n T = T.split(';')\n E = []\n i = 0\n for Ti in T:\n Ti = Ti.replace('[', '')\n Ti = Ti.replace(']', '')\n Ti = Ti.replace('<', '')\n Ti = Ti.replace('>', '')\n x = Ti.index(',')\n a = float(Ti[:x])\n b = float(Ti[x + 1:])\n E.append([])\n E[i] = [a, b]\n i += 1\n if 'solution n' in fi or 'boundary n' in fi:\n cer.append(E)\n elif 'unknown n' in fi:\n uncer.append(E)\n except ValueError:\n pass\n return [cer, uncer]\n\n\ndef enclosing_curve(system, B, X, eps_min=0.1, eps_max=0.1):\n L = [B]\n certified_boxes = []\n uncertified_boxes = []\n while len(L) != 0:\n system_generator(system, L[0], X)\n os.system('ibexsolve --eps-max=' + str(eps_max) + ' --eps-min=' +\n str(eps_min) + ' -s eq.txt > output.txt')\n content = open('output.txt', 'r').readlines()\n ibex_output = computing_boxes()\n if ibex_output == [[], []] and max([(Bi[1] - Bi[0]) for Bi in L[0]]\n ) < eps_min:\n uncertified_boxes.append(L[0])\n L.remove(L[0])\n elif ibex_output == [[], []]:\n children = plane_subdivision(L[0])\n L.remove(L[0])\n L += children\n elif ibex_output == 'Empty':\n L.remove(L[0])\n else:\n if len(ibex_output[0]) != 0:\n certified_boxes += ibex_output[0]\n if len(ibex_output[1]) != 0:\n uncertified_boxes += ibex_output[1]\n L.remove(L[0])\n return [certified_boxes, uncertified_boxes]\n\n\ndef loopsfree_checker(f, certified_boxes, uncer_boxes, P):\n L = eval_file_gen(f, certified_boxes, X)\n while L.replace('\\n', '') != '[]':\n L = L.replace('[', '')\n L = L.replace(']', '')\n L = L.replace('\\n', '')\n L = L.split(',')\n for i in L:\n children = normal_subdivision(certified_boxes[int(i)])\n certified_boxes.remove(certified_boxes[int(i)])\n for child in children:\n cer_children, uncer_children = enclosing_curve(f, child, X)\n certified_boxes += cer_children\n uncer_boxes += uncer_children\n L = eval_file_gen(f, certified_boxes, X)\n return L\n\n\ndef eval_file_gen(f, boxes, X, special_function=[]):\n functions = ['sin', 'cos', 'tan', 'exp'] + special_function\n if len(boxes[0]) == 0:\n return []\n n = len(boxes[0])\n m = len(boxes)\n g = open(f, 'r')\n P_str = g.readlines()\n P_str = [Pi.replace('\\n', '') for Pi in P_str]\n P_str = [Pi.replace('^', '**') for Pi in P_str]\n P_exp = [parse_expr(Pi) for Pi in P_str]\n jac = sp.Matrix(P_str).jacobian(sp.Matrix(X))\n minor1 = jac[:, 1:].det()\n minor2 = jac[:, [i for i in range(n) if i != 1]].det()\n fil = open('evaluation_file1.py', 'w')\n fil.write('import flint as ft \\n')\n fil.write('import sympy as sp \\n')\n fil.write('import interval_arithmetic as d \\n')\n fil.write('boxes=' + str(boxes) + '\\n')\n fil.write(\n 'ftboxes=[ [d.ftconstructor(Bi[0],Bi[1]) for Bi in B ] for B in boxes ] \\n'\n )\n fil.write('n=len(boxes[0])\\n')\n fil.write('m=len(boxes)\\n')\n fil.write('m1=[]\\n')\n fil.write('m2=[]\\n')\n minor1_str = str(minor1)\n minor2_str = str(minor2)\n for i in range(n):\n minor1_str = minor1_str.replace('x' + str(i + 1), 'B[' + str(i) + ']')\n minor2_str = minor2_str.replace('x' + str(i + 1), 'B[' + str(i) + ']')\n for func in functions:\n minor1_str = minor1_str.replace(func, 'ft.arb.' + func)\n minor2_str = minor2_str.replace(func, 'ft.arb.' + func)\n fil.write('for B in ftboxes: \\n')\n fil.write(' m1.append(ft.arb(' + minor1_str + ')) \\n')\n fil.write(' m2.append( ft.arb(' + minor2_str + ')) \\n')\n fil.write(\n 'innrer_loops=[i for i in range(m) if 0 in m1[i] and 0 in m2[i] ]\\n')\n fil.write('print(innrer_loops)\\n')\n fil.close()\n t = os.popen('python3 evaluation_file1.py ').read()\n return t\n\n\ndef boxes_classifier(system, boxes, X, special_function=[]):\n if len(boxes[0]) == 0:\n return [[], [], boxes[1]]\n certified_boxes, uncer_boxes = boxes\n L = eval_file_gen(system, certified_boxes, X)\n if L == []:\n return [[], [], uncer_boxes]\n it = 0\n L = L.replace('[', '')\n L = L.replace(']', '')\n L = L.replace('\\n', '')\n L = L.split(',')\n if L != ['']:\n L = [int(li) for li in L]\n return [[certified_boxes[i] for i in range(len(certified_boxes)) if\n i not in L], [certified_boxes[i] for i in L], uncer_boxes]\n else:\n return [[certified_boxes[i] for i in range(len(certified_boxes)) if\n i not in L], [], uncer_boxes]\n\n\ndef projection_checker(solutions):\n if len(solutions) == 0:\n return [[], []]\n m = len(solutions[0])\n n = int((m + 1) / 2)\n intersect_in2d = [[]] * len(solutions)\n for i in range(len(solutions) - 1):\n for j in range(i + 1, len(solutions)):\n if solutions[i] == solutions[j]:\n continue\n elif d.boxes_intersection(solutions[i][:2], solutions[j][:2]) != [\n ] and (d.boxes_intersection(solutions[i][n:2 * n - 2], [[-\n Bi[1], -Bi[0]] for Bi in solutions[j][n:2 * n - 2]]) == [] and\n d.boxes_intersection(solutions[i][n:2 * n - 2], [[Bi[0], Bi\n [1]] for Bi in solutions[j][n:2 * n - 2]]) == [] or d.\n boxes_intersection(solutions[i][2:n] + [solutions[i][2 * n -\n 2]], solutions[j][2:n] + [solutions[j][2 * n - 2]]) == []):\n intersect_in2d[i] = intersect_in2d[i] + [j]\n accepted = []\n acc_ind = []\n unaccepted = []\n unacc_ind = []\n for i in range(len(solutions)):\n if len(intersect_in2d[i]) == 0 and i not in unacc_ind + acc_ind:\n accepted.append(solutions[i])\n acc_ind.append(i)\n continue\n elif i not in unacc_ind + acc_ind:\n unaccepted.append(solutions[i])\n unacc_ind.append(i)\n for k in intersect_in2d[i]:\n if k not in unacc_ind:\n unaccepted.append(solutions[k])\n unacc_ind.append(k)\n return [accepted, unaccepted]\n\n\ndef Ball_given_2nboxes(system, X, B1, B2, monotonicity_B1=1, monotonicity_B2=1\n ):\n B1_ft = [d.ftconstructor(Bi[0], Bi[1]) for Bi in B1]\n B2_ft = [d.ftconstructor(Bi[0], Bi[1]) for Bi in B2]\n P = [Pi.replace('\\n', '') for Pi in open(system, 'r').readlines()]\n sol = 'Empty'\n if d.boxes_intersection(B1_ft, B2_ft) == [\n ] and monotonicity_B1 == monotonicity_B2 == 1:\n t = estimating_t([[B1_ft], [B2_ft]])\n y_and_r = estimating_yandr([[B1_ft], [B2_ft]])\n intersec_B1B2_in2d = d.boxes_intersection(B1_ft[:2], B2_ft[:2])\n intersec_B1B2_in2d = [[float(Bi.lower()), float(Bi.upper())] for Bi in\n intersec_B1B2_in2d]\n B_Ball = intersec_B1B2_in2d + y_and_r + [t]\n Ball_node_gen(system, B_Ball, X)\n os.system('ibexsolve --eps-max=0.1 -s eq.txt > output.txt')\n sol = computing_boxes()\n return sol\n\n\ndef all_pairs_oflist(L):\n pairs = []\n for i in range(len(L) - 1):\n for j in range(i + 1, len(L)):\n pairs.append([L[i], L[j]])\n return pairs\n\n\ndef checking_assumptions(curve_data):\n if len(curve_data[0][1]) != 0:\n return 0\n Ball_sols_ft = [[d.ftconstructor(Bi[0], Bi[1]) for Bi in B] for B in\n curve_data[1][0]] + [[d.ftconstructor(Bi[0], Bi[1]) for Bi in B] for\n B in curve_data[1][1]]\n alph3 = assum_alph3_checker(Ball_sols_ft)\n if alph3 == 1:\n return 1\n else:\n return 0\n\n\ndef csv_saver(L, type_L='Ball'):\n dic = []\n if type_L == 'Ball':\n n = int((len(L[0]) + 1) / 2)\n for j in range(len(L)):\n dic.append({})\n for i in range(n):\n dic[j]['x' + str(i + 1)] = L[j][i]\n for i in range(n, 2 * n - 2):\n dic[j]['r' + str(i + 3 - n)] = L[j][i]\n dic[j]['t'] = L[j][2 * n - 2]\n return dic\n\n\ndef dict2csv(dictlist, csvfile):\n \"\"\"\n Takes a list of dictionaries as input and outputs a CSV file.\n \"\"\"\n f = open(csvfile, 'wb')\n fieldnames = dictlist[0].keys()\n csvwriter = csv.DictWriter(f, delimiter=',', fieldnames=fieldnames)\n csvwriter.writerow(dict((fn, fn) for fn in fieldnames))\n for row in dictlist:\n csvwriter.writerow(row)\n fn.close()\n\n\ndef assum_alph3_checker(solutions):\n comparing_list = [[]] * len(solutions)\n for i in range(len(solutions) - 1):\n for j in range(i + 1, len(solutions)):\n if d.boxes_intersection(solutions[i][:2], solutions[j][:2]) != []:\n comparing_list[i].append(j)\n comparing_list[j].append(i)\n matching = [len(T) for T in comparing_list]\n if max(matching) <= 2:\n return 1\n else:\n return 0\n\n\ndef plotting_3D(boxes, Box, var=[0, 1, 2]):\n ax = plt.figure().add_subplot(111, projection='3d')\n ax.set_xlim(Box[0][0], Box[0][1])\n ax.set_ylim(Box[1][0], Box[1][1])\n ax.set_zlim(Box[2][0], Box[2][1])\n ax.set_xlabel('x' + str(var[0] + 1))\n ax.set_ylabel('x' + str(var[1] + 1))\n ax.set_zlabel('x' + str(var[2] + 1))\n for box in boxes:\n V = [[box[j][0] for j in range(3)], [box[j][1] for j in range(3)]]\n points = list(itertools.product(*box))\n faces = [[points[0], points[2], points[6], points[4]], [points[0],\n points[2], points[3], points[1]], [points[0], points[1], points\n [5], points[4]], [points[2], points[3], points[7], points[6]],\n [points[1], points[3], points[7], points[5]]]\n ax.add_collection3d(Poly3DCollection(faces, facecolors='green',\n linewidths=1, edgecolors='green', alpha=0.25))\n plt.show()\n\n\ndef enclosing_singularities(system, boxes, B, X, eps_max=0.1, eps_min=0.01):\n combin = []\n ball = []\n start_combin = time.time()\n n = len(B)\n P = [Pi.replace('\\n', '') for Pi in open(system, 'r').readlines()]\n certified_boxes, uncertified_boxes = boxes\n classes = boxes_classifier(system, boxes, X, special_function=[])\n cer_Solutions = []\n uncer_Solutions = []\n H = []\n mon_mid = [[(0.5 * (Bij[1] + Bij[0])) for Bij in Bi[:2]] for Bi in\n classes[0]]\n mon_rad = [max([(0.5 * (Bij[1] - Bij[0])) for Bij in Bi[:2]]) for Bi in\n classes[0]]\n tree = spatial.KDTree(mon_mid)\n intersting_boxes = [tree.query_ball_point(m, r=math.sqrt(2) * r) for m,\n r in zip(mon_mid, mon_rad)]\n \"\"\"for i in range(len(ball)): \n for j in ball[i]:\n if i not in ball[j]:\n ball[j].append(i)\"\"\"\n intersting_boxes = [indi for indi in intersting_boxes if len(indi) > 3]\n discarded_components = []\n for i in range(len(intersting_boxes) - 1):\n for_i_stop = 0\n boxi_set = set(intersting_boxes[i])\n for j in range(i + 1, len(intersting_boxes)):\n boxj_set = set(intersting_boxes[j])\n if boxj_set.issubset(boxi_set):\n discarded_components.append(j)\n elif boxi_set < boxj_set:\n discarded_components.append(i)\n intersting_boxes = [intersting_boxes[i] for i in range(len(\n intersting_boxes)) if i not in discarded_components]\n interesting_boxes_flattened = []\n for Box_ind in intersting_boxes:\n for j in Box_ind:\n if j not in interesting_boxes_flattened:\n interesting_boxes_flattened.append(j)\n plane_components = planner_connected_compnants([classes[0][i] for i in\n interesting_boxes_flattened])\n end_combin = time.time()\n combin.append(end_combin - start_combin)\n H = []\n for plane_component in plane_components:\n if len(plane_component) > 1:\n start_combin = time.time()\n components = connected_compnants(plane_component)\n pairs_of_branches = all_pairs_oflist(components)\n end_combin = time.time()\n combin.append(end_combin - start_combin)\n for pair_branches in pairs_of_branches:\n start_ball = time.time()\n all_boxes = pair_branches[0] + pair_branches[1]\n uni = []\n for box in all_boxes:\n uni = d.box_union(uni, box)\n t = estimating_t(pair_branches)\n t1 = d.ftconstructor(t[0], t[1])\n t = [float(t1.lower()), float(t1.upper())]\n r = [[float(ri[0]), float(ri[1])] for ri in\n estimating_yandr(pair_branches)]\n B_Ball = uni[:2] + r + [t]\n cusp_Ball_solver(P, B_Ball, X)\n Ball_generating_system(P, B_Ball, X, eps_min)\n os.system('ibexsolve --eps-max=' + str(eps_max) +\n ' --eps-min=' + str(eps_min) + ' -s eq.txt > output.txt'\n )\n Solutions = computing_boxes()\n if Solutions != 'Empty' and Solutions != [[], []]:\n cer_Solutions += Solutions[0]\n uncer_Solutions += Solutions[1]\n if Solutions == [[], []]:\n if d.width(B_Ball[:2]) > eps_min:\n new_B = B_Ball[:2] + B[2:n]\n new_boxes = enclosing_curve(system, new_B, X,\n eps_max=0.1 * eps_max)\n resul = enclosing_singularities(system, new_boxes,\n new_B, X, eps_max=0.1 * eps_max)\n cer_Solutions += resul[0] + resul[1]\n uncer_Solutions += resul[2]\n boxes[1] += new_boxes[1]\n else:\n uncer_Solutions.append(B_Ball)\n end_ball = time.time()\n ball.append(end_ball - start_ball)\n start_combin = time.time()\n checked_boxes = []\n all_boxes = boxes[0] + boxes[1]\n checked_boxes = []\n mon_mid_cusp = [[(0.5 * (Bij[1] + Bij[0])) for Bij in Bi[:2]] for Bi in\n classes[1]]\n mon_rad_cusp = [max([(0.5 * (Bij[1] - Bij[0])) for Bij in Bi[:2]]) for\n Bi in classes[1]]\n potential_cusps = [tree.query_ball_point(m, r=math.sqrt(2) * (r +\n eps_max)) for m, r in zip(mon_mid_cusp, mon_rad_cusp)]\n end_combin = time.time()\n combin.append(end_combin - start_combin)\n for cusp_indx in range(len(classes[1])):\n start_combin = time.time()\n intersecting_boxes = [all_boxes[i] for i in potential_cusps[\n cusp_indx] if d.boxes_intersection(all_boxes[i], classes[1][\n cusp_indx]) != []]\n H = []\n uni = classes[1][cusp_indx][:]\n potential_cusp = classes[1][cusp_indx][:]\n checked_boxes.append(potential_cusp)\n for box in intersecting_boxes:\n if box in checked_boxes:\n continue\n uni = d.box_union(uni, box)\n checked_boxes.append(box)\n end_combin = time.time()\n combin.append(end_combin - start_combin)\n start_ball = time.time()\n t = estimating_t([[potential_cusp], [potential_cusp]])\n \"\"\"if t[1]-t[0] < 1e-07:\n t[0]=t[0]-0.5 * eps_min\n t[1]=t[1]+0.5 * eps_min\"\"\"\n B_Ball = uni + [[-1.01, 1.01]] * (n - 2) + [t]\n H.append(B_Ball)\n sol = cusp_Ball_solver(P, B_Ball, X)\n if sol != 'Empty' and sol != [[], []]:\n cer_Solutions += sol[0]\n uncer_Solutions += sol[1]\n if sol == [[], []]:\n uncer_Solutions.append(B_Ball)\n end_ball = time.time()\n ball.append(end_ball - start_ball)\n start_combin = time.time()\n non_intersecting_boxes = [all_boxes[i] for i in potential_cusps[\n cusp_indx] if d.boxes_intersection(all_boxes[i], classes[1][\n cusp_indx]) == []]\n end_combin = time.time()\n combin.append(end_combin - start_combin)\n for aligned in non_intersecting_boxes:\n start_ball = time.time()\n if aligned in checked_boxes:\n continue\n boxes_intersect_aligned = [B for B in non_intersecting_boxes if\n d.boxes_intersection(aligned, B) != []]\n uni = aligned[:]\n for boxi in boxes_intersect_aligned:\n if boxi in checked_boxes:\n continue\n uni = d.box_union(uni, boxi)\n checked_boxes.append(boxi)\n t = estimating_t([[potential_cusp], [uni]])\n \"\"\"if t[1]-t[0] < 1e-07:\n t[0]=t[0]-0.5 * eps_min\n t[1]=t[1]+0.5 * eps_min\"\"\"\n r = [[float(ri[0]), float(ri[1])] for ri in estimating_yandr([[\n potential_cusp], [uni]])]\n B_Ball = potential_cusp[:2] + r + [t]\n H.append(H)\n Ball_generating_system(P, B_Ball, X)\n os.system('ibexsolve --eps-max=' + str(eps_max) +\n ' --eps-min=' + str(eps_min) + ' -s eq.txt > output.txt')\n Solutions = computing_boxes()\n if Solutions != 'Empty':\n cer_Solutions += Solutions[0]\n uncer_Solutions += Solutions[1]\n elif Solutions == [[], []]:\n uncer_Solutions.append(B_Ball)\n end_ball = time.time()\n ball.append(end_ball - start_ball)\n nodes = []\n cups_or_smallnodes = []\n start_combin = time.time()\n checker = projection_checker(cer_Solutions)\n uncer_Solutions = uncer_Solutions + checker[1]\n cer_Solutions = [Bi for Bi in checker[0] if Bi[2 * n - 2][1] >= 0]\n for solution in cer_Solutions:\n if 0 >= solution[2 * n - 2][0] and 0 <= solution[2 * n - 2][1]:\n cups_or_smallnodes.append(solution)\n else:\n nodes.append(solution)\n end_combin = time.time()\n combin.append(end_combin - start_combin)\n print('KDtree ', sum(combin), 'Ball ', sum(ball))\n return [nodes, cups_or_smallnodes, uncer_Solutions]\n\n\n<assignment token>\nprint('enclosing_curve', end_enc - start_enc)\n<assignment token>\nprint(time.time() - t1)\nprint(len(boxes[0]), len(boxes[1]))\nprint(len(nodes), len(uncer_Solutions))\n<assignment token>\nfor i in range(len(nodes) - 1):\n for j in range(i + 1, len(nodes)):\n if d.boxes_intersection(nodes[i], nodes[j]) != []:\n e.append(j)\nprint(len([nodes[i] for i in range(len(nodes)) if i not in e]))\nploting_boxes(boxes[0], boxes[1], B=Box[:2], nodes=nodes, x=0.007, cusps=\n cups_or_smallnodes, uncer_Solutions=uncer_Solutions, color='green',\n Legend=False)\n<docstring token>\n",
"<import token>\n\n\ndef ploting_boxes(boxes, uncer_boxes, var=[0, 1], B=[[-20, 20], [-20, 20]],\n x=0.1, nodes=[], cusps=[], uncer_Solutions=[], Legend=False, color=\n 'green', variabel_name='x'):\n fig, ax = plt.subplots()\n ax.set_xlim(B[0][0], B[0][1])\n ax.set_ylim(B[1][0], B[1][1])\n ax.set_xlabel(variabel_name + str(1))\n ax.set_ylabel(variabel_name + str(2))\n \"\"\"try:\n ax.title(open(\"system.txt\",\"r\").read())\n except:\n pass\"\"\"\n c = 0\n green_patch = mpatches.Patch(color=color, label='smooth part')\n red_patch = mpatches.Patch(color='red', label='unknown part')\n node_patch = mpatches.Patch(color='black', label='Certified nodes',\n fill=None)\n cusp_patch = mpatches.Patch(color='blue', label=\n 'Projection of certified solution with t=0 ', fill=None)\n if Legend == True:\n plt.legend(handles=[green_patch, red_patch, node_patch, cusp_patch])\n for box in boxes:\n rectangle = plt.Rectangle((box[var[0]][0], box[var[1]][0]), box[var\n [0]][1] - box[var[0]][0], box[var[1]][1] - box[var[1]][0],\n color=color)\n plt.gca().add_patch(rectangle)\n for box in uncer_boxes:\n rectangle = plt.Rectangle((box[var[0]][0], box[var[1]][0]), box[var\n [0]][1] - box[var[0]][0], box[var[1]][1] - box[var[1]][0], fc='r')\n plt.gca().add_patch(rectangle)\n for box in nodes:\n rectangle = plt.Rectangle((box[0][0] - x, box[1][0] - x), 2 * x +\n box[0][1] - box[0][0], 2 * x + box[1][1] - box[1][0], fc='y',\n fill=None)\n plt.gca().add_patch(rectangle)\n for box in cusps:\n rectangle = plt.Rectangle((box[0][0] - x, box[1][0] - x), 2 * x +\n box[0][1] - box[0][0], 2 * x + box[1][1] - box[1][0], fc='y',\n color='blue', fill=None)\n plt.gca().add_patch(rectangle)\n for box in uncer_Solutions:\n rectangle = plt.Rectangle((box[0][0] - x, box[1][0] - x), 2 * x +\n box[0][1] - box[0][0], 2 * x + box[1][1] - box[1][0], fc='y',\n color='red', fill=None)\n plt.gca().add_patch(rectangle)\n plt.savefig('fig.jpg', dpi=1000)\n plt.show()\n\n\ndef Ball_node_gen(equations, B_Ball, X):\n P = open(equations, 'r').readlines()\n P = [Pi.replace('\\n', '') for Pi in P]\n n = len(X)\n V = ' Variables \\n '\n for i in range(n):\n V += 'x' + str(i + 1) + ' in ' + str(B_Ball[i]) + ' ; \\n'\n for i in range(n, 2 * n - 2):\n V += 'r' + str(i - n + 3) + ' in ' + str(B_Ball[i]) + ' ; \\n'\n V += 't' + ' in ' + str(B_Ball[2 * n - 2]) + ' ; \\n'\n V += 'Constraints \\n'\n for Pi in P:\n V += SDP_str(Pi, X)[0]\n V += SDP_str(Pi, X)[1]\n last_eq = ''\n for i in range(3, n):\n last_eq += 'r' + str(i) + '^2+'\n last_eq += 'r' + str(n) + '^2 -1=0;'\n V += last_eq + '\\n'\n f = open('eq.txt', 'w+')\n f.write(V)\n f.write('end')\n f.close()\n\n\ndef Ball_solver(equations, B_Ball, X):\n L = [B_Ball]\n certified_boxes = []\n uncertified_boxes = []\n n = len(X)\n while len(L) != 0:\n solvability = 1\n if B_Ball[2 * n - 2][0] <= 0 <= B_Ball[2 * n - 2][1] and d.width([d\n .ftconstructor(Bi[0], Bi[1]) for Bi in L[0]]) < 0.1:\n Ball_cusp_gen(equations, B_Ball, X)\n elif (B_Ball[2 * n - 2][0] > 0 or 0 > B_Ball[2 * n - 2][1]\n ) and d.width([d.ftconstructor(Bi[0], Bi[1]) for Bi in L[0]]\n ) < 0.1:\n Ball_node_gen(equations, B_Ball, X)\n else:\n children = cb.plane_subdivision(L[0])\n L.remove(L[0])\n L += children\n solvability = 0\n if solvability == 1:\n ibex_output = cb.solving_with_ibex()\n if ibex_output[0] == 'Empty':\n L.remove(L[0])\n elif len(ibex_output[0]) != 0:\n certified_boxes += cb.computing_boxes(ibex_output[0])\n L.remove(L[0])\n elif len(ibex_output[1]) != 0:\n uncertified_boxes += cb.computing_boxes(ibex_output[1])\n L.remove(L[0])\n else:\n children = cb.plane_subdivision(L[0])\n L.remove(L[0])\n L += children\n return [certified_boxes, uncertified_boxes]\n\n\ndef SDP_str(P, X):\n n = len(X)\n P_pluse = P[:]\n P_minus = P[:]\n for i in range(2, n):\n P_pluse = P_pluse.replace('x' + str(i + 1), '(x' + str(i + 1) +\n '+ r' + str(i + 1) + '*sqrt(t))')\n P_minus = P_minus.replace('x' + str(i + 1), '(x' + str(i + 1) +\n '- r' + str(i + 1) + '*sqrt(t))')\n SP = '0.5*(' + P_pluse + '+' + P_minus + ')=0; \\n'\n DP = '0.5*(' + P_pluse + '- (' + P_minus + ') )/(sqrt(t))=0; \\n'\n return [SP, DP]\n\n\ndef Ball_generating_system(P, B_Ball, X, eps_min=0.001):\n n = len(X)\n V = ' Variables \\n '\n for i in range(n):\n if B_Ball[i][0] != B_Ball[i][1]:\n V += 'x' + str(i + 1) + ' in ' + str(B_Ball[i]) + ' ; \\n'\n else:\n V += 'x' + str(i + 1) + ' in ' + str([B_Ball[i][0] - eps_min, \n B_Ball[i][1] + eps_min]) + ' ; \\n'\n for i in range(n, 2 * n - 2):\n V += 'r' + str(i - n + 3) + ' in ' + str(B_Ball[i]) + ' ; \\n'\n V += 't' + ' in ' + str(B_Ball[2 * n - 2]) + ' ; \\n'\n V += 'Constraints \\n'\n for Pi in P:\n V += SDP_str(Pi, X)[0]\n V += SDP_str(Pi, X)[1]\n last_eq = ''\n for i in range(3, n):\n last_eq += 'r' + str(i) + '^2+'\n last_eq += 'r' + str(n) + '^2 -1=0;'\n V += last_eq + '\\n'\n f = open('eq.txt', 'w+')\n f.write(V)\n f.write('end')\n f.close()\n\n\ndef intersting_boxes1(f, b):\n pickle_in = open(f, 'rb')\n curve = pickle.load(pickle_in)\n pickle_in.close()\n intersting_boxes = []\n uncer_boxes = []\n for box in curve[0]:\n if b[0][0] <= box[0][0] <= box[0][1] <= b[0][1] and b[1][0] <= box[1][0\n ] <= box[1][1] <= b[1][1]:\n intersting_boxes.append(box)\n for box in curve[1]:\n if b[0][0] <= box[0][0] <= box[0][1] <= b[0][1] and b[1][0] <= box[1][0\n ] <= box[1][1] <= b[1][1]:\n uncer_boxes.append(box)\n return [intersting_boxes, uncer_boxes]\n\n\ndef intersting_boxes(curve, b):\n cer_intersting_boxes = []\n uncer_intersting_boxes = []\n for box in curve[0]:\n if b[0][0] <= box[0][0] <= box[0][1] <= b[0][1] and b[1][0] <= box[1][0\n ] <= box[1][1] <= b[1][1]:\n cer_intersting_boxes.append(box)\n for box in curve[1]:\n if b[0][0] <= box[0][0] <= box[0][1] <= b[0][1] and b[1][0] <= box[1][0\n ] <= box[1][1] <= b[1][1]:\n uncer_intersting_boxes.append(box)\n return [cer_intersting_boxes, uncer_intersting_boxes]\n\n\ndef ibex_output(P, B, X):\n os.system('ibexsolve --eps-max=0.1 -s eq.txt > output.txt')\n g = open('output.txt', 'r')\n result = g.readlines()\n T = computing_boxes(result)\n return T\n\n\ndef estimating_t1(components, upper_bound=200000):\n t1 = upper_bound\n t2 = 0\n for box1 in components[0]:\n for box2 in components[1]:\n a = d.distance(box1, box2).lower()\n b = d.distance(box1, box2).upper()\n if t1 > a:\n t1 = a\n if t2 < b:\n t2 = b\n t = d.ftconstructor(t1, t2)\n t = 0.25 * d.power_interval(t, 2)\n return [float(t.lower()), float(t.upper())]\n\n\ndef estimating_t(components, upper_bound=19000.8):\n t1 = upper_bound\n t2 = 0\n for box1 in components[0]:\n for box2 in components[1]:\n a = d.distance(box1[2:], box2[2:])\n if t1 > a[0]:\n t1 = a[0]\n if t2 < a[1]:\n t2 = a[1]\n t = d.ftconstructor(t1, t2)\n t = 0.25 * d.power_interval(t, 2)\n return [float(t.lower()), float(t.upper())]\n\n\ndef boxes_compare(box1, box2):\n flage = 0\n for i in range(len(box1) - 1, -1, -1):\n if box1[i][0] > box2[i][0]:\n return 1\n if box1[i][0] < box2[i][0]:\n return -1\n return 0\n\n\ndef boxes_sort(boxes):\n sorted_boxes = boxes[:]\n for i in range(len(boxes) - 1):\n for j in range(i + 1, len(boxes)):\n if boxes_compare(sorted_boxes[i], sorted_boxes[j]) == 1:\n sorted_boxes[i], sorted_boxes[j] = sorted_boxes[j\n ], sorted_boxes[i]\n return sorted_boxes\n\n\ndef connected_compnants(boxes):\n ftboxes = boxes[:]\n components = [[ftboxes[0]]]\n for i in range(1, len(ftboxes)):\n boxi_isused = 0\n for j in range(len(components)):\n membership = 0\n for k in range(len(components[j])):\n if d.boxes_intersection(ftboxes[i], components[j][k]) != []:\n components[j].append(ftboxes[i])\n membership = 1\n boxi_isused = 1\n break\n if membership == 1:\n break\n if boxi_isused == 0:\n components.append([ftboxes[i]])\n unused = list(range(len(components)))\n components1 = components[:]\n components2 = []\n while len(components1) != len(components2):\n for i in unused:\n for j in [j for j in list(range(i + 1, len(components))) if j in\n unused]:\n intersection_exists = False\n is_looping = True\n for boxi in components[i]:\n for boxj in components[j]:\n if d.boxes_intersection(boxi, boxj) != []:\n is_looping = False\n intersection_exists = True\n break\n if is_looping == False:\n break\n if intersection_exists == True:\n components[i] += components[j]\n unused.remove(j)\n components2 = components1[:]\n components1 = [components[k] for k in unused]\n return components1\n\n\ndef planner_connected_compnants(boxes):\n if len(boxes) == 0:\n return []\n ftboxes = boxes[:]\n components = [[ftboxes[0]]]\n for i in range(1, len(ftboxes)):\n boxi_isused = 0\n for j in range(len(components)):\n membership = 0\n for k in range(len(components[j])):\n if d.boxes_intersection(ftboxes[i][:2], components[j][k][:2]\n ) != []:\n components[j].append(ftboxes[i])\n membership = 1\n boxi_isused = 1\n break\n if membership == 1:\n break\n if boxi_isused == 0:\n components.append([ftboxes[i]])\n unused = list(range(len(components)))\n components1 = components[:]\n components2 = []\n while len(components1) != len(components2):\n for i in unused:\n for j in [j for j in list(range(i + 1, len(components))) if j in\n unused]:\n intersection_exists = False\n is_looping = True\n for boxi in components[i]:\n for boxj in components[j]:\n if d.boxes_intersection(boxi[:2], boxj[:2]) != []:\n is_looping = False\n intersection_exists = True\n break\n if is_looping == False:\n break\n if intersection_exists == True:\n components[i] += components[j]\n unused.remove(j)\n components2 = components1[:]\n components1 = [components[k] for k in unused]\n return components1\n\n\ndef estimating_yandr(components, upper_bound=100000):\n r_bounds = [[upper_bound, 0]] * (len(components[0][0]) - 2)\n r_list = []\n y_list = []\n for box1 in components[0]:\n for box2 in components[1]:\n ft_box1 = [d.ftconstructor(Bi[0], Bi[1]) for Bi in box1]\n ft_box2 = [d.ftconstructor(Bi[0], Bi[1]) for Bi in box2]\n y_list.append([(0.5 * (q1 + q2)) for q1, q2 in zip(ft_box1[2:],\n ft_box2[2:])])\n norm_q1q2 = d.distance(box1[2:], box2[2:])\n norm_q1q2 = d.ftconstructor(norm_q1q2[0], norm_q1q2[1])\n q1q2 = [(ft_box1[i] - ft_box2[i]) for i in range(2, len(box1))]\n r = [(ri / norm_q1q2) for ri in q1q2]\n r_list.append(r)\n r = []\n y = []\n for i in range(len(y_list[0])):\n yi1 = min([float(y[i].lower()) for y in y_list])\n yi2 = max([float(y[i].upper()) for y in y_list])\n y.append([yi1, yi2])\n for i in range(len(r_list[0])):\n ri1 = min([float(r[i].lower()) for r in r_list])\n ri2 = max([float(r[i].upper()) for r in r_list])\n r.append([ri1, ri2])\n return y + r\n\n\ndef detecting_nodes(boxes, B, f, X, eps):\n mixes_boxes = [[1, box] for box in boxes[0]] + [[0, box] for box in\n boxes[1]]\n ftboxes = [[box[0], [d.ftconstructor(boxi[0], boxi[1]) for boxi in box[\n 1]]] for box in mixes_boxes]\n nodes_lifting = []\n used = []\n P = [Pi.replace('\\n', '') for Pi in open(f, 'r').readlines()]\n for i in range(len(ftboxes)):\n for j in range(i + 1, len(ftboxes)):\n Mariam_ft = d.boxes_intersection(ftboxes[i][1], ftboxes[j][1])\n Mariam = [[float(Bi.lower()), float(Bi.upper())] for Bi in\n Mariam_ft]\n if Mariam == [] and d.boxes_intersection(ftboxes[i][1][:2],\n ftboxes[j][1][:2]) or Mariam != [] and enclosing_curve(f,\n Mariam, X, eps_max=0.1) == [[], []]:\n if i not in used:\n used.append(i)\n nodes_lifting.append(ftboxes[i])\n if j not in used:\n used.append(j)\n nodes_lifting.append(ftboxes[j])\n components = planner_connected_compnants(nodes_lifting)\n cer_components = []\n uncer_components = []\n component_normal = []\n for component in components:\n boxes_component = [box[1] for box in component]\n component_normal = [[[float(Bi.lower()), float(Bi.upper())] for Bi in\n box[1]] for box in component]\n if 0 not in [box[0] for box in component] and eval_file_gen(f,\n component_normal, X) == '[]\\n':\n cer_components.append(boxes_component)\n else:\n uncer_components.append(boxes_component)\n return [cer_components, uncer_components]\n\n\ndef intersect_in_2D(class1, class2, monotonicity=1):\n pl_intesected_pairs = []\n if monotonicity == 1:\n for i in range(len(class1)):\n for j in range(len(class2)):\n if d.boxes_intersection(class1[i][:2], class2[j][:2]) != [\n ] and d.boxes_intersection(class1[i], class2[j]) == []:\n if [class2[j], class1[i]] not in pl_intesected_pairs:\n pl_intesected_pairs.append([class1[i], class2[j]])\n elif monotonicity == 0:\n for i in range(len(class1)):\n for j in range(len(class2)):\n if d.boxes_intersection(class1[i][:2], class2[j][:2]) != []:\n if [class2[j], class1[i]] not in pl_intesected_pairs:\n pl_intesected_pairs.append([class1[i], class2[j]])\n elif monotonicity == 2:\n inters_indic = []\n for i in range(len(class1)):\n inters_indic.append([])\n for j in range(len(class2)):\n if d.boxes_intersection(class1[i][:2], class2[j][:2]) != []:\n inters_indic[i] = inters_indic[i] + [j]\n for k in range(len(class1)):\n if len(inters_indic[k]) > 3:\n for j in range(len(inters_indic[k])):\n if [class2[j], class1[k]] not in pl_intesected_pairs:\n pl_intesected_pairs.append([class1[k], class2[j]])\n return pl_intesected_pairs\n\n\ndef solving_fornodes(equations, boxes, B, X, eps=0.1):\n plane_components = detecting_nodes(boxes, B, equations, X, eps)\n g = open(equations, 'r')\n P = [Pi.replace('\\n', '') for Pi in g.readlines()]\n Ball_solutions = []\n for plane_component in plane_components:\n x1 = float(min([ai[0].lower() for ai in plane_component]))\n x2 = float(max([ai[0].upper() for ai in plane_component]))\n y1 = float(min([ai[1].lower() for ai in plane_component]))\n y2 = float(max([ai[1].upper() for ai in plane_component]))\n components = connected_compnants(plane_component)\n r = [[float(ri[0]), float(ri[1])] for ri in estimating_r(components)]\n t = estimating_t(components)\n t = [float(t[0]), float(t[1])]\n B_Ball = [[x1, x2], [y1, y2]] + r + [t]\n Ball_generating_system(P, B_Ball, X)\n solutionsi = ibex_output(P, B_Ball, X)\n Ball_solutions += solutionsi\n return Ball_solutions\n\n\ndef normal_subdivision(B):\n ft_B = d.subdivide([d.ftconstructor(Bi[0], Bi[1]) for Bi in B[:]])\n return [d.ft_normal(Bi) for Bi in ft_B]\n\n\ndef plane_subdivision(B):\n ft_B2 = d.subdivide([d.ftconstructor(Bi[0], Bi[1]) for Bi in B[:2]])\n normal_B2 = [d.ft_normal(Bi) for Bi in ft_B2]\n return d.cartesian_product(normal_B2, [B[2:]])\n\n\ndef system_generator(f, B, X):\n g = open(f, 'r')\n L = g.readlines()\n g.close()\n f = open('eq.txt', 'w+')\n f.write('Variables \\n')\n for i in range(len(X)):\n f.write(str(X[i]) + ' in ' + str(B[i]) + ' ; \\n')\n f.write('Constraints \\n')\n for Li in L:\n f.write(Li.replace('\\n', '') + '=0; \\n')\n f.write('end ')\n f.close()\n return f\n\n\ndef solving_with_ibex(eps=0.1):\n uncer_content = []\n cer_content = []\n os.system('ibexsolve --eps-max=' + str(eps) + ' -s eq.txt > output.txt'\n )\n g = open('output.txt', 'r')\n result = g.read()\n with open('output.txt') as f:\n if 'successful' in result:\n cer_content = f.readlines()\n elif 'infeasible' not in result and 'done! but some boxes' in result:\n uncer_content = f.readlines()\n elif 'infeasible problem' in result:\n uncer_content = 'Empty'\n cer_content = 'Empty'\n return [cer_content, uncer_content]\n\n\ndef computing_boxes():\n if 'infeasible' in open('output.txt', 'r').read():\n return 'Empty'\n content = open('output.txt', 'r').readlines()\n cer = []\n uncer = []\n i = 0\n Answer = []\n for fi in content:\n try:\n a = fi.index('(')\n b = fi.index(')')\n T = fi[a:b + 1].replace('(', '[')\n T = fi[a:b + 1].replace('(', '[')\n T = T.replace(')', ']')\n T = T.split(';')\n E = []\n i = 0\n for Ti in T:\n Ti = Ti.replace('[', '')\n Ti = Ti.replace(']', '')\n Ti = Ti.replace('<', '')\n Ti = Ti.replace('>', '')\n x = Ti.index(',')\n a = float(Ti[:x])\n b = float(Ti[x + 1:])\n E.append([])\n E[i] = [a, b]\n i += 1\n if 'solution n' in fi or 'boundary n' in fi:\n cer.append(E)\n elif 'unknown n' in fi:\n uncer.append(E)\n except ValueError:\n pass\n return [cer, uncer]\n\n\ndef enclosing_curve(system, B, X, eps_min=0.1, eps_max=0.1):\n L = [B]\n certified_boxes = []\n uncertified_boxes = []\n while len(L) != 0:\n system_generator(system, L[0], X)\n os.system('ibexsolve --eps-max=' + str(eps_max) + ' --eps-min=' +\n str(eps_min) + ' -s eq.txt > output.txt')\n content = open('output.txt', 'r').readlines()\n ibex_output = computing_boxes()\n if ibex_output == [[], []] and max([(Bi[1] - Bi[0]) for Bi in L[0]]\n ) < eps_min:\n uncertified_boxes.append(L[0])\n L.remove(L[0])\n elif ibex_output == [[], []]:\n children = plane_subdivision(L[0])\n L.remove(L[0])\n L += children\n elif ibex_output == 'Empty':\n L.remove(L[0])\n else:\n if len(ibex_output[0]) != 0:\n certified_boxes += ibex_output[0]\n if len(ibex_output[1]) != 0:\n uncertified_boxes += ibex_output[1]\n L.remove(L[0])\n return [certified_boxes, uncertified_boxes]\n\n\ndef loopsfree_checker(f, certified_boxes, uncer_boxes, P):\n L = eval_file_gen(f, certified_boxes, X)\n while L.replace('\\n', '') != '[]':\n L = L.replace('[', '')\n L = L.replace(']', '')\n L = L.replace('\\n', '')\n L = L.split(',')\n for i in L:\n children = normal_subdivision(certified_boxes[int(i)])\n certified_boxes.remove(certified_boxes[int(i)])\n for child in children:\n cer_children, uncer_children = enclosing_curve(f, child, X)\n certified_boxes += cer_children\n uncer_boxes += uncer_children\n L = eval_file_gen(f, certified_boxes, X)\n return L\n\n\ndef eval_file_gen(f, boxes, X, special_function=[]):\n functions = ['sin', 'cos', 'tan', 'exp'] + special_function\n if len(boxes[0]) == 0:\n return []\n n = len(boxes[0])\n m = len(boxes)\n g = open(f, 'r')\n P_str = g.readlines()\n P_str = [Pi.replace('\\n', '') for Pi in P_str]\n P_str = [Pi.replace('^', '**') for Pi in P_str]\n P_exp = [parse_expr(Pi) for Pi in P_str]\n jac = sp.Matrix(P_str).jacobian(sp.Matrix(X))\n minor1 = jac[:, 1:].det()\n minor2 = jac[:, [i for i in range(n) if i != 1]].det()\n fil = open('evaluation_file1.py', 'w')\n fil.write('import flint as ft \\n')\n fil.write('import sympy as sp \\n')\n fil.write('import interval_arithmetic as d \\n')\n fil.write('boxes=' + str(boxes) + '\\n')\n fil.write(\n 'ftboxes=[ [d.ftconstructor(Bi[0],Bi[1]) for Bi in B ] for B in boxes ] \\n'\n )\n fil.write('n=len(boxes[0])\\n')\n fil.write('m=len(boxes)\\n')\n fil.write('m1=[]\\n')\n fil.write('m2=[]\\n')\n minor1_str = str(minor1)\n minor2_str = str(minor2)\n for i in range(n):\n minor1_str = minor1_str.replace('x' + str(i + 1), 'B[' + str(i) + ']')\n minor2_str = minor2_str.replace('x' + str(i + 1), 'B[' + str(i) + ']')\n for func in functions:\n minor1_str = minor1_str.replace(func, 'ft.arb.' + func)\n minor2_str = minor2_str.replace(func, 'ft.arb.' + func)\n fil.write('for B in ftboxes: \\n')\n fil.write(' m1.append(ft.arb(' + minor1_str + ')) \\n')\n fil.write(' m2.append( ft.arb(' + minor2_str + ')) \\n')\n fil.write(\n 'innrer_loops=[i for i in range(m) if 0 in m1[i] and 0 in m2[i] ]\\n')\n fil.write('print(innrer_loops)\\n')\n fil.close()\n t = os.popen('python3 evaluation_file1.py ').read()\n return t\n\n\ndef boxes_classifier(system, boxes, X, special_function=[]):\n if len(boxes[0]) == 0:\n return [[], [], boxes[1]]\n certified_boxes, uncer_boxes = boxes\n L = eval_file_gen(system, certified_boxes, X)\n if L == []:\n return [[], [], uncer_boxes]\n it = 0\n L = L.replace('[', '')\n L = L.replace(']', '')\n L = L.replace('\\n', '')\n L = L.split(',')\n if L != ['']:\n L = [int(li) for li in L]\n return [[certified_boxes[i] for i in range(len(certified_boxes)) if\n i not in L], [certified_boxes[i] for i in L], uncer_boxes]\n else:\n return [[certified_boxes[i] for i in range(len(certified_boxes)) if\n i not in L], [], uncer_boxes]\n\n\ndef projection_checker(solutions):\n if len(solutions) == 0:\n return [[], []]\n m = len(solutions[0])\n n = int((m + 1) / 2)\n intersect_in2d = [[]] * len(solutions)\n for i in range(len(solutions) - 1):\n for j in range(i + 1, len(solutions)):\n if solutions[i] == solutions[j]:\n continue\n elif d.boxes_intersection(solutions[i][:2], solutions[j][:2]) != [\n ] and (d.boxes_intersection(solutions[i][n:2 * n - 2], [[-\n Bi[1], -Bi[0]] for Bi in solutions[j][n:2 * n - 2]]) == [] and\n d.boxes_intersection(solutions[i][n:2 * n - 2], [[Bi[0], Bi\n [1]] for Bi in solutions[j][n:2 * n - 2]]) == [] or d.\n boxes_intersection(solutions[i][2:n] + [solutions[i][2 * n -\n 2]], solutions[j][2:n] + [solutions[j][2 * n - 2]]) == []):\n intersect_in2d[i] = intersect_in2d[i] + [j]\n accepted = []\n acc_ind = []\n unaccepted = []\n unacc_ind = []\n for i in range(len(solutions)):\n if len(intersect_in2d[i]) == 0 and i not in unacc_ind + acc_ind:\n accepted.append(solutions[i])\n acc_ind.append(i)\n continue\n elif i not in unacc_ind + acc_ind:\n unaccepted.append(solutions[i])\n unacc_ind.append(i)\n for k in intersect_in2d[i]:\n if k not in unacc_ind:\n unaccepted.append(solutions[k])\n unacc_ind.append(k)\n return [accepted, unaccepted]\n\n\ndef Ball_given_2nboxes(system, X, B1, B2, monotonicity_B1=1, monotonicity_B2=1\n ):\n B1_ft = [d.ftconstructor(Bi[0], Bi[1]) for Bi in B1]\n B2_ft = [d.ftconstructor(Bi[0], Bi[1]) for Bi in B2]\n P = [Pi.replace('\\n', '') for Pi in open(system, 'r').readlines()]\n sol = 'Empty'\n if d.boxes_intersection(B1_ft, B2_ft) == [\n ] and monotonicity_B1 == monotonicity_B2 == 1:\n t = estimating_t([[B1_ft], [B2_ft]])\n y_and_r = estimating_yandr([[B1_ft], [B2_ft]])\n intersec_B1B2_in2d = d.boxes_intersection(B1_ft[:2], B2_ft[:2])\n intersec_B1B2_in2d = [[float(Bi.lower()), float(Bi.upper())] for Bi in\n intersec_B1B2_in2d]\n B_Ball = intersec_B1B2_in2d + y_and_r + [t]\n Ball_node_gen(system, B_Ball, X)\n os.system('ibexsolve --eps-max=0.1 -s eq.txt > output.txt')\n sol = computing_boxes()\n return sol\n\n\ndef all_pairs_oflist(L):\n pairs = []\n for i in range(len(L) - 1):\n for j in range(i + 1, len(L)):\n pairs.append([L[i], L[j]])\n return pairs\n\n\ndef checking_assumptions(curve_data):\n if len(curve_data[0][1]) != 0:\n return 0\n Ball_sols_ft = [[d.ftconstructor(Bi[0], Bi[1]) for Bi in B] for B in\n curve_data[1][0]] + [[d.ftconstructor(Bi[0], Bi[1]) for Bi in B] for\n B in curve_data[1][1]]\n alph3 = assum_alph3_checker(Ball_sols_ft)\n if alph3 == 1:\n return 1\n else:\n return 0\n\n\ndef csv_saver(L, type_L='Ball'):\n dic = []\n if type_L == 'Ball':\n n = int((len(L[0]) + 1) / 2)\n for j in range(len(L)):\n dic.append({})\n for i in range(n):\n dic[j]['x' + str(i + 1)] = L[j][i]\n for i in range(n, 2 * n - 2):\n dic[j]['r' + str(i + 3 - n)] = L[j][i]\n dic[j]['t'] = L[j][2 * n - 2]\n return dic\n\n\ndef dict2csv(dictlist, csvfile):\n \"\"\"\n Takes a list of dictionaries as input and outputs a CSV file.\n \"\"\"\n f = open(csvfile, 'wb')\n fieldnames = dictlist[0].keys()\n csvwriter = csv.DictWriter(f, delimiter=',', fieldnames=fieldnames)\n csvwriter.writerow(dict((fn, fn) for fn in fieldnames))\n for row in dictlist:\n csvwriter.writerow(row)\n fn.close()\n\n\ndef assum_alph3_checker(solutions):\n comparing_list = [[]] * len(solutions)\n for i in range(len(solutions) - 1):\n for j in range(i + 1, len(solutions)):\n if d.boxes_intersection(solutions[i][:2], solutions[j][:2]) != []:\n comparing_list[i].append(j)\n comparing_list[j].append(i)\n matching = [len(T) for T in comparing_list]\n if max(matching) <= 2:\n return 1\n else:\n return 0\n\n\ndef plotting_3D(boxes, Box, var=[0, 1, 2]):\n ax = plt.figure().add_subplot(111, projection='3d')\n ax.set_xlim(Box[0][0], Box[0][1])\n ax.set_ylim(Box[1][0], Box[1][1])\n ax.set_zlim(Box[2][0], Box[2][1])\n ax.set_xlabel('x' + str(var[0] + 1))\n ax.set_ylabel('x' + str(var[1] + 1))\n ax.set_zlabel('x' + str(var[2] + 1))\n for box in boxes:\n V = [[box[j][0] for j in range(3)], [box[j][1] for j in range(3)]]\n points = list(itertools.product(*box))\n faces = [[points[0], points[2], points[6], points[4]], [points[0],\n points[2], points[3], points[1]], [points[0], points[1], points\n [5], points[4]], [points[2], points[3], points[7], points[6]],\n [points[1], points[3], points[7], points[5]]]\n ax.add_collection3d(Poly3DCollection(faces, facecolors='green',\n linewidths=1, edgecolors='green', alpha=0.25))\n plt.show()\n\n\ndef enclosing_singularities(system, boxes, B, X, eps_max=0.1, eps_min=0.01):\n combin = []\n ball = []\n start_combin = time.time()\n n = len(B)\n P = [Pi.replace('\\n', '') for Pi in open(system, 'r').readlines()]\n certified_boxes, uncertified_boxes = boxes\n classes = boxes_classifier(system, boxes, X, special_function=[])\n cer_Solutions = []\n uncer_Solutions = []\n H = []\n mon_mid = [[(0.5 * (Bij[1] + Bij[0])) for Bij in Bi[:2]] for Bi in\n classes[0]]\n mon_rad = [max([(0.5 * (Bij[1] - Bij[0])) for Bij in Bi[:2]]) for Bi in\n classes[0]]\n tree = spatial.KDTree(mon_mid)\n intersting_boxes = [tree.query_ball_point(m, r=math.sqrt(2) * r) for m,\n r in zip(mon_mid, mon_rad)]\n \"\"\"for i in range(len(ball)): \n for j in ball[i]:\n if i not in ball[j]:\n ball[j].append(i)\"\"\"\n intersting_boxes = [indi for indi in intersting_boxes if len(indi) > 3]\n discarded_components = []\n for i in range(len(intersting_boxes) - 1):\n for_i_stop = 0\n boxi_set = set(intersting_boxes[i])\n for j in range(i + 1, len(intersting_boxes)):\n boxj_set = set(intersting_boxes[j])\n if boxj_set.issubset(boxi_set):\n discarded_components.append(j)\n elif boxi_set < boxj_set:\n discarded_components.append(i)\n intersting_boxes = [intersting_boxes[i] for i in range(len(\n intersting_boxes)) if i not in discarded_components]\n interesting_boxes_flattened = []\n for Box_ind in intersting_boxes:\n for j in Box_ind:\n if j not in interesting_boxes_flattened:\n interesting_boxes_flattened.append(j)\n plane_components = planner_connected_compnants([classes[0][i] for i in\n interesting_boxes_flattened])\n end_combin = time.time()\n combin.append(end_combin - start_combin)\n H = []\n for plane_component in plane_components:\n if len(plane_component) > 1:\n start_combin = time.time()\n components = connected_compnants(plane_component)\n pairs_of_branches = all_pairs_oflist(components)\n end_combin = time.time()\n combin.append(end_combin - start_combin)\n for pair_branches in pairs_of_branches:\n start_ball = time.time()\n all_boxes = pair_branches[0] + pair_branches[1]\n uni = []\n for box in all_boxes:\n uni = d.box_union(uni, box)\n t = estimating_t(pair_branches)\n t1 = d.ftconstructor(t[0], t[1])\n t = [float(t1.lower()), float(t1.upper())]\n r = [[float(ri[0]), float(ri[1])] for ri in\n estimating_yandr(pair_branches)]\n B_Ball = uni[:2] + r + [t]\n cusp_Ball_solver(P, B_Ball, X)\n Ball_generating_system(P, B_Ball, X, eps_min)\n os.system('ibexsolve --eps-max=' + str(eps_max) +\n ' --eps-min=' + str(eps_min) + ' -s eq.txt > output.txt'\n )\n Solutions = computing_boxes()\n if Solutions != 'Empty' and Solutions != [[], []]:\n cer_Solutions += Solutions[0]\n uncer_Solutions += Solutions[1]\n if Solutions == [[], []]:\n if d.width(B_Ball[:2]) > eps_min:\n new_B = B_Ball[:2] + B[2:n]\n new_boxes = enclosing_curve(system, new_B, X,\n eps_max=0.1 * eps_max)\n resul = enclosing_singularities(system, new_boxes,\n new_B, X, eps_max=0.1 * eps_max)\n cer_Solutions += resul[0] + resul[1]\n uncer_Solutions += resul[2]\n boxes[1] += new_boxes[1]\n else:\n uncer_Solutions.append(B_Ball)\n end_ball = time.time()\n ball.append(end_ball - start_ball)\n start_combin = time.time()\n checked_boxes = []\n all_boxes = boxes[0] + boxes[1]\n checked_boxes = []\n mon_mid_cusp = [[(0.5 * (Bij[1] + Bij[0])) for Bij in Bi[:2]] for Bi in\n classes[1]]\n mon_rad_cusp = [max([(0.5 * (Bij[1] - Bij[0])) for Bij in Bi[:2]]) for\n Bi in classes[1]]\n potential_cusps = [tree.query_ball_point(m, r=math.sqrt(2) * (r +\n eps_max)) for m, r in zip(mon_mid_cusp, mon_rad_cusp)]\n end_combin = time.time()\n combin.append(end_combin - start_combin)\n for cusp_indx in range(len(classes[1])):\n start_combin = time.time()\n intersecting_boxes = [all_boxes[i] for i in potential_cusps[\n cusp_indx] if d.boxes_intersection(all_boxes[i], classes[1][\n cusp_indx]) != []]\n H = []\n uni = classes[1][cusp_indx][:]\n potential_cusp = classes[1][cusp_indx][:]\n checked_boxes.append(potential_cusp)\n for box in intersecting_boxes:\n if box in checked_boxes:\n continue\n uni = d.box_union(uni, box)\n checked_boxes.append(box)\n end_combin = time.time()\n combin.append(end_combin - start_combin)\n start_ball = time.time()\n t = estimating_t([[potential_cusp], [potential_cusp]])\n \"\"\"if t[1]-t[0] < 1e-07:\n t[0]=t[0]-0.5 * eps_min\n t[1]=t[1]+0.5 * eps_min\"\"\"\n B_Ball = uni + [[-1.01, 1.01]] * (n - 2) + [t]\n H.append(B_Ball)\n sol = cusp_Ball_solver(P, B_Ball, X)\n if sol != 'Empty' and sol != [[], []]:\n cer_Solutions += sol[0]\n uncer_Solutions += sol[1]\n if sol == [[], []]:\n uncer_Solutions.append(B_Ball)\n end_ball = time.time()\n ball.append(end_ball - start_ball)\n start_combin = time.time()\n non_intersecting_boxes = [all_boxes[i] for i in potential_cusps[\n cusp_indx] if d.boxes_intersection(all_boxes[i], classes[1][\n cusp_indx]) == []]\n end_combin = time.time()\n combin.append(end_combin - start_combin)\n for aligned in non_intersecting_boxes:\n start_ball = time.time()\n if aligned in checked_boxes:\n continue\n boxes_intersect_aligned = [B for B in non_intersecting_boxes if\n d.boxes_intersection(aligned, B) != []]\n uni = aligned[:]\n for boxi in boxes_intersect_aligned:\n if boxi in checked_boxes:\n continue\n uni = d.box_union(uni, boxi)\n checked_boxes.append(boxi)\n t = estimating_t([[potential_cusp], [uni]])\n \"\"\"if t[1]-t[0] < 1e-07:\n t[0]=t[0]-0.5 * eps_min\n t[1]=t[1]+0.5 * eps_min\"\"\"\n r = [[float(ri[0]), float(ri[1])] for ri in estimating_yandr([[\n potential_cusp], [uni]])]\n B_Ball = potential_cusp[:2] + r + [t]\n H.append(H)\n Ball_generating_system(P, B_Ball, X)\n os.system('ibexsolve --eps-max=' + str(eps_max) +\n ' --eps-min=' + str(eps_min) + ' -s eq.txt > output.txt')\n Solutions = computing_boxes()\n if Solutions != 'Empty':\n cer_Solutions += Solutions[0]\n uncer_Solutions += Solutions[1]\n elif Solutions == [[], []]:\n uncer_Solutions.append(B_Ball)\n end_ball = time.time()\n ball.append(end_ball - start_ball)\n nodes = []\n cups_or_smallnodes = []\n start_combin = time.time()\n checker = projection_checker(cer_Solutions)\n uncer_Solutions = uncer_Solutions + checker[1]\n cer_Solutions = [Bi for Bi in checker[0] if Bi[2 * n - 2][1] >= 0]\n for solution in cer_Solutions:\n if 0 >= solution[2 * n - 2][0] and 0 <= solution[2 * n - 2][1]:\n cups_or_smallnodes.append(solution)\n else:\n nodes.append(solution)\n end_combin = time.time()\n combin.append(end_combin - start_combin)\n print('KDtree ', sum(combin), 'Ball ', sum(ball))\n return [nodes, cups_or_smallnodes, uncer_Solutions]\n\n\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<docstring token>\n",
"<import token>\n\n\ndef ploting_boxes(boxes, uncer_boxes, var=[0, 1], B=[[-20, 20], [-20, 20]],\n x=0.1, nodes=[], cusps=[], uncer_Solutions=[], Legend=False, color=\n 'green', variabel_name='x'):\n fig, ax = plt.subplots()\n ax.set_xlim(B[0][0], B[0][1])\n ax.set_ylim(B[1][0], B[1][1])\n ax.set_xlabel(variabel_name + str(1))\n ax.set_ylabel(variabel_name + str(2))\n \"\"\"try:\n ax.title(open(\"system.txt\",\"r\").read())\n except:\n pass\"\"\"\n c = 0\n green_patch = mpatches.Patch(color=color, label='smooth part')\n red_patch = mpatches.Patch(color='red', label='unknown part')\n node_patch = mpatches.Patch(color='black', label='Certified nodes',\n fill=None)\n cusp_patch = mpatches.Patch(color='blue', label=\n 'Projection of certified solution with t=0 ', fill=None)\n if Legend == True:\n plt.legend(handles=[green_patch, red_patch, node_patch, cusp_patch])\n for box in boxes:\n rectangle = plt.Rectangle((box[var[0]][0], box[var[1]][0]), box[var\n [0]][1] - box[var[0]][0], box[var[1]][1] - box[var[1]][0],\n color=color)\n plt.gca().add_patch(rectangle)\n for box in uncer_boxes:\n rectangle = plt.Rectangle((box[var[0]][0], box[var[1]][0]), box[var\n [0]][1] - box[var[0]][0], box[var[1]][1] - box[var[1]][0], fc='r')\n plt.gca().add_patch(rectangle)\n for box in nodes:\n rectangle = plt.Rectangle((box[0][0] - x, box[1][0] - x), 2 * x +\n box[0][1] - box[0][0], 2 * x + box[1][1] - box[1][0], fc='y',\n fill=None)\n plt.gca().add_patch(rectangle)\n for box in cusps:\n rectangle = plt.Rectangle((box[0][0] - x, box[1][0] - x), 2 * x +\n box[0][1] - box[0][0], 2 * x + box[1][1] - box[1][0], fc='y',\n color='blue', fill=None)\n plt.gca().add_patch(rectangle)\n for box in uncer_Solutions:\n rectangle = plt.Rectangle((box[0][0] - x, box[1][0] - x), 2 * x +\n box[0][1] - box[0][0], 2 * x + box[1][1] - box[1][0], fc='y',\n color='red', fill=None)\n plt.gca().add_patch(rectangle)\n plt.savefig('fig.jpg', dpi=1000)\n plt.show()\n\n\ndef Ball_node_gen(equations, B_Ball, X):\n P = open(equations, 'r').readlines()\n P = [Pi.replace('\\n', '') for Pi in P]\n n = len(X)\n V = ' Variables \\n '\n for i in range(n):\n V += 'x' + str(i + 1) + ' in ' + str(B_Ball[i]) + ' ; \\n'\n for i in range(n, 2 * n - 2):\n V += 'r' + str(i - n + 3) + ' in ' + str(B_Ball[i]) + ' ; \\n'\n V += 't' + ' in ' + str(B_Ball[2 * n - 2]) + ' ; \\n'\n V += 'Constraints \\n'\n for Pi in P:\n V += SDP_str(Pi, X)[0]\n V += SDP_str(Pi, X)[1]\n last_eq = ''\n for i in range(3, n):\n last_eq += 'r' + str(i) + '^2+'\n last_eq += 'r' + str(n) + '^2 -1=0;'\n V += last_eq + '\\n'\n f = open('eq.txt', 'w+')\n f.write(V)\n f.write('end')\n f.close()\n\n\ndef Ball_solver(equations, B_Ball, X):\n L = [B_Ball]\n certified_boxes = []\n uncertified_boxes = []\n n = len(X)\n while len(L) != 0:\n solvability = 1\n if B_Ball[2 * n - 2][0] <= 0 <= B_Ball[2 * n - 2][1] and d.width([d\n .ftconstructor(Bi[0], Bi[1]) for Bi in L[0]]) < 0.1:\n Ball_cusp_gen(equations, B_Ball, X)\n elif (B_Ball[2 * n - 2][0] > 0 or 0 > B_Ball[2 * n - 2][1]\n ) and d.width([d.ftconstructor(Bi[0], Bi[1]) for Bi in L[0]]\n ) < 0.1:\n Ball_node_gen(equations, B_Ball, X)\n else:\n children = cb.plane_subdivision(L[0])\n L.remove(L[0])\n L += children\n solvability = 0\n if solvability == 1:\n ibex_output = cb.solving_with_ibex()\n if ibex_output[0] == 'Empty':\n L.remove(L[0])\n elif len(ibex_output[0]) != 0:\n certified_boxes += cb.computing_boxes(ibex_output[0])\n L.remove(L[0])\n elif len(ibex_output[1]) != 0:\n uncertified_boxes += cb.computing_boxes(ibex_output[1])\n L.remove(L[0])\n else:\n children = cb.plane_subdivision(L[0])\n L.remove(L[0])\n L += children\n return [certified_boxes, uncertified_boxes]\n\n\ndef SDP_str(P, X):\n n = len(X)\n P_pluse = P[:]\n P_minus = P[:]\n for i in range(2, n):\n P_pluse = P_pluse.replace('x' + str(i + 1), '(x' + str(i + 1) +\n '+ r' + str(i + 1) + '*sqrt(t))')\n P_minus = P_minus.replace('x' + str(i + 1), '(x' + str(i + 1) +\n '- r' + str(i + 1) + '*sqrt(t))')\n SP = '0.5*(' + P_pluse + '+' + P_minus + ')=0; \\n'\n DP = '0.5*(' + P_pluse + '- (' + P_minus + ') )/(sqrt(t))=0; \\n'\n return [SP, DP]\n\n\ndef Ball_generating_system(P, B_Ball, X, eps_min=0.001):\n n = len(X)\n V = ' Variables \\n '\n for i in range(n):\n if B_Ball[i][0] != B_Ball[i][1]:\n V += 'x' + str(i + 1) + ' in ' + str(B_Ball[i]) + ' ; \\n'\n else:\n V += 'x' + str(i + 1) + ' in ' + str([B_Ball[i][0] - eps_min, \n B_Ball[i][1] + eps_min]) + ' ; \\n'\n for i in range(n, 2 * n - 2):\n V += 'r' + str(i - n + 3) + ' in ' + str(B_Ball[i]) + ' ; \\n'\n V += 't' + ' in ' + str(B_Ball[2 * n - 2]) + ' ; \\n'\n V += 'Constraints \\n'\n for Pi in P:\n V += SDP_str(Pi, X)[0]\n V += SDP_str(Pi, X)[1]\n last_eq = ''\n for i in range(3, n):\n last_eq += 'r' + str(i) + '^2+'\n last_eq += 'r' + str(n) + '^2 -1=0;'\n V += last_eq + '\\n'\n f = open('eq.txt', 'w+')\n f.write(V)\n f.write('end')\n f.close()\n\n\ndef intersting_boxes1(f, b):\n pickle_in = open(f, 'rb')\n curve = pickle.load(pickle_in)\n pickle_in.close()\n intersting_boxes = []\n uncer_boxes = []\n for box in curve[0]:\n if b[0][0] <= box[0][0] <= box[0][1] <= b[0][1] and b[1][0] <= box[1][0\n ] <= box[1][1] <= b[1][1]:\n intersting_boxes.append(box)\n for box in curve[1]:\n if b[0][0] <= box[0][0] <= box[0][1] <= b[0][1] and b[1][0] <= box[1][0\n ] <= box[1][1] <= b[1][1]:\n uncer_boxes.append(box)\n return [intersting_boxes, uncer_boxes]\n\n\ndef intersting_boxes(curve, b):\n cer_intersting_boxes = []\n uncer_intersting_boxes = []\n for box in curve[0]:\n if b[0][0] <= box[0][0] <= box[0][1] <= b[0][1] and b[1][0] <= box[1][0\n ] <= box[1][1] <= b[1][1]:\n cer_intersting_boxes.append(box)\n for box in curve[1]:\n if b[0][0] <= box[0][0] <= box[0][1] <= b[0][1] and b[1][0] <= box[1][0\n ] <= box[1][1] <= b[1][1]:\n uncer_intersting_boxes.append(box)\n return [cer_intersting_boxes, uncer_intersting_boxes]\n\n\ndef ibex_output(P, B, X):\n os.system('ibexsolve --eps-max=0.1 -s eq.txt > output.txt')\n g = open('output.txt', 'r')\n result = g.readlines()\n T = computing_boxes(result)\n return T\n\n\ndef estimating_t1(components, upper_bound=200000):\n t1 = upper_bound\n t2 = 0\n for box1 in components[0]:\n for box2 in components[1]:\n a = d.distance(box1, box2).lower()\n b = d.distance(box1, box2).upper()\n if t1 > a:\n t1 = a\n if t2 < b:\n t2 = b\n t = d.ftconstructor(t1, t2)\n t = 0.25 * d.power_interval(t, 2)\n return [float(t.lower()), float(t.upper())]\n\n\ndef estimating_t(components, upper_bound=19000.8):\n t1 = upper_bound\n t2 = 0\n for box1 in components[0]:\n for box2 in components[1]:\n a = d.distance(box1[2:], box2[2:])\n if t1 > a[0]:\n t1 = a[0]\n if t2 < a[1]:\n t2 = a[1]\n t = d.ftconstructor(t1, t2)\n t = 0.25 * d.power_interval(t, 2)\n return [float(t.lower()), float(t.upper())]\n\n\ndef boxes_compare(box1, box2):\n flage = 0\n for i in range(len(box1) - 1, -1, -1):\n if box1[i][0] > box2[i][0]:\n return 1\n if box1[i][0] < box2[i][0]:\n return -1\n return 0\n\n\ndef boxes_sort(boxes):\n sorted_boxes = boxes[:]\n for i in range(len(boxes) - 1):\n for j in range(i + 1, len(boxes)):\n if boxes_compare(sorted_boxes[i], sorted_boxes[j]) == 1:\n sorted_boxes[i], sorted_boxes[j] = sorted_boxes[j\n ], sorted_boxes[i]\n return sorted_boxes\n\n\ndef connected_compnants(boxes):\n ftboxes = boxes[:]\n components = [[ftboxes[0]]]\n for i in range(1, len(ftboxes)):\n boxi_isused = 0\n for j in range(len(components)):\n membership = 0\n for k in range(len(components[j])):\n if d.boxes_intersection(ftboxes[i], components[j][k]) != []:\n components[j].append(ftboxes[i])\n membership = 1\n boxi_isused = 1\n break\n if membership == 1:\n break\n if boxi_isused == 0:\n components.append([ftboxes[i]])\n unused = list(range(len(components)))\n components1 = components[:]\n components2 = []\n while len(components1) != len(components2):\n for i in unused:\n for j in [j for j in list(range(i + 1, len(components))) if j in\n unused]:\n intersection_exists = False\n is_looping = True\n for boxi in components[i]:\n for boxj in components[j]:\n if d.boxes_intersection(boxi, boxj) != []:\n is_looping = False\n intersection_exists = True\n break\n if is_looping == False:\n break\n if intersection_exists == True:\n components[i] += components[j]\n unused.remove(j)\n components2 = components1[:]\n components1 = [components[k] for k in unused]\n return components1\n\n\ndef planner_connected_compnants(boxes):\n if len(boxes) == 0:\n return []\n ftboxes = boxes[:]\n components = [[ftboxes[0]]]\n for i in range(1, len(ftboxes)):\n boxi_isused = 0\n for j in range(len(components)):\n membership = 0\n for k in range(len(components[j])):\n if d.boxes_intersection(ftboxes[i][:2], components[j][k][:2]\n ) != []:\n components[j].append(ftboxes[i])\n membership = 1\n boxi_isused = 1\n break\n if membership == 1:\n break\n if boxi_isused == 0:\n components.append([ftboxes[i]])\n unused = list(range(len(components)))\n components1 = components[:]\n components2 = []\n while len(components1) != len(components2):\n for i in unused:\n for j in [j for j in list(range(i + 1, len(components))) if j in\n unused]:\n intersection_exists = False\n is_looping = True\n for boxi in components[i]:\n for boxj in components[j]:\n if d.boxes_intersection(boxi[:2], boxj[:2]) != []:\n is_looping = False\n intersection_exists = True\n break\n if is_looping == False:\n break\n if intersection_exists == True:\n components[i] += components[j]\n unused.remove(j)\n components2 = components1[:]\n components1 = [components[k] for k in unused]\n return components1\n\n\ndef estimating_yandr(components, upper_bound=100000):\n r_bounds = [[upper_bound, 0]] * (len(components[0][0]) - 2)\n r_list = []\n y_list = []\n for box1 in components[0]:\n for box2 in components[1]:\n ft_box1 = [d.ftconstructor(Bi[0], Bi[1]) for Bi in box1]\n ft_box2 = [d.ftconstructor(Bi[0], Bi[1]) for Bi in box2]\n y_list.append([(0.5 * (q1 + q2)) for q1, q2 in zip(ft_box1[2:],\n ft_box2[2:])])\n norm_q1q2 = d.distance(box1[2:], box2[2:])\n norm_q1q2 = d.ftconstructor(norm_q1q2[0], norm_q1q2[1])\n q1q2 = [(ft_box1[i] - ft_box2[i]) for i in range(2, len(box1))]\n r = [(ri / norm_q1q2) for ri in q1q2]\n r_list.append(r)\n r = []\n y = []\n for i in range(len(y_list[0])):\n yi1 = min([float(y[i].lower()) for y in y_list])\n yi2 = max([float(y[i].upper()) for y in y_list])\n y.append([yi1, yi2])\n for i in range(len(r_list[0])):\n ri1 = min([float(r[i].lower()) for r in r_list])\n ri2 = max([float(r[i].upper()) for r in r_list])\n r.append([ri1, ri2])\n return y + r\n\n\ndef detecting_nodes(boxes, B, f, X, eps):\n mixes_boxes = [[1, box] for box in boxes[0]] + [[0, box] for box in\n boxes[1]]\n ftboxes = [[box[0], [d.ftconstructor(boxi[0], boxi[1]) for boxi in box[\n 1]]] for box in mixes_boxes]\n nodes_lifting = []\n used = []\n P = [Pi.replace('\\n', '') for Pi in open(f, 'r').readlines()]\n for i in range(len(ftboxes)):\n for j in range(i + 1, len(ftboxes)):\n Mariam_ft = d.boxes_intersection(ftboxes[i][1], ftboxes[j][1])\n Mariam = [[float(Bi.lower()), float(Bi.upper())] for Bi in\n Mariam_ft]\n if Mariam == [] and d.boxes_intersection(ftboxes[i][1][:2],\n ftboxes[j][1][:2]) or Mariam != [] and enclosing_curve(f,\n Mariam, X, eps_max=0.1) == [[], []]:\n if i not in used:\n used.append(i)\n nodes_lifting.append(ftboxes[i])\n if j not in used:\n used.append(j)\n nodes_lifting.append(ftboxes[j])\n components = planner_connected_compnants(nodes_lifting)\n cer_components = []\n uncer_components = []\n component_normal = []\n for component in components:\n boxes_component = [box[1] for box in component]\n component_normal = [[[float(Bi.lower()), float(Bi.upper())] for Bi in\n box[1]] for box in component]\n if 0 not in [box[0] for box in component] and eval_file_gen(f,\n component_normal, X) == '[]\\n':\n cer_components.append(boxes_component)\n else:\n uncer_components.append(boxes_component)\n return [cer_components, uncer_components]\n\n\ndef intersect_in_2D(class1, class2, monotonicity=1):\n pl_intesected_pairs = []\n if monotonicity == 1:\n for i in range(len(class1)):\n for j in range(len(class2)):\n if d.boxes_intersection(class1[i][:2], class2[j][:2]) != [\n ] and d.boxes_intersection(class1[i], class2[j]) == []:\n if [class2[j], class1[i]] not in pl_intesected_pairs:\n pl_intesected_pairs.append([class1[i], class2[j]])\n elif monotonicity == 0:\n for i in range(len(class1)):\n for j in range(len(class2)):\n if d.boxes_intersection(class1[i][:2], class2[j][:2]) != []:\n if [class2[j], class1[i]] not in pl_intesected_pairs:\n pl_intesected_pairs.append([class1[i], class2[j]])\n elif monotonicity == 2:\n inters_indic = []\n for i in range(len(class1)):\n inters_indic.append([])\n for j in range(len(class2)):\n if d.boxes_intersection(class1[i][:2], class2[j][:2]) != []:\n inters_indic[i] = inters_indic[i] + [j]\n for k in range(len(class1)):\n if len(inters_indic[k]) > 3:\n for j in range(len(inters_indic[k])):\n if [class2[j], class1[k]] not in pl_intesected_pairs:\n pl_intesected_pairs.append([class1[k], class2[j]])\n return pl_intesected_pairs\n\n\ndef solving_fornodes(equations, boxes, B, X, eps=0.1):\n plane_components = detecting_nodes(boxes, B, equations, X, eps)\n g = open(equations, 'r')\n P = [Pi.replace('\\n', '') for Pi in g.readlines()]\n Ball_solutions = []\n for plane_component in plane_components:\n x1 = float(min([ai[0].lower() for ai in plane_component]))\n x2 = float(max([ai[0].upper() for ai in plane_component]))\n y1 = float(min([ai[1].lower() for ai in plane_component]))\n y2 = float(max([ai[1].upper() for ai in plane_component]))\n components = connected_compnants(plane_component)\n r = [[float(ri[0]), float(ri[1])] for ri in estimating_r(components)]\n t = estimating_t(components)\n t = [float(t[0]), float(t[1])]\n B_Ball = [[x1, x2], [y1, y2]] + r + [t]\n Ball_generating_system(P, B_Ball, X)\n solutionsi = ibex_output(P, B_Ball, X)\n Ball_solutions += solutionsi\n return Ball_solutions\n\n\ndef normal_subdivision(B):\n ft_B = d.subdivide([d.ftconstructor(Bi[0], Bi[1]) for Bi in B[:]])\n return [d.ft_normal(Bi) for Bi in ft_B]\n\n\ndef plane_subdivision(B):\n ft_B2 = d.subdivide([d.ftconstructor(Bi[0], Bi[1]) for Bi in B[:2]])\n normal_B2 = [d.ft_normal(Bi) for Bi in ft_B2]\n return d.cartesian_product(normal_B2, [B[2:]])\n\n\ndef system_generator(f, B, X):\n g = open(f, 'r')\n L = g.readlines()\n g.close()\n f = open('eq.txt', 'w+')\n f.write('Variables \\n')\n for i in range(len(X)):\n f.write(str(X[i]) + ' in ' + str(B[i]) + ' ; \\n')\n f.write('Constraints \\n')\n for Li in L:\n f.write(Li.replace('\\n', '') + '=0; \\n')\n f.write('end ')\n f.close()\n return f\n\n\n<function token>\n\n\ndef computing_boxes():\n if 'infeasible' in open('output.txt', 'r').read():\n return 'Empty'\n content = open('output.txt', 'r').readlines()\n cer = []\n uncer = []\n i = 0\n Answer = []\n for fi in content:\n try:\n a = fi.index('(')\n b = fi.index(')')\n T = fi[a:b + 1].replace('(', '[')\n T = fi[a:b + 1].replace('(', '[')\n T = T.replace(')', ']')\n T = T.split(';')\n E = []\n i = 0\n for Ti in T:\n Ti = Ti.replace('[', '')\n Ti = Ti.replace(']', '')\n Ti = Ti.replace('<', '')\n Ti = Ti.replace('>', '')\n x = Ti.index(',')\n a = float(Ti[:x])\n b = float(Ti[x + 1:])\n E.append([])\n E[i] = [a, b]\n i += 1\n if 'solution n' in fi or 'boundary n' in fi:\n cer.append(E)\n elif 'unknown n' in fi:\n uncer.append(E)\n except ValueError:\n pass\n return [cer, uncer]\n\n\ndef enclosing_curve(system, B, X, eps_min=0.1, eps_max=0.1):\n L = [B]\n certified_boxes = []\n uncertified_boxes = []\n while len(L) != 0:\n system_generator(system, L[0], X)\n os.system('ibexsolve --eps-max=' + str(eps_max) + ' --eps-min=' +\n str(eps_min) + ' -s eq.txt > output.txt')\n content = open('output.txt', 'r').readlines()\n ibex_output = computing_boxes()\n if ibex_output == [[], []] and max([(Bi[1] - Bi[0]) for Bi in L[0]]\n ) < eps_min:\n uncertified_boxes.append(L[0])\n L.remove(L[0])\n elif ibex_output == [[], []]:\n children = plane_subdivision(L[0])\n L.remove(L[0])\n L += children\n elif ibex_output == 'Empty':\n L.remove(L[0])\n else:\n if len(ibex_output[0]) != 0:\n certified_boxes += ibex_output[0]\n if len(ibex_output[1]) != 0:\n uncertified_boxes += ibex_output[1]\n L.remove(L[0])\n return [certified_boxes, uncertified_boxes]\n\n\ndef loopsfree_checker(f, certified_boxes, uncer_boxes, P):\n L = eval_file_gen(f, certified_boxes, X)\n while L.replace('\\n', '') != '[]':\n L = L.replace('[', '')\n L = L.replace(']', '')\n L = L.replace('\\n', '')\n L = L.split(',')\n for i in L:\n children = normal_subdivision(certified_boxes[int(i)])\n certified_boxes.remove(certified_boxes[int(i)])\n for child in children:\n cer_children, uncer_children = enclosing_curve(f, child, X)\n certified_boxes += cer_children\n uncer_boxes += uncer_children\n L = eval_file_gen(f, certified_boxes, X)\n return L\n\n\ndef eval_file_gen(f, boxes, X, special_function=[]):\n functions = ['sin', 'cos', 'tan', 'exp'] + special_function\n if len(boxes[0]) == 0:\n return []\n n = len(boxes[0])\n m = len(boxes)\n g = open(f, 'r')\n P_str = g.readlines()\n P_str = [Pi.replace('\\n', '') for Pi in P_str]\n P_str = [Pi.replace('^', '**') for Pi in P_str]\n P_exp = [parse_expr(Pi) for Pi in P_str]\n jac = sp.Matrix(P_str).jacobian(sp.Matrix(X))\n minor1 = jac[:, 1:].det()\n minor2 = jac[:, [i for i in range(n) if i != 1]].det()\n fil = open('evaluation_file1.py', 'w')\n fil.write('import flint as ft \\n')\n fil.write('import sympy as sp \\n')\n fil.write('import interval_arithmetic as d \\n')\n fil.write('boxes=' + str(boxes) + '\\n')\n fil.write(\n 'ftboxes=[ [d.ftconstructor(Bi[0],Bi[1]) for Bi in B ] for B in boxes ] \\n'\n )\n fil.write('n=len(boxes[0])\\n')\n fil.write('m=len(boxes)\\n')\n fil.write('m1=[]\\n')\n fil.write('m2=[]\\n')\n minor1_str = str(minor1)\n minor2_str = str(minor2)\n for i in range(n):\n minor1_str = minor1_str.replace('x' + str(i + 1), 'B[' + str(i) + ']')\n minor2_str = minor2_str.replace('x' + str(i + 1), 'B[' + str(i) + ']')\n for func in functions:\n minor1_str = minor1_str.replace(func, 'ft.arb.' + func)\n minor2_str = minor2_str.replace(func, 'ft.arb.' + func)\n fil.write('for B in ftboxes: \\n')\n fil.write(' m1.append(ft.arb(' + minor1_str + ')) \\n')\n fil.write(' m2.append( ft.arb(' + minor2_str + ')) \\n')\n fil.write(\n 'innrer_loops=[i for i in range(m) if 0 in m1[i] and 0 in m2[i] ]\\n')\n fil.write('print(innrer_loops)\\n')\n fil.close()\n t = os.popen('python3 evaluation_file1.py ').read()\n return t\n\n\ndef boxes_classifier(system, boxes, X, special_function=[]):\n if len(boxes[0]) == 0:\n return [[], [], boxes[1]]\n certified_boxes, uncer_boxes = boxes\n L = eval_file_gen(system, certified_boxes, X)\n if L == []:\n return [[], [], uncer_boxes]\n it = 0\n L = L.replace('[', '')\n L = L.replace(']', '')\n L = L.replace('\\n', '')\n L = L.split(',')\n if L != ['']:\n L = [int(li) for li in L]\n return [[certified_boxes[i] for i in range(len(certified_boxes)) if\n i not in L], [certified_boxes[i] for i in L], uncer_boxes]\n else:\n return [[certified_boxes[i] for i in range(len(certified_boxes)) if\n i not in L], [], uncer_boxes]\n\n\ndef projection_checker(solutions):\n if len(solutions) == 0:\n return [[], []]\n m = len(solutions[0])\n n = int((m + 1) / 2)\n intersect_in2d = [[]] * len(solutions)\n for i in range(len(solutions) - 1):\n for j in range(i + 1, len(solutions)):\n if solutions[i] == solutions[j]:\n continue\n elif d.boxes_intersection(solutions[i][:2], solutions[j][:2]) != [\n ] and (d.boxes_intersection(solutions[i][n:2 * n - 2], [[-\n Bi[1], -Bi[0]] for Bi in solutions[j][n:2 * n - 2]]) == [] and\n d.boxes_intersection(solutions[i][n:2 * n - 2], [[Bi[0], Bi\n [1]] for Bi in solutions[j][n:2 * n - 2]]) == [] or d.\n boxes_intersection(solutions[i][2:n] + [solutions[i][2 * n -\n 2]], solutions[j][2:n] + [solutions[j][2 * n - 2]]) == []):\n intersect_in2d[i] = intersect_in2d[i] + [j]\n accepted = []\n acc_ind = []\n unaccepted = []\n unacc_ind = []\n for i in range(len(solutions)):\n if len(intersect_in2d[i]) == 0 and i not in unacc_ind + acc_ind:\n accepted.append(solutions[i])\n acc_ind.append(i)\n continue\n elif i not in unacc_ind + acc_ind:\n unaccepted.append(solutions[i])\n unacc_ind.append(i)\n for k in intersect_in2d[i]:\n if k not in unacc_ind:\n unaccepted.append(solutions[k])\n unacc_ind.append(k)\n return [accepted, unaccepted]\n\n\ndef Ball_given_2nboxes(system, X, B1, B2, monotonicity_B1=1, monotonicity_B2=1\n ):\n B1_ft = [d.ftconstructor(Bi[0], Bi[1]) for Bi in B1]\n B2_ft = [d.ftconstructor(Bi[0], Bi[1]) for Bi in B2]\n P = [Pi.replace('\\n', '') for Pi in open(system, 'r').readlines()]\n sol = 'Empty'\n if d.boxes_intersection(B1_ft, B2_ft) == [\n ] and monotonicity_B1 == monotonicity_B2 == 1:\n t = estimating_t([[B1_ft], [B2_ft]])\n y_and_r = estimating_yandr([[B1_ft], [B2_ft]])\n intersec_B1B2_in2d = d.boxes_intersection(B1_ft[:2], B2_ft[:2])\n intersec_B1B2_in2d = [[float(Bi.lower()), float(Bi.upper())] for Bi in\n intersec_B1B2_in2d]\n B_Ball = intersec_B1B2_in2d + y_and_r + [t]\n Ball_node_gen(system, B_Ball, X)\n os.system('ibexsolve --eps-max=0.1 -s eq.txt > output.txt')\n sol = computing_boxes()\n return sol\n\n\ndef all_pairs_oflist(L):\n pairs = []\n for i in range(len(L) - 1):\n for j in range(i + 1, len(L)):\n pairs.append([L[i], L[j]])\n return pairs\n\n\ndef checking_assumptions(curve_data):\n if len(curve_data[0][1]) != 0:\n return 0\n Ball_sols_ft = [[d.ftconstructor(Bi[0], Bi[1]) for Bi in B] for B in\n curve_data[1][0]] + [[d.ftconstructor(Bi[0], Bi[1]) for Bi in B] for\n B in curve_data[1][1]]\n alph3 = assum_alph3_checker(Ball_sols_ft)\n if alph3 == 1:\n return 1\n else:\n return 0\n\n\ndef csv_saver(L, type_L='Ball'):\n dic = []\n if type_L == 'Ball':\n n = int((len(L[0]) + 1) / 2)\n for j in range(len(L)):\n dic.append({})\n for i in range(n):\n dic[j]['x' + str(i + 1)] = L[j][i]\n for i in range(n, 2 * n - 2):\n dic[j]['r' + str(i + 3 - n)] = L[j][i]\n dic[j]['t'] = L[j][2 * n - 2]\n return dic\n\n\ndef dict2csv(dictlist, csvfile):\n \"\"\"\n Takes a list of dictionaries as input and outputs a CSV file.\n \"\"\"\n f = open(csvfile, 'wb')\n fieldnames = dictlist[0].keys()\n csvwriter = csv.DictWriter(f, delimiter=',', fieldnames=fieldnames)\n csvwriter.writerow(dict((fn, fn) for fn in fieldnames))\n for row in dictlist:\n csvwriter.writerow(row)\n fn.close()\n\n\ndef assum_alph3_checker(solutions):\n comparing_list = [[]] * len(solutions)\n for i in range(len(solutions) - 1):\n for j in range(i + 1, len(solutions)):\n if d.boxes_intersection(solutions[i][:2], solutions[j][:2]) != []:\n comparing_list[i].append(j)\n comparing_list[j].append(i)\n matching = [len(T) for T in comparing_list]\n if max(matching) <= 2:\n return 1\n else:\n return 0\n\n\ndef plotting_3D(boxes, Box, var=[0, 1, 2]):\n ax = plt.figure().add_subplot(111, projection='3d')\n ax.set_xlim(Box[0][0], Box[0][1])\n ax.set_ylim(Box[1][0], Box[1][1])\n ax.set_zlim(Box[2][0], Box[2][1])\n ax.set_xlabel('x' + str(var[0] + 1))\n ax.set_ylabel('x' + str(var[1] + 1))\n ax.set_zlabel('x' + str(var[2] + 1))\n for box in boxes:\n V = [[box[j][0] for j in range(3)], [box[j][1] for j in range(3)]]\n points = list(itertools.product(*box))\n faces = [[points[0], points[2], points[6], points[4]], [points[0],\n points[2], points[3], points[1]], [points[0], points[1], points\n [5], points[4]], [points[2], points[3], points[7], points[6]],\n [points[1], points[3], points[7], points[5]]]\n ax.add_collection3d(Poly3DCollection(faces, facecolors='green',\n linewidths=1, edgecolors='green', alpha=0.25))\n plt.show()\n\n\ndef enclosing_singularities(system, boxes, B, X, eps_max=0.1, eps_min=0.01):\n combin = []\n ball = []\n start_combin = time.time()\n n = len(B)\n P = [Pi.replace('\\n', '') for Pi in open(system, 'r').readlines()]\n certified_boxes, uncertified_boxes = boxes\n classes = boxes_classifier(system, boxes, X, special_function=[])\n cer_Solutions = []\n uncer_Solutions = []\n H = []\n mon_mid = [[(0.5 * (Bij[1] + Bij[0])) for Bij in Bi[:2]] for Bi in\n classes[0]]\n mon_rad = [max([(0.5 * (Bij[1] - Bij[0])) for Bij in Bi[:2]]) for Bi in\n classes[0]]\n tree = spatial.KDTree(mon_mid)\n intersting_boxes = [tree.query_ball_point(m, r=math.sqrt(2) * r) for m,\n r in zip(mon_mid, mon_rad)]\n \"\"\"for i in range(len(ball)): \n for j in ball[i]:\n if i not in ball[j]:\n ball[j].append(i)\"\"\"\n intersting_boxes = [indi for indi in intersting_boxes if len(indi) > 3]\n discarded_components = []\n for i in range(len(intersting_boxes) - 1):\n for_i_stop = 0\n boxi_set = set(intersting_boxes[i])\n for j in range(i + 1, len(intersting_boxes)):\n boxj_set = set(intersting_boxes[j])\n if boxj_set.issubset(boxi_set):\n discarded_components.append(j)\n elif boxi_set < boxj_set:\n discarded_components.append(i)\n intersting_boxes = [intersting_boxes[i] for i in range(len(\n intersting_boxes)) if i not in discarded_components]\n interesting_boxes_flattened = []\n for Box_ind in intersting_boxes:\n for j in Box_ind:\n if j not in interesting_boxes_flattened:\n interesting_boxes_flattened.append(j)\n plane_components = planner_connected_compnants([classes[0][i] for i in\n interesting_boxes_flattened])\n end_combin = time.time()\n combin.append(end_combin - start_combin)\n H = []\n for plane_component in plane_components:\n if len(plane_component) > 1:\n start_combin = time.time()\n components = connected_compnants(plane_component)\n pairs_of_branches = all_pairs_oflist(components)\n end_combin = time.time()\n combin.append(end_combin - start_combin)\n for pair_branches in pairs_of_branches:\n start_ball = time.time()\n all_boxes = pair_branches[0] + pair_branches[1]\n uni = []\n for box in all_boxes:\n uni = d.box_union(uni, box)\n t = estimating_t(pair_branches)\n t1 = d.ftconstructor(t[0], t[1])\n t = [float(t1.lower()), float(t1.upper())]\n r = [[float(ri[0]), float(ri[1])] for ri in\n estimating_yandr(pair_branches)]\n B_Ball = uni[:2] + r + [t]\n cusp_Ball_solver(P, B_Ball, X)\n Ball_generating_system(P, B_Ball, X, eps_min)\n os.system('ibexsolve --eps-max=' + str(eps_max) +\n ' --eps-min=' + str(eps_min) + ' -s eq.txt > output.txt'\n )\n Solutions = computing_boxes()\n if Solutions != 'Empty' and Solutions != [[], []]:\n cer_Solutions += Solutions[0]\n uncer_Solutions += Solutions[1]\n if Solutions == [[], []]:\n if d.width(B_Ball[:2]) > eps_min:\n new_B = B_Ball[:2] + B[2:n]\n new_boxes = enclosing_curve(system, new_B, X,\n eps_max=0.1 * eps_max)\n resul = enclosing_singularities(system, new_boxes,\n new_B, X, eps_max=0.1 * eps_max)\n cer_Solutions += resul[0] + resul[1]\n uncer_Solutions += resul[2]\n boxes[1] += new_boxes[1]\n else:\n uncer_Solutions.append(B_Ball)\n end_ball = time.time()\n ball.append(end_ball - start_ball)\n start_combin = time.time()\n checked_boxes = []\n all_boxes = boxes[0] + boxes[1]\n checked_boxes = []\n mon_mid_cusp = [[(0.5 * (Bij[1] + Bij[0])) for Bij in Bi[:2]] for Bi in\n classes[1]]\n mon_rad_cusp = [max([(0.5 * (Bij[1] - Bij[0])) for Bij in Bi[:2]]) for\n Bi in classes[1]]\n potential_cusps = [tree.query_ball_point(m, r=math.sqrt(2) * (r +\n eps_max)) for m, r in zip(mon_mid_cusp, mon_rad_cusp)]\n end_combin = time.time()\n combin.append(end_combin - start_combin)\n for cusp_indx in range(len(classes[1])):\n start_combin = time.time()\n intersecting_boxes = [all_boxes[i] for i in potential_cusps[\n cusp_indx] if d.boxes_intersection(all_boxes[i], classes[1][\n cusp_indx]) != []]\n H = []\n uni = classes[1][cusp_indx][:]\n potential_cusp = classes[1][cusp_indx][:]\n checked_boxes.append(potential_cusp)\n for box in intersecting_boxes:\n if box in checked_boxes:\n continue\n uni = d.box_union(uni, box)\n checked_boxes.append(box)\n end_combin = time.time()\n combin.append(end_combin - start_combin)\n start_ball = time.time()\n t = estimating_t([[potential_cusp], [potential_cusp]])\n \"\"\"if t[1]-t[0] < 1e-07:\n t[0]=t[0]-0.5 * eps_min\n t[1]=t[1]+0.5 * eps_min\"\"\"\n B_Ball = uni + [[-1.01, 1.01]] * (n - 2) + [t]\n H.append(B_Ball)\n sol = cusp_Ball_solver(P, B_Ball, X)\n if sol != 'Empty' and sol != [[], []]:\n cer_Solutions += sol[0]\n uncer_Solutions += sol[1]\n if sol == [[], []]:\n uncer_Solutions.append(B_Ball)\n end_ball = time.time()\n ball.append(end_ball - start_ball)\n start_combin = time.time()\n non_intersecting_boxes = [all_boxes[i] for i in potential_cusps[\n cusp_indx] if d.boxes_intersection(all_boxes[i], classes[1][\n cusp_indx]) == []]\n end_combin = time.time()\n combin.append(end_combin - start_combin)\n for aligned in non_intersecting_boxes:\n start_ball = time.time()\n if aligned in checked_boxes:\n continue\n boxes_intersect_aligned = [B for B in non_intersecting_boxes if\n d.boxes_intersection(aligned, B) != []]\n uni = aligned[:]\n for boxi in boxes_intersect_aligned:\n if boxi in checked_boxes:\n continue\n uni = d.box_union(uni, boxi)\n checked_boxes.append(boxi)\n t = estimating_t([[potential_cusp], [uni]])\n \"\"\"if t[1]-t[0] < 1e-07:\n t[0]=t[0]-0.5 * eps_min\n t[1]=t[1]+0.5 * eps_min\"\"\"\n r = [[float(ri[0]), float(ri[1])] for ri in estimating_yandr([[\n potential_cusp], [uni]])]\n B_Ball = potential_cusp[:2] + r + [t]\n H.append(H)\n Ball_generating_system(P, B_Ball, X)\n os.system('ibexsolve --eps-max=' + str(eps_max) +\n ' --eps-min=' + str(eps_min) + ' -s eq.txt > output.txt')\n Solutions = computing_boxes()\n if Solutions != 'Empty':\n cer_Solutions += Solutions[0]\n uncer_Solutions += Solutions[1]\n elif Solutions == [[], []]:\n uncer_Solutions.append(B_Ball)\n end_ball = time.time()\n ball.append(end_ball - start_ball)\n nodes = []\n cups_or_smallnodes = []\n start_combin = time.time()\n checker = projection_checker(cer_Solutions)\n uncer_Solutions = uncer_Solutions + checker[1]\n cer_Solutions = [Bi for Bi in checker[0] if Bi[2 * n - 2][1] >= 0]\n for solution in cer_Solutions:\n if 0 >= solution[2 * n - 2][0] and 0 <= solution[2 * n - 2][1]:\n cups_or_smallnodes.append(solution)\n else:\n nodes.append(solution)\n end_combin = time.time()\n combin.append(end_combin - start_combin)\n print('KDtree ', sum(combin), 'Ball ', sum(ball))\n return [nodes, cups_or_smallnodes, uncer_Solutions]\n\n\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<docstring token>\n",
"<import token>\n\n\ndef ploting_boxes(boxes, uncer_boxes, var=[0, 1], B=[[-20, 20], [-20, 20]],\n x=0.1, nodes=[], cusps=[], uncer_Solutions=[], Legend=False, color=\n 'green', variabel_name='x'):\n fig, ax = plt.subplots()\n ax.set_xlim(B[0][0], B[0][1])\n ax.set_ylim(B[1][0], B[1][1])\n ax.set_xlabel(variabel_name + str(1))\n ax.set_ylabel(variabel_name + str(2))\n \"\"\"try:\n ax.title(open(\"system.txt\",\"r\").read())\n except:\n pass\"\"\"\n c = 0\n green_patch = mpatches.Patch(color=color, label='smooth part')\n red_patch = mpatches.Patch(color='red', label='unknown part')\n node_patch = mpatches.Patch(color='black', label='Certified nodes',\n fill=None)\n cusp_patch = mpatches.Patch(color='blue', label=\n 'Projection of certified solution with t=0 ', fill=None)\n if Legend == True:\n plt.legend(handles=[green_patch, red_patch, node_patch, cusp_patch])\n for box in boxes:\n rectangle = plt.Rectangle((box[var[0]][0], box[var[1]][0]), box[var\n [0]][1] - box[var[0]][0], box[var[1]][1] - box[var[1]][0],\n color=color)\n plt.gca().add_patch(rectangle)\n for box in uncer_boxes:\n rectangle = plt.Rectangle((box[var[0]][0], box[var[1]][0]), box[var\n [0]][1] - box[var[0]][0], box[var[1]][1] - box[var[1]][0], fc='r')\n plt.gca().add_patch(rectangle)\n for box in nodes:\n rectangle = plt.Rectangle((box[0][0] - x, box[1][0] - x), 2 * x +\n box[0][1] - box[0][0], 2 * x + box[1][1] - box[1][0], fc='y',\n fill=None)\n plt.gca().add_patch(rectangle)\n for box in cusps:\n rectangle = plt.Rectangle((box[0][0] - x, box[1][0] - x), 2 * x +\n box[0][1] - box[0][0], 2 * x + box[1][1] - box[1][0], fc='y',\n color='blue', fill=None)\n plt.gca().add_patch(rectangle)\n for box in uncer_Solutions:\n rectangle = plt.Rectangle((box[0][0] - x, box[1][0] - x), 2 * x +\n box[0][1] - box[0][0], 2 * x + box[1][1] - box[1][0], fc='y',\n color='red', fill=None)\n plt.gca().add_patch(rectangle)\n plt.savefig('fig.jpg', dpi=1000)\n plt.show()\n\n\ndef Ball_node_gen(equations, B_Ball, X):\n P = open(equations, 'r').readlines()\n P = [Pi.replace('\\n', '') for Pi in P]\n n = len(X)\n V = ' Variables \\n '\n for i in range(n):\n V += 'x' + str(i + 1) + ' in ' + str(B_Ball[i]) + ' ; \\n'\n for i in range(n, 2 * n - 2):\n V += 'r' + str(i - n + 3) + ' in ' + str(B_Ball[i]) + ' ; \\n'\n V += 't' + ' in ' + str(B_Ball[2 * n - 2]) + ' ; \\n'\n V += 'Constraints \\n'\n for Pi in P:\n V += SDP_str(Pi, X)[0]\n V += SDP_str(Pi, X)[1]\n last_eq = ''\n for i in range(3, n):\n last_eq += 'r' + str(i) + '^2+'\n last_eq += 'r' + str(n) + '^2 -1=0;'\n V += last_eq + '\\n'\n f = open('eq.txt', 'w+')\n f.write(V)\n f.write('end')\n f.close()\n\n\ndef Ball_solver(equations, B_Ball, X):\n L = [B_Ball]\n certified_boxes = []\n uncertified_boxes = []\n n = len(X)\n while len(L) != 0:\n solvability = 1\n if B_Ball[2 * n - 2][0] <= 0 <= B_Ball[2 * n - 2][1] and d.width([d\n .ftconstructor(Bi[0], Bi[1]) for Bi in L[0]]) < 0.1:\n Ball_cusp_gen(equations, B_Ball, X)\n elif (B_Ball[2 * n - 2][0] > 0 or 0 > B_Ball[2 * n - 2][1]\n ) and d.width([d.ftconstructor(Bi[0], Bi[1]) for Bi in L[0]]\n ) < 0.1:\n Ball_node_gen(equations, B_Ball, X)\n else:\n children = cb.plane_subdivision(L[0])\n L.remove(L[0])\n L += children\n solvability = 0\n if solvability == 1:\n ibex_output = cb.solving_with_ibex()\n if ibex_output[0] == 'Empty':\n L.remove(L[0])\n elif len(ibex_output[0]) != 0:\n certified_boxes += cb.computing_boxes(ibex_output[0])\n L.remove(L[0])\n elif len(ibex_output[1]) != 0:\n uncertified_boxes += cb.computing_boxes(ibex_output[1])\n L.remove(L[0])\n else:\n children = cb.plane_subdivision(L[0])\n L.remove(L[0])\n L += children\n return [certified_boxes, uncertified_boxes]\n\n\ndef SDP_str(P, X):\n n = len(X)\n P_pluse = P[:]\n P_minus = P[:]\n for i in range(2, n):\n P_pluse = P_pluse.replace('x' + str(i + 1), '(x' + str(i + 1) +\n '+ r' + str(i + 1) + '*sqrt(t))')\n P_minus = P_minus.replace('x' + str(i + 1), '(x' + str(i + 1) +\n '- r' + str(i + 1) + '*sqrt(t))')\n SP = '0.5*(' + P_pluse + '+' + P_minus + ')=0; \\n'\n DP = '0.5*(' + P_pluse + '- (' + P_minus + ') )/(sqrt(t))=0; \\n'\n return [SP, DP]\n\n\ndef Ball_generating_system(P, B_Ball, X, eps_min=0.001):\n n = len(X)\n V = ' Variables \\n '\n for i in range(n):\n if B_Ball[i][0] != B_Ball[i][1]:\n V += 'x' + str(i + 1) + ' in ' + str(B_Ball[i]) + ' ; \\n'\n else:\n V += 'x' + str(i + 1) + ' in ' + str([B_Ball[i][0] - eps_min, \n B_Ball[i][1] + eps_min]) + ' ; \\n'\n for i in range(n, 2 * n - 2):\n V += 'r' + str(i - n + 3) + ' in ' + str(B_Ball[i]) + ' ; \\n'\n V += 't' + ' in ' + str(B_Ball[2 * n - 2]) + ' ; \\n'\n V += 'Constraints \\n'\n for Pi in P:\n V += SDP_str(Pi, X)[0]\n V += SDP_str(Pi, X)[1]\n last_eq = ''\n for i in range(3, n):\n last_eq += 'r' + str(i) + '^2+'\n last_eq += 'r' + str(n) + '^2 -1=0;'\n V += last_eq + '\\n'\n f = open('eq.txt', 'w+')\n f.write(V)\n f.write('end')\n f.close()\n\n\ndef intersting_boxes1(f, b):\n pickle_in = open(f, 'rb')\n curve = pickle.load(pickle_in)\n pickle_in.close()\n intersting_boxes = []\n uncer_boxes = []\n for box in curve[0]:\n if b[0][0] <= box[0][0] <= box[0][1] <= b[0][1] and b[1][0] <= box[1][0\n ] <= box[1][1] <= b[1][1]:\n intersting_boxes.append(box)\n for box in curve[1]:\n if b[0][0] <= box[0][0] <= box[0][1] <= b[0][1] and b[1][0] <= box[1][0\n ] <= box[1][1] <= b[1][1]:\n uncer_boxes.append(box)\n return [intersting_boxes, uncer_boxes]\n\n\ndef intersting_boxes(curve, b):\n cer_intersting_boxes = []\n uncer_intersting_boxes = []\n for box in curve[0]:\n if b[0][0] <= box[0][0] <= box[0][1] <= b[0][1] and b[1][0] <= box[1][0\n ] <= box[1][1] <= b[1][1]:\n cer_intersting_boxes.append(box)\n for box in curve[1]:\n if b[0][0] <= box[0][0] <= box[0][1] <= b[0][1] and b[1][0] <= box[1][0\n ] <= box[1][1] <= b[1][1]:\n uncer_intersting_boxes.append(box)\n return [cer_intersting_boxes, uncer_intersting_boxes]\n\n\ndef ibex_output(P, B, X):\n os.system('ibexsolve --eps-max=0.1 -s eq.txt > output.txt')\n g = open('output.txt', 'r')\n result = g.readlines()\n T = computing_boxes(result)\n return T\n\n\ndef estimating_t1(components, upper_bound=200000):\n t1 = upper_bound\n t2 = 0\n for box1 in components[0]:\n for box2 in components[1]:\n a = d.distance(box1, box2).lower()\n b = d.distance(box1, box2).upper()\n if t1 > a:\n t1 = a\n if t2 < b:\n t2 = b\n t = d.ftconstructor(t1, t2)\n t = 0.25 * d.power_interval(t, 2)\n return [float(t.lower()), float(t.upper())]\n\n\ndef estimating_t(components, upper_bound=19000.8):\n t1 = upper_bound\n t2 = 0\n for box1 in components[0]:\n for box2 in components[1]:\n a = d.distance(box1[2:], box2[2:])\n if t1 > a[0]:\n t1 = a[0]\n if t2 < a[1]:\n t2 = a[1]\n t = d.ftconstructor(t1, t2)\n t = 0.25 * d.power_interval(t, 2)\n return [float(t.lower()), float(t.upper())]\n\n\ndef boxes_compare(box1, box2):\n flage = 0\n for i in range(len(box1) - 1, -1, -1):\n if box1[i][0] > box2[i][0]:\n return 1\n if box1[i][0] < box2[i][0]:\n return -1\n return 0\n\n\ndef boxes_sort(boxes):\n sorted_boxes = boxes[:]\n for i in range(len(boxes) - 1):\n for j in range(i + 1, len(boxes)):\n if boxes_compare(sorted_boxes[i], sorted_boxes[j]) == 1:\n sorted_boxes[i], sorted_boxes[j] = sorted_boxes[j\n ], sorted_boxes[i]\n return sorted_boxes\n\n\ndef connected_compnants(boxes):\n ftboxes = boxes[:]\n components = [[ftboxes[0]]]\n for i in range(1, len(ftboxes)):\n boxi_isused = 0\n for j in range(len(components)):\n membership = 0\n for k in range(len(components[j])):\n if d.boxes_intersection(ftboxes[i], components[j][k]) != []:\n components[j].append(ftboxes[i])\n membership = 1\n boxi_isused = 1\n break\n if membership == 1:\n break\n if boxi_isused == 0:\n components.append([ftboxes[i]])\n unused = list(range(len(components)))\n components1 = components[:]\n components2 = []\n while len(components1) != len(components2):\n for i in unused:\n for j in [j for j in list(range(i + 1, len(components))) if j in\n unused]:\n intersection_exists = False\n is_looping = True\n for boxi in components[i]:\n for boxj in components[j]:\n if d.boxes_intersection(boxi, boxj) != []:\n is_looping = False\n intersection_exists = True\n break\n if is_looping == False:\n break\n if intersection_exists == True:\n components[i] += components[j]\n unused.remove(j)\n components2 = components1[:]\n components1 = [components[k] for k in unused]\n return components1\n\n\ndef planner_connected_compnants(boxes):\n if len(boxes) == 0:\n return []\n ftboxes = boxes[:]\n components = [[ftboxes[0]]]\n for i in range(1, len(ftboxes)):\n boxi_isused = 0\n for j in range(len(components)):\n membership = 0\n for k in range(len(components[j])):\n if d.boxes_intersection(ftboxes[i][:2], components[j][k][:2]\n ) != []:\n components[j].append(ftboxes[i])\n membership = 1\n boxi_isused = 1\n break\n if membership == 1:\n break\n if boxi_isused == 0:\n components.append([ftboxes[i]])\n unused = list(range(len(components)))\n components1 = components[:]\n components2 = []\n while len(components1) != len(components2):\n for i in unused:\n for j in [j for j in list(range(i + 1, len(components))) if j in\n unused]:\n intersection_exists = False\n is_looping = True\n for boxi in components[i]:\n for boxj in components[j]:\n if d.boxes_intersection(boxi[:2], boxj[:2]) != []:\n is_looping = False\n intersection_exists = True\n break\n if is_looping == False:\n break\n if intersection_exists == True:\n components[i] += components[j]\n unused.remove(j)\n components2 = components1[:]\n components1 = [components[k] for k in unused]\n return components1\n\n\n<function token>\n\n\ndef detecting_nodes(boxes, B, f, X, eps):\n mixes_boxes = [[1, box] for box in boxes[0]] + [[0, box] for box in\n boxes[1]]\n ftboxes = [[box[0], [d.ftconstructor(boxi[0], boxi[1]) for boxi in box[\n 1]]] for box in mixes_boxes]\n nodes_lifting = []\n used = []\n P = [Pi.replace('\\n', '') for Pi in open(f, 'r').readlines()]\n for i in range(len(ftboxes)):\n for j in range(i + 1, len(ftboxes)):\n Mariam_ft = d.boxes_intersection(ftboxes[i][1], ftboxes[j][1])\n Mariam = [[float(Bi.lower()), float(Bi.upper())] for Bi in\n Mariam_ft]\n if Mariam == [] and d.boxes_intersection(ftboxes[i][1][:2],\n ftboxes[j][1][:2]) or Mariam != [] and enclosing_curve(f,\n Mariam, X, eps_max=0.1) == [[], []]:\n if i not in used:\n used.append(i)\n nodes_lifting.append(ftboxes[i])\n if j not in used:\n used.append(j)\n nodes_lifting.append(ftboxes[j])\n components = planner_connected_compnants(nodes_lifting)\n cer_components = []\n uncer_components = []\n component_normal = []\n for component in components:\n boxes_component = [box[1] for box in component]\n component_normal = [[[float(Bi.lower()), float(Bi.upper())] for Bi in\n box[1]] for box in component]\n if 0 not in [box[0] for box in component] and eval_file_gen(f,\n component_normal, X) == '[]\\n':\n cer_components.append(boxes_component)\n else:\n uncer_components.append(boxes_component)\n return [cer_components, uncer_components]\n\n\ndef intersect_in_2D(class1, class2, monotonicity=1):\n pl_intesected_pairs = []\n if monotonicity == 1:\n for i in range(len(class1)):\n for j in range(len(class2)):\n if d.boxes_intersection(class1[i][:2], class2[j][:2]) != [\n ] and d.boxes_intersection(class1[i], class2[j]) == []:\n if [class2[j], class1[i]] not in pl_intesected_pairs:\n pl_intesected_pairs.append([class1[i], class2[j]])\n elif monotonicity == 0:\n for i in range(len(class1)):\n for j in range(len(class2)):\n if d.boxes_intersection(class1[i][:2], class2[j][:2]) != []:\n if [class2[j], class1[i]] not in pl_intesected_pairs:\n pl_intesected_pairs.append([class1[i], class2[j]])\n elif monotonicity == 2:\n inters_indic = []\n for i in range(len(class1)):\n inters_indic.append([])\n for j in range(len(class2)):\n if d.boxes_intersection(class1[i][:2], class2[j][:2]) != []:\n inters_indic[i] = inters_indic[i] + [j]\n for k in range(len(class1)):\n if len(inters_indic[k]) > 3:\n for j in range(len(inters_indic[k])):\n if [class2[j], class1[k]] not in pl_intesected_pairs:\n pl_intesected_pairs.append([class1[k], class2[j]])\n return pl_intesected_pairs\n\n\ndef solving_fornodes(equations, boxes, B, X, eps=0.1):\n plane_components = detecting_nodes(boxes, B, equations, X, eps)\n g = open(equations, 'r')\n P = [Pi.replace('\\n', '') for Pi in g.readlines()]\n Ball_solutions = []\n for plane_component in plane_components:\n x1 = float(min([ai[0].lower() for ai in plane_component]))\n x2 = float(max([ai[0].upper() for ai in plane_component]))\n y1 = float(min([ai[1].lower() for ai in plane_component]))\n y2 = float(max([ai[1].upper() for ai in plane_component]))\n components = connected_compnants(plane_component)\n r = [[float(ri[0]), float(ri[1])] for ri in estimating_r(components)]\n t = estimating_t(components)\n t = [float(t[0]), float(t[1])]\n B_Ball = [[x1, x2], [y1, y2]] + r + [t]\n Ball_generating_system(P, B_Ball, X)\n solutionsi = ibex_output(P, B_Ball, X)\n Ball_solutions += solutionsi\n return Ball_solutions\n\n\ndef normal_subdivision(B):\n ft_B = d.subdivide([d.ftconstructor(Bi[0], Bi[1]) for Bi in B[:]])\n return [d.ft_normal(Bi) for Bi in ft_B]\n\n\ndef plane_subdivision(B):\n ft_B2 = d.subdivide([d.ftconstructor(Bi[0], Bi[1]) for Bi in B[:2]])\n normal_B2 = [d.ft_normal(Bi) for Bi in ft_B2]\n return d.cartesian_product(normal_B2, [B[2:]])\n\n\ndef system_generator(f, B, X):\n g = open(f, 'r')\n L = g.readlines()\n g.close()\n f = open('eq.txt', 'w+')\n f.write('Variables \\n')\n for i in range(len(X)):\n f.write(str(X[i]) + ' in ' + str(B[i]) + ' ; \\n')\n f.write('Constraints \\n')\n for Li in L:\n f.write(Li.replace('\\n', '') + '=0; \\n')\n f.write('end ')\n f.close()\n return f\n\n\n<function token>\n\n\ndef computing_boxes():\n if 'infeasible' in open('output.txt', 'r').read():\n return 'Empty'\n content = open('output.txt', 'r').readlines()\n cer = []\n uncer = []\n i = 0\n Answer = []\n for fi in content:\n try:\n a = fi.index('(')\n b = fi.index(')')\n T = fi[a:b + 1].replace('(', '[')\n T = fi[a:b + 1].replace('(', '[')\n T = T.replace(')', ']')\n T = T.split(';')\n E = []\n i = 0\n for Ti in T:\n Ti = Ti.replace('[', '')\n Ti = Ti.replace(']', '')\n Ti = Ti.replace('<', '')\n Ti = Ti.replace('>', '')\n x = Ti.index(',')\n a = float(Ti[:x])\n b = float(Ti[x + 1:])\n E.append([])\n E[i] = [a, b]\n i += 1\n if 'solution n' in fi or 'boundary n' in fi:\n cer.append(E)\n elif 'unknown n' in fi:\n uncer.append(E)\n except ValueError:\n pass\n return [cer, uncer]\n\n\ndef enclosing_curve(system, B, X, eps_min=0.1, eps_max=0.1):\n L = [B]\n certified_boxes = []\n uncertified_boxes = []\n while len(L) != 0:\n system_generator(system, L[0], X)\n os.system('ibexsolve --eps-max=' + str(eps_max) + ' --eps-min=' +\n str(eps_min) + ' -s eq.txt > output.txt')\n content = open('output.txt', 'r').readlines()\n ibex_output = computing_boxes()\n if ibex_output == [[], []] and max([(Bi[1] - Bi[0]) for Bi in L[0]]\n ) < eps_min:\n uncertified_boxes.append(L[0])\n L.remove(L[0])\n elif ibex_output == [[], []]:\n children = plane_subdivision(L[0])\n L.remove(L[0])\n L += children\n elif ibex_output == 'Empty':\n L.remove(L[0])\n else:\n if len(ibex_output[0]) != 0:\n certified_boxes += ibex_output[0]\n if len(ibex_output[1]) != 0:\n uncertified_boxes += ibex_output[1]\n L.remove(L[0])\n return [certified_boxes, uncertified_boxes]\n\n\ndef loopsfree_checker(f, certified_boxes, uncer_boxes, P):\n L = eval_file_gen(f, certified_boxes, X)\n while L.replace('\\n', '') != '[]':\n L = L.replace('[', '')\n L = L.replace(']', '')\n L = L.replace('\\n', '')\n L = L.split(',')\n for i in L:\n children = normal_subdivision(certified_boxes[int(i)])\n certified_boxes.remove(certified_boxes[int(i)])\n for child in children:\n cer_children, uncer_children = enclosing_curve(f, child, X)\n certified_boxes += cer_children\n uncer_boxes += uncer_children\n L = eval_file_gen(f, certified_boxes, X)\n return L\n\n\ndef eval_file_gen(f, boxes, X, special_function=[]):\n functions = ['sin', 'cos', 'tan', 'exp'] + special_function\n if len(boxes[0]) == 0:\n return []\n n = len(boxes[0])\n m = len(boxes)\n g = open(f, 'r')\n P_str = g.readlines()\n P_str = [Pi.replace('\\n', '') for Pi in P_str]\n P_str = [Pi.replace('^', '**') for Pi in P_str]\n P_exp = [parse_expr(Pi) for Pi in P_str]\n jac = sp.Matrix(P_str).jacobian(sp.Matrix(X))\n minor1 = jac[:, 1:].det()\n minor2 = jac[:, [i for i in range(n) if i != 1]].det()\n fil = open('evaluation_file1.py', 'w')\n fil.write('import flint as ft \\n')\n fil.write('import sympy as sp \\n')\n fil.write('import interval_arithmetic as d \\n')\n fil.write('boxes=' + str(boxes) + '\\n')\n fil.write(\n 'ftboxes=[ [d.ftconstructor(Bi[0],Bi[1]) for Bi in B ] for B in boxes ] \\n'\n )\n fil.write('n=len(boxes[0])\\n')\n fil.write('m=len(boxes)\\n')\n fil.write('m1=[]\\n')\n fil.write('m2=[]\\n')\n minor1_str = str(minor1)\n minor2_str = str(minor2)\n for i in range(n):\n minor1_str = minor1_str.replace('x' + str(i + 1), 'B[' + str(i) + ']')\n minor2_str = minor2_str.replace('x' + str(i + 1), 'B[' + str(i) + ']')\n for func in functions:\n minor1_str = minor1_str.replace(func, 'ft.arb.' + func)\n minor2_str = minor2_str.replace(func, 'ft.arb.' + func)\n fil.write('for B in ftboxes: \\n')\n fil.write(' m1.append(ft.arb(' + minor1_str + ')) \\n')\n fil.write(' m2.append( ft.arb(' + minor2_str + ')) \\n')\n fil.write(\n 'innrer_loops=[i for i in range(m) if 0 in m1[i] and 0 in m2[i] ]\\n')\n fil.write('print(innrer_loops)\\n')\n fil.close()\n t = os.popen('python3 evaluation_file1.py ').read()\n return t\n\n\ndef boxes_classifier(system, boxes, X, special_function=[]):\n if len(boxes[0]) == 0:\n return [[], [], boxes[1]]\n certified_boxes, uncer_boxes = boxes\n L = eval_file_gen(system, certified_boxes, X)\n if L == []:\n return [[], [], uncer_boxes]\n it = 0\n L = L.replace('[', '')\n L = L.replace(']', '')\n L = L.replace('\\n', '')\n L = L.split(',')\n if L != ['']:\n L = [int(li) for li in L]\n return [[certified_boxes[i] for i in range(len(certified_boxes)) if\n i not in L], [certified_boxes[i] for i in L], uncer_boxes]\n else:\n return [[certified_boxes[i] for i in range(len(certified_boxes)) if\n i not in L], [], uncer_boxes]\n\n\ndef projection_checker(solutions):\n if len(solutions) == 0:\n return [[], []]\n m = len(solutions[0])\n n = int((m + 1) / 2)\n intersect_in2d = [[]] * len(solutions)\n for i in range(len(solutions) - 1):\n for j in range(i + 1, len(solutions)):\n if solutions[i] == solutions[j]:\n continue\n elif d.boxes_intersection(solutions[i][:2], solutions[j][:2]) != [\n ] and (d.boxes_intersection(solutions[i][n:2 * n - 2], [[-\n Bi[1], -Bi[0]] for Bi in solutions[j][n:2 * n - 2]]) == [] and\n d.boxes_intersection(solutions[i][n:2 * n - 2], [[Bi[0], Bi\n [1]] for Bi in solutions[j][n:2 * n - 2]]) == [] or d.\n boxes_intersection(solutions[i][2:n] + [solutions[i][2 * n -\n 2]], solutions[j][2:n] + [solutions[j][2 * n - 2]]) == []):\n intersect_in2d[i] = intersect_in2d[i] + [j]\n accepted = []\n acc_ind = []\n unaccepted = []\n unacc_ind = []\n for i in range(len(solutions)):\n if len(intersect_in2d[i]) == 0 and i not in unacc_ind + acc_ind:\n accepted.append(solutions[i])\n acc_ind.append(i)\n continue\n elif i not in unacc_ind + acc_ind:\n unaccepted.append(solutions[i])\n unacc_ind.append(i)\n for k in intersect_in2d[i]:\n if k not in unacc_ind:\n unaccepted.append(solutions[k])\n unacc_ind.append(k)\n return [accepted, unaccepted]\n\n\ndef Ball_given_2nboxes(system, X, B1, B2, monotonicity_B1=1, monotonicity_B2=1\n ):\n B1_ft = [d.ftconstructor(Bi[0], Bi[1]) for Bi in B1]\n B2_ft = [d.ftconstructor(Bi[0], Bi[1]) for Bi in B2]\n P = [Pi.replace('\\n', '') for Pi in open(system, 'r').readlines()]\n sol = 'Empty'\n if d.boxes_intersection(B1_ft, B2_ft) == [\n ] and monotonicity_B1 == monotonicity_B2 == 1:\n t = estimating_t([[B1_ft], [B2_ft]])\n y_and_r = estimating_yandr([[B1_ft], [B2_ft]])\n intersec_B1B2_in2d = d.boxes_intersection(B1_ft[:2], B2_ft[:2])\n intersec_B1B2_in2d = [[float(Bi.lower()), float(Bi.upper())] for Bi in\n intersec_B1B2_in2d]\n B_Ball = intersec_B1B2_in2d + y_and_r + [t]\n Ball_node_gen(system, B_Ball, X)\n os.system('ibexsolve --eps-max=0.1 -s eq.txt > output.txt')\n sol = computing_boxes()\n return sol\n\n\ndef all_pairs_oflist(L):\n pairs = []\n for i in range(len(L) - 1):\n for j in range(i + 1, len(L)):\n pairs.append([L[i], L[j]])\n return pairs\n\n\ndef checking_assumptions(curve_data):\n if len(curve_data[0][1]) != 0:\n return 0\n Ball_sols_ft = [[d.ftconstructor(Bi[0], Bi[1]) for Bi in B] for B in\n curve_data[1][0]] + [[d.ftconstructor(Bi[0], Bi[1]) for Bi in B] for\n B in curve_data[1][1]]\n alph3 = assum_alph3_checker(Ball_sols_ft)\n if alph3 == 1:\n return 1\n else:\n return 0\n\n\ndef csv_saver(L, type_L='Ball'):\n dic = []\n if type_L == 'Ball':\n n = int((len(L[0]) + 1) / 2)\n for j in range(len(L)):\n dic.append({})\n for i in range(n):\n dic[j]['x' + str(i + 1)] = L[j][i]\n for i in range(n, 2 * n - 2):\n dic[j]['r' + str(i + 3 - n)] = L[j][i]\n dic[j]['t'] = L[j][2 * n - 2]\n return dic\n\n\ndef dict2csv(dictlist, csvfile):\n \"\"\"\n Takes a list of dictionaries as input and outputs a CSV file.\n \"\"\"\n f = open(csvfile, 'wb')\n fieldnames = dictlist[0].keys()\n csvwriter = csv.DictWriter(f, delimiter=',', fieldnames=fieldnames)\n csvwriter.writerow(dict((fn, fn) for fn in fieldnames))\n for row in dictlist:\n csvwriter.writerow(row)\n fn.close()\n\n\ndef assum_alph3_checker(solutions):\n comparing_list = [[]] * len(solutions)\n for i in range(len(solutions) - 1):\n for j in range(i + 1, len(solutions)):\n if d.boxes_intersection(solutions[i][:2], solutions[j][:2]) != []:\n comparing_list[i].append(j)\n comparing_list[j].append(i)\n matching = [len(T) for T in comparing_list]\n if max(matching) <= 2:\n return 1\n else:\n return 0\n\n\ndef plotting_3D(boxes, Box, var=[0, 1, 2]):\n ax = plt.figure().add_subplot(111, projection='3d')\n ax.set_xlim(Box[0][0], Box[0][1])\n ax.set_ylim(Box[1][0], Box[1][1])\n ax.set_zlim(Box[2][0], Box[2][1])\n ax.set_xlabel('x' + str(var[0] + 1))\n ax.set_ylabel('x' + str(var[1] + 1))\n ax.set_zlabel('x' + str(var[2] + 1))\n for box in boxes:\n V = [[box[j][0] for j in range(3)], [box[j][1] for j in range(3)]]\n points = list(itertools.product(*box))\n faces = [[points[0], points[2], points[6], points[4]], [points[0],\n points[2], points[3], points[1]], [points[0], points[1], points\n [5], points[4]], [points[2], points[3], points[7], points[6]],\n [points[1], points[3], points[7], points[5]]]\n ax.add_collection3d(Poly3DCollection(faces, facecolors='green',\n linewidths=1, edgecolors='green', alpha=0.25))\n plt.show()\n\n\ndef enclosing_singularities(system, boxes, B, X, eps_max=0.1, eps_min=0.01):\n combin = []\n ball = []\n start_combin = time.time()\n n = len(B)\n P = [Pi.replace('\\n', '') for Pi in open(system, 'r').readlines()]\n certified_boxes, uncertified_boxes = boxes\n classes = boxes_classifier(system, boxes, X, special_function=[])\n cer_Solutions = []\n uncer_Solutions = []\n H = []\n mon_mid = [[(0.5 * (Bij[1] + Bij[0])) for Bij in Bi[:2]] for Bi in\n classes[0]]\n mon_rad = [max([(0.5 * (Bij[1] - Bij[0])) for Bij in Bi[:2]]) for Bi in\n classes[0]]\n tree = spatial.KDTree(mon_mid)\n intersting_boxes = [tree.query_ball_point(m, r=math.sqrt(2) * r) for m,\n r in zip(mon_mid, mon_rad)]\n \"\"\"for i in range(len(ball)): \n for j in ball[i]:\n if i not in ball[j]:\n ball[j].append(i)\"\"\"\n intersting_boxes = [indi for indi in intersting_boxes if len(indi) > 3]\n discarded_components = []\n for i in range(len(intersting_boxes) - 1):\n for_i_stop = 0\n boxi_set = set(intersting_boxes[i])\n for j in range(i + 1, len(intersting_boxes)):\n boxj_set = set(intersting_boxes[j])\n if boxj_set.issubset(boxi_set):\n discarded_components.append(j)\n elif boxi_set < boxj_set:\n discarded_components.append(i)\n intersting_boxes = [intersting_boxes[i] for i in range(len(\n intersting_boxes)) if i not in discarded_components]\n interesting_boxes_flattened = []\n for Box_ind in intersting_boxes:\n for j in Box_ind:\n if j not in interesting_boxes_flattened:\n interesting_boxes_flattened.append(j)\n plane_components = planner_connected_compnants([classes[0][i] for i in\n interesting_boxes_flattened])\n end_combin = time.time()\n combin.append(end_combin - start_combin)\n H = []\n for plane_component in plane_components:\n if len(plane_component) > 1:\n start_combin = time.time()\n components = connected_compnants(plane_component)\n pairs_of_branches = all_pairs_oflist(components)\n end_combin = time.time()\n combin.append(end_combin - start_combin)\n for pair_branches in pairs_of_branches:\n start_ball = time.time()\n all_boxes = pair_branches[0] + pair_branches[1]\n uni = []\n for box in all_boxes:\n uni = d.box_union(uni, box)\n t = estimating_t(pair_branches)\n t1 = d.ftconstructor(t[0], t[1])\n t = [float(t1.lower()), float(t1.upper())]\n r = [[float(ri[0]), float(ri[1])] for ri in\n estimating_yandr(pair_branches)]\n B_Ball = uni[:2] + r + [t]\n cusp_Ball_solver(P, B_Ball, X)\n Ball_generating_system(P, B_Ball, X, eps_min)\n os.system('ibexsolve --eps-max=' + str(eps_max) +\n ' --eps-min=' + str(eps_min) + ' -s eq.txt > output.txt'\n )\n Solutions = computing_boxes()\n if Solutions != 'Empty' and Solutions != [[], []]:\n cer_Solutions += Solutions[0]\n uncer_Solutions += Solutions[1]\n if Solutions == [[], []]:\n if d.width(B_Ball[:2]) > eps_min:\n new_B = B_Ball[:2] + B[2:n]\n new_boxes = enclosing_curve(system, new_B, X,\n eps_max=0.1 * eps_max)\n resul = enclosing_singularities(system, new_boxes,\n new_B, X, eps_max=0.1 * eps_max)\n cer_Solutions += resul[0] + resul[1]\n uncer_Solutions += resul[2]\n boxes[1] += new_boxes[1]\n else:\n uncer_Solutions.append(B_Ball)\n end_ball = time.time()\n ball.append(end_ball - start_ball)\n start_combin = time.time()\n checked_boxes = []\n all_boxes = boxes[0] + boxes[1]\n checked_boxes = []\n mon_mid_cusp = [[(0.5 * (Bij[1] + Bij[0])) for Bij in Bi[:2]] for Bi in\n classes[1]]\n mon_rad_cusp = [max([(0.5 * (Bij[1] - Bij[0])) for Bij in Bi[:2]]) for\n Bi in classes[1]]\n potential_cusps = [tree.query_ball_point(m, r=math.sqrt(2) * (r +\n eps_max)) for m, r in zip(mon_mid_cusp, mon_rad_cusp)]\n end_combin = time.time()\n combin.append(end_combin - start_combin)\n for cusp_indx in range(len(classes[1])):\n start_combin = time.time()\n intersecting_boxes = [all_boxes[i] for i in potential_cusps[\n cusp_indx] if d.boxes_intersection(all_boxes[i], classes[1][\n cusp_indx]) != []]\n H = []\n uni = classes[1][cusp_indx][:]\n potential_cusp = classes[1][cusp_indx][:]\n checked_boxes.append(potential_cusp)\n for box in intersecting_boxes:\n if box in checked_boxes:\n continue\n uni = d.box_union(uni, box)\n checked_boxes.append(box)\n end_combin = time.time()\n combin.append(end_combin - start_combin)\n start_ball = time.time()\n t = estimating_t([[potential_cusp], [potential_cusp]])\n \"\"\"if t[1]-t[0] < 1e-07:\n t[0]=t[0]-0.5 * eps_min\n t[1]=t[1]+0.5 * eps_min\"\"\"\n B_Ball = uni + [[-1.01, 1.01]] * (n - 2) + [t]\n H.append(B_Ball)\n sol = cusp_Ball_solver(P, B_Ball, X)\n if sol != 'Empty' and sol != [[], []]:\n cer_Solutions += sol[0]\n uncer_Solutions += sol[1]\n if sol == [[], []]:\n uncer_Solutions.append(B_Ball)\n end_ball = time.time()\n ball.append(end_ball - start_ball)\n start_combin = time.time()\n non_intersecting_boxes = [all_boxes[i] for i in potential_cusps[\n cusp_indx] if d.boxes_intersection(all_boxes[i], classes[1][\n cusp_indx]) == []]\n end_combin = time.time()\n combin.append(end_combin - start_combin)\n for aligned in non_intersecting_boxes:\n start_ball = time.time()\n if aligned in checked_boxes:\n continue\n boxes_intersect_aligned = [B for B in non_intersecting_boxes if\n d.boxes_intersection(aligned, B) != []]\n uni = aligned[:]\n for boxi in boxes_intersect_aligned:\n if boxi in checked_boxes:\n continue\n uni = d.box_union(uni, boxi)\n checked_boxes.append(boxi)\n t = estimating_t([[potential_cusp], [uni]])\n \"\"\"if t[1]-t[0] < 1e-07:\n t[0]=t[0]-0.5 * eps_min\n t[1]=t[1]+0.5 * eps_min\"\"\"\n r = [[float(ri[0]), float(ri[1])] for ri in estimating_yandr([[\n potential_cusp], [uni]])]\n B_Ball = potential_cusp[:2] + r + [t]\n H.append(H)\n Ball_generating_system(P, B_Ball, X)\n os.system('ibexsolve --eps-max=' + str(eps_max) +\n ' --eps-min=' + str(eps_min) + ' -s eq.txt > output.txt')\n Solutions = computing_boxes()\n if Solutions != 'Empty':\n cer_Solutions += Solutions[0]\n uncer_Solutions += Solutions[1]\n elif Solutions == [[], []]:\n uncer_Solutions.append(B_Ball)\n end_ball = time.time()\n ball.append(end_ball - start_ball)\n nodes = []\n cups_or_smallnodes = []\n start_combin = time.time()\n checker = projection_checker(cer_Solutions)\n uncer_Solutions = uncer_Solutions + checker[1]\n cer_Solutions = [Bi for Bi in checker[0] if Bi[2 * n - 2][1] >= 0]\n for solution in cer_Solutions:\n if 0 >= solution[2 * n - 2][0] and 0 <= solution[2 * n - 2][1]:\n cups_or_smallnodes.append(solution)\n else:\n nodes.append(solution)\n end_combin = time.time()\n combin.append(end_combin - start_combin)\n print('KDtree ', sum(combin), 'Ball ', sum(ball))\n return [nodes, cups_or_smallnodes, uncer_Solutions]\n\n\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<docstring token>\n",
"<import token>\n\n\ndef ploting_boxes(boxes, uncer_boxes, var=[0, 1], B=[[-20, 20], [-20, 20]],\n x=0.1, nodes=[], cusps=[], uncer_Solutions=[], Legend=False, color=\n 'green', variabel_name='x'):\n fig, ax = plt.subplots()\n ax.set_xlim(B[0][0], B[0][1])\n ax.set_ylim(B[1][0], B[1][1])\n ax.set_xlabel(variabel_name + str(1))\n ax.set_ylabel(variabel_name + str(2))\n \"\"\"try:\n ax.title(open(\"system.txt\",\"r\").read())\n except:\n pass\"\"\"\n c = 0\n green_patch = mpatches.Patch(color=color, label='smooth part')\n red_patch = mpatches.Patch(color='red', label='unknown part')\n node_patch = mpatches.Patch(color='black', label='Certified nodes',\n fill=None)\n cusp_patch = mpatches.Patch(color='blue', label=\n 'Projection of certified solution with t=0 ', fill=None)\n if Legend == True:\n plt.legend(handles=[green_patch, red_patch, node_patch, cusp_patch])\n for box in boxes:\n rectangle = plt.Rectangle((box[var[0]][0], box[var[1]][0]), box[var\n [0]][1] - box[var[0]][0], box[var[1]][1] - box[var[1]][0],\n color=color)\n plt.gca().add_patch(rectangle)\n for box in uncer_boxes:\n rectangle = plt.Rectangle((box[var[0]][0], box[var[1]][0]), box[var\n [0]][1] - box[var[0]][0], box[var[1]][1] - box[var[1]][0], fc='r')\n plt.gca().add_patch(rectangle)\n for box in nodes:\n rectangle = plt.Rectangle((box[0][0] - x, box[1][0] - x), 2 * x +\n box[0][1] - box[0][0], 2 * x + box[1][1] - box[1][0], fc='y',\n fill=None)\n plt.gca().add_patch(rectangle)\n for box in cusps:\n rectangle = plt.Rectangle((box[0][0] - x, box[1][0] - x), 2 * x +\n box[0][1] - box[0][0], 2 * x + box[1][1] - box[1][0], fc='y',\n color='blue', fill=None)\n plt.gca().add_patch(rectangle)\n for box in uncer_Solutions:\n rectangle = plt.Rectangle((box[0][0] - x, box[1][0] - x), 2 * x +\n box[0][1] - box[0][0], 2 * x + box[1][1] - box[1][0], fc='y',\n color='red', fill=None)\n plt.gca().add_patch(rectangle)\n plt.savefig('fig.jpg', dpi=1000)\n plt.show()\n\n\ndef Ball_node_gen(equations, B_Ball, X):\n P = open(equations, 'r').readlines()\n P = [Pi.replace('\\n', '') for Pi in P]\n n = len(X)\n V = ' Variables \\n '\n for i in range(n):\n V += 'x' + str(i + 1) + ' in ' + str(B_Ball[i]) + ' ; \\n'\n for i in range(n, 2 * n - 2):\n V += 'r' + str(i - n + 3) + ' in ' + str(B_Ball[i]) + ' ; \\n'\n V += 't' + ' in ' + str(B_Ball[2 * n - 2]) + ' ; \\n'\n V += 'Constraints \\n'\n for Pi in P:\n V += SDP_str(Pi, X)[0]\n V += SDP_str(Pi, X)[1]\n last_eq = ''\n for i in range(3, n):\n last_eq += 'r' + str(i) + '^2+'\n last_eq += 'r' + str(n) + '^2 -1=0;'\n V += last_eq + '\\n'\n f = open('eq.txt', 'w+')\n f.write(V)\n f.write('end')\n f.close()\n\n\ndef Ball_solver(equations, B_Ball, X):\n L = [B_Ball]\n certified_boxes = []\n uncertified_boxes = []\n n = len(X)\n while len(L) != 0:\n solvability = 1\n if B_Ball[2 * n - 2][0] <= 0 <= B_Ball[2 * n - 2][1] and d.width([d\n .ftconstructor(Bi[0], Bi[1]) for Bi in L[0]]) < 0.1:\n Ball_cusp_gen(equations, B_Ball, X)\n elif (B_Ball[2 * n - 2][0] > 0 or 0 > B_Ball[2 * n - 2][1]\n ) and d.width([d.ftconstructor(Bi[0], Bi[1]) for Bi in L[0]]\n ) < 0.1:\n Ball_node_gen(equations, B_Ball, X)\n else:\n children = cb.plane_subdivision(L[0])\n L.remove(L[0])\n L += children\n solvability = 0\n if solvability == 1:\n ibex_output = cb.solving_with_ibex()\n if ibex_output[0] == 'Empty':\n L.remove(L[0])\n elif len(ibex_output[0]) != 0:\n certified_boxes += cb.computing_boxes(ibex_output[0])\n L.remove(L[0])\n elif len(ibex_output[1]) != 0:\n uncertified_boxes += cb.computing_boxes(ibex_output[1])\n L.remove(L[0])\n else:\n children = cb.plane_subdivision(L[0])\n L.remove(L[0])\n L += children\n return [certified_boxes, uncertified_boxes]\n\n\ndef SDP_str(P, X):\n n = len(X)\n P_pluse = P[:]\n P_minus = P[:]\n for i in range(2, n):\n P_pluse = P_pluse.replace('x' + str(i + 1), '(x' + str(i + 1) +\n '+ r' + str(i + 1) + '*sqrt(t))')\n P_minus = P_minus.replace('x' + str(i + 1), '(x' + str(i + 1) +\n '- r' + str(i + 1) + '*sqrt(t))')\n SP = '0.5*(' + P_pluse + '+' + P_minus + ')=0; \\n'\n DP = '0.5*(' + P_pluse + '- (' + P_minus + ') )/(sqrt(t))=0; \\n'\n return [SP, DP]\n\n\ndef Ball_generating_system(P, B_Ball, X, eps_min=0.001):\n n = len(X)\n V = ' Variables \\n '\n for i in range(n):\n if B_Ball[i][0] != B_Ball[i][1]:\n V += 'x' + str(i + 1) + ' in ' + str(B_Ball[i]) + ' ; \\n'\n else:\n V += 'x' + str(i + 1) + ' in ' + str([B_Ball[i][0] - eps_min, \n B_Ball[i][1] + eps_min]) + ' ; \\n'\n for i in range(n, 2 * n - 2):\n V += 'r' + str(i - n + 3) + ' in ' + str(B_Ball[i]) + ' ; \\n'\n V += 't' + ' in ' + str(B_Ball[2 * n - 2]) + ' ; \\n'\n V += 'Constraints \\n'\n for Pi in P:\n V += SDP_str(Pi, X)[0]\n V += SDP_str(Pi, X)[1]\n last_eq = ''\n for i in range(3, n):\n last_eq += 'r' + str(i) + '^2+'\n last_eq += 'r' + str(n) + '^2 -1=0;'\n V += last_eq + '\\n'\n f = open('eq.txt', 'w+')\n f.write(V)\n f.write('end')\n f.close()\n\n\n<function token>\n\n\ndef intersting_boxes(curve, b):\n cer_intersting_boxes = []\n uncer_intersting_boxes = []\n for box in curve[0]:\n if b[0][0] <= box[0][0] <= box[0][1] <= b[0][1] and b[1][0] <= box[1][0\n ] <= box[1][1] <= b[1][1]:\n cer_intersting_boxes.append(box)\n for box in curve[1]:\n if b[0][0] <= box[0][0] <= box[0][1] <= b[0][1] and b[1][0] <= box[1][0\n ] <= box[1][1] <= b[1][1]:\n uncer_intersting_boxes.append(box)\n return [cer_intersting_boxes, uncer_intersting_boxes]\n\n\ndef ibex_output(P, B, X):\n os.system('ibexsolve --eps-max=0.1 -s eq.txt > output.txt')\n g = open('output.txt', 'r')\n result = g.readlines()\n T = computing_boxes(result)\n return T\n\n\ndef estimating_t1(components, upper_bound=200000):\n t1 = upper_bound\n t2 = 0\n for box1 in components[0]:\n for box2 in components[1]:\n a = d.distance(box1, box2).lower()\n b = d.distance(box1, box2).upper()\n if t1 > a:\n t1 = a\n if t2 < b:\n t2 = b\n t = d.ftconstructor(t1, t2)\n t = 0.25 * d.power_interval(t, 2)\n return [float(t.lower()), float(t.upper())]\n\n\ndef estimating_t(components, upper_bound=19000.8):\n t1 = upper_bound\n t2 = 0\n for box1 in components[0]:\n for box2 in components[1]:\n a = d.distance(box1[2:], box2[2:])\n if t1 > a[0]:\n t1 = a[0]\n if t2 < a[1]:\n t2 = a[1]\n t = d.ftconstructor(t1, t2)\n t = 0.25 * d.power_interval(t, 2)\n return [float(t.lower()), float(t.upper())]\n\n\ndef boxes_compare(box1, box2):\n flage = 0\n for i in range(len(box1) - 1, -1, -1):\n if box1[i][0] > box2[i][0]:\n return 1\n if box1[i][0] < box2[i][0]:\n return -1\n return 0\n\n\ndef boxes_sort(boxes):\n sorted_boxes = boxes[:]\n for i in range(len(boxes) - 1):\n for j in range(i + 1, len(boxes)):\n if boxes_compare(sorted_boxes[i], sorted_boxes[j]) == 1:\n sorted_boxes[i], sorted_boxes[j] = sorted_boxes[j\n ], sorted_boxes[i]\n return sorted_boxes\n\n\ndef connected_compnants(boxes):\n ftboxes = boxes[:]\n components = [[ftboxes[0]]]\n for i in range(1, len(ftboxes)):\n boxi_isused = 0\n for j in range(len(components)):\n membership = 0\n for k in range(len(components[j])):\n if d.boxes_intersection(ftboxes[i], components[j][k]) != []:\n components[j].append(ftboxes[i])\n membership = 1\n boxi_isused = 1\n break\n if membership == 1:\n break\n if boxi_isused == 0:\n components.append([ftboxes[i]])\n unused = list(range(len(components)))\n components1 = components[:]\n components2 = []\n while len(components1) != len(components2):\n for i in unused:\n for j in [j for j in list(range(i + 1, len(components))) if j in\n unused]:\n intersection_exists = False\n is_looping = True\n for boxi in components[i]:\n for boxj in components[j]:\n if d.boxes_intersection(boxi, boxj) != []:\n is_looping = False\n intersection_exists = True\n break\n if is_looping == False:\n break\n if intersection_exists == True:\n components[i] += components[j]\n unused.remove(j)\n components2 = components1[:]\n components1 = [components[k] for k in unused]\n return components1\n\n\ndef planner_connected_compnants(boxes):\n if len(boxes) == 0:\n return []\n ftboxes = boxes[:]\n components = [[ftboxes[0]]]\n for i in range(1, len(ftboxes)):\n boxi_isused = 0\n for j in range(len(components)):\n membership = 0\n for k in range(len(components[j])):\n if d.boxes_intersection(ftboxes[i][:2], components[j][k][:2]\n ) != []:\n components[j].append(ftboxes[i])\n membership = 1\n boxi_isused = 1\n break\n if membership == 1:\n break\n if boxi_isused == 0:\n components.append([ftboxes[i]])\n unused = list(range(len(components)))\n components1 = components[:]\n components2 = []\n while len(components1) != len(components2):\n for i in unused:\n for j in [j for j in list(range(i + 1, len(components))) if j in\n unused]:\n intersection_exists = False\n is_looping = True\n for boxi in components[i]:\n for boxj in components[j]:\n if d.boxes_intersection(boxi[:2], boxj[:2]) != []:\n is_looping = False\n intersection_exists = True\n break\n if is_looping == False:\n break\n if intersection_exists == True:\n components[i] += components[j]\n unused.remove(j)\n components2 = components1[:]\n components1 = [components[k] for k in unused]\n return components1\n\n\n<function token>\n\n\ndef detecting_nodes(boxes, B, f, X, eps):\n mixes_boxes = [[1, box] for box in boxes[0]] + [[0, box] for box in\n boxes[1]]\n ftboxes = [[box[0], [d.ftconstructor(boxi[0], boxi[1]) for boxi in box[\n 1]]] for box in mixes_boxes]\n nodes_lifting = []\n used = []\n P = [Pi.replace('\\n', '') for Pi in open(f, 'r').readlines()]\n for i in range(len(ftboxes)):\n for j in range(i + 1, len(ftboxes)):\n Mariam_ft = d.boxes_intersection(ftboxes[i][1], ftboxes[j][1])\n Mariam = [[float(Bi.lower()), float(Bi.upper())] for Bi in\n Mariam_ft]\n if Mariam == [] and d.boxes_intersection(ftboxes[i][1][:2],\n ftboxes[j][1][:2]) or Mariam != [] and enclosing_curve(f,\n Mariam, X, eps_max=0.1) == [[], []]:\n if i not in used:\n used.append(i)\n nodes_lifting.append(ftboxes[i])\n if j not in used:\n used.append(j)\n nodes_lifting.append(ftboxes[j])\n components = planner_connected_compnants(nodes_lifting)\n cer_components = []\n uncer_components = []\n component_normal = []\n for component in components:\n boxes_component = [box[1] for box in component]\n component_normal = [[[float(Bi.lower()), float(Bi.upper())] for Bi in\n box[1]] for box in component]\n if 0 not in [box[0] for box in component] and eval_file_gen(f,\n component_normal, X) == '[]\\n':\n cer_components.append(boxes_component)\n else:\n uncer_components.append(boxes_component)\n return [cer_components, uncer_components]\n\n\ndef intersect_in_2D(class1, class2, monotonicity=1):\n pl_intesected_pairs = []\n if monotonicity == 1:\n for i in range(len(class1)):\n for j in range(len(class2)):\n if d.boxes_intersection(class1[i][:2], class2[j][:2]) != [\n ] and d.boxes_intersection(class1[i], class2[j]) == []:\n if [class2[j], class1[i]] not in pl_intesected_pairs:\n pl_intesected_pairs.append([class1[i], class2[j]])\n elif monotonicity == 0:\n for i in range(len(class1)):\n for j in range(len(class2)):\n if d.boxes_intersection(class1[i][:2], class2[j][:2]) != []:\n if [class2[j], class1[i]] not in pl_intesected_pairs:\n pl_intesected_pairs.append([class1[i], class2[j]])\n elif monotonicity == 2:\n inters_indic = []\n for i in range(len(class1)):\n inters_indic.append([])\n for j in range(len(class2)):\n if d.boxes_intersection(class1[i][:2], class2[j][:2]) != []:\n inters_indic[i] = inters_indic[i] + [j]\n for k in range(len(class1)):\n if len(inters_indic[k]) > 3:\n for j in range(len(inters_indic[k])):\n if [class2[j], class1[k]] not in pl_intesected_pairs:\n pl_intesected_pairs.append([class1[k], class2[j]])\n return pl_intesected_pairs\n\n\ndef solving_fornodes(equations, boxes, B, X, eps=0.1):\n plane_components = detecting_nodes(boxes, B, equations, X, eps)\n g = open(equations, 'r')\n P = [Pi.replace('\\n', '') for Pi in g.readlines()]\n Ball_solutions = []\n for plane_component in plane_components:\n x1 = float(min([ai[0].lower() for ai in plane_component]))\n x2 = float(max([ai[0].upper() for ai in plane_component]))\n y1 = float(min([ai[1].lower() for ai in plane_component]))\n y2 = float(max([ai[1].upper() for ai in plane_component]))\n components = connected_compnants(plane_component)\n r = [[float(ri[0]), float(ri[1])] for ri in estimating_r(components)]\n t = estimating_t(components)\n t = [float(t[0]), float(t[1])]\n B_Ball = [[x1, x2], [y1, y2]] + r + [t]\n Ball_generating_system(P, B_Ball, X)\n solutionsi = ibex_output(P, B_Ball, X)\n Ball_solutions += solutionsi\n return Ball_solutions\n\n\ndef normal_subdivision(B):\n ft_B = d.subdivide([d.ftconstructor(Bi[0], Bi[1]) for Bi in B[:]])\n return [d.ft_normal(Bi) for Bi in ft_B]\n\n\ndef plane_subdivision(B):\n ft_B2 = d.subdivide([d.ftconstructor(Bi[0], Bi[1]) for Bi in B[:2]])\n normal_B2 = [d.ft_normal(Bi) for Bi in ft_B2]\n return d.cartesian_product(normal_B2, [B[2:]])\n\n\ndef system_generator(f, B, X):\n g = open(f, 'r')\n L = g.readlines()\n g.close()\n f = open('eq.txt', 'w+')\n f.write('Variables \\n')\n for i in range(len(X)):\n f.write(str(X[i]) + ' in ' + str(B[i]) + ' ; \\n')\n f.write('Constraints \\n')\n for Li in L:\n f.write(Li.replace('\\n', '') + '=0; \\n')\n f.write('end ')\n f.close()\n return f\n\n\n<function token>\n\n\ndef computing_boxes():\n if 'infeasible' in open('output.txt', 'r').read():\n return 'Empty'\n content = open('output.txt', 'r').readlines()\n cer = []\n uncer = []\n i = 0\n Answer = []\n for fi in content:\n try:\n a = fi.index('(')\n b = fi.index(')')\n T = fi[a:b + 1].replace('(', '[')\n T = fi[a:b + 1].replace('(', '[')\n T = T.replace(')', ']')\n T = T.split(';')\n E = []\n i = 0\n for Ti in T:\n Ti = Ti.replace('[', '')\n Ti = Ti.replace(']', '')\n Ti = Ti.replace('<', '')\n Ti = Ti.replace('>', '')\n x = Ti.index(',')\n a = float(Ti[:x])\n b = float(Ti[x + 1:])\n E.append([])\n E[i] = [a, b]\n i += 1\n if 'solution n' in fi or 'boundary n' in fi:\n cer.append(E)\n elif 'unknown n' in fi:\n uncer.append(E)\n except ValueError:\n pass\n return [cer, uncer]\n\n\ndef enclosing_curve(system, B, X, eps_min=0.1, eps_max=0.1):\n L = [B]\n certified_boxes = []\n uncertified_boxes = []\n while len(L) != 0:\n system_generator(system, L[0], X)\n os.system('ibexsolve --eps-max=' + str(eps_max) + ' --eps-min=' +\n str(eps_min) + ' -s eq.txt > output.txt')\n content = open('output.txt', 'r').readlines()\n ibex_output = computing_boxes()\n if ibex_output == [[], []] and max([(Bi[1] - Bi[0]) for Bi in L[0]]\n ) < eps_min:\n uncertified_boxes.append(L[0])\n L.remove(L[0])\n elif ibex_output == [[], []]:\n children = plane_subdivision(L[0])\n L.remove(L[0])\n L += children\n elif ibex_output == 'Empty':\n L.remove(L[0])\n else:\n if len(ibex_output[0]) != 0:\n certified_boxes += ibex_output[0]\n if len(ibex_output[1]) != 0:\n uncertified_boxes += ibex_output[1]\n L.remove(L[0])\n return [certified_boxes, uncertified_boxes]\n\n\ndef loopsfree_checker(f, certified_boxes, uncer_boxes, P):\n L = eval_file_gen(f, certified_boxes, X)\n while L.replace('\\n', '') != '[]':\n L = L.replace('[', '')\n L = L.replace(']', '')\n L = L.replace('\\n', '')\n L = L.split(',')\n for i in L:\n children = normal_subdivision(certified_boxes[int(i)])\n certified_boxes.remove(certified_boxes[int(i)])\n for child in children:\n cer_children, uncer_children = enclosing_curve(f, child, X)\n certified_boxes += cer_children\n uncer_boxes += uncer_children\n L = eval_file_gen(f, certified_boxes, X)\n return L\n\n\ndef eval_file_gen(f, boxes, X, special_function=[]):\n functions = ['sin', 'cos', 'tan', 'exp'] + special_function\n if len(boxes[0]) == 0:\n return []\n n = len(boxes[0])\n m = len(boxes)\n g = open(f, 'r')\n P_str = g.readlines()\n P_str = [Pi.replace('\\n', '') for Pi in P_str]\n P_str = [Pi.replace('^', '**') for Pi in P_str]\n P_exp = [parse_expr(Pi) for Pi in P_str]\n jac = sp.Matrix(P_str).jacobian(sp.Matrix(X))\n minor1 = jac[:, 1:].det()\n minor2 = jac[:, [i for i in range(n) if i != 1]].det()\n fil = open('evaluation_file1.py', 'w')\n fil.write('import flint as ft \\n')\n fil.write('import sympy as sp \\n')\n fil.write('import interval_arithmetic as d \\n')\n fil.write('boxes=' + str(boxes) + '\\n')\n fil.write(\n 'ftboxes=[ [d.ftconstructor(Bi[0],Bi[1]) for Bi in B ] for B in boxes ] \\n'\n )\n fil.write('n=len(boxes[0])\\n')\n fil.write('m=len(boxes)\\n')\n fil.write('m1=[]\\n')\n fil.write('m2=[]\\n')\n minor1_str = str(minor1)\n minor2_str = str(minor2)\n for i in range(n):\n minor1_str = minor1_str.replace('x' + str(i + 1), 'B[' + str(i) + ']')\n minor2_str = minor2_str.replace('x' + str(i + 1), 'B[' + str(i) + ']')\n for func in functions:\n minor1_str = minor1_str.replace(func, 'ft.arb.' + func)\n minor2_str = minor2_str.replace(func, 'ft.arb.' + func)\n fil.write('for B in ftboxes: \\n')\n fil.write(' m1.append(ft.arb(' + minor1_str + ')) \\n')\n fil.write(' m2.append( ft.arb(' + minor2_str + ')) \\n')\n fil.write(\n 'innrer_loops=[i for i in range(m) if 0 in m1[i] and 0 in m2[i] ]\\n')\n fil.write('print(innrer_loops)\\n')\n fil.close()\n t = os.popen('python3 evaluation_file1.py ').read()\n return t\n\n\ndef boxes_classifier(system, boxes, X, special_function=[]):\n if len(boxes[0]) == 0:\n return [[], [], boxes[1]]\n certified_boxes, uncer_boxes = boxes\n L = eval_file_gen(system, certified_boxes, X)\n if L == []:\n return [[], [], uncer_boxes]\n it = 0\n L = L.replace('[', '')\n L = L.replace(']', '')\n L = L.replace('\\n', '')\n L = L.split(',')\n if L != ['']:\n L = [int(li) for li in L]\n return [[certified_boxes[i] for i in range(len(certified_boxes)) if\n i not in L], [certified_boxes[i] for i in L], uncer_boxes]\n else:\n return [[certified_boxes[i] for i in range(len(certified_boxes)) if\n i not in L], [], uncer_boxes]\n\n\ndef projection_checker(solutions):\n if len(solutions) == 0:\n return [[], []]\n m = len(solutions[0])\n n = int((m + 1) / 2)\n intersect_in2d = [[]] * len(solutions)\n for i in range(len(solutions) - 1):\n for j in range(i + 1, len(solutions)):\n if solutions[i] == solutions[j]:\n continue\n elif d.boxes_intersection(solutions[i][:2], solutions[j][:2]) != [\n ] and (d.boxes_intersection(solutions[i][n:2 * n - 2], [[-\n Bi[1], -Bi[0]] for Bi in solutions[j][n:2 * n - 2]]) == [] and\n d.boxes_intersection(solutions[i][n:2 * n - 2], [[Bi[0], Bi\n [1]] for Bi in solutions[j][n:2 * n - 2]]) == [] or d.\n boxes_intersection(solutions[i][2:n] + [solutions[i][2 * n -\n 2]], solutions[j][2:n] + [solutions[j][2 * n - 2]]) == []):\n intersect_in2d[i] = intersect_in2d[i] + [j]\n accepted = []\n acc_ind = []\n unaccepted = []\n unacc_ind = []\n for i in range(len(solutions)):\n if len(intersect_in2d[i]) == 0 and i not in unacc_ind + acc_ind:\n accepted.append(solutions[i])\n acc_ind.append(i)\n continue\n elif i not in unacc_ind + acc_ind:\n unaccepted.append(solutions[i])\n unacc_ind.append(i)\n for k in intersect_in2d[i]:\n if k not in unacc_ind:\n unaccepted.append(solutions[k])\n unacc_ind.append(k)\n return [accepted, unaccepted]\n\n\ndef Ball_given_2nboxes(system, X, B1, B2, monotonicity_B1=1, monotonicity_B2=1\n ):\n B1_ft = [d.ftconstructor(Bi[0], Bi[1]) for Bi in B1]\n B2_ft = [d.ftconstructor(Bi[0], Bi[1]) for Bi in B2]\n P = [Pi.replace('\\n', '') for Pi in open(system, 'r').readlines()]\n sol = 'Empty'\n if d.boxes_intersection(B1_ft, B2_ft) == [\n ] and monotonicity_B1 == monotonicity_B2 == 1:\n t = estimating_t([[B1_ft], [B2_ft]])\n y_and_r = estimating_yandr([[B1_ft], [B2_ft]])\n intersec_B1B2_in2d = d.boxes_intersection(B1_ft[:2], B2_ft[:2])\n intersec_B1B2_in2d = [[float(Bi.lower()), float(Bi.upper())] for Bi in\n intersec_B1B2_in2d]\n B_Ball = intersec_B1B2_in2d + y_and_r + [t]\n Ball_node_gen(system, B_Ball, X)\n os.system('ibexsolve --eps-max=0.1 -s eq.txt > output.txt')\n sol = computing_boxes()\n return sol\n\n\ndef all_pairs_oflist(L):\n pairs = []\n for i in range(len(L) - 1):\n for j in range(i + 1, len(L)):\n pairs.append([L[i], L[j]])\n return pairs\n\n\ndef checking_assumptions(curve_data):\n if len(curve_data[0][1]) != 0:\n return 0\n Ball_sols_ft = [[d.ftconstructor(Bi[0], Bi[1]) for Bi in B] for B in\n curve_data[1][0]] + [[d.ftconstructor(Bi[0], Bi[1]) for Bi in B] for\n B in curve_data[1][1]]\n alph3 = assum_alph3_checker(Ball_sols_ft)\n if alph3 == 1:\n return 1\n else:\n return 0\n\n\ndef csv_saver(L, type_L='Ball'):\n dic = []\n if type_L == 'Ball':\n n = int((len(L[0]) + 1) / 2)\n for j in range(len(L)):\n dic.append({})\n for i in range(n):\n dic[j]['x' + str(i + 1)] = L[j][i]\n for i in range(n, 2 * n - 2):\n dic[j]['r' + str(i + 3 - n)] = L[j][i]\n dic[j]['t'] = L[j][2 * n - 2]\n return dic\n\n\ndef dict2csv(dictlist, csvfile):\n \"\"\"\n Takes a list of dictionaries as input and outputs a CSV file.\n \"\"\"\n f = open(csvfile, 'wb')\n fieldnames = dictlist[0].keys()\n csvwriter = csv.DictWriter(f, delimiter=',', fieldnames=fieldnames)\n csvwriter.writerow(dict((fn, fn) for fn in fieldnames))\n for row in dictlist:\n csvwriter.writerow(row)\n fn.close()\n\n\ndef assum_alph3_checker(solutions):\n comparing_list = [[]] * len(solutions)\n for i in range(len(solutions) - 1):\n for j in range(i + 1, len(solutions)):\n if d.boxes_intersection(solutions[i][:2], solutions[j][:2]) != []:\n comparing_list[i].append(j)\n comparing_list[j].append(i)\n matching = [len(T) for T in comparing_list]\n if max(matching) <= 2:\n return 1\n else:\n return 0\n\n\ndef plotting_3D(boxes, Box, var=[0, 1, 2]):\n ax = plt.figure().add_subplot(111, projection='3d')\n ax.set_xlim(Box[0][0], Box[0][1])\n ax.set_ylim(Box[1][0], Box[1][1])\n ax.set_zlim(Box[2][0], Box[2][1])\n ax.set_xlabel('x' + str(var[0] + 1))\n ax.set_ylabel('x' + str(var[1] + 1))\n ax.set_zlabel('x' + str(var[2] + 1))\n for box in boxes:\n V = [[box[j][0] for j in range(3)], [box[j][1] for j in range(3)]]\n points = list(itertools.product(*box))\n faces = [[points[0], points[2], points[6], points[4]], [points[0],\n points[2], points[3], points[1]], [points[0], points[1], points\n [5], points[4]], [points[2], points[3], points[7], points[6]],\n [points[1], points[3], points[7], points[5]]]\n ax.add_collection3d(Poly3DCollection(faces, facecolors='green',\n linewidths=1, edgecolors='green', alpha=0.25))\n plt.show()\n\n\ndef enclosing_singularities(system, boxes, B, X, eps_max=0.1, eps_min=0.01):\n combin = []\n ball = []\n start_combin = time.time()\n n = len(B)\n P = [Pi.replace('\\n', '') for Pi in open(system, 'r').readlines()]\n certified_boxes, uncertified_boxes = boxes\n classes = boxes_classifier(system, boxes, X, special_function=[])\n cer_Solutions = []\n uncer_Solutions = []\n H = []\n mon_mid = [[(0.5 * (Bij[1] + Bij[0])) for Bij in Bi[:2]] for Bi in\n classes[0]]\n mon_rad = [max([(0.5 * (Bij[1] - Bij[0])) for Bij in Bi[:2]]) for Bi in\n classes[0]]\n tree = spatial.KDTree(mon_mid)\n intersting_boxes = [tree.query_ball_point(m, r=math.sqrt(2) * r) for m,\n r in zip(mon_mid, mon_rad)]\n \"\"\"for i in range(len(ball)): \n for j in ball[i]:\n if i not in ball[j]:\n ball[j].append(i)\"\"\"\n intersting_boxes = [indi for indi in intersting_boxes if len(indi) > 3]\n discarded_components = []\n for i in range(len(intersting_boxes) - 1):\n for_i_stop = 0\n boxi_set = set(intersting_boxes[i])\n for j in range(i + 1, len(intersting_boxes)):\n boxj_set = set(intersting_boxes[j])\n if boxj_set.issubset(boxi_set):\n discarded_components.append(j)\n elif boxi_set < boxj_set:\n discarded_components.append(i)\n intersting_boxes = [intersting_boxes[i] for i in range(len(\n intersting_boxes)) if i not in discarded_components]\n interesting_boxes_flattened = []\n for Box_ind in intersting_boxes:\n for j in Box_ind:\n if j not in interesting_boxes_flattened:\n interesting_boxes_flattened.append(j)\n plane_components = planner_connected_compnants([classes[0][i] for i in\n interesting_boxes_flattened])\n end_combin = time.time()\n combin.append(end_combin - start_combin)\n H = []\n for plane_component in plane_components:\n if len(plane_component) > 1:\n start_combin = time.time()\n components = connected_compnants(plane_component)\n pairs_of_branches = all_pairs_oflist(components)\n end_combin = time.time()\n combin.append(end_combin - start_combin)\n for pair_branches in pairs_of_branches:\n start_ball = time.time()\n all_boxes = pair_branches[0] + pair_branches[1]\n uni = []\n for box in all_boxes:\n uni = d.box_union(uni, box)\n t = estimating_t(pair_branches)\n t1 = d.ftconstructor(t[0], t[1])\n t = [float(t1.lower()), float(t1.upper())]\n r = [[float(ri[0]), float(ri[1])] for ri in\n estimating_yandr(pair_branches)]\n B_Ball = uni[:2] + r + [t]\n cusp_Ball_solver(P, B_Ball, X)\n Ball_generating_system(P, B_Ball, X, eps_min)\n os.system('ibexsolve --eps-max=' + str(eps_max) +\n ' --eps-min=' + str(eps_min) + ' -s eq.txt > output.txt'\n )\n Solutions = computing_boxes()\n if Solutions != 'Empty' and Solutions != [[], []]:\n cer_Solutions += Solutions[0]\n uncer_Solutions += Solutions[1]\n if Solutions == [[], []]:\n if d.width(B_Ball[:2]) > eps_min:\n new_B = B_Ball[:2] + B[2:n]\n new_boxes = enclosing_curve(system, new_B, X,\n eps_max=0.1 * eps_max)\n resul = enclosing_singularities(system, new_boxes,\n new_B, X, eps_max=0.1 * eps_max)\n cer_Solutions += resul[0] + resul[1]\n uncer_Solutions += resul[2]\n boxes[1] += new_boxes[1]\n else:\n uncer_Solutions.append(B_Ball)\n end_ball = time.time()\n ball.append(end_ball - start_ball)\n start_combin = time.time()\n checked_boxes = []\n all_boxes = boxes[0] + boxes[1]\n checked_boxes = []\n mon_mid_cusp = [[(0.5 * (Bij[1] + Bij[0])) for Bij in Bi[:2]] for Bi in\n classes[1]]\n mon_rad_cusp = [max([(0.5 * (Bij[1] - Bij[0])) for Bij in Bi[:2]]) for\n Bi in classes[1]]\n potential_cusps = [tree.query_ball_point(m, r=math.sqrt(2) * (r +\n eps_max)) for m, r in zip(mon_mid_cusp, mon_rad_cusp)]\n end_combin = time.time()\n combin.append(end_combin - start_combin)\n for cusp_indx in range(len(classes[1])):\n start_combin = time.time()\n intersecting_boxes = [all_boxes[i] for i in potential_cusps[\n cusp_indx] if d.boxes_intersection(all_boxes[i], classes[1][\n cusp_indx]) != []]\n H = []\n uni = classes[1][cusp_indx][:]\n potential_cusp = classes[1][cusp_indx][:]\n checked_boxes.append(potential_cusp)\n for box in intersecting_boxes:\n if box in checked_boxes:\n continue\n uni = d.box_union(uni, box)\n checked_boxes.append(box)\n end_combin = time.time()\n combin.append(end_combin - start_combin)\n start_ball = time.time()\n t = estimating_t([[potential_cusp], [potential_cusp]])\n \"\"\"if t[1]-t[0] < 1e-07:\n t[0]=t[0]-0.5 * eps_min\n t[1]=t[1]+0.5 * eps_min\"\"\"\n B_Ball = uni + [[-1.01, 1.01]] * (n - 2) + [t]\n H.append(B_Ball)\n sol = cusp_Ball_solver(P, B_Ball, X)\n if sol != 'Empty' and sol != [[], []]:\n cer_Solutions += sol[0]\n uncer_Solutions += sol[1]\n if sol == [[], []]:\n uncer_Solutions.append(B_Ball)\n end_ball = time.time()\n ball.append(end_ball - start_ball)\n start_combin = time.time()\n non_intersecting_boxes = [all_boxes[i] for i in potential_cusps[\n cusp_indx] if d.boxes_intersection(all_boxes[i], classes[1][\n cusp_indx]) == []]\n end_combin = time.time()\n combin.append(end_combin - start_combin)\n for aligned in non_intersecting_boxes:\n start_ball = time.time()\n if aligned in checked_boxes:\n continue\n boxes_intersect_aligned = [B for B in non_intersecting_boxes if\n d.boxes_intersection(aligned, B) != []]\n uni = aligned[:]\n for boxi in boxes_intersect_aligned:\n if boxi in checked_boxes:\n continue\n uni = d.box_union(uni, boxi)\n checked_boxes.append(boxi)\n t = estimating_t([[potential_cusp], [uni]])\n \"\"\"if t[1]-t[0] < 1e-07:\n t[0]=t[0]-0.5 * eps_min\n t[1]=t[1]+0.5 * eps_min\"\"\"\n r = [[float(ri[0]), float(ri[1])] for ri in estimating_yandr([[\n potential_cusp], [uni]])]\n B_Ball = potential_cusp[:2] + r + [t]\n H.append(H)\n Ball_generating_system(P, B_Ball, X)\n os.system('ibexsolve --eps-max=' + str(eps_max) +\n ' --eps-min=' + str(eps_min) + ' -s eq.txt > output.txt')\n Solutions = computing_boxes()\n if Solutions != 'Empty':\n cer_Solutions += Solutions[0]\n uncer_Solutions += Solutions[1]\n elif Solutions == [[], []]:\n uncer_Solutions.append(B_Ball)\n end_ball = time.time()\n ball.append(end_ball - start_ball)\n nodes = []\n cups_or_smallnodes = []\n start_combin = time.time()\n checker = projection_checker(cer_Solutions)\n uncer_Solutions = uncer_Solutions + checker[1]\n cer_Solutions = [Bi for Bi in checker[0] if Bi[2 * n - 2][1] >= 0]\n for solution in cer_Solutions:\n if 0 >= solution[2 * n - 2][0] and 0 <= solution[2 * n - 2][1]:\n cups_or_smallnodes.append(solution)\n else:\n nodes.append(solution)\n end_combin = time.time()\n combin.append(end_combin - start_combin)\n print('KDtree ', sum(combin), 'Ball ', sum(ball))\n return [nodes, cups_or_smallnodes, uncer_Solutions]\n\n\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<docstring token>\n",
"<import token>\n\n\ndef ploting_boxes(boxes, uncer_boxes, var=[0, 1], B=[[-20, 20], [-20, 20]],\n x=0.1, nodes=[], cusps=[], uncer_Solutions=[], Legend=False, color=\n 'green', variabel_name='x'):\n fig, ax = plt.subplots()\n ax.set_xlim(B[0][0], B[0][1])\n ax.set_ylim(B[1][0], B[1][1])\n ax.set_xlabel(variabel_name + str(1))\n ax.set_ylabel(variabel_name + str(2))\n \"\"\"try:\n ax.title(open(\"system.txt\",\"r\").read())\n except:\n pass\"\"\"\n c = 0\n green_patch = mpatches.Patch(color=color, label='smooth part')\n red_patch = mpatches.Patch(color='red', label='unknown part')\n node_patch = mpatches.Patch(color='black', label='Certified nodes',\n fill=None)\n cusp_patch = mpatches.Patch(color='blue', label=\n 'Projection of certified solution with t=0 ', fill=None)\n if Legend == True:\n plt.legend(handles=[green_patch, red_patch, node_patch, cusp_patch])\n for box in boxes:\n rectangle = plt.Rectangle((box[var[0]][0], box[var[1]][0]), box[var\n [0]][1] - box[var[0]][0], box[var[1]][1] - box[var[1]][0],\n color=color)\n plt.gca().add_patch(rectangle)\n for box in uncer_boxes:\n rectangle = plt.Rectangle((box[var[0]][0], box[var[1]][0]), box[var\n [0]][1] - box[var[0]][0], box[var[1]][1] - box[var[1]][0], fc='r')\n plt.gca().add_patch(rectangle)\n for box in nodes:\n rectangle = plt.Rectangle((box[0][0] - x, box[1][0] - x), 2 * x +\n box[0][1] - box[0][0], 2 * x + box[1][1] - box[1][0], fc='y',\n fill=None)\n plt.gca().add_patch(rectangle)\n for box in cusps:\n rectangle = plt.Rectangle((box[0][0] - x, box[1][0] - x), 2 * x +\n box[0][1] - box[0][0], 2 * x + box[1][1] - box[1][0], fc='y',\n color='blue', fill=None)\n plt.gca().add_patch(rectangle)\n for box in uncer_Solutions:\n rectangle = plt.Rectangle((box[0][0] - x, box[1][0] - x), 2 * x +\n box[0][1] - box[0][0], 2 * x + box[1][1] - box[1][0], fc='y',\n color='red', fill=None)\n plt.gca().add_patch(rectangle)\n plt.savefig('fig.jpg', dpi=1000)\n plt.show()\n\n\n<function token>\n\n\ndef Ball_solver(equations, B_Ball, X):\n L = [B_Ball]\n certified_boxes = []\n uncertified_boxes = []\n n = len(X)\n while len(L) != 0:\n solvability = 1\n if B_Ball[2 * n - 2][0] <= 0 <= B_Ball[2 * n - 2][1] and d.width([d\n .ftconstructor(Bi[0], Bi[1]) for Bi in L[0]]) < 0.1:\n Ball_cusp_gen(equations, B_Ball, X)\n elif (B_Ball[2 * n - 2][0] > 0 or 0 > B_Ball[2 * n - 2][1]\n ) and d.width([d.ftconstructor(Bi[0], Bi[1]) for Bi in L[0]]\n ) < 0.1:\n Ball_node_gen(equations, B_Ball, X)\n else:\n children = cb.plane_subdivision(L[0])\n L.remove(L[0])\n L += children\n solvability = 0\n if solvability == 1:\n ibex_output = cb.solving_with_ibex()\n if ibex_output[0] == 'Empty':\n L.remove(L[0])\n elif len(ibex_output[0]) != 0:\n certified_boxes += cb.computing_boxes(ibex_output[0])\n L.remove(L[0])\n elif len(ibex_output[1]) != 0:\n uncertified_boxes += cb.computing_boxes(ibex_output[1])\n L.remove(L[0])\n else:\n children = cb.plane_subdivision(L[0])\n L.remove(L[0])\n L += children\n return [certified_boxes, uncertified_boxes]\n\n\ndef SDP_str(P, X):\n n = len(X)\n P_pluse = P[:]\n P_minus = P[:]\n for i in range(2, n):\n P_pluse = P_pluse.replace('x' + str(i + 1), '(x' + str(i + 1) +\n '+ r' + str(i + 1) + '*sqrt(t))')\n P_minus = P_minus.replace('x' + str(i + 1), '(x' + str(i + 1) +\n '- r' + str(i + 1) + '*sqrt(t))')\n SP = '0.5*(' + P_pluse + '+' + P_minus + ')=0; \\n'\n DP = '0.5*(' + P_pluse + '- (' + P_minus + ') )/(sqrt(t))=0; \\n'\n return [SP, DP]\n\n\ndef Ball_generating_system(P, B_Ball, X, eps_min=0.001):\n n = len(X)\n V = ' Variables \\n '\n for i in range(n):\n if B_Ball[i][0] != B_Ball[i][1]:\n V += 'x' + str(i + 1) + ' in ' + str(B_Ball[i]) + ' ; \\n'\n else:\n V += 'x' + str(i + 1) + ' in ' + str([B_Ball[i][0] - eps_min, \n B_Ball[i][1] + eps_min]) + ' ; \\n'\n for i in range(n, 2 * n - 2):\n V += 'r' + str(i - n + 3) + ' in ' + str(B_Ball[i]) + ' ; \\n'\n V += 't' + ' in ' + str(B_Ball[2 * n - 2]) + ' ; \\n'\n V += 'Constraints \\n'\n for Pi in P:\n V += SDP_str(Pi, X)[0]\n V += SDP_str(Pi, X)[1]\n last_eq = ''\n for i in range(3, n):\n last_eq += 'r' + str(i) + '^2+'\n last_eq += 'r' + str(n) + '^2 -1=0;'\n V += last_eq + '\\n'\n f = open('eq.txt', 'w+')\n f.write(V)\n f.write('end')\n f.close()\n\n\n<function token>\n\n\ndef intersting_boxes(curve, b):\n cer_intersting_boxes = []\n uncer_intersting_boxes = []\n for box in curve[0]:\n if b[0][0] <= box[0][0] <= box[0][1] <= b[0][1] and b[1][0] <= box[1][0\n ] <= box[1][1] <= b[1][1]:\n cer_intersting_boxes.append(box)\n for box in curve[1]:\n if b[0][0] <= box[0][0] <= box[0][1] <= b[0][1] and b[1][0] <= box[1][0\n ] <= box[1][1] <= b[1][1]:\n uncer_intersting_boxes.append(box)\n return [cer_intersting_boxes, uncer_intersting_boxes]\n\n\ndef ibex_output(P, B, X):\n os.system('ibexsolve --eps-max=0.1 -s eq.txt > output.txt')\n g = open('output.txt', 'r')\n result = g.readlines()\n T = computing_boxes(result)\n return T\n\n\ndef estimating_t1(components, upper_bound=200000):\n t1 = upper_bound\n t2 = 0\n for box1 in components[0]:\n for box2 in components[1]:\n a = d.distance(box1, box2).lower()\n b = d.distance(box1, box2).upper()\n if t1 > a:\n t1 = a\n if t2 < b:\n t2 = b\n t = d.ftconstructor(t1, t2)\n t = 0.25 * d.power_interval(t, 2)\n return [float(t.lower()), float(t.upper())]\n\n\ndef estimating_t(components, upper_bound=19000.8):\n t1 = upper_bound\n t2 = 0\n for box1 in components[0]:\n for box2 in components[1]:\n a = d.distance(box1[2:], box2[2:])\n if t1 > a[0]:\n t1 = a[0]\n if t2 < a[1]:\n t2 = a[1]\n t = d.ftconstructor(t1, t2)\n t = 0.25 * d.power_interval(t, 2)\n return [float(t.lower()), float(t.upper())]\n\n\ndef boxes_compare(box1, box2):\n flage = 0\n for i in range(len(box1) - 1, -1, -1):\n if box1[i][0] > box2[i][0]:\n return 1\n if box1[i][0] < box2[i][0]:\n return -1\n return 0\n\n\ndef boxes_sort(boxes):\n sorted_boxes = boxes[:]\n for i in range(len(boxes) - 1):\n for j in range(i + 1, len(boxes)):\n if boxes_compare(sorted_boxes[i], sorted_boxes[j]) == 1:\n sorted_boxes[i], sorted_boxes[j] = sorted_boxes[j\n ], sorted_boxes[i]\n return sorted_boxes\n\n\ndef connected_compnants(boxes):\n ftboxes = boxes[:]\n components = [[ftboxes[0]]]\n for i in range(1, len(ftboxes)):\n boxi_isused = 0\n for j in range(len(components)):\n membership = 0\n for k in range(len(components[j])):\n if d.boxes_intersection(ftboxes[i], components[j][k]) != []:\n components[j].append(ftboxes[i])\n membership = 1\n boxi_isused = 1\n break\n if membership == 1:\n break\n if boxi_isused == 0:\n components.append([ftboxes[i]])\n unused = list(range(len(components)))\n components1 = components[:]\n components2 = []\n while len(components1) != len(components2):\n for i in unused:\n for j in [j for j in list(range(i + 1, len(components))) if j in\n unused]:\n intersection_exists = False\n is_looping = True\n for boxi in components[i]:\n for boxj in components[j]:\n if d.boxes_intersection(boxi, boxj) != []:\n is_looping = False\n intersection_exists = True\n break\n if is_looping == False:\n break\n if intersection_exists == True:\n components[i] += components[j]\n unused.remove(j)\n components2 = components1[:]\n components1 = [components[k] for k in unused]\n return components1\n\n\ndef planner_connected_compnants(boxes):\n if len(boxes) == 0:\n return []\n ftboxes = boxes[:]\n components = [[ftboxes[0]]]\n for i in range(1, len(ftboxes)):\n boxi_isused = 0\n for j in range(len(components)):\n membership = 0\n for k in range(len(components[j])):\n if d.boxes_intersection(ftboxes[i][:2], components[j][k][:2]\n ) != []:\n components[j].append(ftboxes[i])\n membership = 1\n boxi_isused = 1\n break\n if membership == 1:\n break\n if boxi_isused == 0:\n components.append([ftboxes[i]])\n unused = list(range(len(components)))\n components1 = components[:]\n components2 = []\n while len(components1) != len(components2):\n for i in unused:\n for j in [j for j in list(range(i + 1, len(components))) if j in\n unused]:\n intersection_exists = False\n is_looping = True\n for boxi in components[i]:\n for boxj in components[j]:\n if d.boxes_intersection(boxi[:2], boxj[:2]) != []:\n is_looping = False\n intersection_exists = True\n break\n if is_looping == False:\n break\n if intersection_exists == True:\n components[i] += components[j]\n unused.remove(j)\n components2 = components1[:]\n components1 = [components[k] for k in unused]\n return components1\n\n\n<function token>\n\n\ndef detecting_nodes(boxes, B, f, X, eps):\n mixes_boxes = [[1, box] for box in boxes[0]] + [[0, box] for box in\n boxes[1]]\n ftboxes = [[box[0], [d.ftconstructor(boxi[0], boxi[1]) for boxi in box[\n 1]]] for box in mixes_boxes]\n nodes_lifting = []\n used = []\n P = [Pi.replace('\\n', '') for Pi in open(f, 'r').readlines()]\n for i in range(len(ftboxes)):\n for j in range(i + 1, len(ftboxes)):\n Mariam_ft = d.boxes_intersection(ftboxes[i][1], ftboxes[j][1])\n Mariam = [[float(Bi.lower()), float(Bi.upper())] for Bi in\n Mariam_ft]\n if Mariam == [] and d.boxes_intersection(ftboxes[i][1][:2],\n ftboxes[j][1][:2]) or Mariam != [] and enclosing_curve(f,\n Mariam, X, eps_max=0.1) == [[], []]:\n if i not in used:\n used.append(i)\n nodes_lifting.append(ftboxes[i])\n if j not in used:\n used.append(j)\n nodes_lifting.append(ftboxes[j])\n components = planner_connected_compnants(nodes_lifting)\n cer_components = []\n uncer_components = []\n component_normal = []\n for component in components:\n boxes_component = [box[1] for box in component]\n component_normal = [[[float(Bi.lower()), float(Bi.upper())] for Bi in\n box[1]] for box in component]\n if 0 not in [box[0] for box in component] and eval_file_gen(f,\n component_normal, X) == '[]\\n':\n cer_components.append(boxes_component)\n else:\n uncer_components.append(boxes_component)\n return [cer_components, uncer_components]\n\n\ndef intersect_in_2D(class1, class2, monotonicity=1):\n pl_intesected_pairs = []\n if monotonicity == 1:\n for i in range(len(class1)):\n for j in range(len(class2)):\n if d.boxes_intersection(class1[i][:2], class2[j][:2]) != [\n ] and d.boxes_intersection(class1[i], class2[j]) == []:\n if [class2[j], class1[i]] not in pl_intesected_pairs:\n pl_intesected_pairs.append([class1[i], class2[j]])\n elif monotonicity == 0:\n for i in range(len(class1)):\n for j in range(len(class2)):\n if d.boxes_intersection(class1[i][:2], class2[j][:2]) != []:\n if [class2[j], class1[i]] not in pl_intesected_pairs:\n pl_intesected_pairs.append([class1[i], class2[j]])\n elif monotonicity == 2:\n inters_indic = []\n for i in range(len(class1)):\n inters_indic.append([])\n for j in range(len(class2)):\n if d.boxes_intersection(class1[i][:2], class2[j][:2]) != []:\n inters_indic[i] = inters_indic[i] + [j]\n for k in range(len(class1)):\n if len(inters_indic[k]) > 3:\n for j in range(len(inters_indic[k])):\n if [class2[j], class1[k]] not in pl_intesected_pairs:\n pl_intesected_pairs.append([class1[k], class2[j]])\n return pl_intesected_pairs\n\n\ndef solving_fornodes(equations, boxes, B, X, eps=0.1):\n plane_components = detecting_nodes(boxes, B, equations, X, eps)\n g = open(equations, 'r')\n P = [Pi.replace('\\n', '') for Pi in g.readlines()]\n Ball_solutions = []\n for plane_component in plane_components:\n x1 = float(min([ai[0].lower() for ai in plane_component]))\n x2 = float(max([ai[0].upper() for ai in plane_component]))\n y1 = float(min([ai[1].lower() for ai in plane_component]))\n y2 = float(max([ai[1].upper() for ai in plane_component]))\n components = connected_compnants(plane_component)\n r = [[float(ri[0]), float(ri[1])] for ri in estimating_r(components)]\n t = estimating_t(components)\n t = [float(t[0]), float(t[1])]\n B_Ball = [[x1, x2], [y1, y2]] + r + [t]\n Ball_generating_system(P, B_Ball, X)\n solutionsi = ibex_output(P, B_Ball, X)\n Ball_solutions += solutionsi\n return Ball_solutions\n\n\ndef normal_subdivision(B):\n ft_B = d.subdivide([d.ftconstructor(Bi[0], Bi[1]) for Bi in B[:]])\n return [d.ft_normal(Bi) for Bi in ft_B]\n\n\ndef plane_subdivision(B):\n ft_B2 = d.subdivide([d.ftconstructor(Bi[0], Bi[1]) for Bi in B[:2]])\n normal_B2 = [d.ft_normal(Bi) for Bi in ft_B2]\n return d.cartesian_product(normal_B2, [B[2:]])\n\n\ndef system_generator(f, B, X):\n g = open(f, 'r')\n L = g.readlines()\n g.close()\n f = open('eq.txt', 'w+')\n f.write('Variables \\n')\n for i in range(len(X)):\n f.write(str(X[i]) + ' in ' + str(B[i]) + ' ; \\n')\n f.write('Constraints \\n')\n for Li in L:\n f.write(Li.replace('\\n', '') + '=0; \\n')\n f.write('end ')\n f.close()\n return f\n\n\n<function token>\n\n\ndef computing_boxes():\n if 'infeasible' in open('output.txt', 'r').read():\n return 'Empty'\n content = open('output.txt', 'r').readlines()\n cer = []\n uncer = []\n i = 0\n Answer = []\n for fi in content:\n try:\n a = fi.index('(')\n b = fi.index(')')\n T = fi[a:b + 1].replace('(', '[')\n T = fi[a:b + 1].replace('(', '[')\n T = T.replace(')', ']')\n T = T.split(';')\n E = []\n i = 0\n for Ti in T:\n Ti = Ti.replace('[', '')\n Ti = Ti.replace(']', '')\n Ti = Ti.replace('<', '')\n Ti = Ti.replace('>', '')\n x = Ti.index(',')\n a = float(Ti[:x])\n b = float(Ti[x + 1:])\n E.append([])\n E[i] = [a, b]\n i += 1\n if 'solution n' in fi or 'boundary n' in fi:\n cer.append(E)\n elif 'unknown n' in fi:\n uncer.append(E)\n except ValueError:\n pass\n return [cer, uncer]\n\n\ndef enclosing_curve(system, B, X, eps_min=0.1, eps_max=0.1):\n L = [B]\n certified_boxes = []\n uncertified_boxes = []\n while len(L) != 0:\n system_generator(system, L[0], X)\n os.system('ibexsolve --eps-max=' + str(eps_max) + ' --eps-min=' +\n str(eps_min) + ' -s eq.txt > output.txt')\n content = open('output.txt', 'r').readlines()\n ibex_output = computing_boxes()\n if ibex_output == [[], []] and max([(Bi[1] - Bi[0]) for Bi in L[0]]\n ) < eps_min:\n uncertified_boxes.append(L[0])\n L.remove(L[0])\n elif ibex_output == [[], []]:\n children = plane_subdivision(L[0])\n L.remove(L[0])\n L += children\n elif ibex_output == 'Empty':\n L.remove(L[0])\n else:\n if len(ibex_output[0]) != 0:\n certified_boxes += ibex_output[0]\n if len(ibex_output[1]) != 0:\n uncertified_boxes += ibex_output[1]\n L.remove(L[0])\n return [certified_boxes, uncertified_boxes]\n\n\ndef loopsfree_checker(f, certified_boxes, uncer_boxes, P):\n L = eval_file_gen(f, certified_boxes, X)\n while L.replace('\\n', '') != '[]':\n L = L.replace('[', '')\n L = L.replace(']', '')\n L = L.replace('\\n', '')\n L = L.split(',')\n for i in L:\n children = normal_subdivision(certified_boxes[int(i)])\n certified_boxes.remove(certified_boxes[int(i)])\n for child in children:\n cer_children, uncer_children = enclosing_curve(f, child, X)\n certified_boxes += cer_children\n uncer_boxes += uncer_children\n L = eval_file_gen(f, certified_boxes, X)\n return L\n\n\ndef eval_file_gen(f, boxes, X, special_function=[]):\n functions = ['sin', 'cos', 'tan', 'exp'] + special_function\n if len(boxes[0]) == 0:\n return []\n n = len(boxes[0])\n m = len(boxes)\n g = open(f, 'r')\n P_str = g.readlines()\n P_str = [Pi.replace('\\n', '') for Pi in P_str]\n P_str = [Pi.replace('^', '**') for Pi in P_str]\n P_exp = [parse_expr(Pi) for Pi in P_str]\n jac = sp.Matrix(P_str).jacobian(sp.Matrix(X))\n minor1 = jac[:, 1:].det()\n minor2 = jac[:, [i for i in range(n) if i != 1]].det()\n fil = open('evaluation_file1.py', 'w')\n fil.write('import flint as ft \\n')\n fil.write('import sympy as sp \\n')\n fil.write('import interval_arithmetic as d \\n')\n fil.write('boxes=' + str(boxes) + '\\n')\n fil.write(\n 'ftboxes=[ [d.ftconstructor(Bi[0],Bi[1]) for Bi in B ] for B in boxes ] \\n'\n )\n fil.write('n=len(boxes[0])\\n')\n fil.write('m=len(boxes)\\n')\n fil.write('m1=[]\\n')\n fil.write('m2=[]\\n')\n minor1_str = str(minor1)\n minor2_str = str(minor2)\n for i in range(n):\n minor1_str = minor1_str.replace('x' + str(i + 1), 'B[' + str(i) + ']')\n minor2_str = minor2_str.replace('x' + str(i + 1), 'B[' + str(i) + ']')\n for func in functions:\n minor1_str = minor1_str.replace(func, 'ft.arb.' + func)\n minor2_str = minor2_str.replace(func, 'ft.arb.' + func)\n fil.write('for B in ftboxes: \\n')\n fil.write(' m1.append(ft.arb(' + minor1_str + ')) \\n')\n fil.write(' m2.append( ft.arb(' + minor2_str + ')) \\n')\n fil.write(\n 'innrer_loops=[i for i in range(m) if 0 in m1[i] and 0 in m2[i] ]\\n')\n fil.write('print(innrer_loops)\\n')\n fil.close()\n t = os.popen('python3 evaluation_file1.py ').read()\n return t\n\n\ndef boxes_classifier(system, boxes, X, special_function=[]):\n if len(boxes[0]) == 0:\n return [[], [], boxes[1]]\n certified_boxes, uncer_boxes = boxes\n L = eval_file_gen(system, certified_boxes, X)\n if L == []:\n return [[], [], uncer_boxes]\n it = 0\n L = L.replace('[', '')\n L = L.replace(']', '')\n L = L.replace('\\n', '')\n L = L.split(',')\n if L != ['']:\n L = [int(li) for li in L]\n return [[certified_boxes[i] for i in range(len(certified_boxes)) if\n i not in L], [certified_boxes[i] for i in L], uncer_boxes]\n else:\n return [[certified_boxes[i] for i in range(len(certified_boxes)) if\n i not in L], [], uncer_boxes]\n\n\ndef projection_checker(solutions):\n if len(solutions) == 0:\n return [[], []]\n m = len(solutions[0])\n n = int((m + 1) / 2)\n intersect_in2d = [[]] * len(solutions)\n for i in range(len(solutions) - 1):\n for j in range(i + 1, len(solutions)):\n if solutions[i] == solutions[j]:\n continue\n elif d.boxes_intersection(solutions[i][:2], solutions[j][:2]) != [\n ] and (d.boxes_intersection(solutions[i][n:2 * n - 2], [[-\n Bi[1], -Bi[0]] for Bi in solutions[j][n:2 * n - 2]]) == [] and\n d.boxes_intersection(solutions[i][n:2 * n - 2], [[Bi[0], Bi\n [1]] for Bi in solutions[j][n:2 * n - 2]]) == [] or d.\n boxes_intersection(solutions[i][2:n] + [solutions[i][2 * n -\n 2]], solutions[j][2:n] + [solutions[j][2 * n - 2]]) == []):\n intersect_in2d[i] = intersect_in2d[i] + [j]\n accepted = []\n acc_ind = []\n unaccepted = []\n unacc_ind = []\n for i in range(len(solutions)):\n if len(intersect_in2d[i]) == 0 and i not in unacc_ind + acc_ind:\n accepted.append(solutions[i])\n acc_ind.append(i)\n continue\n elif i not in unacc_ind + acc_ind:\n unaccepted.append(solutions[i])\n unacc_ind.append(i)\n for k in intersect_in2d[i]:\n if k not in unacc_ind:\n unaccepted.append(solutions[k])\n unacc_ind.append(k)\n return [accepted, unaccepted]\n\n\ndef Ball_given_2nboxes(system, X, B1, B2, monotonicity_B1=1, monotonicity_B2=1\n ):\n B1_ft = [d.ftconstructor(Bi[0], Bi[1]) for Bi in B1]\n B2_ft = [d.ftconstructor(Bi[0], Bi[1]) for Bi in B2]\n P = [Pi.replace('\\n', '') for Pi in open(system, 'r').readlines()]\n sol = 'Empty'\n if d.boxes_intersection(B1_ft, B2_ft) == [\n ] and monotonicity_B1 == monotonicity_B2 == 1:\n t = estimating_t([[B1_ft], [B2_ft]])\n y_and_r = estimating_yandr([[B1_ft], [B2_ft]])\n intersec_B1B2_in2d = d.boxes_intersection(B1_ft[:2], B2_ft[:2])\n intersec_B1B2_in2d = [[float(Bi.lower()), float(Bi.upper())] for Bi in\n intersec_B1B2_in2d]\n B_Ball = intersec_B1B2_in2d + y_and_r + [t]\n Ball_node_gen(system, B_Ball, X)\n os.system('ibexsolve --eps-max=0.1 -s eq.txt > output.txt')\n sol = computing_boxes()\n return sol\n\n\ndef all_pairs_oflist(L):\n pairs = []\n for i in range(len(L) - 1):\n for j in range(i + 1, len(L)):\n pairs.append([L[i], L[j]])\n return pairs\n\n\ndef checking_assumptions(curve_data):\n if len(curve_data[0][1]) != 0:\n return 0\n Ball_sols_ft = [[d.ftconstructor(Bi[0], Bi[1]) for Bi in B] for B in\n curve_data[1][0]] + [[d.ftconstructor(Bi[0], Bi[1]) for Bi in B] for\n B in curve_data[1][1]]\n alph3 = assum_alph3_checker(Ball_sols_ft)\n if alph3 == 1:\n return 1\n else:\n return 0\n\n\ndef csv_saver(L, type_L='Ball'):\n dic = []\n if type_L == 'Ball':\n n = int((len(L[0]) + 1) / 2)\n for j in range(len(L)):\n dic.append({})\n for i in range(n):\n dic[j]['x' + str(i + 1)] = L[j][i]\n for i in range(n, 2 * n - 2):\n dic[j]['r' + str(i + 3 - n)] = L[j][i]\n dic[j]['t'] = L[j][2 * n - 2]\n return dic\n\n\ndef dict2csv(dictlist, csvfile):\n \"\"\"\n Takes a list of dictionaries as input and outputs a CSV file.\n \"\"\"\n f = open(csvfile, 'wb')\n fieldnames = dictlist[0].keys()\n csvwriter = csv.DictWriter(f, delimiter=',', fieldnames=fieldnames)\n csvwriter.writerow(dict((fn, fn) for fn in fieldnames))\n for row in dictlist:\n csvwriter.writerow(row)\n fn.close()\n\n\ndef assum_alph3_checker(solutions):\n comparing_list = [[]] * len(solutions)\n for i in range(len(solutions) - 1):\n for j in range(i + 1, len(solutions)):\n if d.boxes_intersection(solutions[i][:2], solutions[j][:2]) != []:\n comparing_list[i].append(j)\n comparing_list[j].append(i)\n matching = [len(T) for T in comparing_list]\n if max(matching) <= 2:\n return 1\n else:\n return 0\n\n\ndef plotting_3D(boxes, Box, var=[0, 1, 2]):\n ax = plt.figure().add_subplot(111, projection='3d')\n ax.set_xlim(Box[0][0], Box[0][1])\n ax.set_ylim(Box[1][0], Box[1][1])\n ax.set_zlim(Box[2][0], Box[2][1])\n ax.set_xlabel('x' + str(var[0] + 1))\n ax.set_ylabel('x' + str(var[1] + 1))\n ax.set_zlabel('x' + str(var[2] + 1))\n for box in boxes:\n V = [[box[j][0] for j in range(3)], [box[j][1] for j in range(3)]]\n points = list(itertools.product(*box))\n faces = [[points[0], points[2], points[6], points[4]], [points[0],\n points[2], points[3], points[1]], [points[0], points[1], points\n [5], points[4]], [points[2], points[3], points[7], points[6]],\n [points[1], points[3], points[7], points[5]]]\n ax.add_collection3d(Poly3DCollection(faces, facecolors='green',\n linewidths=1, edgecolors='green', alpha=0.25))\n plt.show()\n\n\ndef enclosing_singularities(system, boxes, B, X, eps_max=0.1, eps_min=0.01):\n combin = []\n ball = []\n start_combin = time.time()\n n = len(B)\n P = [Pi.replace('\\n', '') for Pi in open(system, 'r').readlines()]\n certified_boxes, uncertified_boxes = boxes\n classes = boxes_classifier(system, boxes, X, special_function=[])\n cer_Solutions = []\n uncer_Solutions = []\n H = []\n mon_mid = [[(0.5 * (Bij[1] + Bij[0])) for Bij in Bi[:2]] for Bi in\n classes[0]]\n mon_rad = [max([(0.5 * (Bij[1] - Bij[0])) for Bij in Bi[:2]]) for Bi in\n classes[0]]\n tree = spatial.KDTree(mon_mid)\n intersting_boxes = [tree.query_ball_point(m, r=math.sqrt(2) * r) for m,\n r in zip(mon_mid, mon_rad)]\n \"\"\"for i in range(len(ball)): \n for j in ball[i]:\n if i not in ball[j]:\n ball[j].append(i)\"\"\"\n intersting_boxes = [indi for indi in intersting_boxes if len(indi) > 3]\n discarded_components = []\n for i in range(len(intersting_boxes) - 1):\n for_i_stop = 0\n boxi_set = set(intersting_boxes[i])\n for j in range(i + 1, len(intersting_boxes)):\n boxj_set = set(intersting_boxes[j])\n if boxj_set.issubset(boxi_set):\n discarded_components.append(j)\n elif boxi_set < boxj_set:\n discarded_components.append(i)\n intersting_boxes = [intersting_boxes[i] for i in range(len(\n intersting_boxes)) if i not in discarded_components]\n interesting_boxes_flattened = []\n for Box_ind in intersting_boxes:\n for j in Box_ind:\n if j not in interesting_boxes_flattened:\n interesting_boxes_flattened.append(j)\n plane_components = planner_connected_compnants([classes[0][i] for i in\n interesting_boxes_flattened])\n end_combin = time.time()\n combin.append(end_combin - start_combin)\n H = []\n for plane_component in plane_components:\n if len(plane_component) > 1:\n start_combin = time.time()\n components = connected_compnants(plane_component)\n pairs_of_branches = all_pairs_oflist(components)\n end_combin = time.time()\n combin.append(end_combin - start_combin)\n for pair_branches in pairs_of_branches:\n start_ball = time.time()\n all_boxes = pair_branches[0] + pair_branches[1]\n uni = []\n for box in all_boxes:\n uni = d.box_union(uni, box)\n t = estimating_t(pair_branches)\n t1 = d.ftconstructor(t[0], t[1])\n t = [float(t1.lower()), float(t1.upper())]\n r = [[float(ri[0]), float(ri[1])] for ri in\n estimating_yandr(pair_branches)]\n B_Ball = uni[:2] + r + [t]\n cusp_Ball_solver(P, B_Ball, X)\n Ball_generating_system(P, B_Ball, X, eps_min)\n os.system('ibexsolve --eps-max=' + str(eps_max) +\n ' --eps-min=' + str(eps_min) + ' -s eq.txt > output.txt'\n )\n Solutions = computing_boxes()\n if Solutions != 'Empty' and Solutions != [[], []]:\n cer_Solutions += Solutions[0]\n uncer_Solutions += Solutions[1]\n if Solutions == [[], []]:\n if d.width(B_Ball[:2]) > eps_min:\n new_B = B_Ball[:2] + B[2:n]\n new_boxes = enclosing_curve(system, new_B, X,\n eps_max=0.1 * eps_max)\n resul = enclosing_singularities(system, new_boxes,\n new_B, X, eps_max=0.1 * eps_max)\n cer_Solutions += resul[0] + resul[1]\n uncer_Solutions += resul[2]\n boxes[1] += new_boxes[1]\n else:\n uncer_Solutions.append(B_Ball)\n end_ball = time.time()\n ball.append(end_ball - start_ball)\n start_combin = time.time()\n checked_boxes = []\n all_boxes = boxes[0] + boxes[1]\n checked_boxes = []\n mon_mid_cusp = [[(0.5 * (Bij[1] + Bij[0])) for Bij in Bi[:2]] for Bi in\n classes[1]]\n mon_rad_cusp = [max([(0.5 * (Bij[1] - Bij[0])) for Bij in Bi[:2]]) for\n Bi in classes[1]]\n potential_cusps = [tree.query_ball_point(m, r=math.sqrt(2) * (r +\n eps_max)) for m, r in zip(mon_mid_cusp, mon_rad_cusp)]\n end_combin = time.time()\n combin.append(end_combin - start_combin)\n for cusp_indx in range(len(classes[1])):\n start_combin = time.time()\n intersecting_boxes = [all_boxes[i] for i in potential_cusps[\n cusp_indx] if d.boxes_intersection(all_boxes[i], classes[1][\n cusp_indx]) != []]\n H = []\n uni = classes[1][cusp_indx][:]\n potential_cusp = classes[1][cusp_indx][:]\n checked_boxes.append(potential_cusp)\n for box in intersecting_boxes:\n if box in checked_boxes:\n continue\n uni = d.box_union(uni, box)\n checked_boxes.append(box)\n end_combin = time.time()\n combin.append(end_combin - start_combin)\n start_ball = time.time()\n t = estimating_t([[potential_cusp], [potential_cusp]])\n \"\"\"if t[1]-t[0] < 1e-07:\n t[0]=t[0]-0.5 * eps_min\n t[1]=t[1]+0.5 * eps_min\"\"\"\n B_Ball = uni + [[-1.01, 1.01]] * (n - 2) + [t]\n H.append(B_Ball)\n sol = cusp_Ball_solver(P, B_Ball, X)\n if sol != 'Empty' and sol != [[], []]:\n cer_Solutions += sol[0]\n uncer_Solutions += sol[1]\n if sol == [[], []]:\n uncer_Solutions.append(B_Ball)\n end_ball = time.time()\n ball.append(end_ball - start_ball)\n start_combin = time.time()\n non_intersecting_boxes = [all_boxes[i] for i in potential_cusps[\n cusp_indx] if d.boxes_intersection(all_boxes[i], classes[1][\n cusp_indx]) == []]\n end_combin = time.time()\n combin.append(end_combin - start_combin)\n for aligned in non_intersecting_boxes:\n start_ball = time.time()\n if aligned in checked_boxes:\n continue\n boxes_intersect_aligned = [B for B in non_intersecting_boxes if\n d.boxes_intersection(aligned, B) != []]\n uni = aligned[:]\n for boxi in boxes_intersect_aligned:\n if boxi in checked_boxes:\n continue\n uni = d.box_union(uni, boxi)\n checked_boxes.append(boxi)\n t = estimating_t([[potential_cusp], [uni]])\n \"\"\"if t[1]-t[0] < 1e-07:\n t[0]=t[0]-0.5 * eps_min\n t[1]=t[1]+0.5 * eps_min\"\"\"\n r = [[float(ri[0]), float(ri[1])] for ri in estimating_yandr([[\n potential_cusp], [uni]])]\n B_Ball = potential_cusp[:2] + r + [t]\n H.append(H)\n Ball_generating_system(P, B_Ball, X)\n os.system('ibexsolve --eps-max=' + str(eps_max) +\n ' --eps-min=' + str(eps_min) + ' -s eq.txt > output.txt')\n Solutions = computing_boxes()\n if Solutions != 'Empty':\n cer_Solutions += Solutions[0]\n uncer_Solutions += Solutions[1]\n elif Solutions == [[], []]:\n uncer_Solutions.append(B_Ball)\n end_ball = time.time()\n ball.append(end_ball - start_ball)\n nodes = []\n cups_or_smallnodes = []\n start_combin = time.time()\n checker = projection_checker(cer_Solutions)\n uncer_Solutions = uncer_Solutions + checker[1]\n cer_Solutions = [Bi for Bi in checker[0] if Bi[2 * n - 2][1] >= 0]\n for solution in cer_Solutions:\n if 0 >= solution[2 * n - 2][0] and 0 <= solution[2 * n - 2][1]:\n cups_or_smallnodes.append(solution)\n else:\n nodes.append(solution)\n end_combin = time.time()\n combin.append(end_combin - start_combin)\n print('KDtree ', sum(combin), 'Ball ', sum(ball))\n return [nodes, cups_or_smallnodes, uncer_Solutions]\n\n\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<docstring token>\n",
"<import token>\n\n\ndef ploting_boxes(boxes, uncer_boxes, var=[0, 1], B=[[-20, 20], [-20, 20]],\n x=0.1, nodes=[], cusps=[], uncer_Solutions=[], Legend=False, color=\n 'green', variabel_name='x'):\n fig, ax = plt.subplots()\n ax.set_xlim(B[0][0], B[0][1])\n ax.set_ylim(B[1][0], B[1][1])\n ax.set_xlabel(variabel_name + str(1))\n ax.set_ylabel(variabel_name + str(2))\n \"\"\"try:\n ax.title(open(\"system.txt\",\"r\").read())\n except:\n pass\"\"\"\n c = 0\n green_patch = mpatches.Patch(color=color, label='smooth part')\n red_patch = mpatches.Patch(color='red', label='unknown part')\n node_patch = mpatches.Patch(color='black', label='Certified nodes',\n fill=None)\n cusp_patch = mpatches.Patch(color='blue', label=\n 'Projection of certified solution with t=0 ', fill=None)\n if Legend == True:\n plt.legend(handles=[green_patch, red_patch, node_patch, cusp_patch])\n for box in boxes:\n rectangle = plt.Rectangle((box[var[0]][0], box[var[1]][0]), box[var\n [0]][1] - box[var[0]][0], box[var[1]][1] - box[var[1]][0],\n color=color)\n plt.gca().add_patch(rectangle)\n for box in uncer_boxes:\n rectangle = plt.Rectangle((box[var[0]][0], box[var[1]][0]), box[var\n [0]][1] - box[var[0]][0], box[var[1]][1] - box[var[1]][0], fc='r')\n plt.gca().add_patch(rectangle)\n for box in nodes:\n rectangle = plt.Rectangle((box[0][0] - x, box[1][0] - x), 2 * x +\n box[0][1] - box[0][0], 2 * x + box[1][1] - box[1][0], fc='y',\n fill=None)\n plt.gca().add_patch(rectangle)\n for box in cusps:\n rectangle = plt.Rectangle((box[0][0] - x, box[1][0] - x), 2 * x +\n box[0][1] - box[0][0], 2 * x + box[1][1] - box[1][0], fc='y',\n color='blue', fill=None)\n plt.gca().add_patch(rectangle)\n for box in uncer_Solutions:\n rectangle = plt.Rectangle((box[0][0] - x, box[1][0] - x), 2 * x +\n box[0][1] - box[0][0], 2 * x + box[1][1] - box[1][0], fc='y',\n color='red', fill=None)\n plt.gca().add_patch(rectangle)\n plt.savefig('fig.jpg', dpi=1000)\n plt.show()\n\n\n<function token>\n\n\ndef Ball_solver(equations, B_Ball, X):\n L = [B_Ball]\n certified_boxes = []\n uncertified_boxes = []\n n = len(X)\n while len(L) != 0:\n solvability = 1\n if B_Ball[2 * n - 2][0] <= 0 <= B_Ball[2 * n - 2][1] and d.width([d\n .ftconstructor(Bi[0], Bi[1]) for Bi in L[0]]) < 0.1:\n Ball_cusp_gen(equations, B_Ball, X)\n elif (B_Ball[2 * n - 2][0] > 0 or 0 > B_Ball[2 * n - 2][1]\n ) and d.width([d.ftconstructor(Bi[0], Bi[1]) for Bi in L[0]]\n ) < 0.1:\n Ball_node_gen(equations, B_Ball, X)\n else:\n children = cb.plane_subdivision(L[0])\n L.remove(L[0])\n L += children\n solvability = 0\n if solvability == 1:\n ibex_output = cb.solving_with_ibex()\n if ibex_output[0] == 'Empty':\n L.remove(L[0])\n elif len(ibex_output[0]) != 0:\n certified_boxes += cb.computing_boxes(ibex_output[0])\n L.remove(L[0])\n elif len(ibex_output[1]) != 0:\n uncertified_boxes += cb.computing_boxes(ibex_output[1])\n L.remove(L[0])\n else:\n children = cb.plane_subdivision(L[0])\n L.remove(L[0])\n L += children\n return [certified_boxes, uncertified_boxes]\n\n\ndef SDP_str(P, X):\n n = len(X)\n P_pluse = P[:]\n P_minus = P[:]\n for i in range(2, n):\n P_pluse = P_pluse.replace('x' + str(i + 1), '(x' + str(i + 1) +\n '+ r' + str(i + 1) + '*sqrt(t))')\n P_minus = P_minus.replace('x' + str(i + 1), '(x' + str(i + 1) +\n '- r' + str(i + 1) + '*sqrt(t))')\n SP = '0.5*(' + P_pluse + '+' + P_minus + ')=0; \\n'\n DP = '0.5*(' + P_pluse + '- (' + P_minus + ') )/(sqrt(t))=0; \\n'\n return [SP, DP]\n\n\ndef Ball_generating_system(P, B_Ball, X, eps_min=0.001):\n n = len(X)\n V = ' Variables \\n '\n for i in range(n):\n if B_Ball[i][0] != B_Ball[i][1]:\n V += 'x' + str(i + 1) + ' in ' + str(B_Ball[i]) + ' ; \\n'\n else:\n V += 'x' + str(i + 1) + ' in ' + str([B_Ball[i][0] - eps_min, \n B_Ball[i][1] + eps_min]) + ' ; \\n'\n for i in range(n, 2 * n - 2):\n V += 'r' + str(i - n + 3) + ' in ' + str(B_Ball[i]) + ' ; \\n'\n V += 't' + ' in ' + str(B_Ball[2 * n - 2]) + ' ; \\n'\n V += 'Constraints \\n'\n for Pi in P:\n V += SDP_str(Pi, X)[0]\n V += SDP_str(Pi, X)[1]\n last_eq = ''\n for i in range(3, n):\n last_eq += 'r' + str(i) + '^2+'\n last_eq += 'r' + str(n) + '^2 -1=0;'\n V += last_eq + '\\n'\n f = open('eq.txt', 'w+')\n f.write(V)\n f.write('end')\n f.close()\n\n\n<function token>\n\n\ndef intersting_boxes(curve, b):\n cer_intersting_boxes = []\n uncer_intersting_boxes = []\n for box in curve[0]:\n if b[0][0] <= box[0][0] <= box[0][1] <= b[0][1] and b[1][0] <= box[1][0\n ] <= box[1][1] <= b[1][1]:\n cer_intersting_boxes.append(box)\n for box in curve[1]:\n if b[0][0] <= box[0][0] <= box[0][1] <= b[0][1] and b[1][0] <= box[1][0\n ] <= box[1][1] <= b[1][1]:\n uncer_intersting_boxes.append(box)\n return [cer_intersting_boxes, uncer_intersting_boxes]\n\n\ndef ibex_output(P, B, X):\n os.system('ibexsolve --eps-max=0.1 -s eq.txt > output.txt')\n g = open('output.txt', 'r')\n result = g.readlines()\n T = computing_boxes(result)\n return T\n\n\ndef estimating_t1(components, upper_bound=200000):\n t1 = upper_bound\n t2 = 0\n for box1 in components[0]:\n for box2 in components[1]:\n a = d.distance(box1, box2).lower()\n b = d.distance(box1, box2).upper()\n if t1 > a:\n t1 = a\n if t2 < b:\n t2 = b\n t = d.ftconstructor(t1, t2)\n t = 0.25 * d.power_interval(t, 2)\n return [float(t.lower()), float(t.upper())]\n\n\ndef estimating_t(components, upper_bound=19000.8):\n t1 = upper_bound\n t2 = 0\n for box1 in components[0]:\n for box2 in components[1]:\n a = d.distance(box1[2:], box2[2:])\n if t1 > a[0]:\n t1 = a[0]\n if t2 < a[1]:\n t2 = a[1]\n t = d.ftconstructor(t1, t2)\n t = 0.25 * d.power_interval(t, 2)\n return [float(t.lower()), float(t.upper())]\n\n\ndef boxes_compare(box1, box2):\n flage = 0\n for i in range(len(box1) - 1, -1, -1):\n if box1[i][0] > box2[i][0]:\n return 1\n if box1[i][0] < box2[i][0]:\n return -1\n return 0\n\n\ndef boxes_sort(boxes):\n sorted_boxes = boxes[:]\n for i in range(len(boxes) - 1):\n for j in range(i + 1, len(boxes)):\n if boxes_compare(sorted_boxes[i], sorted_boxes[j]) == 1:\n sorted_boxes[i], sorted_boxes[j] = sorted_boxes[j\n ], sorted_boxes[i]\n return sorted_boxes\n\n\ndef connected_compnants(boxes):\n ftboxes = boxes[:]\n components = [[ftboxes[0]]]\n for i in range(1, len(ftboxes)):\n boxi_isused = 0\n for j in range(len(components)):\n membership = 0\n for k in range(len(components[j])):\n if d.boxes_intersection(ftboxes[i], components[j][k]) != []:\n components[j].append(ftboxes[i])\n membership = 1\n boxi_isused = 1\n break\n if membership == 1:\n break\n if boxi_isused == 0:\n components.append([ftboxes[i]])\n unused = list(range(len(components)))\n components1 = components[:]\n components2 = []\n while len(components1) != len(components2):\n for i in unused:\n for j in [j for j in list(range(i + 1, len(components))) if j in\n unused]:\n intersection_exists = False\n is_looping = True\n for boxi in components[i]:\n for boxj in components[j]:\n if d.boxes_intersection(boxi, boxj) != []:\n is_looping = False\n intersection_exists = True\n break\n if is_looping == False:\n break\n if intersection_exists == True:\n components[i] += components[j]\n unused.remove(j)\n components2 = components1[:]\n components1 = [components[k] for k in unused]\n return components1\n\n\ndef planner_connected_compnants(boxes):\n if len(boxes) == 0:\n return []\n ftboxes = boxes[:]\n components = [[ftboxes[0]]]\n for i in range(1, len(ftboxes)):\n boxi_isused = 0\n for j in range(len(components)):\n membership = 0\n for k in range(len(components[j])):\n if d.boxes_intersection(ftboxes[i][:2], components[j][k][:2]\n ) != []:\n components[j].append(ftboxes[i])\n membership = 1\n boxi_isused = 1\n break\n if membership == 1:\n break\n if boxi_isused == 0:\n components.append([ftboxes[i]])\n unused = list(range(len(components)))\n components1 = components[:]\n components2 = []\n while len(components1) != len(components2):\n for i in unused:\n for j in [j for j in list(range(i + 1, len(components))) if j in\n unused]:\n intersection_exists = False\n is_looping = True\n for boxi in components[i]:\n for boxj in components[j]:\n if d.boxes_intersection(boxi[:2], boxj[:2]) != []:\n is_looping = False\n intersection_exists = True\n break\n if is_looping == False:\n break\n if intersection_exists == True:\n components[i] += components[j]\n unused.remove(j)\n components2 = components1[:]\n components1 = [components[k] for k in unused]\n return components1\n\n\n<function token>\n<function token>\n\n\ndef intersect_in_2D(class1, class2, monotonicity=1):\n pl_intesected_pairs = []\n if monotonicity == 1:\n for i in range(len(class1)):\n for j in range(len(class2)):\n if d.boxes_intersection(class1[i][:2], class2[j][:2]) != [\n ] and d.boxes_intersection(class1[i], class2[j]) == []:\n if [class2[j], class1[i]] not in pl_intesected_pairs:\n pl_intesected_pairs.append([class1[i], class2[j]])\n elif monotonicity == 0:\n for i in range(len(class1)):\n for j in range(len(class2)):\n if d.boxes_intersection(class1[i][:2], class2[j][:2]) != []:\n if [class2[j], class1[i]] not in pl_intesected_pairs:\n pl_intesected_pairs.append([class1[i], class2[j]])\n elif monotonicity == 2:\n inters_indic = []\n for i in range(len(class1)):\n inters_indic.append([])\n for j in range(len(class2)):\n if d.boxes_intersection(class1[i][:2], class2[j][:2]) != []:\n inters_indic[i] = inters_indic[i] + [j]\n for k in range(len(class1)):\n if len(inters_indic[k]) > 3:\n for j in range(len(inters_indic[k])):\n if [class2[j], class1[k]] not in pl_intesected_pairs:\n pl_intesected_pairs.append([class1[k], class2[j]])\n return pl_intesected_pairs\n\n\ndef solving_fornodes(equations, boxes, B, X, eps=0.1):\n plane_components = detecting_nodes(boxes, B, equations, X, eps)\n g = open(equations, 'r')\n P = [Pi.replace('\\n', '') for Pi in g.readlines()]\n Ball_solutions = []\n for plane_component in plane_components:\n x1 = float(min([ai[0].lower() for ai in plane_component]))\n x2 = float(max([ai[0].upper() for ai in plane_component]))\n y1 = float(min([ai[1].lower() for ai in plane_component]))\n y2 = float(max([ai[1].upper() for ai in plane_component]))\n components = connected_compnants(plane_component)\n r = [[float(ri[0]), float(ri[1])] for ri in estimating_r(components)]\n t = estimating_t(components)\n t = [float(t[0]), float(t[1])]\n B_Ball = [[x1, x2], [y1, y2]] + r + [t]\n Ball_generating_system(P, B_Ball, X)\n solutionsi = ibex_output(P, B_Ball, X)\n Ball_solutions += solutionsi\n return Ball_solutions\n\n\ndef normal_subdivision(B):\n ft_B = d.subdivide([d.ftconstructor(Bi[0], Bi[1]) for Bi in B[:]])\n return [d.ft_normal(Bi) for Bi in ft_B]\n\n\ndef plane_subdivision(B):\n ft_B2 = d.subdivide([d.ftconstructor(Bi[0], Bi[1]) for Bi in B[:2]])\n normal_B2 = [d.ft_normal(Bi) for Bi in ft_B2]\n return d.cartesian_product(normal_B2, [B[2:]])\n\n\ndef system_generator(f, B, X):\n g = open(f, 'r')\n L = g.readlines()\n g.close()\n f = open('eq.txt', 'w+')\n f.write('Variables \\n')\n for i in range(len(X)):\n f.write(str(X[i]) + ' in ' + str(B[i]) + ' ; \\n')\n f.write('Constraints \\n')\n for Li in L:\n f.write(Li.replace('\\n', '') + '=0; \\n')\n f.write('end ')\n f.close()\n return f\n\n\n<function token>\n\n\ndef computing_boxes():\n if 'infeasible' in open('output.txt', 'r').read():\n return 'Empty'\n content = open('output.txt', 'r').readlines()\n cer = []\n uncer = []\n i = 0\n Answer = []\n for fi in content:\n try:\n a = fi.index('(')\n b = fi.index(')')\n T = fi[a:b + 1].replace('(', '[')\n T = fi[a:b + 1].replace('(', '[')\n T = T.replace(')', ']')\n T = T.split(';')\n E = []\n i = 0\n for Ti in T:\n Ti = Ti.replace('[', '')\n Ti = Ti.replace(']', '')\n Ti = Ti.replace('<', '')\n Ti = Ti.replace('>', '')\n x = Ti.index(',')\n a = float(Ti[:x])\n b = float(Ti[x + 1:])\n E.append([])\n E[i] = [a, b]\n i += 1\n if 'solution n' in fi or 'boundary n' in fi:\n cer.append(E)\n elif 'unknown n' in fi:\n uncer.append(E)\n except ValueError:\n pass\n return [cer, uncer]\n\n\ndef enclosing_curve(system, B, X, eps_min=0.1, eps_max=0.1):\n L = [B]\n certified_boxes = []\n uncertified_boxes = []\n while len(L) != 0:\n system_generator(system, L[0], X)\n os.system('ibexsolve --eps-max=' + str(eps_max) + ' --eps-min=' +\n str(eps_min) + ' -s eq.txt > output.txt')\n content = open('output.txt', 'r').readlines()\n ibex_output = computing_boxes()\n if ibex_output == [[], []] and max([(Bi[1] - Bi[0]) for Bi in L[0]]\n ) < eps_min:\n uncertified_boxes.append(L[0])\n L.remove(L[0])\n elif ibex_output == [[], []]:\n children = plane_subdivision(L[0])\n L.remove(L[0])\n L += children\n elif ibex_output == 'Empty':\n L.remove(L[0])\n else:\n if len(ibex_output[0]) != 0:\n certified_boxes += ibex_output[0]\n if len(ibex_output[1]) != 0:\n uncertified_boxes += ibex_output[1]\n L.remove(L[0])\n return [certified_boxes, uncertified_boxes]\n\n\ndef loopsfree_checker(f, certified_boxes, uncer_boxes, P):\n L = eval_file_gen(f, certified_boxes, X)\n while L.replace('\\n', '') != '[]':\n L = L.replace('[', '')\n L = L.replace(']', '')\n L = L.replace('\\n', '')\n L = L.split(',')\n for i in L:\n children = normal_subdivision(certified_boxes[int(i)])\n certified_boxes.remove(certified_boxes[int(i)])\n for child in children:\n cer_children, uncer_children = enclosing_curve(f, child, X)\n certified_boxes += cer_children\n uncer_boxes += uncer_children\n L = eval_file_gen(f, certified_boxes, X)\n return L\n\n\ndef eval_file_gen(f, boxes, X, special_function=[]):\n functions = ['sin', 'cos', 'tan', 'exp'] + special_function\n if len(boxes[0]) == 0:\n return []\n n = len(boxes[0])\n m = len(boxes)\n g = open(f, 'r')\n P_str = g.readlines()\n P_str = [Pi.replace('\\n', '') for Pi in P_str]\n P_str = [Pi.replace('^', '**') for Pi in P_str]\n P_exp = [parse_expr(Pi) for Pi in P_str]\n jac = sp.Matrix(P_str).jacobian(sp.Matrix(X))\n minor1 = jac[:, 1:].det()\n minor2 = jac[:, [i for i in range(n) if i != 1]].det()\n fil = open('evaluation_file1.py', 'w')\n fil.write('import flint as ft \\n')\n fil.write('import sympy as sp \\n')\n fil.write('import interval_arithmetic as d \\n')\n fil.write('boxes=' + str(boxes) + '\\n')\n fil.write(\n 'ftboxes=[ [d.ftconstructor(Bi[0],Bi[1]) for Bi in B ] for B in boxes ] \\n'\n )\n fil.write('n=len(boxes[0])\\n')\n fil.write('m=len(boxes)\\n')\n fil.write('m1=[]\\n')\n fil.write('m2=[]\\n')\n minor1_str = str(minor1)\n minor2_str = str(minor2)\n for i in range(n):\n minor1_str = minor1_str.replace('x' + str(i + 1), 'B[' + str(i) + ']')\n minor2_str = minor2_str.replace('x' + str(i + 1), 'B[' + str(i) + ']')\n for func in functions:\n minor1_str = minor1_str.replace(func, 'ft.arb.' + func)\n minor2_str = minor2_str.replace(func, 'ft.arb.' + func)\n fil.write('for B in ftboxes: \\n')\n fil.write(' m1.append(ft.arb(' + minor1_str + ')) \\n')\n fil.write(' m2.append( ft.arb(' + minor2_str + ')) \\n')\n fil.write(\n 'innrer_loops=[i for i in range(m) if 0 in m1[i] and 0 in m2[i] ]\\n')\n fil.write('print(innrer_loops)\\n')\n fil.close()\n t = os.popen('python3 evaluation_file1.py ').read()\n return t\n\n\ndef boxes_classifier(system, boxes, X, special_function=[]):\n if len(boxes[0]) == 0:\n return [[], [], boxes[1]]\n certified_boxes, uncer_boxes = boxes\n L = eval_file_gen(system, certified_boxes, X)\n if L == []:\n return [[], [], uncer_boxes]\n it = 0\n L = L.replace('[', '')\n L = L.replace(']', '')\n L = L.replace('\\n', '')\n L = L.split(',')\n if L != ['']:\n L = [int(li) for li in L]\n return [[certified_boxes[i] for i in range(len(certified_boxes)) if\n i not in L], [certified_boxes[i] for i in L], uncer_boxes]\n else:\n return [[certified_boxes[i] for i in range(len(certified_boxes)) if\n i not in L], [], uncer_boxes]\n\n\ndef projection_checker(solutions):\n if len(solutions) == 0:\n return [[], []]\n m = len(solutions[0])\n n = int((m + 1) / 2)\n intersect_in2d = [[]] * len(solutions)\n for i in range(len(solutions) - 1):\n for j in range(i + 1, len(solutions)):\n if solutions[i] == solutions[j]:\n continue\n elif d.boxes_intersection(solutions[i][:2], solutions[j][:2]) != [\n ] and (d.boxes_intersection(solutions[i][n:2 * n - 2], [[-\n Bi[1], -Bi[0]] for Bi in solutions[j][n:2 * n - 2]]) == [] and\n d.boxes_intersection(solutions[i][n:2 * n - 2], [[Bi[0], Bi\n [1]] for Bi in solutions[j][n:2 * n - 2]]) == [] or d.\n boxes_intersection(solutions[i][2:n] + [solutions[i][2 * n -\n 2]], solutions[j][2:n] + [solutions[j][2 * n - 2]]) == []):\n intersect_in2d[i] = intersect_in2d[i] + [j]\n accepted = []\n acc_ind = []\n unaccepted = []\n unacc_ind = []\n for i in range(len(solutions)):\n if len(intersect_in2d[i]) == 0 and i not in unacc_ind + acc_ind:\n accepted.append(solutions[i])\n acc_ind.append(i)\n continue\n elif i not in unacc_ind + acc_ind:\n unaccepted.append(solutions[i])\n unacc_ind.append(i)\n for k in intersect_in2d[i]:\n if k not in unacc_ind:\n unaccepted.append(solutions[k])\n unacc_ind.append(k)\n return [accepted, unaccepted]\n\n\ndef Ball_given_2nboxes(system, X, B1, B2, monotonicity_B1=1, monotonicity_B2=1\n ):\n B1_ft = [d.ftconstructor(Bi[0], Bi[1]) for Bi in B1]\n B2_ft = [d.ftconstructor(Bi[0], Bi[1]) for Bi in B2]\n P = [Pi.replace('\\n', '') for Pi in open(system, 'r').readlines()]\n sol = 'Empty'\n if d.boxes_intersection(B1_ft, B2_ft) == [\n ] and monotonicity_B1 == monotonicity_B2 == 1:\n t = estimating_t([[B1_ft], [B2_ft]])\n y_and_r = estimating_yandr([[B1_ft], [B2_ft]])\n intersec_B1B2_in2d = d.boxes_intersection(B1_ft[:2], B2_ft[:2])\n intersec_B1B2_in2d = [[float(Bi.lower()), float(Bi.upper())] for Bi in\n intersec_B1B2_in2d]\n B_Ball = intersec_B1B2_in2d + y_and_r + [t]\n Ball_node_gen(system, B_Ball, X)\n os.system('ibexsolve --eps-max=0.1 -s eq.txt > output.txt')\n sol = computing_boxes()\n return sol\n\n\ndef all_pairs_oflist(L):\n pairs = []\n for i in range(len(L) - 1):\n for j in range(i + 1, len(L)):\n pairs.append([L[i], L[j]])\n return pairs\n\n\ndef checking_assumptions(curve_data):\n if len(curve_data[0][1]) != 0:\n return 0\n Ball_sols_ft = [[d.ftconstructor(Bi[0], Bi[1]) for Bi in B] for B in\n curve_data[1][0]] + [[d.ftconstructor(Bi[0], Bi[1]) for Bi in B] for\n B in curve_data[1][1]]\n alph3 = assum_alph3_checker(Ball_sols_ft)\n if alph3 == 1:\n return 1\n else:\n return 0\n\n\ndef csv_saver(L, type_L='Ball'):\n dic = []\n if type_L == 'Ball':\n n = int((len(L[0]) + 1) / 2)\n for j in range(len(L)):\n dic.append({})\n for i in range(n):\n dic[j]['x' + str(i + 1)] = L[j][i]\n for i in range(n, 2 * n - 2):\n dic[j]['r' + str(i + 3 - n)] = L[j][i]\n dic[j]['t'] = L[j][2 * n - 2]\n return dic\n\n\ndef dict2csv(dictlist, csvfile):\n \"\"\"\n Takes a list of dictionaries as input and outputs a CSV file.\n \"\"\"\n f = open(csvfile, 'wb')\n fieldnames = dictlist[0].keys()\n csvwriter = csv.DictWriter(f, delimiter=',', fieldnames=fieldnames)\n csvwriter.writerow(dict((fn, fn) for fn in fieldnames))\n for row in dictlist:\n csvwriter.writerow(row)\n fn.close()\n\n\ndef assum_alph3_checker(solutions):\n comparing_list = [[]] * len(solutions)\n for i in range(len(solutions) - 1):\n for j in range(i + 1, len(solutions)):\n if d.boxes_intersection(solutions[i][:2], solutions[j][:2]) != []:\n comparing_list[i].append(j)\n comparing_list[j].append(i)\n matching = [len(T) for T in comparing_list]\n if max(matching) <= 2:\n return 1\n else:\n return 0\n\n\ndef plotting_3D(boxes, Box, var=[0, 1, 2]):\n ax = plt.figure().add_subplot(111, projection='3d')\n ax.set_xlim(Box[0][0], Box[0][1])\n ax.set_ylim(Box[1][0], Box[1][1])\n ax.set_zlim(Box[2][0], Box[2][1])\n ax.set_xlabel('x' + str(var[0] + 1))\n ax.set_ylabel('x' + str(var[1] + 1))\n ax.set_zlabel('x' + str(var[2] + 1))\n for box in boxes:\n V = [[box[j][0] for j in range(3)], [box[j][1] for j in range(3)]]\n points = list(itertools.product(*box))\n faces = [[points[0], points[2], points[6], points[4]], [points[0],\n points[2], points[3], points[1]], [points[0], points[1], points\n [5], points[4]], [points[2], points[3], points[7], points[6]],\n [points[1], points[3], points[7], points[5]]]\n ax.add_collection3d(Poly3DCollection(faces, facecolors='green',\n linewidths=1, edgecolors='green', alpha=0.25))\n plt.show()\n\n\ndef enclosing_singularities(system, boxes, B, X, eps_max=0.1, eps_min=0.01):\n combin = []\n ball = []\n start_combin = time.time()\n n = len(B)\n P = [Pi.replace('\\n', '') for Pi in open(system, 'r').readlines()]\n certified_boxes, uncertified_boxes = boxes\n classes = boxes_classifier(system, boxes, X, special_function=[])\n cer_Solutions = []\n uncer_Solutions = []\n H = []\n mon_mid = [[(0.5 * (Bij[1] + Bij[0])) for Bij in Bi[:2]] for Bi in\n classes[0]]\n mon_rad = [max([(0.5 * (Bij[1] - Bij[0])) for Bij in Bi[:2]]) for Bi in\n classes[0]]\n tree = spatial.KDTree(mon_mid)\n intersting_boxes = [tree.query_ball_point(m, r=math.sqrt(2) * r) for m,\n r in zip(mon_mid, mon_rad)]\n \"\"\"for i in range(len(ball)): \n for j in ball[i]:\n if i not in ball[j]:\n ball[j].append(i)\"\"\"\n intersting_boxes = [indi for indi in intersting_boxes if len(indi) > 3]\n discarded_components = []\n for i in range(len(intersting_boxes) - 1):\n for_i_stop = 0\n boxi_set = set(intersting_boxes[i])\n for j in range(i + 1, len(intersting_boxes)):\n boxj_set = set(intersting_boxes[j])\n if boxj_set.issubset(boxi_set):\n discarded_components.append(j)\n elif boxi_set < boxj_set:\n discarded_components.append(i)\n intersting_boxes = [intersting_boxes[i] for i in range(len(\n intersting_boxes)) if i not in discarded_components]\n interesting_boxes_flattened = []\n for Box_ind in intersting_boxes:\n for j in Box_ind:\n if j not in interesting_boxes_flattened:\n interesting_boxes_flattened.append(j)\n plane_components = planner_connected_compnants([classes[0][i] for i in\n interesting_boxes_flattened])\n end_combin = time.time()\n combin.append(end_combin - start_combin)\n H = []\n for plane_component in plane_components:\n if len(plane_component) > 1:\n start_combin = time.time()\n components = connected_compnants(plane_component)\n pairs_of_branches = all_pairs_oflist(components)\n end_combin = time.time()\n combin.append(end_combin - start_combin)\n for pair_branches in pairs_of_branches:\n start_ball = time.time()\n all_boxes = pair_branches[0] + pair_branches[1]\n uni = []\n for box in all_boxes:\n uni = d.box_union(uni, box)\n t = estimating_t(pair_branches)\n t1 = d.ftconstructor(t[0], t[1])\n t = [float(t1.lower()), float(t1.upper())]\n r = [[float(ri[0]), float(ri[1])] for ri in\n estimating_yandr(pair_branches)]\n B_Ball = uni[:2] + r + [t]\n cusp_Ball_solver(P, B_Ball, X)\n Ball_generating_system(P, B_Ball, X, eps_min)\n os.system('ibexsolve --eps-max=' + str(eps_max) +\n ' --eps-min=' + str(eps_min) + ' -s eq.txt > output.txt'\n )\n Solutions = computing_boxes()\n if Solutions != 'Empty' and Solutions != [[], []]:\n cer_Solutions += Solutions[0]\n uncer_Solutions += Solutions[1]\n if Solutions == [[], []]:\n if d.width(B_Ball[:2]) > eps_min:\n new_B = B_Ball[:2] + B[2:n]\n new_boxes = enclosing_curve(system, new_B, X,\n eps_max=0.1 * eps_max)\n resul = enclosing_singularities(system, new_boxes,\n new_B, X, eps_max=0.1 * eps_max)\n cer_Solutions += resul[0] + resul[1]\n uncer_Solutions += resul[2]\n boxes[1] += new_boxes[1]\n else:\n uncer_Solutions.append(B_Ball)\n end_ball = time.time()\n ball.append(end_ball - start_ball)\n start_combin = time.time()\n checked_boxes = []\n all_boxes = boxes[0] + boxes[1]\n checked_boxes = []\n mon_mid_cusp = [[(0.5 * (Bij[1] + Bij[0])) for Bij in Bi[:2]] for Bi in\n classes[1]]\n mon_rad_cusp = [max([(0.5 * (Bij[1] - Bij[0])) for Bij in Bi[:2]]) for\n Bi in classes[1]]\n potential_cusps = [tree.query_ball_point(m, r=math.sqrt(2) * (r +\n eps_max)) for m, r in zip(mon_mid_cusp, mon_rad_cusp)]\n end_combin = time.time()\n combin.append(end_combin - start_combin)\n for cusp_indx in range(len(classes[1])):\n start_combin = time.time()\n intersecting_boxes = [all_boxes[i] for i in potential_cusps[\n cusp_indx] if d.boxes_intersection(all_boxes[i], classes[1][\n cusp_indx]) != []]\n H = []\n uni = classes[1][cusp_indx][:]\n potential_cusp = classes[1][cusp_indx][:]\n checked_boxes.append(potential_cusp)\n for box in intersecting_boxes:\n if box in checked_boxes:\n continue\n uni = d.box_union(uni, box)\n checked_boxes.append(box)\n end_combin = time.time()\n combin.append(end_combin - start_combin)\n start_ball = time.time()\n t = estimating_t([[potential_cusp], [potential_cusp]])\n \"\"\"if t[1]-t[0] < 1e-07:\n t[0]=t[0]-0.5 * eps_min\n t[1]=t[1]+0.5 * eps_min\"\"\"\n B_Ball = uni + [[-1.01, 1.01]] * (n - 2) + [t]\n H.append(B_Ball)\n sol = cusp_Ball_solver(P, B_Ball, X)\n if sol != 'Empty' and sol != [[], []]:\n cer_Solutions += sol[0]\n uncer_Solutions += sol[1]\n if sol == [[], []]:\n uncer_Solutions.append(B_Ball)\n end_ball = time.time()\n ball.append(end_ball - start_ball)\n start_combin = time.time()\n non_intersecting_boxes = [all_boxes[i] for i in potential_cusps[\n cusp_indx] if d.boxes_intersection(all_boxes[i], classes[1][\n cusp_indx]) == []]\n end_combin = time.time()\n combin.append(end_combin - start_combin)\n for aligned in non_intersecting_boxes:\n start_ball = time.time()\n if aligned in checked_boxes:\n continue\n boxes_intersect_aligned = [B for B in non_intersecting_boxes if\n d.boxes_intersection(aligned, B) != []]\n uni = aligned[:]\n for boxi in boxes_intersect_aligned:\n if boxi in checked_boxes:\n continue\n uni = d.box_union(uni, boxi)\n checked_boxes.append(boxi)\n t = estimating_t([[potential_cusp], [uni]])\n \"\"\"if t[1]-t[0] < 1e-07:\n t[0]=t[0]-0.5 * eps_min\n t[1]=t[1]+0.5 * eps_min\"\"\"\n r = [[float(ri[0]), float(ri[1])] for ri in estimating_yandr([[\n potential_cusp], [uni]])]\n B_Ball = potential_cusp[:2] + r + [t]\n H.append(H)\n Ball_generating_system(P, B_Ball, X)\n os.system('ibexsolve --eps-max=' + str(eps_max) +\n ' --eps-min=' + str(eps_min) + ' -s eq.txt > output.txt')\n Solutions = computing_boxes()\n if Solutions != 'Empty':\n cer_Solutions += Solutions[0]\n uncer_Solutions += Solutions[1]\n elif Solutions == [[], []]:\n uncer_Solutions.append(B_Ball)\n end_ball = time.time()\n ball.append(end_ball - start_ball)\n nodes = []\n cups_or_smallnodes = []\n start_combin = time.time()\n checker = projection_checker(cer_Solutions)\n uncer_Solutions = uncer_Solutions + checker[1]\n cer_Solutions = [Bi for Bi in checker[0] if Bi[2 * n - 2][1] >= 0]\n for solution in cer_Solutions:\n if 0 >= solution[2 * n - 2][0] and 0 <= solution[2 * n - 2][1]:\n cups_or_smallnodes.append(solution)\n else:\n nodes.append(solution)\n end_combin = time.time()\n combin.append(end_combin - start_combin)\n print('KDtree ', sum(combin), 'Ball ', sum(ball))\n return [nodes, cups_or_smallnodes, uncer_Solutions]\n\n\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<docstring token>\n",
"<import token>\n\n\ndef ploting_boxes(boxes, uncer_boxes, var=[0, 1], B=[[-20, 20], [-20, 20]],\n x=0.1, nodes=[], cusps=[], uncer_Solutions=[], Legend=False, color=\n 'green', variabel_name='x'):\n fig, ax = plt.subplots()\n ax.set_xlim(B[0][0], B[0][1])\n ax.set_ylim(B[1][0], B[1][1])\n ax.set_xlabel(variabel_name + str(1))\n ax.set_ylabel(variabel_name + str(2))\n \"\"\"try:\n ax.title(open(\"system.txt\",\"r\").read())\n except:\n pass\"\"\"\n c = 0\n green_patch = mpatches.Patch(color=color, label='smooth part')\n red_patch = mpatches.Patch(color='red', label='unknown part')\n node_patch = mpatches.Patch(color='black', label='Certified nodes',\n fill=None)\n cusp_patch = mpatches.Patch(color='blue', label=\n 'Projection of certified solution with t=0 ', fill=None)\n if Legend == True:\n plt.legend(handles=[green_patch, red_patch, node_patch, cusp_patch])\n for box in boxes:\n rectangle = plt.Rectangle((box[var[0]][0], box[var[1]][0]), box[var\n [0]][1] - box[var[0]][0], box[var[1]][1] - box[var[1]][0],\n color=color)\n plt.gca().add_patch(rectangle)\n for box in uncer_boxes:\n rectangle = plt.Rectangle((box[var[0]][0], box[var[1]][0]), box[var\n [0]][1] - box[var[0]][0], box[var[1]][1] - box[var[1]][0], fc='r')\n plt.gca().add_patch(rectangle)\n for box in nodes:\n rectangle = plt.Rectangle((box[0][0] - x, box[1][0] - x), 2 * x +\n box[0][1] - box[0][0], 2 * x + box[1][1] - box[1][0], fc='y',\n fill=None)\n plt.gca().add_patch(rectangle)\n for box in cusps:\n rectangle = plt.Rectangle((box[0][0] - x, box[1][0] - x), 2 * x +\n box[0][1] - box[0][0], 2 * x + box[1][1] - box[1][0], fc='y',\n color='blue', fill=None)\n plt.gca().add_patch(rectangle)\n for box in uncer_Solutions:\n rectangle = plt.Rectangle((box[0][0] - x, box[1][0] - x), 2 * x +\n box[0][1] - box[0][0], 2 * x + box[1][1] - box[1][0], fc='y',\n color='red', fill=None)\n plt.gca().add_patch(rectangle)\n plt.savefig('fig.jpg', dpi=1000)\n plt.show()\n\n\n<function token>\n\n\ndef Ball_solver(equations, B_Ball, X):\n L = [B_Ball]\n certified_boxes = []\n uncertified_boxes = []\n n = len(X)\n while len(L) != 0:\n solvability = 1\n if B_Ball[2 * n - 2][0] <= 0 <= B_Ball[2 * n - 2][1] and d.width([d\n .ftconstructor(Bi[0], Bi[1]) for Bi in L[0]]) < 0.1:\n Ball_cusp_gen(equations, B_Ball, X)\n elif (B_Ball[2 * n - 2][0] > 0 or 0 > B_Ball[2 * n - 2][1]\n ) and d.width([d.ftconstructor(Bi[0], Bi[1]) for Bi in L[0]]\n ) < 0.1:\n Ball_node_gen(equations, B_Ball, X)\n else:\n children = cb.plane_subdivision(L[0])\n L.remove(L[0])\n L += children\n solvability = 0\n if solvability == 1:\n ibex_output = cb.solving_with_ibex()\n if ibex_output[0] == 'Empty':\n L.remove(L[0])\n elif len(ibex_output[0]) != 0:\n certified_boxes += cb.computing_boxes(ibex_output[0])\n L.remove(L[0])\n elif len(ibex_output[1]) != 0:\n uncertified_boxes += cb.computing_boxes(ibex_output[1])\n L.remove(L[0])\n else:\n children = cb.plane_subdivision(L[0])\n L.remove(L[0])\n L += children\n return [certified_boxes, uncertified_boxes]\n\n\ndef SDP_str(P, X):\n n = len(X)\n P_pluse = P[:]\n P_minus = P[:]\n for i in range(2, n):\n P_pluse = P_pluse.replace('x' + str(i + 1), '(x' + str(i + 1) +\n '+ r' + str(i + 1) + '*sqrt(t))')\n P_minus = P_minus.replace('x' + str(i + 1), '(x' + str(i + 1) +\n '- r' + str(i + 1) + '*sqrt(t))')\n SP = '0.5*(' + P_pluse + '+' + P_minus + ')=0; \\n'\n DP = '0.5*(' + P_pluse + '- (' + P_minus + ') )/(sqrt(t))=0; \\n'\n return [SP, DP]\n\n\ndef Ball_generating_system(P, B_Ball, X, eps_min=0.001):\n n = len(X)\n V = ' Variables \\n '\n for i in range(n):\n if B_Ball[i][0] != B_Ball[i][1]:\n V += 'x' + str(i + 1) + ' in ' + str(B_Ball[i]) + ' ; \\n'\n else:\n V += 'x' + str(i + 1) + ' in ' + str([B_Ball[i][0] - eps_min, \n B_Ball[i][1] + eps_min]) + ' ; \\n'\n for i in range(n, 2 * n - 2):\n V += 'r' + str(i - n + 3) + ' in ' + str(B_Ball[i]) + ' ; \\n'\n V += 't' + ' in ' + str(B_Ball[2 * n - 2]) + ' ; \\n'\n V += 'Constraints \\n'\n for Pi in P:\n V += SDP_str(Pi, X)[0]\n V += SDP_str(Pi, X)[1]\n last_eq = ''\n for i in range(3, n):\n last_eq += 'r' + str(i) + '^2+'\n last_eq += 'r' + str(n) + '^2 -1=0;'\n V += last_eq + '\\n'\n f = open('eq.txt', 'w+')\n f.write(V)\n f.write('end')\n f.close()\n\n\n<function token>\n\n\ndef intersting_boxes(curve, b):\n cer_intersting_boxes = []\n uncer_intersting_boxes = []\n for box in curve[0]:\n if b[0][0] <= box[0][0] <= box[0][1] <= b[0][1] and b[1][0] <= box[1][0\n ] <= box[1][1] <= b[1][1]:\n cer_intersting_boxes.append(box)\n for box in curve[1]:\n if b[0][0] <= box[0][0] <= box[0][1] <= b[0][1] and b[1][0] <= box[1][0\n ] <= box[1][1] <= b[1][1]:\n uncer_intersting_boxes.append(box)\n return [cer_intersting_boxes, uncer_intersting_boxes]\n\n\ndef ibex_output(P, B, X):\n os.system('ibexsolve --eps-max=0.1 -s eq.txt > output.txt')\n g = open('output.txt', 'r')\n result = g.readlines()\n T = computing_boxes(result)\n return T\n\n\ndef estimating_t1(components, upper_bound=200000):\n t1 = upper_bound\n t2 = 0\n for box1 in components[0]:\n for box2 in components[1]:\n a = d.distance(box1, box2).lower()\n b = d.distance(box1, box2).upper()\n if t1 > a:\n t1 = a\n if t2 < b:\n t2 = b\n t = d.ftconstructor(t1, t2)\n t = 0.25 * d.power_interval(t, 2)\n return [float(t.lower()), float(t.upper())]\n\n\ndef estimating_t(components, upper_bound=19000.8):\n t1 = upper_bound\n t2 = 0\n for box1 in components[0]:\n for box2 in components[1]:\n a = d.distance(box1[2:], box2[2:])\n if t1 > a[0]:\n t1 = a[0]\n if t2 < a[1]:\n t2 = a[1]\n t = d.ftconstructor(t1, t2)\n t = 0.25 * d.power_interval(t, 2)\n return [float(t.lower()), float(t.upper())]\n\n\ndef boxes_compare(box1, box2):\n flage = 0\n for i in range(len(box1) - 1, -1, -1):\n if box1[i][0] > box2[i][0]:\n return 1\n if box1[i][0] < box2[i][0]:\n return -1\n return 0\n\n\ndef boxes_sort(boxes):\n sorted_boxes = boxes[:]\n for i in range(len(boxes) - 1):\n for j in range(i + 1, len(boxes)):\n if boxes_compare(sorted_boxes[i], sorted_boxes[j]) == 1:\n sorted_boxes[i], sorted_boxes[j] = sorted_boxes[j\n ], sorted_boxes[i]\n return sorted_boxes\n\n\ndef connected_compnants(boxes):\n ftboxes = boxes[:]\n components = [[ftboxes[0]]]\n for i in range(1, len(ftboxes)):\n boxi_isused = 0\n for j in range(len(components)):\n membership = 0\n for k in range(len(components[j])):\n if d.boxes_intersection(ftboxes[i], components[j][k]) != []:\n components[j].append(ftboxes[i])\n membership = 1\n boxi_isused = 1\n break\n if membership == 1:\n break\n if boxi_isused == 0:\n components.append([ftboxes[i]])\n unused = list(range(len(components)))\n components1 = components[:]\n components2 = []\n while len(components1) != len(components2):\n for i in unused:\n for j in [j for j in list(range(i + 1, len(components))) if j in\n unused]:\n intersection_exists = False\n is_looping = True\n for boxi in components[i]:\n for boxj in components[j]:\n if d.boxes_intersection(boxi, boxj) != []:\n is_looping = False\n intersection_exists = True\n break\n if is_looping == False:\n break\n if intersection_exists == True:\n components[i] += components[j]\n unused.remove(j)\n components2 = components1[:]\n components1 = [components[k] for k in unused]\n return components1\n\n\ndef planner_connected_compnants(boxes):\n if len(boxes) == 0:\n return []\n ftboxes = boxes[:]\n components = [[ftboxes[0]]]\n for i in range(1, len(ftboxes)):\n boxi_isused = 0\n for j in range(len(components)):\n membership = 0\n for k in range(len(components[j])):\n if d.boxes_intersection(ftboxes[i][:2], components[j][k][:2]\n ) != []:\n components[j].append(ftboxes[i])\n membership = 1\n boxi_isused = 1\n break\n if membership == 1:\n break\n if boxi_isused == 0:\n components.append([ftboxes[i]])\n unused = list(range(len(components)))\n components1 = components[:]\n components2 = []\n while len(components1) != len(components2):\n for i in unused:\n for j in [j for j in list(range(i + 1, len(components))) if j in\n unused]:\n intersection_exists = False\n is_looping = True\n for boxi in components[i]:\n for boxj in components[j]:\n if d.boxes_intersection(boxi[:2], boxj[:2]) != []:\n is_looping = False\n intersection_exists = True\n break\n if is_looping == False:\n break\n if intersection_exists == True:\n components[i] += components[j]\n unused.remove(j)\n components2 = components1[:]\n components1 = [components[k] for k in unused]\n return components1\n\n\n<function token>\n<function token>\n\n\ndef intersect_in_2D(class1, class2, monotonicity=1):\n pl_intesected_pairs = []\n if monotonicity == 1:\n for i in range(len(class1)):\n for j in range(len(class2)):\n if d.boxes_intersection(class1[i][:2], class2[j][:2]) != [\n ] and d.boxes_intersection(class1[i], class2[j]) == []:\n if [class2[j], class1[i]] not in pl_intesected_pairs:\n pl_intesected_pairs.append([class1[i], class2[j]])\n elif monotonicity == 0:\n for i in range(len(class1)):\n for j in range(len(class2)):\n if d.boxes_intersection(class1[i][:2], class2[j][:2]) != []:\n if [class2[j], class1[i]] not in pl_intesected_pairs:\n pl_intesected_pairs.append([class1[i], class2[j]])\n elif monotonicity == 2:\n inters_indic = []\n for i in range(len(class1)):\n inters_indic.append([])\n for j in range(len(class2)):\n if d.boxes_intersection(class1[i][:2], class2[j][:2]) != []:\n inters_indic[i] = inters_indic[i] + [j]\n for k in range(len(class1)):\n if len(inters_indic[k]) > 3:\n for j in range(len(inters_indic[k])):\n if [class2[j], class1[k]] not in pl_intesected_pairs:\n pl_intesected_pairs.append([class1[k], class2[j]])\n return pl_intesected_pairs\n\n\ndef solving_fornodes(equations, boxes, B, X, eps=0.1):\n plane_components = detecting_nodes(boxes, B, equations, X, eps)\n g = open(equations, 'r')\n P = [Pi.replace('\\n', '') for Pi in g.readlines()]\n Ball_solutions = []\n for plane_component in plane_components:\n x1 = float(min([ai[0].lower() for ai in plane_component]))\n x2 = float(max([ai[0].upper() for ai in plane_component]))\n y1 = float(min([ai[1].lower() for ai in plane_component]))\n y2 = float(max([ai[1].upper() for ai in plane_component]))\n components = connected_compnants(plane_component)\n r = [[float(ri[0]), float(ri[1])] for ri in estimating_r(components)]\n t = estimating_t(components)\n t = [float(t[0]), float(t[1])]\n B_Ball = [[x1, x2], [y1, y2]] + r + [t]\n Ball_generating_system(P, B_Ball, X)\n solutionsi = ibex_output(P, B_Ball, X)\n Ball_solutions += solutionsi\n return Ball_solutions\n\n\n<function token>\n\n\ndef plane_subdivision(B):\n ft_B2 = d.subdivide([d.ftconstructor(Bi[0], Bi[1]) for Bi in B[:2]])\n normal_B2 = [d.ft_normal(Bi) for Bi in ft_B2]\n return d.cartesian_product(normal_B2, [B[2:]])\n\n\ndef system_generator(f, B, X):\n g = open(f, 'r')\n L = g.readlines()\n g.close()\n f = open('eq.txt', 'w+')\n f.write('Variables \\n')\n for i in range(len(X)):\n f.write(str(X[i]) + ' in ' + str(B[i]) + ' ; \\n')\n f.write('Constraints \\n')\n for Li in L:\n f.write(Li.replace('\\n', '') + '=0; \\n')\n f.write('end ')\n f.close()\n return f\n\n\n<function token>\n\n\ndef computing_boxes():\n if 'infeasible' in open('output.txt', 'r').read():\n return 'Empty'\n content = open('output.txt', 'r').readlines()\n cer = []\n uncer = []\n i = 0\n Answer = []\n for fi in content:\n try:\n a = fi.index('(')\n b = fi.index(')')\n T = fi[a:b + 1].replace('(', '[')\n T = fi[a:b + 1].replace('(', '[')\n T = T.replace(')', ']')\n T = T.split(';')\n E = []\n i = 0\n for Ti in T:\n Ti = Ti.replace('[', '')\n Ti = Ti.replace(']', '')\n Ti = Ti.replace('<', '')\n Ti = Ti.replace('>', '')\n x = Ti.index(',')\n a = float(Ti[:x])\n b = float(Ti[x + 1:])\n E.append([])\n E[i] = [a, b]\n i += 1\n if 'solution n' in fi or 'boundary n' in fi:\n cer.append(E)\n elif 'unknown n' in fi:\n uncer.append(E)\n except ValueError:\n pass\n return [cer, uncer]\n\n\ndef enclosing_curve(system, B, X, eps_min=0.1, eps_max=0.1):\n L = [B]\n certified_boxes = []\n uncertified_boxes = []\n while len(L) != 0:\n system_generator(system, L[0], X)\n os.system('ibexsolve --eps-max=' + str(eps_max) + ' --eps-min=' +\n str(eps_min) + ' -s eq.txt > output.txt')\n content = open('output.txt', 'r').readlines()\n ibex_output = computing_boxes()\n if ibex_output == [[], []] and max([(Bi[1] - Bi[0]) for Bi in L[0]]\n ) < eps_min:\n uncertified_boxes.append(L[0])\n L.remove(L[0])\n elif ibex_output == [[], []]:\n children = plane_subdivision(L[0])\n L.remove(L[0])\n L += children\n elif ibex_output == 'Empty':\n L.remove(L[0])\n else:\n if len(ibex_output[0]) != 0:\n certified_boxes += ibex_output[0]\n if len(ibex_output[1]) != 0:\n uncertified_boxes += ibex_output[1]\n L.remove(L[0])\n return [certified_boxes, uncertified_boxes]\n\n\ndef loopsfree_checker(f, certified_boxes, uncer_boxes, P):\n L = eval_file_gen(f, certified_boxes, X)\n while L.replace('\\n', '') != '[]':\n L = L.replace('[', '')\n L = L.replace(']', '')\n L = L.replace('\\n', '')\n L = L.split(',')\n for i in L:\n children = normal_subdivision(certified_boxes[int(i)])\n certified_boxes.remove(certified_boxes[int(i)])\n for child in children:\n cer_children, uncer_children = enclosing_curve(f, child, X)\n certified_boxes += cer_children\n uncer_boxes += uncer_children\n L = eval_file_gen(f, certified_boxes, X)\n return L\n\n\ndef eval_file_gen(f, boxes, X, special_function=[]):\n functions = ['sin', 'cos', 'tan', 'exp'] + special_function\n if len(boxes[0]) == 0:\n return []\n n = len(boxes[0])\n m = len(boxes)\n g = open(f, 'r')\n P_str = g.readlines()\n P_str = [Pi.replace('\\n', '') for Pi in P_str]\n P_str = [Pi.replace('^', '**') for Pi in P_str]\n P_exp = [parse_expr(Pi) for Pi in P_str]\n jac = sp.Matrix(P_str).jacobian(sp.Matrix(X))\n minor1 = jac[:, 1:].det()\n minor2 = jac[:, [i for i in range(n) if i != 1]].det()\n fil = open('evaluation_file1.py', 'w')\n fil.write('import flint as ft \\n')\n fil.write('import sympy as sp \\n')\n fil.write('import interval_arithmetic as d \\n')\n fil.write('boxes=' + str(boxes) + '\\n')\n fil.write(\n 'ftboxes=[ [d.ftconstructor(Bi[0],Bi[1]) for Bi in B ] for B in boxes ] \\n'\n )\n fil.write('n=len(boxes[0])\\n')\n fil.write('m=len(boxes)\\n')\n fil.write('m1=[]\\n')\n fil.write('m2=[]\\n')\n minor1_str = str(minor1)\n minor2_str = str(minor2)\n for i in range(n):\n minor1_str = minor1_str.replace('x' + str(i + 1), 'B[' + str(i) + ']')\n minor2_str = minor2_str.replace('x' + str(i + 1), 'B[' + str(i) + ']')\n for func in functions:\n minor1_str = minor1_str.replace(func, 'ft.arb.' + func)\n minor2_str = minor2_str.replace(func, 'ft.arb.' + func)\n fil.write('for B in ftboxes: \\n')\n fil.write(' m1.append(ft.arb(' + minor1_str + ')) \\n')\n fil.write(' m2.append( ft.arb(' + minor2_str + ')) \\n')\n fil.write(\n 'innrer_loops=[i for i in range(m) if 0 in m1[i] and 0 in m2[i] ]\\n')\n fil.write('print(innrer_loops)\\n')\n fil.close()\n t = os.popen('python3 evaluation_file1.py ').read()\n return t\n\n\ndef boxes_classifier(system, boxes, X, special_function=[]):\n if len(boxes[0]) == 0:\n return [[], [], boxes[1]]\n certified_boxes, uncer_boxes = boxes\n L = eval_file_gen(system, certified_boxes, X)\n if L == []:\n return [[], [], uncer_boxes]\n it = 0\n L = L.replace('[', '')\n L = L.replace(']', '')\n L = L.replace('\\n', '')\n L = L.split(',')\n if L != ['']:\n L = [int(li) for li in L]\n return [[certified_boxes[i] for i in range(len(certified_boxes)) if\n i not in L], [certified_boxes[i] for i in L], uncer_boxes]\n else:\n return [[certified_boxes[i] for i in range(len(certified_boxes)) if\n i not in L], [], uncer_boxes]\n\n\ndef projection_checker(solutions):\n if len(solutions) == 0:\n return [[], []]\n m = len(solutions[0])\n n = int((m + 1) / 2)\n intersect_in2d = [[]] * len(solutions)\n for i in range(len(solutions) - 1):\n for j in range(i + 1, len(solutions)):\n if solutions[i] == solutions[j]:\n continue\n elif d.boxes_intersection(solutions[i][:2], solutions[j][:2]) != [\n ] and (d.boxes_intersection(solutions[i][n:2 * n - 2], [[-\n Bi[1], -Bi[0]] for Bi in solutions[j][n:2 * n - 2]]) == [] and\n d.boxes_intersection(solutions[i][n:2 * n - 2], [[Bi[0], Bi\n [1]] for Bi in solutions[j][n:2 * n - 2]]) == [] or d.\n boxes_intersection(solutions[i][2:n] + [solutions[i][2 * n -\n 2]], solutions[j][2:n] + [solutions[j][2 * n - 2]]) == []):\n intersect_in2d[i] = intersect_in2d[i] + [j]\n accepted = []\n acc_ind = []\n unaccepted = []\n unacc_ind = []\n for i in range(len(solutions)):\n if len(intersect_in2d[i]) == 0 and i not in unacc_ind + acc_ind:\n accepted.append(solutions[i])\n acc_ind.append(i)\n continue\n elif i not in unacc_ind + acc_ind:\n unaccepted.append(solutions[i])\n unacc_ind.append(i)\n for k in intersect_in2d[i]:\n if k not in unacc_ind:\n unaccepted.append(solutions[k])\n unacc_ind.append(k)\n return [accepted, unaccepted]\n\n\ndef Ball_given_2nboxes(system, X, B1, B2, monotonicity_B1=1, monotonicity_B2=1\n ):\n B1_ft = [d.ftconstructor(Bi[0], Bi[1]) for Bi in B1]\n B2_ft = [d.ftconstructor(Bi[0], Bi[1]) for Bi in B2]\n P = [Pi.replace('\\n', '') for Pi in open(system, 'r').readlines()]\n sol = 'Empty'\n if d.boxes_intersection(B1_ft, B2_ft) == [\n ] and monotonicity_B1 == monotonicity_B2 == 1:\n t = estimating_t([[B1_ft], [B2_ft]])\n y_and_r = estimating_yandr([[B1_ft], [B2_ft]])\n intersec_B1B2_in2d = d.boxes_intersection(B1_ft[:2], B2_ft[:2])\n intersec_B1B2_in2d = [[float(Bi.lower()), float(Bi.upper())] for Bi in\n intersec_B1B2_in2d]\n B_Ball = intersec_B1B2_in2d + y_and_r + [t]\n Ball_node_gen(system, B_Ball, X)\n os.system('ibexsolve --eps-max=0.1 -s eq.txt > output.txt')\n sol = computing_boxes()\n return sol\n\n\ndef all_pairs_oflist(L):\n pairs = []\n for i in range(len(L) - 1):\n for j in range(i + 1, len(L)):\n pairs.append([L[i], L[j]])\n return pairs\n\n\ndef checking_assumptions(curve_data):\n if len(curve_data[0][1]) != 0:\n return 0\n Ball_sols_ft = [[d.ftconstructor(Bi[0], Bi[1]) for Bi in B] for B in\n curve_data[1][0]] + [[d.ftconstructor(Bi[0], Bi[1]) for Bi in B] for\n B in curve_data[1][1]]\n alph3 = assum_alph3_checker(Ball_sols_ft)\n if alph3 == 1:\n return 1\n else:\n return 0\n\n\ndef csv_saver(L, type_L='Ball'):\n dic = []\n if type_L == 'Ball':\n n = int((len(L[0]) + 1) / 2)\n for j in range(len(L)):\n dic.append({})\n for i in range(n):\n dic[j]['x' + str(i + 1)] = L[j][i]\n for i in range(n, 2 * n - 2):\n dic[j]['r' + str(i + 3 - n)] = L[j][i]\n dic[j]['t'] = L[j][2 * n - 2]\n return dic\n\n\ndef dict2csv(dictlist, csvfile):\n \"\"\"\n Takes a list of dictionaries as input and outputs a CSV file.\n \"\"\"\n f = open(csvfile, 'wb')\n fieldnames = dictlist[0].keys()\n csvwriter = csv.DictWriter(f, delimiter=',', fieldnames=fieldnames)\n csvwriter.writerow(dict((fn, fn) for fn in fieldnames))\n for row in dictlist:\n csvwriter.writerow(row)\n fn.close()\n\n\ndef assum_alph3_checker(solutions):\n comparing_list = [[]] * len(solutions)\n for i in range(len(solutions) - 1):\n for j in range(i + 1, len(solutions)):\n if d.boxes_intersection(solutions[i][:2], solutions[j][:2]) != []:\n comparing_list[i].append(j)\n comparing_list[j].append(i)\n matching = [len(T) for T in comparing_list]\n if max(matching) <= 2:\n return 1\n else:\n return 0\n\n\ndef plotting_3D(boxes, Box, var=[0, 1, 2]):\n ax = plt.figure().add_subplot(111, projection='3d')\n ax.set_xlim(Box[0][0], Box[0][1])\n ax.set_ylim(Box[1][0], Box[1][1])\n ax.set_zlim(Box[2][0], Box[2][1])\n ax.set_xlabel('x' + str(var[0] + 1))\n ax.set_ylabel('x' + str(var[1] + 1))\n ax.set_zlabel('x' + str(var[2] + 1))\n for box in boxes:\n V = [[box[j][0] for j in range(3)], [box[j][1] for j in range(3)]]\n points = list(itertools.product(*box))\n faces = [[points[0], points[2], points[6], points[4]], [points[0],\n points[2], points[3], points[1]], [points[0], points[1], points\n [5], points[4]], [points[2], points[3], points[7], points[6]],\n [points[1], points[3], points[7], points[5]]]\n ax.add_collection3d(Poly3DCollection(faces, facecolors='green',\n linewidths=1, edgecolors='green', alpha=0.25))\n plt.show()\n\n\ndef enclosing_singularities(system, boxes, B, X, eps_max=0.1, eps_min=0.01):\n combin = []\n ball = []\n start_combin = time.time()\n n = len(B)\n P = [Pi.replace('\\n', '') for Pi in open(system, 'r').readlines()]\n certified_boxes, uncertified_boxes = boxes\n classes = boxes_classifier(system, boxes, X, special_function=[])\n cer_Solutions = []\n uncer_Solutions = []\n H = []\n mon_mid = [[(0.5 * (Bij[1] + Bij[0])) for Bij in Bi[:2]] for Bi in\n classes[0]]\n mon_rad = [max([(0.5 * (Bij[1] - Bij[0])) for Bij in Bi[:2]]) for Bi in\n classes[0]]\n tree = spatial.KDTree(mon_mid)\n intersting_boxes = [tree.query_ball_point(m, r=math.sqrt(2) * r) for m,\n r in zip(mon_mid, mon_rad)]\n \"\"\"for i in range(len(ball)): \n for j in ball[i]:\n if i not in ball[j]:\n ball[j].append(i)\"\"\"\n intersting_boxes = [indi for indi in intersting_boxes if len(indi) > 3]\n discarded_components = []\n for i in range(len(intersting_boxes) - 1):\n for_i_stop = 0\n boxi_set = set(intersting_boxes[i])\n for j in range(i + 1, len(intersting_boxes)):\n boxj_set = set(intersting_boxes[j])\n if boxj_set.issubset(boxi_set):\n discarded_components.append(j)\n elif boxi_set < boxj_set:\n discarded_components.append(i)\n intersting_boxes = [intersting_boxes[i] for i in range(len(\n intersting_boxes)) if i not in discarded_components]\n interesting_boxes_flattened = []\n for Box_ind in intersting_boxes:\n for j in Box_ind:\n if j not in interesting_boxes_flattened:\n interesting_boxes_flattened.append(j)\n plane_components = planner_connected_compnants([classes[0][i] for i in\n interesting_boxes_flattened])\n end_combin = time.time()\n combin.append(end_combin - start_combin)\n H = []\n for plane_component in plane_components:\n if len(plane_component) > 1:\n start_combin = time.time()\n components = connected_compnants(plane_component)\n pairs_of_branches = all_pairs_oflist(components)\n end_combin = time.time()\n combin.append(end_combin - start_combin)\n for pair_branches in pairs_of_branches:\n start_ball = time.time()\n all_boxes = pair_branches[0] + pair_branches[1]\n uni = []\n for box in all_boxes:\n uni = d.box_union(uni, box)\n t = estimating_t(pair_branches)\n t1 = d.ftconstructor(t[0], t[1])\n t = [float(t1.lower()), float(t1.upper())]\n r = [[float(ri[0]), float(ri[1])] for ri in\n estimating_yandr(pair_branches)]\n B_Ball = uni[:2] + r + [t]\n cusp_Ball_solver(P, B_Ball, X)\n Ball_generating_system(P, B_Ball, X, eps_min)\n os.system('ibexsolve --eps-max=' + str(eps_max) +\n ' --eps-min=' + str(eps_min) + ' -s eq.txt > output.txt'\n )\n Solutions = computing_boxes()\n if Solutions != 'Empty' and Solutions != [[], []]:\n cer_Solutions += Solutions[0]\n uncer_Solutions += Solutions[1]\n if Solutions == [[], []]:\n if d.width(B_Ball[:2]) > eps_min:\n new_B = B_Ball[:2] + B[2:n]\n new_boxes = enclosing_curve(system, new_B, X,\n eps_max=0.1 * eps_max)\n resul = enclosing_singularities(system, new_boxes,\n new_B, X, eps_max=0.1 * eps_max)\n cer_Solutions += resul[0] + resul[1]\n uncer_Solutions += resul[2]\n boxes[1] += new_boxes[1]\n else:\n uncer_Solutions.append(B_Ball)\n end_ball = time.time()\n ball.append(end_ball - start_ball)\n start_combin = time.time()\n checked_boxes = []\n all_boxes = boxes[0] + boxes[1]\n checked_boxes = []\n mon_mid_cusp = [[(0.5 * (Bij[1] + Bij[0])) for Bij in Bi[:2]] for Bi in\n classes[1]]\n mon_rad_cusp = [max([(0.5 * (Bij[1] - Bij[0])) for Bij in Bi[:2]]) for\n Bi in classes[1]]\n potential_cusps = [tree.query_ball_point(m, r=math.sqrt(2) * (r +\n eps_max)) for m, r in zip(mon_mid_cusp, mon_rad_cusp)]\n end_combin = time.time()\n combin.append(end_combin - start_combin)\n for cusp_indx in range(len(classes[1])):\n start_combin = time.time()\n intersecting_boxes = [all_boxes[i] for i in potential_cusps[\n cusp_indx] if d.boxes_intersection(all_boxes[i], classes[1][\n cusp_indx]) != []]\n H = []\n uni = classes[1][cusp_indx][:]\n potential_cusp = classes[1][cusp_indx][:]\n checked_boxes.append(potential_cusp)\n for box in intersecting_boxes:\n if box in checked_boxes:\n continue\n uni = d.box_union(uni, box)\n checked_boxes.append(box)\n end_combin = time.time()\n combin.append(end_combin - start_combin)\n start_ball = time.time()\n t = estimating_t([[potential_cusp], [potential_cusp]])\n \"\"\"if t[1]-t[0] < 1e-07:\n t[0]=t[0]-0.5 * eps_min\n t[1]=t[1]+0.5 * eps_min\"\"\"\n B_Ball = uni + [[-1.01, 1.01]] * (n - 2) + [t]\n H.append(B_Ball)\n sol = cusp_Ball_solver(P, B_Ball, X)\n if sol != 'Empty' and sol != [[], []]:\n cer_Solutions += sol[0]\n uncer_Solutions += sol[1]\n if sol == [[], []]:\n uncer_Solutions.append(B_Ball)\n end_ball = time.time()\n ball.append(end_ball - start_ball)\n start_combin = time.time()\n non_intersecting_boxes = [all_boxes[i] for i in potential_cusps[\n cusp_indx] if d.boxes_intersection(all_boxes[i], classes[1][\n cusp_indx]) == []]\n end_combin = time.time()\n combin.append(end_combin - start_combin)\n for aligned in non_intersecting_boxes:\n start_ball = time.time()\n if aligned in checked_boxes:\n continue\n boxes_intersect_aligned = [B for B in non_intersecting_boxes if\n d.boxes_intersection(aligned, B) != []]\n uni = aligned[:]\n for boxi in boxes_intersect_aligned:\n if boxi in checked_boxes:\n continue\n uni = d.box_union(uni, boxi)\n checked_boxes.append(boxi)\n t = estimating_t([[potential_cusp], [uni]])\n \"\"\"if t[1]-t[0] < 1e-07:\n t[0]=t[0]-0.5 * eps_min\n t[1]=t[1]+0.5 * eps_min\"\"\"\n r = [[float(ri[0]), float(ri[1])] for ri in estimating_yandr([[\n potential_cusp], [uni]])]\n B_Ball = potential_cusp[:2] + r + [t]\n H.append(H)\n Ball_generating_system(P, B_Ball, X)\n os.system('ibexsolve --eps-max=' + str(eps_max) +\n ' --eps-min=' + str(eps_min) + ' -s eq.txt > output.txt')\n Solutions = computing_boxes()\n if Solutions != 'Empty':\n cer_Solutions += Solutions[0]\n uncer_Solutions += Solutions[1]\n elif Solutions == [[], []]:\n uncer_Solutions.append(B_Ball)\n end_ball = time.time()\n ball.append(end_ball - start_ball)\n nodes = []\n cups_or_smallnodes = []\n start_combin = time.time()\n checker = projection_checker(cer_Solutions)\n uncer_Solutions = uncer_Solutions + checker[1]\n cer_Solutions = [Bi for Bi in checker[0] if Bi[2 * n - 2][1] >= 0]\n for solution in cer_Solutions:\n if 0 >= solution[2 * n - 2][0] and 0 <= solution[2 * n - 2][1]:\n cups_or_smallnodes.append(solution)\n else:\n nodes.append(solution)\n end_combin = time.time()\n combin.append(end_combin - start_combin)\n print('KDtree ', sum(combin), 'Ball ', sum(ball))\n return [nodes, cups_or_smallnodes, uncer_Solutions]\n\n\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<docstring token>\n",
"<import token>\n\n\ndef ploting_boxes(boxes, uncer_boxes, var=[0, 1], B=[[-20, 20], [-20, 20]],\n x=0.1, nodes=[], cusps=[], uncer_Solutions=[], Legend=False, color=\n 'green', variabel_name='x'):\n fig, ax = plt.subplots()\n ax.set_xlim(B[0][0], B[0][1])\n ax.set_ylim(B[1][0], B[1][1])\n ax.set_xlabel(variabel_name + str(1))\n ax.set_ylabel(variabel_name + str(2))\n \"\"\"try:\n ax.title(open(\"system.txt\",\"r\").read())\n except:\n pass\"\"\"\n c = 0\n green_patch = mpatches.Patch(color=color, label='smooth part')\n red_patch = mpatches.Patch(color='red', label='unknown part')\n node_patch = mpatches.Patch(color='black', label='Certified nodes',\n fill=None)\n cusp_patch = mpatches.Patch(color='blue', label=\n 'Projection of certified solution with t=0 ', fill=None)\n if Legend == True:\n plt.legend(handles=[green_patch, red_patch, node_patch, cusp_patch])\n for box in boxes:\n rectangle = plt.Rectangle((box[var[0]][0], box[var[1]][0]), box[var\n [0]][1] - box[var[0]][0], box[var[1]][1] - box[var[1]][0],\n color=color)\n plt.gca().add_patch(rectangle)\n for box in uncer_boxes:\n rectangle = plt.Rectangle((box[var[0]][0], box[var[1]][0]), box[var\n [0]][1] - box[var[0]][0], box[var[1]][1] - box[var[1]][0], fc='r')\n plt.gca().add_patch(rectangle)\n for box in nodes:\n rectangle = plt.Rectangle((box[0][0] - x, box[1][0] - x), 2 * x +\n box[0][1] - box[0][0], 2 * x + box[1][1] - box[1][0], fc='y',\n fill=None)\n plt.gca().add_patch(rectangle)\n for box in cusps:\n rectangle = plt.Rectangle((box[0][0] - x, box[1][0] - x), 2 * x +\n box[0][1] - box[0][0], 2 * x + box[1][1] - box[1][0], fc='y',\n color='blue', fill=None)\n plt.gca().add_patch(rectangle)\n for box in uncer_Solutions:\n rectangle = plt.Rectangle((box[0][0] - x, box[1][0] - x), 2 * x +\n box[0][1] - box[0][0], 2 * x + box[1][1] - box[1][0], fc='y',\n color='red', fill=None)\n plt.gca().add_patch(rectangle)\n plt.savefig('fig.jpg', dpi=1000)\n plt.show()\n\n\n<function token>\n\n\ndef Ball_solver(equations, B_Ball, X):\n L = [B_Ball]\n certified_boxes = []\n uncertified_boxes = []\n n = len(X)\n while len(L) != 0:\n solvability = 1\n if B_Ball[2 * n - 2][0] <= 0 <= B_Ball[2 * n - 2][1] and d.width([d\n .ftconstructor(Bi[0], Bi[1]) for Bi in L[0]]) < 0.1:\n Ball_cusp_gen(equations, B_Ball, X)\n elif (B_Ball[2 * n - 2][0] > 0 or 0 > B_Ball[2 * n - 2][1]\n ) and d.width([d.ftconstructor(Bi[0], Bi[1]) for Bi in L[0]]\n ) < 0.1:\n Ball_node_gen(equations, B_Ball, X)\n else:\n children = cb.plane_subdivision(L[0])\n L.remove(L[0])\n L += children\n solvability = 0\n if solvability == 1:\n ibex_output = cb.solving_with_ibex()\n if ibex_output[0] == 'Empty':\n L.remove(L[0])\n elif len(ibex_output[0]) != 0:\n certified_boxes += cb.computing_boxes(ibex_output[0])\n L.remove(L[0])\n elif len(ibex_output[1]) != 0:\n uncertified_boxes += cb.computing_boxes(ibex_output[1])\n L.remove(L[0])\n else:\n children = cb.plane_subdivision(L[0])\n L.remove(L[0])\n L += children\n return [certified_boxes, uncertified_boxes]\n\n\ndef SDP_str(P, X):\n n = len(X)\n P_pluse = P[:]\n P_minus = P[:]\n for i in range(2, n):\n P_pluse = P_pluse.replace('x' + str(i + 1), '(x' + str(i + 1) +\n '+ r' + str(i + 1) + '*sqrt(t))')\n P_minus = P_minus.replace('x' + str(i + 1), '(x' + str(i + 1) +\n '- r' + str(i + 1) + '*sqrt(t))')\n SP = '0.5*(' + P_pluse + '+' + P_minus + ')=0; \\n'\n DP = '0.5*(' + P_pluse + '- (' + P_minus + ') )/(sqrt(t))=0; \\n'\n return [SP, DP]\n\n\ndef Ball_generating_system(P, B_Ball, X, eps_min=0.001):\n n = len(X)\n V = ' Variables \\n '\n for i in range(n):\n if B_Ball[i][0] != B_Ball[i][1]:\n V += 'x' + str(i + 1) + ' in ' + str(B_Ball[i]) + ' ; \\n'\n else:\n V += 'x' + str(i + 1) + ' in ' + str([B_Ball[i][0] - eps_min, \n B_Ball[i][1] + eps_min]) + ' ; \\n'\n for i in range(n, 2 * n - 2):\n V += 'r' + str(i - n + 3) + ' in ' + str(B_Ball[i]) + ' ; \\n'\n V += 't' + ' in ' + str(B_Ball[2 * n - 2]) + ' ; \\n'\n V += 'Constraints \\n'\n for Pi in P:\n V += SDP_str(Pi, X)[0]\n V += SDP_str(Pi, X)[1]\n last_eq = ''\n for i in range(3, n):\n last_eq += 'r' + str(i) + '^2+'\n last_eq += 'r' + str(n) + '^2 -1=0;'\n V += last_eq + '\\n'\n f = open('eq.txt', 'w+')\n f.write(V)\n f.write('end')\n f.close()\n\n\n<function token>\n\n\ndef intersting_boxes(curve, b):\n cer_intersting_boxes = []\n uncer_intersting_boxes = []\n for box in curve[0]:\n if b[0][0] <= box[0][0] <= box[0][1] <= b[0][1] and b[1][0] <= box[1][0\n ] <= box[1][1] <= b[1][1]:\n cer_intersting_boxes.append(box)\n for box in curve[1]:\n if b[0][0] <= box[0][0] <= box[0][1] <= b[0][1] and b[1][0] <= box[1][0\n ] <= box[1][1] <= b[1][1]:\n uncer_intersting_boxes.append(box)\n return [cer_intersting_boxes, uncer_intersting_boxes]\n\n\ndef ibex_output(P, B, X):\n os.system('ibexsolve --eps-max=0.1 -s eq.txt > output.txt')\n g = open('output.txt', 'r')\n result = g.readlines()\n T = computing_boxes(result)\n return T\n\n\ndef estimating_t1(components, upper_bound=200000):\n t1 = upper_bound\n t2 = 0\n for box1 in components[0]:\n for box2 in components[1]:\n a = d.distance(box1, box2).lower()\n b = d.distance(box1, box2).upper()\n if t1 > a:\n t1 = a\n if t2 < b:\n t2 = b\n t = d.ftconstructor(t1, t2)\n t = 0.25 * d.power_interval(t, 2)\n return [float(t.lower()), float(t.upper())]\n\n\ndef estimating_t(components, upper_bound=19000.8):\n t1 = upper_bound\n t2 = 0\n for box1 in components[0]:\n for box2 in components[1]:\n a = d.distance(box1[2:], box2[2:])\n if t1 > a[0]:\n t1 = a[0]\n if t2 < a[1]:\n t2 = a[1]\n t = d.ftconstructor(t1, t2)\n t = 0.25 * d.power_interval(t, 2)\n return [float(t.lower()), float(t.upper())]\n\n\ndef boxes_compare(box1, box2):\n flage = 0\n for i in range(len(box1) - 1, -1, -1):\n if box1[i][0] > box2[i][0]:\n return 1\n if box1[i][0] < box2[i][0]:\n return -1\n return 0\n\n\ndef boxes_sort(boxes):\n sorted_boxes = boxes[:]\n for i in range(len(boxes) - 1):\n for j in range(i + 1, len(boxes)):\n if boxes_compare(sorted_boxes[i], sorted_boxes[j]) == 1:\n sorted_boxes[i], sorted_boxes[j] = sorted_boxes[j\n ], sorted_boxes[i]\n return sorted_boxes\n\n\ndef connected_compnants(boxes):\n ftboxes = boxes[:]\n components = [[ftboxes[0]]]\n for i in range(1, len(ftboxes)):\n boxi_isused = 0\n for j in range(len(components)):\n membership = 0\n for k in range(len(components[j])):\n if d.boxes_intersection(ftboxes[i], components[j][k]) != []:\n components[j].append(ftboxes[i])\n membership = 1\n boxi_isused = 1\n break\n if membership == 1:\n break\n if boxi_isused == 0:\n components.append([ftboxes[i]])\n unused = list(range(len(components)))\n components1 = components[:]\n components2 = []\n while len(components1) != len(components2):\n for i in unused:\n for j in [j for j in list(range(i + 1, len(components))) if j in\n unused]:\n intersection_exists = False\n is_looping = True\n for boxi in components[i]:\n for boxj in components[j]:\n if d.boxes_intersection(boxi, boxj) != []:\n is_looping = False\n intersection_exists = True\n break\n if is_looping == False:\n break\n if intersection_exists == True:\n components[i] += components[j]\n unused.remove(j)\n components2 = components1[:]\n components1 = [components[k] for k in unused]\n return components1\n\n\ndef planner_connected_compnants(boxes):\n if len(boxes) == 0:\n return []\n ftboxes = boxes[:]\n components = [[ftboxes[0]]]\n for i in range(1, len(ftboxes)):\n boxi_isused = 0\n for j in range(len(components)):\n membership = 0\n for k in range(len(components[j])):\n if d.boxes_intersection(ftboxes[i][:2], components[j][k][:2]\n ) != []:\n components[j].append(ftboxes[i])\n membership = 1\n boxi_isused = 1\n break\n if membership == 1:\n break\n if boxi_isused == 0:\n components.append([ftboxes[i]])\n unused = list(range(len(components)))\n components1 = components[:]\n components2 = []\n while len(components1) != len(components2):\n for i in unused:\n for j in [j for j in list(range(i + 1, len(components))) if j in\n unused]:\n intersection_exists = False\n is_looping = True\n for boxi in components[i]:\n for boxj in components[j]:\n if d.boxes_intersection(boxi[:2], boxj[:2]) != []:\n is_looping = False\n intersection_exists = True\n break\n if is_looping == False:\n break\n if intersection_exists == True:\n components[i] += components[j]\n unused.remove(j)\n components2 = components1[:]\n components1 = [components[k] for k in unused]\n return components1\n\n\n<function token>\n<function token>\n\n\ndef intersect_in_2D(class1, class2, monotonicity=1):\n pl_intesected_pairs = []\n if monotonicity == 1:\n for i in range(len(class1)):\n for j in range(len(class2)):\n if d.boxes_intersection(class1[i][:2], class2[j][:2]) != [\n ] and d.boxes_intersection(class1[i], class2[j]) == []:\n if [class2[j], class1[i]] not in pl_intesected_pairs:\n pl_intesected_pairs.append([class1[i], class2[j]])\n elif monotonicity == 0:\n for i in range(len(class1)):\n for j in range(len(class2)):\n if d.boxes_intersection(class1[i][:2], class2[j][:2]) != []:\n if [class2[j], class1[i]] not in pl_intesected_pairs:\n pl_intesected_pairs.append([class1[i], class2[j]])\n elif monotonicity == 2:\n inters_indic = []\n for i in range(len(class1)):\n inters_indic.append([])\n for j in range(len(class2)):\n if d.boxes_intersection(class1[i][:2], class2[j][:2]) != []:\n inters_indic[i] = inters_indic[i] + [j]\n for k in range(len(class1)):\n if len(inters_indic[k]) > 3:\n for j in range(len(inters_indic[k])):\n if [class2[j], class1[k]] not in pl_intesected_pairs:\n pl_intesected_pairs.append([class1[k], class2[j]])\n return pl_intesected_pairs\n\n\ndef solving_fornodes(equations, boxes, B, X, eps=0.1):\n plane_components = detecting_nodes(boxes, B, equations, X, eps)\n g = open(equations, 'r')\n P = [Pi.replace('\\n', '') for Pi in g.readlines()]\n Ball_solutions = []\n for plane_component in plane_components:\n x1 = float(min([ai[0].lower() for ai in plane_component]))\n x2 = float(max([ai[0].upper() for ai in plane_component]))\n y1 = float(min([ai[1].lower() for ai in plane_component]))\n y2 = float(max([ai[1].upper() for ai in plane_component]))\n components = connected_compnants(plane_component)\n r = [[float(ri[0]), float(ri[1])] for ri in estimating_r(components)]\n t = estimating_t(components)\n t = [float(t[0]), float(t[1])]\n B_Ball = [[x1, x2], [y1, y2]] + r + [t]\n Ball_generating_system(P, B_Ball, X)\n solutionsi = ibex_output(P, B_Ball, X)\n Ball_solutions += solutionsi\n return Ball_solutions\n\n\n<function token>\n\n\ndef plane_subdivision(B):\n ft_B2 = d.subdivide([d.ftconstructor(Bi[0], Bi[1]) for Bi in B[:2]])\n normal_B2 = [d.ft_normal(Bi) for Bi in ft_B2]\n return d.cartesian_product(normal_B2, [B[2:]])\n\n\ndef system_generator(f, B, X):\n g = open(f, 'r')\n L = g.readlines()\n g.close()\n f = open('eq.txt', 'w+')\n f.write('Variables \\n')\n for i in range(len(X)):\n f.write(str(X[i]) + ' in ' + str(B[i]) + ' ; \\n')\n f.write('Constraints \\n')\n for Li in L:\n f.write(Li.replace('\\n', '') + '=0; \\n')\n f.write('end ')\n f.close()\n return f\n\n\n<function token>\n\n\ndef computing_boxes():\n if 'infeasible' in open('output.txt', 'r').read():\n return 'Empty'\n content = open('output.txt', 'r').readlines()\n cer = []\n uncer = []\n i = 0\n Answer = []\n for fi in content:\n try:\n a = fi.index('(')\n b = fi.index(')')\n T = fi[a:b + 1].replace('(', '[')\n T = fi[a:b + 1].replace('(', '[')\n T = T.replace(')', ']')\n T = T.split(';')\n E = []\n i = 0\n for Ti in T:\n Ti = Ti.replace('[', '')\n Ti = Ti.replace(']', '')\n Ti = Ti.replace('<', '')\n Ti = Ti.replace('>', '')\n x = Ti.index(',')\n a = float(Ti[:x])\n b = float(Ti[x + 1:])\n E.append([])\n E[i] = [a, b]\n i += 1\n if 'solution n' in fi or 'boundary n' in fi:\n cer.append(E)\n elif 'unknown n' in fi:\n uncer.append(E)\n except ValueError:\n pass\n return [cer, uncer]\n\n\ndef enclosing_curve(system, B, X, eps_min=0.1, eps_max=0.1):\n L = [B]\n certified_boxes = []\n uncertified_boxes = []\n while len(L) != 0:\n system_generator(system, L[0], X)\n os.system('ibexsolve --eps-max=' + str(eps_max) + ' --eps-min=' +\n str(eps_min) + ' -s eq.txt > output.txt')\n content = open('output.txt', 'r').readlines()\n ibex_output = computing_boxes()\n if ibex_output == [[], []] and max([(Bi[1] - Bi[0]) for Bi in L[0]]\n ) < eps_min:\n uncertified_boxes.append(L[0])\n L.remove(L[0])\n elif ibex_output == [[], []]:\n children = plane_subdivision(L[0])\n L.remove(L[0])\n L += children\n elif ibex_output == 'Empty':\n L.remove(L[0])\n else:\n if len(ibex_output[0]) != 0:\n certified_boxes += ibex_output[0]\n if len(ibex_output[1]) != 0:\n uncertified_boxes += ibex_output[1]\n L.remove(L[0])\n return [certified_boxes, uncertified_boxes]\n\n\ndef loopsfree_checker(f, certified_boxes, uncer_boxes, P):\n L = eval_file_gen(f, certified_boxes, X)\n while L.replace('\\n', '') != '[]':\n L = L.replace('[', '')\n L = L.replace(']', '')\n L = L.replace('\\n', '')\n L = L.split(',')\n for i in L:\n children = normal_subdivision(certified_boxes[int(i)])\n certified_boxes.remove(certified_boxes[int(i)])\n for child in children:\n cer_children, uncer_children = enclosing_curve(f, child, X)\n certified_boxes += cer_children\n uncer_boxes += uncer_children\n L = eval_file_gen(f, certified_boxes, X)\n return L\n\n\ndef eval_file_gen(f, boxes, X, special_function=[]):\n functions = ['sin', 'cos', 'tan', 'exp'] + special_function\n if len(boxes[0]) == 0:\n return []\n n = len(boxes[0])\n m = len(boxes)\n g = open(f, 'r')\n P_str = g.readlines()\n P_str = [Pi.replace('\\n', '') for Pi in P_str]\n P_str = [Pi.replace('^', '**') for Pi in P_str]\n P_exp = [parse_expr(Pi) for Pi in P_str]\n jac = sp.Matrix(P_str).jacobian(sp.Matrix(X))\n minor1 = jac[:, 1:].det()\n minor2 = jac[:, [i for i in range(n) if i != 1]].det()\n fil = open('evaluation_file1.py', 'w')\n fil.write('import flint as ft \\n')\n fil.write('import sympy as sp \\n')\n fil.write('import interval_arithmetic as d \\n')\n fil.write('boxes=' + str(boxes) + '\\n')\n fil.write(\n 'ftboxes=[ [d.ftconstructor(Bi[0],Bi[1]) for Bi in B ] for B in boxes ] \\n'\n )\n fil.write('n=len(boxes[0])\\n')\n fil.write('m=len(boxes)\\n')\n fil.write('m1=[]\\n')\n fil.write('m2=[]\\n')\n minor1_str = str(minor1)\n minor2_str = str(minor2)\n for i in range(n):\n minor1_str = minor1_str.replace('x' + str(i + 1), 'B[' + str(i) + ']')\n minor2_str = minor2_str.replace('x' + str(i + 1), 'B[' + str(i) + ']')\n for func in functions:\n minor1_str = minor1_str.replace(func, 'ft.arb.' + func)\n minor2_str = minor2_str.replace(func, 'ft.arb.' + func)\n fil.write('for B in ftboxes: \\n')\n fil.write(' m1.append(ft.arb(' + minor1_str + ')) \\n')\n fil.write(' m2.append( ft.arb(' + minor2_str + ')) \\n')\n fil.write(\n 'innrer_loops=[i for i in range(m) if 0 in m1[i] and 0 in m2[i] ]\\n')\n fil.write('print(innrer_loops)\\n')\n fil.close()\n t = os.popen('python3 evaluation_file1.py ').read()\n return t\n\n\n<function token>\n\n\ndef projection_checker(solutions):\n if len(solutions) == 0:\n return [[], []]\n m = len(solutions[0])\n n = int((m + 1) / 2)\n intersect_in2d = [[]] * len(solutions)\n for i in range(len(solutions) - 1):\n for j in range(i + 1, len(solutions)):\n if solutions[i] == solutions[j]:\n continue\n elif d.boxes_intersection(solutions[i][:2], solutions[j][:2]) != [\n ] and (d.boxes_intersection(solutions[i][n:2 * n - 2], [[-\n Bi[1], -Bi[0]] for Bi in solutions[j][n:2 * n - 2]]) == [] and\n d.boxes_intersection(solutions[i][n:2 * n - 2], [[Bi[0], Bi\n [1]] for Bi in solutions[j][n:2 * n - 2]]) == [] or d.\n boxes_intersection(solutions[i][2:n] + [solutions[i][2 * n -\n 2]], solutions[j][2:n] + [solutions[j][2 * n - 2]]) == []):\n intersect_in2d[i] = intersect_in2d[i] + [j]\n accepted = []\n acc_ind = []\n unaccepted = []\n unacc_ind = []\n for i in range(len(solutions)):\n if len(intersect_in2d[i]) == 0 and i not in unacc_ind + acc_ind:\n accepted.append(solutions[i])\n acc_ind.append(i)\n continue\n elif i not in unacc_ind + acc_ind:\n unaccepted.append(solutions[i])\n unacc_ind.append(i)\n for k in intersect_in2d[i]:\n if k not in unacc_ind:\n unaccepted.append(solutions[k])\n unacc_ind.append(k)\n return [accepted, unaccepted]\n\n\ndef Ball_given_2nboxes(system, X, B1, B2, monotonicity_B1=1, monotonicity_B2=1\n ):\n B1_ft = [d.ftconstructor(Bi[0], Bi[1]) for Bi in B1]\n B2_ft = [d.ftconstructor(Bi[0], Bi[1]) for Bi in B2]\n P = [Pi.replace('\\n', '') for Pi in open(system, 'r').readlines()]\n sol = 'Empty'\n if d.boxes_intersection(B1_ft, B2_ft) == [\n ] and monotonicity_B1 == monotonicity_B2 == 1:\n t = estimating_t([[B1_ft], [B2_ft]])\n y_and_r = estimating_yandr([[B1_ft], [B2_ft]])\n intersec_B1B2_in2d = d.boxes_intersection(B1_ft[:2], B2_ft[:2])\n intersec_B1B2_in2d = [[float(Bi.lower()), float(Bi.upper())] for Bi in\n intersec_B1B2_in2d]\n B_Ball = intersec_B1B2_in2d + y_and_r + [t]\n Ball_node_gen(system, B_Ball, X)\n os.system('ibexsolve --eps-max=0.1 -s eq.txt > output.txt')\n sol = computing_boxes()\n return sol\n\n\ndef all_pairs_oflist(L):\n pairs = []\n for i in range(len(L) - 1):\n for j in range(i + 1, len(L)):\n pairs.append([L[i], L[j]])\n return pairs\n\n\ndef checking_assumptions(curve_data):\n if len(curve_data[0][1]) != 0:\n return 0\n Ball_sols_ft = [[d.ftconstructor(Bi[0], Bi[1]) for Bi in B] for B in\n curve_data[1][0]] + [[d.ftconstructor(Bi[0], Bi[1]) for Bi in B] for\n B in curve_data[1][1]]\n alph3 = assum_alph3_checker(Ball_sols_ft)\n if alph3 == 1:\n return 1\n else:\n return 0\n\n\ndef csv_saver(L, type_L='Ball'):\n dic = []\n if type_L == 'Ball':\n n = int((len(L[0]) + 1) / 2)\n for j in range(len(L)):\n dic.append({})\n for i in range(n):\n dic[j]['x' + str(i + 1)] = L[j][i]\n for i in range(n, 2 * n - 2):\n dic[j]['r' + str(i + 3 - n)] = L[j][i]\n dic[j]['t'] = L[j][2 * n - 2]\n return dic\n\n\ndef dict2csv(dictlist, csvfile):\n \"\"\"\n Takes a list of dictionaries as input and outputs a CSV file.\n \"\"\"\n f = open(csvfile, 'wb')\n fieldnames = dictlist[0].keys()\n csvwriter = csv.DictWriter(f, delimiter=',', fieldnames=fieldnames)\n csvwriter.writerow(dict((fn, fn) for fn in fieldnames))\n for row in dictlist:\n csvwriter.writerow(row)\n fn.close()\n\n\ndef assum_alph3_checker(solutions):\n comparing_list = [[]] * len(solutions)\n for i in range(len(solutions) - 1):\n for j in range(i + 1, len(solutions)):\n if d.boxes_intersection(solutions[i][:2], solutions[j][:2]) != []:\n comparing_list[i].append(j)\n comparing_list[j].append(i)\n matching = [len(T) for T in comparing_list]\n if max(matching) <= 2:\n return 1\n else:\n return 0\n\n\ndef plotting_3D(boxes, Box, var=[0, 1, 2]):\n ax = plt.figure().add_subplot(111, projection='3d')\n ax.set_xlim(Box[0][0], Box[0][1])\n ax.set_ylim(Box[1][0], Box[1][1])\n ax.set_zlim(Box[2][0], Box[2][1])\n ax.set_xlabel('x' + str(var[0] + 1))\n ax.set_ylabel('x' + str(var[1] + 1))\n ax.set_zlabel('x' + str(var[2] + 1))\n for box in boxes:\n V = [[box[j][0] for j in range(3)], [box[j][1] for j in range(3)]]\n points = list(itertools.product(*box))\n faces = [[points[0], points[2], points[6], points[4]], [points[0],\n points[2], points[3], points[1]], [points[0], points[1], points\n [5], points[4]], [points[2], points[3], points[7], points[6]],\n [points[1], points[3], points[7], points[5]]]\n ax.add_collection3d(Poly3DCollection(faces, facecolors='green',\n linewidths=1, edgecolors='green', alpha=0.25))\n plt.show()\n\n\ndef enclosing_singularities(system, boxes, B, X, eps_max=0.1, eps_min=0.01):\n combin = []\n ball = []\n start_combin = time.time()\n n = len(B)\n P = [Pi.replace('\\n', '') for Pi in open(system, 'r').readlines()]\n certified_boxes, uncertified_boxes = boxes\n classes = boxes_classifier(system, boxes, X, special_function=[])\n cer_Solutions = []\n uncer_Solutions = []\n H = []\n mon_mid = [[(0.5 * (Bij[1] + Bij[0])) for Bij in Bi[:2]] for Bi in\n classes[0]]\n mon_rad = [max([(0.5 * (Bij[1] - Bij[0])) for Bij in Bi[:2]]) for Bi in\n classes[0]]\n tree = spatial.KDTree(mon_mid)\n intersting_boxes = [tree.query_ball_point(m, r=math.sqrt(2) * r) for m,\n r in zip(mon_mid, mon_rad)]\n \"\"\"for i in range(len(ball)): \n for j in ball[i]:\n if i not in ball[j]:\n ball[j].append(i)\"\"\"\n intersting_boxes = [indi for indi in intersting_boxes if len(indi) > 3]\n discarded_components = []\n for i in range(len(intersting_boxes) - 1):\n for_i_stop = 0\n boxi_set = set(intersting_boxes[i])\n for j in range(i + 1, len(intersting_boxes)):\n boxj_set = set(intersting_boxes[j])\n if boxj_set.issubset(boxi_set):\n discarded_components.append(j)\n elif boxi_set < boxj_set:\n discarded_components.append(i)\n intersting_boxes = [intersting_boxes[i] for i in range(len(\n intersting_boxes)) if i not in discarded_components]\n interesting_boxes_flattened = []\n for Box_ind in intersting_boxes:\n for j in Box_ind:\n if j not in interesting_boxes_flattened:\n interesting_boxes_flattened.append(j)\n plane_components = planner_connected_compnants([classes[0][i] for i in\n interesting_boxes_flattened])\n end_combin = time.time()\n combin.append(end_combin - start_combin)\n H = []\n for plane_component in plane_components:\n if len(plane_component) > 1:\n start_combin = time.time()\n components = connected_compnants(plane_component)\n pairs_of_branches = all_pairs_oflist(components)\n end_combin = time.time()\n combin.append(end_combin - start_combin)\n for pair_branches in pairs_of_branches:\n start_ball = time.time()\n all_boxes = pair_branches[0] + pair_branches[1]\n uni = []\n for box in all_boxes:\n uni = d.box_union(uni, box)\n t = estimating_t(pair_branches)\n t1 = d.ftconstructor(t[0], t[1])\n t = [float(t1.lower()), float(t1.upper())]\n r = [[float(ri[0]), float(ri[1])] for ri in\n estimating_yandr(pair_branches)]\n B_Ball = uni[:2] + r + [t]\n cusp_Ball_solver(P, B_Ball, X)\n Ball_generating_system(P, B_Ball, X, eps_min)\n os.system('ibexsolve --eps-max=' + str(eps_max) +\n ' --eps-min=' + str(eps_min) + ' -s eq.txt > output.txt'\n )\n Solutions = computing_boxes()\n if Solutions != 'Empty' and Solutions != [[], []]:\n cer_Solutions += Solutions[0]\n uncer_Solutions += Solutions[1]\n if Solutions == [[], []]:\n if d.width(B_Ball[:2]) > eps_min:\n new_B = B_Ball[:2] + B[2:n]\n new_boxes = enclosing_curve(system, new_B, X,\n eps_max=0.1 * eps_max)\n resul = enclosing_singularities(system, new_boxes,\n new_B, X, eps_max=0.1 * eps_max)\n cer_Solutions += resul[0] + resul[1]\n uncer_Solutions += resul[2]\n boxes[1] += new_boxes[1]\n else:\n uncer_Solutions.append(B_Ball)\n end_ball = time.time()\n ball.append(end_ball - start_ball)\n start_combin = time.time()\n checked_boxes = []\n all_boxes = boxes[0] + boxes[1]\n checked_boxes = []\n mon_mid_cusp = [[(0.5 * (Bij[1] + Bij[0])) for Bij in Bi[:2]] for Bi in\n classes[1]]\n mon_rad_cusp = [max([(0.5 * (Bij[1] - Bij[0])) for Bij in Bi[:2]]) for\n Bi in classes[1]]\n potential_cusps = [tree.query_ball_point(m, r=math.sqrt(2) * (r +\n eps_max)) for m, r in zip(mon_mid_cusp, mon_rad_cusp)]\n end_combin = time.time()\n combin.append(end_combin - start_combin)\n for cusp_indx in range(len(classes[1])):\n start_combin = time.time()\n intersecting_boxes = [all_boxes[i] for i in potential_cusps[\n cusp_indx] if d.boxes_intersection(all_boxes[i], classes[1][\n cusp_indx]) != []]\n H = []\n uni = classes[1][cusp_indx][:]\n potential_cusp = classes[1][cusp_indx][:]\n checked_boxes.append(potential_cusp)\n for box in intersecting_boxes:\n if box in checked_boxes:\n continue\n uni = d.box_union(uni, box)\n checked_boxes.append(box)\n end_combin = time.time()\n combin.append(end_combin - start_combin)\n start_ball = time.time()\n t = estimating_t([[potential_cusp], [potential_cusp]])\n \"\"\"if t[1]-t[0] < 1e-07:\n t[0]=t[0]-0.5 * eps_min\n t[1]=t[1]+0.5 * eps_min\"\"\"\n B_Ball = uni + [[-1.01, 1.01]] * (n - 2) + [t]\n H.append(B_Ball)\n sol = cusp_Ball_solver(P, B_Ball, X)\n if sol != 'Empty' and sol != [[], []]:\n cer_Solutions += sol[0]\n uncer_Solutions += sol[1]\n if sol == [[], []]:\n uncer_Solutions.append(B_Ball)\n end_ball = time.time()\n ball.append(end_ball - start_ball)\n start_combin = time.time()\n non_intersecting_boxes = [all_boxes[i] for i in potential_cusps[\n cusp_indx] if d.boxes_intersection(all_boxes[i], classes[1][\n cusp_indx]) == []]\n end_combin = time.time()\n combin.append(end_combin - start_combin)\n for aligned in non_intersecting_boxes:\n start_ball = time.time()\n if aligned in checked_boxes:\n continue\n boxes_intersect_aligned = [B for B in non_intersecting_boxes if\n d.boxes_intersection(aligned, B) != []]\n uni = aligned[:]\n for boxi in boxes_intersect_aligned:\n if boxi in checked_boxes:\n continue\n uni = d.box_union(uni, boxi)\n checked_boxes.append(boxi)\n t = estimating_t([[potential_cusp], [uni]])\n \"\"\"if t[1]-t[0] < 1e-07:\n t[0]=t[0]-0.5 * eps_min\n t[1]=t[1]+0.5 * eps_min\"\"\"\n r = [[float(ri[0]), float(ri[1])] for ri in estimating_yandr([[\n potential_cusp], [uni]])]\n B_Ball = potential_cusp[:2] + r + [t]\n H.append(H)\n Ball_generating_system(P, B_Ball, X)\n os.system('ibexsolve --eps-max=' + str(eps_max) +\n ' --eps-min=' + str(eps_min) + ' -s eq.txt > output.txt')\n Solutions = computing_boxes()\n if Solutions != 'Empty':\n cer_Solutions += Solutions[0]\n uncer_Solutions += Solutions[1]\n elif Solutions == [[], []]:\n uncer_Solutions.append(B_Ball)\n end_ball = time.time()\n ball.append(end_ball - start_ball)\n nodes = []\n cups_or_smallnodes = []\n start_combin = time.time()\n checker = projection_checker(cer_Solutions)\n uncer_Solutions = uncer_Solutions + checker[1]\n cer_Solutions = [Bi for Bi in checker[0] if Bi[2 * n - 2][1] >= 0]\n for solution in cer_Solutions:\n if 0 >= solution[2 * n - 2][0] and 0 <= solution[2 * n - 2][1]:\n cups_or_smallnodes.append(solution)\n else:\n nodes.append(solution)\n end_combin = time.time()\n combin.append(end_combin - start_combin)\n print('KDtree ', sum(combin), 'Ball ', sum(ball))\n return [nodes, cups_or_smallnodes, uncer_Solutions]\n\n\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<docstring token>\n",
"<import token>\n\n\ndef ploting_boxes(boxes, uncer_boxes, var=[0, 1], B=[[-20, 20], [-20, 20]],\n x=0.1, nodes=[], cusps=[], uncer_Solutions=[], Legend=False, color=\n 'green', variabel_name='x'):\n fig, ax = plt.subplots()\n ax.set_xlim(B[0][0], B[0][1])\n ax.set_ylim(B[1][0], B[1][1])\n ax.set_xlabel(variabel_name + str(1))\n ax.set_ylabel(variabel_name + str(2))\n \"\"\"try:\n ax.title(open(\"system.txt\",\"r\").read())\n except:\n pass\"\"\"\n c = 0\n green_patch = mpatches.Patch(color=color, label='smooth part')\n red_patch = mpatches.Patch(color='red', label='unknown part')\n node_patch = mpatches.Patch(color='black', label='Certified nodes',\n fill=None)\n cusp_patch = mpatches.Patch(color='blue', label=\n 'Projection of certified solution with t=0 ', fill=None)\n if Legend == True:\n plt.legend(handles=[green_patch, red_patch, node_patch, cusp_patch])\n for box in boxes:\n rectangle = plt.Rectangle((box[var[0]][0], box[var[1]][0]), box[var\n [0]][1] - box[var[0]][0], box[var[1]][1] - box[var[1]][0],\n color=color)\n plt.gca().add_patch(rectangle)\n for box in uncer_boxes:\n rectangle = plt.Rectangle((box[var[0]][0], box[var[1]][0]), box[var\n [0]][1] - box[var[0]][0], box[var[1]][1] - box[var[1]][0], fc='r')\n plt.gca().add_patch(rectangle)\n for box in nodes:\n rectangle = plt.Rectangle((box[0][0] - x, box[1][0] - x), 2 * x +\n box[0][1] - box[0][0], 2 * x + box[1][1] - box[1][0], fc='y',\n fill=None)\n plt.gca().add_patch(rectangle)\n for box in cusps:\n rectangle = plt.Rectangle((box[0][0] - x, box[1][0] - x), 2 * x +\n box[0][1] - box[0][0], 2 * x + box[1][1] - box[1][0], fc='y',\n color='blue', fill=None)\n plt.gca().add_patch(rectangle)\n for box in uncer_Solutions:\n rectangle = plt.Rectangle((box[0][0] - x, box[1][0] - x), 2 * x +\n box[0][1] - box[0][0], 2 * x + box[1][1] - box[1][0], fc='y',\n color='red', fill=None)\n plt.gca().add_patch(rectangle)\n plt.savefig('fig.jpg', dpi=1000)\n plt.show()\n\n\n<function token>\n\n\ndef Ball_solver(equations, B_Ball, X):\n L = [B_Ball]\n certified_boxes = []\n uncertified_boxes = []\n n = len(X)\n while len(L) != 0:\n solvability = 1\n if B_Ball[2 * n - 2][0] <= 0 <= B_Ball[2 * n - 2][1] and d.width([d\n .ftconstructor(Bi[0], Bi[1]) for Bi in L[0]]) < 0.1:\n Ball_cusp_gen(equations, B_Ball, X)\n elif (B_Ball[2 * n - 2][0] > 0 or 0 > B_Ball[2 * n - 2][1]\n ) and d.width([d.ftconstructor(Bi[0], Bi[1]) for Bi in L[0]]\n ) < 0.1:\n Ball_node_gen(equations, B_Ball, X)\n else:\n children = cb.plane_subdivision(L[0])\n L.remove(L[0])\n L += children\n solvability = 0\n if solvability == 1:\n ibex_output = cb.solving_with_ibex()\n if ibex_output[0] == 'Empty':\n L.remove(L[0])\n elif len(ibex_output[0]) != 0:\n certified_boxes += cb.computing_boxes(ibex_output[0])\n L.remove(L[0])\n elif len(ibex_output[1]) != 0:\n uncertified_boxes += cb.computing_boxes(ibex_output[1])\n L.remove(L[0])\n else:\n children = cb.plane_subdivision(L[0])\n L.remove(L[0])\n L += children\n return [certified_boxes, uncertified_boxes]\n\n\ndef SDP_str(P, X):\n n = len(X)\n P_pluse = P[:]\n P_minus = P[:]\n for i in range(2, n):\n P_pluse = P_pluse.replace('x' + str(i + 1), '(x' + str(i + 1) +\n '+ r' + str(i + 1) + '*sqrt(t))')\n P_minus = P_minus.replace('x' + str(i + 1), '(x' + str(i + 1) +\n '- r' + str(i + 1) + '*sqrt(t))')\n SP = '0.5*(' + P_pluse + '+' + P_minus + ')=0; \\n'\n DP = '0.5*(' + P_pluse + '- (' + P_minus + ') )/(sqrt(t))=0; \\n'\n return [SP, DP]\n\n\ndef Ball_generating_system(P, B_Ball, X, eps_min=0.001):\n n = len(X)\n V = ' Variables \\n '\n for i in range(n):\n if B_Ball[i][0] != B_Ball[i][1]:\n V += 'x' + str(i + 1) + ' in ' + str(B_Ball[i]) + ' ; \\n'\n else:\n V += 'x' + str(i + 1) + ' in ' + str([B_Ball[i][0] - eps_min, \n B_Ball[i][1] + eps_min]) + ' ; \\n'\n for i in range(n, 2 * n - 2):\n V += 'r' + str(i - n + 3) + ' in ' + str(B_Ball[i]) + ' ; \\n'\n V += 't' + ' in ' + str(B_Ball[2 * n - 2]) + ' ; \\n'\n V += 'Constraints \\n'\n for Pi in P:\n V += SDP_str(Pi, X)[0]\n V += SDP_str(Pi, X)[1]\n last_eq = ''\n for i in range(3, n):\n last_eq += 'r' + str(i) + '^2+'\n last_eq += 'r' + str(n) + '^2 -1=0;'\n V += last_eq + '\\n'\n f = open('eq.txt', 'w+')\n f.write(V)\n f.write('end')\n f.close()\n\n\n<function token>\n\n\ndef intersting_boxes(curve, b):\n cer_intersting_boxes = []\n uncer_intersting_boxes = []\n for box in curve[0]:\n if b[0][0] <= box[0][0] <= box[0][1] <= b[0][1] and b[1][0] <= box[1][0\n ] <= box[1][1] <= b[1][1]:\n cer_intersting_boxes.append(box)\n for box in curve[1]:\n if b[0][0] <= box[0][0] <= box[0][1] <= b[0][1] and b[1][0] <= box[1][0\n ] <= box[1][1] <= b[1][1]:\n uncer_intersting_boxes.append(box)\n return [cer_intersting_boxes, uncer_intersting_boxes]\n\n\ndef ibex_output(P, B, X):\n os.system('ibexsolve --eps-max=0.1 -s eq.txt > output.txt')\n g = open('output.txt', 'r')\n result = g.readlines()\n T = computing_boxes(result)\n return T\n\n\ndef estimating_t1(components, upper_bound=200000):\n t1 = upper_bound\n t2 = 0\n for box1 in components[0]:\n for box2 in components[1]:\n a = d.distance(box1, box2).lower()\n b = d.distance(box1, box2).upper()\n if t1 > a:\n t1 = a\n if t2 < b:\n t2 = b\n t = d.ftconstructor(t1, t2)\n t = 0.25 * d.power_interval(t, 2)\n return [float(t.lower()), float(t.upper())]\n\n\ndef estimating_t(components, upper_bound=19000.8):\n t1 = upper_bound\n t2 = 0\n for box1 in components[0]:\n for box2 in components[1]:\n a = d.distance(box1[2:], box2[2:])\n if t1 > a[0]:\n t1 = a[0]\n if t2 < a[1]:\n t2 = a[1]\n t = d.ftconstructor(t1, t2)\n t = 0.25 * d.power_interval(t, 2)\n return [float(t.lower()), float(t.upper())]\n\n\ndef boxes_compare(box1, box2):\n flage = 0\n for i in range(len(box1) - 1, -1, -1):\n if box1[i][0] > box2[i][0]:\n return 1\n if box1[i][0] < box2[i][0]:\n return -1\n return 0\n\n\ndef boxes_sort(boxes):\n sorted_boxes = boxes[:]\n for i in range(len(boxes) - 1):\n for j in range(i + 1, len(boxes)):\n if boxes_compare(sorted_boxes[i], sorted_boxes[j]) == 1:\n sorted_boxes[i], sorted_boxes[j] = sorted_boxes[j\n ], sorted_boxes[i]\n return sorted_boxes\n\n\ndef connected_compnants(boxes):\n ftboxes = boxes[:]\n components = [[ftboxes[0]]]\n for i in range(1, len(ftboxes)):\n boxi_isused = 0\n for j in range(len(components)):\n membership = 0\n for k in range(len(components[j])):\n if d.boxes_intersection(ftboxes[i], components[j][k]) != []:\n components[j].append(ftboxes[i])\n membership = 1\n boxi_isused = 1\n break\n if membership == 1:\n break\n if boxi_isused == 0:\n components.append([ftboxes[i]])\n unused = list(range(len(components)))\n components1 = components[:]\n components2 = []\n while len(components1) != len(components2):\n for i in unused:\n for j in [j for j in list(range(i + 1, len(components))) if j in\n unused]:\n intersection_exists = False\n is_looping = True\n for boxi in components[i]:\n for boxj in components[j]:\n if d.boxes_intersection(boxi, boxj) != []:\n is_looping = False\n intersection_exists = True\n break\n if is_looping == False:\n break\n if intersection_exists == True:\n components[i] += components[j]\n unused.remove(j)\n components2 = components1[:]\n components1 = [components[k] for k in unused]\n return components1\n\n\ndef planner_connected_compnants(boxes):\n if len(boxes) == 0:\n return []\n ftboxes = boxes[:]\n components = [[ftboxes[0]]]\n for i in range(1, len(ftboxes)):\n boxi_isused = 0\n for j in range(len(components)):\n membership = 0\n for k in range(len(components[j])):\n if d.boxes_intersection(ftboxes[i][:2], components[j][k][:2]\n ) != []:\n components[j].append(ftboxes[i])\n membership = 1\n boxi_isused = 1\n break\n if membership == 1:\n break\n if boxi_isused == 0:\n components.append([ftboxes[i]])\n unused = list(range(len(components)))\n components1 = components[:]\n components2 = []\n while len(components1) != len(components2):\n for i in unused:\n for j in [j for j in list(range(i + 1, len(components))) if j in\n unused]:\n intersection_exists = False\n is_looping = True\n for boxi in components[i]:\n for boxj in components[j]:\n if d.boxes_intersection(boxi[:2], boxj[:2]) != []:\n is_looping = False\n intersection_exists = True\n break\n if is_looping == False:\n break\n if intersection_exists == True:\n components[i] += components[j]\n unused.remove(j)\n components2 = components1[:]\n components1 = [components[k] for k in unused]\n return components1\n\n\n<function token>\n<function token>\n\n\ndef intersect_in_2D(class1, class2, monotonicity=1):\n pl_intesected_pairs = []\n if monotonicity == 1:\n for i in range(len(class1)):\n for j in range(len(class2)):\n if d.boxes_intersection(class1[i][:2], class2[j][:2]) != [\n ] and d.boxes_intersection(class1[i], class2[j]) == []:\n if [class2[j], class1[i]] not in pl_intesected_pairs:\n pl_intesected_pairs.append([class1[i], class2[j]])\n elif monotonicity == 0:\n for i in range(len(class1)):\n for j in range(len(class2)):\n if d.boxes_intersection(class1[i][:2], class2[j][:2]) != []:\n if [class2[j], class1[i]] not in pl_intesected_pairs:\n pl_intesected_pairs.append([class1[i], class2[j]])\n elif monotonicity == 2:\n inters_indic = []\n for i in range(len(class1)):\n inters_indic.append([])\n for j in range(len(class2)):\n if d.boxes_intersection(class1[i][:2], class2[j][:2]) != []:\n inters_indic[i] = inters_indic[i] + [j]\n for k in range(len(class1)):\n if len(inters_indic[k]) > 3:\n for j in range(len(inters_indic[k])):\n if [class2[j], class1[k]] not in pl_intesected_pairs:\n pl_intesected_pairs.append([class1[k], class2[j]])\n return pl_intesected_pairs\n\n\ndef solving_fornodes(equations, boxes, B, X, eps=0.1):\n plane_components = detecting_nodes(boxes, B, equations, X, eps)\n g = open(equations, 'r')\n P = [Pi.replace('\\n', '') for Pi in g.readlines()]\n Ball_solutions = []\n for plane_component in plane_components:\n x1 = float(min([ai[0].lower() for ai in plane_component]))\n x2 = float(max([ai[0].upper() for ai in plane_component]))\n y1 = float(min([ai[1].lower() for ai in plane_component]))\n y2 = float(max([ai[1].upper() for ai in plane_component]))\n components = connected_compnants(plane_component)\n r = [[float(ri[0]), float(ri[1])] for ri in estimating_r(components)]\n t = estimating_t(components)\n t = [float(t[0]), float(t[1])]\n B_Ball = [[x1, x2], [y1, y2]] + r + [t]\n Ball_generating_system(P, B_Ball, X)\n solutionsi = ibex_output(P, B_Ball, X)\n Ball_solutions += solutionsi\n return Ball_solutions\n\n\n<function token>\n\n\ndef plane_subdivision(B):\n ft_B2 = d.subdivide([d.ftconstructor(Bi[0], Bi[1]) for Bi in B[:2]])\n normal_B2 = [d.ft_normal(Bi) for Bi in ft_B2]\n return d.cartesian_product(normal_B2, [B[2:]])\n\n\ndef system_generator(f, B, X):\n g = open(f, 'r')\n L = g.readlines()\n g.close()\n f = open('eq.txt', 'w+')\n f.write('Variables \\n')\n for i in range(len(X)):\n f.write(str(X[i]) + ' in ' + str(B[i]) + ' ; \\n')\n f.write('Constraints \\n')\n for Li in L:\n f.write(Li.replace('\\n', '') + '=0; \\n')\n f.write('end ')\n f.close()\n return f\n\n\n<function token>\n\n\ndef computing_boxes():\n if 'infeasible' in open('output.txt', 'r').read():\n return 'Empty'\n content = open('output.txt', 'r').readlines()\n cer = []\n uncer = []\n i = 0\n Answer = []\n for fi in content:\n try:\n a = fi.index('(')\n b = fi.index(')')\n T = fi[a:b + 1].replace('(', '[')\n T = fi[a:b + 1].replace('(', '[')\n T = T.replace(')', ']')\n T = T.split(';')\n E = []\n i = 0\n for Ti in T:\n Ti = Ti.replace('[', '')\n Ti = Ti.replace(']', '')\n Ti = Ti.replace('<', '')\n Ti = Ti.replace('>', '')\n x = Ti.index(',')\n a = float(Ti[:x])\n b = float(Ti[x + 1:])\n E.append([])\n E[i] = [a, b]\n i += 1\n if 'solution n' in fi or 'boundary n' in fi:\n cer.append(E)\n elif 'unknown n' in fi:\n uncer.append(E)\n except ValueError:\n pass\n return [cer, uncer]\n\n\ndef enclosing_curve(system, B, X, eps_min=0.1, eps_max=0.1):\n L = [B]\n certified_boxes = []\n uncertified_boxes = []\n while len(L) != 0:\n system_generator(system, L[0], X)\n os.system('ibexsolve --eps-max=' + str(eps_max) + ' --eps-min=' +\n str(eps_min) + ' -s eq.txt > output.txt')\n content = open('output.txt', 'r').readlines()\n ibex_output = computing_boxes()\n if ibex_output == [[], []] and max([(Bi[1] - Bi[0]) for Bi in L[0]]\n ) < eps_min:\n uncertified_boxes.append(L[0])\n L.remove(L[0])\n elif ibex_output == [[], []]:\n children = plane_subdivision(L[0])\n L.remove(L[0])\n L += children\n elif ibex_output == 'Empty':\n L.remove(L[0])\n else:\n if len(ibex_output[0]) != 0:\n certified_boxes += ibex_output[0]\n if len(ibex_output[1]) != 0:\n uncertified_boxes += ibex_output[1]\n L.remove(L[0])\n return [certified_boxes, uncertified_boxes]\n\n\ndef loopsfree_checker(f, certified_boxes, uncer_boxes, P):\n L = eval_file_gen(f, certified_boxes, X)\n while L.replace('\\n', '') != '[]':\n L = L.replace('[', '')\n L = L.replace(']', '')\n L = L.replace('\\n', '')\n L = L.split(',')\n for i in L:\n children = normal_subdivision(certified_boxes[int(i)])\n certified_boxes.remove(certified_boxes[int(i)])\n for child in children:\n cer_children, uncer_children = enclosing_curve(f, child, X)\n certified_boxes += cer_children\n uncer_boxes += uncer_children\n L = eval_file_gen(f, certified_boxes, X)\n return L\n\n\ndef eval_file_gen(f, boxes, X, special_function=[]):\n functions = ['sin', 'cos', 'tan', 'exp'] + special_function\n if len(boxes[0]) == 0:\n return []\n n = len(boxes[0])\n m = len(boxes)\n g = open(f, 'r')\n P_str = g.readlines()\n P_str = [Pi.replace('\\n', '') for Pi in P_str]\n P_str = [Pi.replace('^', '**') for Pi in P_str]\n P_exp = [parse_expr(Pi) for Pi in P_str]\n jac = sp.Matrix(P_str).jacobian(sp.Matrix(X))\n minor1 = jac[:, 1:].det()\n minor2 = jac[:, [i for i in range(n) if i != 1]].det()\n fil = open('evaluation_file1.py', 'w')\n fil.write('import flint as ft \\n')\n fil.write('import sympy as sp \\n')\n fil.write('import interval_arithmetic as d \\n')\n fil.write('boxes=' + str(boxes) + '\\n')\n fil.write(\n 'ftboxes=[ [d.ftconstructor(Bi[0],Bi[1]) for Bi in B ] for B in boxes ] \\n'\n )\n fil.write('n=len(boxes[0])\\n')\n fil.write('m=len(boxes)\\n')\n fil.write('m1=[]\\n')\n fil.write('m2=[]\\n')\n minor1_str = str(minor1)\n minor2_str = str(minor2)\n for i in range(n):\n minor1_str = minor1_str.replace('x' + str(i + 1), 'B[' + str(i) + ']')\n minor2_str = minor2_str.replace('x' + str(i + 1), 'B[' + str(i) + ']')\n for func in functions:\n minor1_str = minor1_str.replace(func, 'ft.arb.' + func)\n minor2_str = minor2_str.replace(func, 'ft.arb.' + func)\n fil.write('for B in ftboxes: \\n')\n fil.write(' m1.append(ft.arb(' + minor1_str + ')) \\n')\n fil.write(' m2.append( ft.arb(' + minor2_str + ')) \\n')\n fil.write(\n 'innrer_loops=[i for i in range(m) if 0 in m1[i] and 0 in m2[i] ]\\n')\n fil.write('print(innrer_loops)\\n')\n fil.close()\n t = os.popen('python3 evaluation_file1.py ').read()\n return t\n\n\n<function token>\n\n\ndef projection_checker(solutions):\n if len(solutions) == 0:\n return [[], []]\n m = len(solutions[0])\n n = int((m + 1) / 2)\n intersect_in2d = [[]] * len(solutions)\n for i in range(len(solutions) - 1):\n for j in range(i + 1, len(solutions)):\n if solutions[i] == solutions[j]:\n continue\n elif d.boxes_intersection(solutions[i][:2], solutions[j][:2]) != [\n ] and (d.boxes_intersection(solutions[i][n:2 * n - 2], [[-\n Bi[1], -Bi[0]] for Bi in solutions[j][n:2 * n - 2]]) == [] and\n d.boxes_intersection(solutions[i][n:2 * n - 2], [[Bi[0], Bi\n [1]] for Bi in solutions[j][n:2 * n - 2]]) == [] or d.\n boxes_intersection(solutions[i][2:n] + [solutions[i][2 * n -\n 2]], solutions[j][2:n] + [solutions[j][2 * n - 2]]) == []):\n intersect_in2d[i] = intersect_in2d[i] + [j]\n accepted = []\n acc_ind = []\n unaccepted = []\n unacc_ind = []\n for i in range(len(solutions)):\n if len(intersect_in2d[i]) == 0 and i not in unacc_ind + acc_ind:\n accepted.append(solutions[i])\n acc_ind.append(i)\n continue\n elif i not in unacc_ind + acc_ind:\n unaccepted.append(solutions[i])\n unacc_ind.append(i)\n for k in intersect_in2d[i]:\n if k not in unacc_ind:\n unaccepted.append(solutions[k])\n unacc_ind.append(k)\n return [accepted, unaccepted]\n\n\ndef Ball_given_2nboxes(system, X, B1, B2, monotonicity_B1=1, monotonicity_B2=1\n ):\n B1_ft = [d.ftconstructor(Bi[0], Bi[1]) for Bi in B1]\n B2_ft = [d.ftconstructor(Bi[0], Bi[1]) for Bi in B2]\n P = [Pi.replace('\\n', '') for Pi in open(system, 'r').readlines()]\n sol = 'Empty'\n if d.boxes_intersection(B1_ft, B2_ft) == [\n ] and monotonicity_B1 == monotonicity_B2 == 1:\n t = estimating_t([[B1_ft], [B2_ft]])\n y_and_r = estimating_yandr([[B1_ft], [B2_ft]])\n intersec_B1B2_in2d = d.boxes_intersection(B1_ft[:2], B2_ft[:2])\n intersec_B1B2_in2d = [[float(Bi.lower()), float(Bi.upper())] for Bi in\n intersec_B1B2_in2d]\n B_Ball = intersec_B1B2_in2d + y_and_r + [t]\n Ball_node_gen(system, B_Ball, X)\n os.system('ibexsolve --eps-max=0.1 -s eq.txt > output.txt')\n sol = computing_boxes()\n return sol\n\n\n<function token>\n\n\ndef checking_assumptions(curve_data):\n if len(curve_data[0][1]) != 0:\n return 0\n Ball_sols_ft = [[d.ftconstructor(Bi[0], Bi[1]) for Bi in B] for B in\n curve_data[1][0]] + [[d.ftconstructor(Bi[0], Bi[1]) for Bi in B] for\n B in curve_data[1][1]]\n alph3 = assum_alph3_checker(Ball_sols_ft)\n if alph3 == 1:\n return 1\n else:\n return 0\n\n\ndef csv_saver(L, type_L='Ball'):\n dic = []\n if type_L == 'Ball':\n n = int((len(L[0]) + 1) / 2)\n for j in range(len(L)):\n dic.append({})\n for i in range(n):\n dic[j]['x' + str(i + 1)] = L[j][i]\n for i in range(n, 2 * n - 2):\n dic[j]['r' + str(i + 3 - n)] = L[j][i]\n dic[j]['t'] = L[j][2 * n - 2]\n return dic\n\n\ndef dict2csv(dictlist, csvfile):\n \"\"\"\n Takes a list of dictionaries as input and outputs a CSV file.\n \"\"\"\n f = open(csvfile, 'wb')\n fieldnames = dictlist[0].keys()\n csvwriter = csv.DictWriter(f, delimiter=',', fieldnames=fieldnames)\n csvwriter.writerow(dict((fn, fn) for fn in fieldnames))\n for row in dictlist:\n csvwriter.writerow(row)\n fn.close()\n\n\ndef assum_alph3_checker(solutions):\n comparing_list = [[]] * len(solutions)\n for i in range(len(solutions) - 1):\n for j in range(i + 1, len(solutions)):\n if d.boxes_intersection(solutions[i][:2], solutions[j][:2]) != []:\n comparing_list[i].append(j)\n comparing_list[j].append(i)\n matching = [len(T) for T in comparing_list]\n if max(matching) <= 2:\n return 1\n else:\n return 0\n\n\ndef plotting_3D(boxes, Box, var=[0, 1, 2]):\n ax = plt.figure().add_subplot(111, projection='3d')\n ax.set_xlim(Box[0][0], Box[0][1])\n ax.set_ylim(Box[1][0], Box[1][1])\n ax.set_zlim(Box[2][0], Box[2][1])\n ax.set_xlabel('x' + str(var[0] + 1))\n ax.set_ylabel('x' + str(var[1] + 1))\n ax.set_zlabel('x' + str(var[2] + 1))\n for box in boxes:\n V = [[box[j][0] for j in range(3)], [box[j][1] for j in range(3)]]\n points = list(itertools.product(*box))\n faces = [[points[0], points[2], points[6], points[4]], [points[0],\n points[2], points[3], points[1]], [points[0], points[1], points\n [5], points[4]], [points[2], points[3], points[7], points[6]],\n [points[1], points[3], points[7], points[5]]]\n ax.add_collection3d(Poly3DCollection(faces, facecolors='green',\n linewidths=1, edgecolors='green', alpha=0.25))\n plt.show()\n\n\ndef enclosing_singularities(system, boxes, B, X, eps_max=0.1, eps_min=0.01):\n combin = []\n ball = []\n start_combin = time.time()\n n = len(B)\n P = [Pi.replace('\\n', '') for Pi in open(system, 'r').readlines()]\n certified_boxes, uncertified_boxes = boxes\n classes = boxes_classifier(system, boxes, X, special_function=[])\n cer_Solutions = []\n uncer_Solutions = []\n H = []\n mon_mid = [[(0.5 * (Bij[1] + Bij[0])) for Bij in Bi[:2]] for Bi in\n classes[0]]\n mon_rad = [max([(0.5 * (Bij[1] - Bij[0])) for Bij in Bi[:2]]) for Bi in\n classes[0]]\n tree = spatial.KDTree(mon_mid)\n intersting_boxes = [tree.query_ball_point(m, r=math.sqrt(2) * r) for m,\n r in zip(mon_mid, mon_rad)]\n \"\"\"for i in range(len(ball)): \n for j in ball[i]:\n if i not in ball[j]:\n ball[j].append(i)\"\"\"\n intersting_boxes = [indi for indi in intersting_boxes if len(indi) > 3]\n discarded_components = []\n for i in range(len(intersting_boxes) - 1):\n for_i_stop = 0\n boxi_set = set(intersting_boxes[i])\n for j in range(i + 1, len(intersting_boxes)):\n boxj_set = set(intersting_boxes[j])\n if boxj_set.issubset(boxi_set):\n discarded_components.append(j)\n elif boxi_set < boxj_set:\n discarded_components.append(i)\n intersting_boxes = [intersting_boxes[i] for i in range(len(\n intersting_boxes)) if i not in discarded_components]\n interesting_boxes_flattened = []\n for Box_ind in intersting_boxes:\n for j in Box_ind:\n if j not in interesting_boxes_flattened:\n interesting_boxes_flattened.append(j)\n plane_components = planner_connected_compnants([classes[0][i] for i in\n interesting_boxes_flattened])\n end_combin = time.time()\n combin.append(end_combin - start_combin)\n H = []\n for plane_component in plane_components:\n if len(plane_component) > 1:\n start_combin = time.time()\n components = connected_compnants(plane_component)\n pairs_of_branches = all_pairs_oflist(components)\n end_combin = time.time()\n combin.append(end_combin - start_combin)\n for pair_branches in pairs_of_branches:\n start_ball = time.time()\n all_boxes = pair_branches[0] + pair_branches[1]\n uni = []\n for box in all_boxes:\n uni = d.box_union(uni, box)\n t = estimating_t(pair_branches)\n t1 = d.ftconstructor(t[0], t[1])\n t = [float(t1.lower()), float(t1.upper())]\n r = [[float(ri[0]), float(ri[1])] for ri in\n estimating_yandr(pair_branches)]\n B_Ball = uni[:2] + r + [t]\n cusp_Ball_solver(P, B_Ball, X)\n Ball_generating_system(P, B_Ball, X, eps_min)\n os.system('ibexsolve --eps-max=' + str(eps_max) +\n ' --eps-min=' + str(eps_min) + ' -s eq.txt > output.txt'\n )\n Solutions = computing_boxes()\n if Solutions != 'Empty' and Solutions != [[], []]:\n cer_Solutions += Solutions[0]\n uncer_Solutions += Solutions[1]\n if Solutions == [[], []]:\n if d.width(B_Ball[:2]) > eps_min:\n new_B = B_Ball[:2] + B[2:n]\n new_boxes = enclosing_curve(system, new_B, X,\n eps_max=0.1 * eps_max)\n resul = enclosing_singularities(system, new_boxes,\n new_B, X, eps_max=0.1 * eps_max)\n cer_Solutions += resul[0] + resul[1]\n uncer_Solutions += resul[2]\n boxes[1] += new_boxes[1]\n else:\n uncer_Solutions.append(B_Ball)\n end_ball = time.time()\n ball.append(end_ball - start_ball)\n start_combin = time.time()\n checked_boxes = []\n all_boxes = boxes[0] + boxes[1]\n checked_boxes = []\n mon_mid_cusp = [[(0.5 * (Bij[1] + Bij[0])) for Bij in Bi[:2]] for Bi in\n classes[1]]\n mon_rad_cusp = [max([(0.5 * (Bij[1] - Bij[0])) for Bij in Bi[:2]]) for\n Bi in classes[1]]\n potential_cusps = [tree.query_ball_point(m, r=math.sqrt(2) * (r +\n eps_max)) for m, r in zip(mon_mid_cusp, mon_rad_cusp)]\n end_combin = time.time()\n combin.append(end_combin - start_combin)\n for cusp_indx in range(len(classes[1])):\n start_combin = time.time()\n intersecting_boxes = [all_boxes[i] for i in potential_cusps[\n cusp_indx] if d.boxes_intersection(all_boxes[i], classes[1][\n cusp_indx]) != []]\n H = []\n uni = classes[1][cusp_indx][:]\n potential_cusp = classes[1][cusp_indx][:]\n checked_boxes.append(potential_cusp)\n for box in intersecting_boxes:\n if box in checked_boxes:\n continue\n uni = d.box_union(uni, box)\n checked_boxes.append(box)\n end_combin = time.time()\n combin.append(end_combin - start_combin)\n start_ball = time.time()\n t = estimating_t([[potential_cusp], [potential_cusp]])\n \"\"\"if t[1]-t[0] < 1e-07:\n t[0]=t[0]-0.5 * eps_min\n t[1]=t[1]+0.5 * eps_min\"\"\"\n B_Ball = uni + [[-1.01, 1.01]] * (n - 2) + [t]\n H.append(B_Ball)\n sol = cusp_Ball_solver(P, B_Ball, X)\n if sol != 'Empty' and sol != [[], []]:\n cer_Solutions += sol[0]\n uncer_Solutions += sol[1]\n if sol == [[], []]:\n uncer_Solutions.append(B_Ball)\n end_ball = time.time()\n ball.append(end_ball - start_ball)\n start_combin = time.time()\n non_intersecting_boxes = [all_boxes[i] for i in potential_cusps[\n cusp_indx] if d.boxes_intersection(all_boxes[i], classes[1][\n cusp_indx]) == []]\n end_combin = time.time()\n combin.append(end_combin - start_combin)\n for aligned in non_intersecting_boxes:\n start_ball = time.time()\n if aligned in checked_boxes:\n continue\n boxes_intersect_aligned = [B for B in non_intersecting_boxes if\n d.boxes_intersection(aligned, B) != []]\n uni = aligned[:]\n for boxi in boxes_intersect_aligned:\n if boxi in checked_boxes:\n continue\n uni = d.box_union(uni, boxi)\n checked_boxes.append(boxi)\n t = estimating_t([[potential_cusp], [uni]])\n \"\"\"if t[1]-t[0] < 1e-07:\n t[0]=t[0]-0.5 * eps_min\n t[1]=t[1]+0.5 * eps_min\"\"\"\n r = [[float(ri[0]), float(ri[1])] for ri in estimating_yandr([[\n potential_cusp], [uni]])]\n B_Ball = potential_cusp[:2] + r + [t]\n H.append(H)\n Ball_generating_system(P, B_Ball, X)\n os.system('ibexsolve --eps-max=' + str(eps_max) +\n ' --eps-min=' + str(eps_min) + ' -s eq.txt > output.txt')\n Solutions = computing_boxes()\n if Solutions != 'Empty':\n cer_Solutions += Solutions[0]\n uncer_Solutions += Solutions[1]\n elif Solutions == [[], []]:\n uncer_Solutions.append(B_Ball)\n end_ball = time.time()\n ball.append(end_ball - start_ball)\n nodes = []\n cups_or_smallnodes = []\n start_combin = time.time()\n checker = projection_checker(cer_Solutions)\n uncer_Solutions = uncer_Solutions + checker[1]\n cer_Solutions = [Bi for Bi in checker[0] if Bi[2 * n - 2][1] >= 0]\n for solution in cer_Solutions:\n if 0 >= solution[2 * n - 2][0] and 0 <= solution[2 * n - 2][1]:\n cups_or_smallnodes.append(solution)\n else:\n nodes.append(solution)\n end_combin = time.time()\n combin.append(end_combin - start_combin)\n print('KDtree ', sum(combin), 'Ball ', sum(ball))\n return [nodes, cups_or_smallnodes, uncer_Solutions]\n\n\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<docstring token>\n",
"<import token>\n\n\ndef ploting_boxes(boxes, uncer_boxes, var=[0, 1], B=[[-20, 20], [-20, 20]],\n x=0.1, nodes=[], cusps=[], uncer_Solutions=[], Legend=False, color=\n 'green', variabel_name='x'):\n fig, ax = plt.subplots()\n ax.set_xlim(B[0][0], B[0][1])\n ax.set_ylim(B[1][0], B[1][1])\n ax.set_xlabel(variabel_name + str(1))\n ax.set_ylabel(variabel_name + str(2))\n \"\"\"try:\n ax.title(open(\"system.txt\",\"r\").read())\n except:\n pass\"\"\"\n c = 0\n green_patch = mpatches.Patch(color=color, label='smooth part')\n red_patch = mpatches.Patch(color='red', label='unknown part')\n node_patch = mpatches.Patch(color='black', label='Certified nodes',\n fill=None)\n cusp_patch = mpatches.Patch(color='blue', label=\n 'Projection of certified solution with t=0 ', fill=None)\n if Legend == True:\n plt.legend(handles=[green_patch, red_patch, node_patch, cusp_patch])\n for box in boxes:\n rectangle = plt.Rectangle((box[var[0]][0], box[var[1]][0]), box[var\n [0]][1] - box[var[0]][0], box[var[1]][1] - box[var[1]][0],\n color=color)\n plt.gca().add_patch(rectangle)\n for box in uncer_boxes:\n rectangle = plt.Rectangle((box[var[0]][0], box[var[1]][0]), box[var\n [0]][1] - box[var[0]][0], box[var[1]][1] - box[var[1]][0], fc='r')\n plt.gca().add_patch(rectangle)\n for box in nodes:\n rectangle = plt.Rectangle((box[0][0] - x, box[1][0] - x), 2 * x +\n box[0][1] - box[0][0], 2 * x + box[1][1] - box[1][0], fc='y',\n fill=None)\n plt.gca().add_patch(rectangle)\n for box in cusps:\n rectangle = plt.Rectangle((box[0][0] - x, box[1][0] - x), 2 * x +\n box[0][1] - box[0][0], 2 * x + box[1][1] - box[1][0], fc='y',\n color='blue', fill=None)\n plt.gca().add_patch(rectangle)\n for box in uncer_Solutions:\n rectangle = plt.Rectangle((box[0][0] - x, box[1][0] - x), 2 * x +\n box[0][1] - box[0][0], 2 * x + box[1][1] - box[1][0], fc='y',\n color='red', fill=None)\n plt.gca().add_patch(rectangle)\n plt.savefig('fig.jpg', dpi=1000)\n plt.show()\n\n\n<function token>\n\n\ndef Ball_solver(equations, B_Ball, X):\n L = [B_Ball]\n certified_boxes = []\n uncertified_boxes = []\n n = len(X)\n while len(L) != 0:\n solvability = 1\n if B_Ball[2 * n - 2][0] <= 0 <= B_Ball[2 * n - 2][1] and d.width([d\n .ftconstructor(Bi[0], Bi[1]) for Bi in L[0]]) < 0.1:\n Ball_cusp_gen(equations, B_Ball, X)\n elif (B_Ball[2 * n - 2][0] > 0 or 0 > B_Ball[2 * n - 2][1]\n ) and d.width([d.ftconstructor(Bi[0], Bi[1]) for Bi in L[0]]\n ) < 0.1:\n Ball_node_gen(equations, B_Ball, X)\n else:\n children = cb.plane_subdivision(L[0])\n L.remove(L[0])\n L += children\n solvability = 0\n if solvability == 1:\n ibex_output = cb.solving_with_ibex()\n if ibex_output[0] == 'Empty':\n L.remove(L[0])\n elif len(ibex_output[0]) != 0:\n certified_boxes += cb.computing_boxes(ibex_output[0])\n L.remove(L[0])\n elif len(ibex_output[1]) != 0:\n uncertified_boxes += cb.computing_boxes(ibex_output[1])\n L.remove(L[0])\n else:\n children = cb.plane_subdivision(L[0])\n L.remove(L[0])\n L += children\n return [certified_boxes, uncertified_boxes]\n\n\ndef SDP_str(P, X):\n n = len(X)\n P_pluse = P[:]\n P_minus = P[:]\n for i in range(2, n):\n P_pluse = P_pluse.replace('x' + str(i + 1), '(x' + str(i + 1) +\n '+ r' + str(i + 1) + '*sqrt(t))')\n P_minus = P_minus.replace('x' + str(i + 1), '(x' + str(i + 1) +\n '- r' + str(i + 1) + '*sqrt(t))')\n SP = '0.5*(' + P_pluse + '+' + P_minus + ')=0; \\n'\n DP = '0.5*(' + P_pluse + '- (' + P_minus + ') )/(sqrt(t))=0; \\n'\n return [SP, DP]\n\n\ndef Ball_generating_system(P, B_Ball, X, eps_min=0.001):\n n = len(X)\n V = ' Variables \\n '\n for i in range(n):\n if B_Ball[i][0] != B_Ball[i][1]:\n V += 'x' + str(i + 1) + ' in ' + str(B_Ball[i]) + ' ; \\n'\n else:\n V += 'x' + str(i + 1) + ' in ' + str([B_Ball[i][0] - eps_min, \n B_Ball[i][1] + eps_min]) + ' ; \\n'\n for i in range(n, 2 * n - 2):\n V += 'r' + str(i - n + 3) + ' in ' + str(B_Ball[i]) + ' ; \\n'\n V += 't' + ' in ' + str(B_Ball[2 * n - 2]) + ' ; \\n'\n V += 'Constraints \\n'\n for Pi in P:\n V += SDP_str(Pi, X)[0]\n V += SDP_str(Pi, X)[1]\n last_eq = ''\n for i in range(3, n):\n last_eq += 'r' + str(i) + '^2+'\n last_eq += 'r' + str(n) + '^2 -1=0;'\n V += last_eq + '\\n'\n f = open('eq.txt', 'w+')\n f.write(V)\n f.write('end')\n f.close()\n\n\n<function token>\n\n\ndef intersting_boxes(curve, b):\n cer_intersting_boxes = []\n uncer_intersting_boxes = []\n for box in curve[0]:\n if b[0][0] <= box[0][0] <= box[0][1] <= b[0][1] and b[1][0] <= box[1][0\n ] <= box[1][1] <= b[1][1]:\n cer_intersting_boxes.append(box)\n for box in curve[1]:\n if b[0][0] <= box[0][0] <= box[0][1] <= b[0][1] and b[1][0] <= box[1][0\n ] <= box[1][1] <= b[1][1]:\n uncer_intersting_boxes.append(box)\n return [cer_intersting_boxes, uncer_intersting_boxes]\n\n\n<function token>\n\n\ndef estimating_t1(components, upper_bound=200000):\n t1 = upper_bound\n t2 = 0\n for box1 in components[0]:\n for box2 in components[1]:\n a = d.distance(box1, box2).lower()\n b = d.distance(box1, box2).upper()\n if t1 > a:\n t1 = a\n if t2 < b:\n t2 = b\n t = d.ftconstructor(t1, t2)\n t = 0.25 * d.power_interval(t, 2)\n return [float(t.lower()), float(t.upper())]\n\n\ndef estimating_t(components, upper_bound=19000.8):\n t1 = upper_bound\n t2 = 0\n for box1 in components[0]:\n for box2 in components[1]:\n a = d.distance(box1[2:], box2[2:])\n if t1 > a[0]:\n t1 = a[0]\n if t2 < a[1]:\n t2 = a[1]\n t = d.ftconstructor(t1, t2)\n t = 0.25 * d.power_interval(t, 2)\n return [float(t.lower()), float(t.upper())]\n\n\ndef boxes_compare(box1, box2):\n flage = 0\n for i in range(len(box1) - 1, -1, -1):\n if box1[i][0] > box2[i][0]:\n return 1\n if box1[i][0] < box2[i][0]:\n return -1\n return 0\n\n\ndef boxes_sort(boxes):\n sorted_boxes = boxes[:]\n for i in range(len(boxes) - 1):\n for j in range(i + 1, len(boxes)):\n if boxes_compare(sorted_boxes[i], sorted_boxes[j]) == 1:\n sorted_boxes[i], sorted_boxes[j] = sorted_boxes[j\n ], sorted_boxes[i]\n return sorted_boxes\n\n\ndef connected_compnants(boxes):\n ftboxes = boxes[:]\n components = [[ftboxes[0]]]\n for i in range(1, len(ftboxes)):\n boxi_isused = 0\n for j in range(len(components)):\n membership = 0\n for k in range(len(components[j])):\n if d.boxes_intersection(ftboxes[i], components[j][k]) != []:\n components[j].append(ftboxes[i])\n membership = 1\n boxi_isused = 1\n break\n if membership == 1:\n break\n if boxi_isused == 0:\n components.append([ftboxes[i]])\n unused = list(range(len(components)))\n components1 = components[:]\n components2 = []\n while len(components1) != len(components2):\n for i in unused:\n for j in [j for j in list(range(i + 1, len(components))) if j in\n unused]:\n intersection_exists = False\n is_looping = True\n for boxi in components[i]:\n for boxj in components[j]:\n if d.boxes_intersection(boxi, boxj) != []:\n is_looping = False\n intersection_exists = True\n break\n if is_looping == False:\n break\n if intersection_exists == True:\n components[i] += components[j]\n unused.remove(j)\n components2 = components1[:]\n components1 = [components[k] for k in unused]\n return components1\n\n\ndef planner_connected_compnants(boxes):\n if len(boxes) == 0:\n return []\n ftboxes = boxes[:]\n components = [[ftboxes[0]]]\n for i in range(1, len(ftboxes)):\n boxi_isused = 0\n for j in range(len(components)):\n membership = 0\n for k in range(len(components[j])):\n if d.boxes_intersection(ftboxes[i][:2], components[j][k][:2]\n ) != []:\n components[j].append(ftboxes[i])\n membership = 1\n boxi_isused = 1\n break\n if membership == 1:\n break\n if boxi_isused == 0:\n components.append([ftboxes[i]])\n unused = list(range(len(components)))\n components1 = components[:]\n components2 = []\n while len(components1) != len(components2):\n for i in unused:\n for j in [j for j in list(range(i + 1, len(components))) if j in\n unused]:\n intersection_exists = False\n is_looping = True\n for boxi in components[i]:\n for boxj in components[j]:\n if d.boxes_intersection(boxi[:2], boxj[:2]) != []:\n is_looping = False\n intersection_exists = True\n break\n if is_looping == False:\n break\n if intersection_exists == True:\n components[i] += components[j]\n unused.remove(j)\n components2 = components1[:]\n components1 = [components[k] for k in unused]\n return components1\n\n\n<function token>\n<function token>\n\n\ndef intersect_in_2D(class1, class2, monotonicity=1):\n pl_intesected_pairs = []\n if monotonicity == 1:\n for i in range(len(class1)):\n for j in range(len(class2)):\n if d.boxes_intersection(class1[i][:2], class2[j][:2]) != [\n ] and d.boxes_intersection(class1[i], class2[j]) == []:\n if [class2[j], class1[i]] not in pl_intesected_pairs:\n pl_intesected_pairs.append([class1[i], class2[j]])\n elif monotonicity == 0:\n for i in range(len(class1)):\n for j in range(len(class2)):\n if d.boxes_intersection(class1[i][:2], class2[j][:2]) != []:\n if [class2[j], class1[i]] not in pl_intesected_pairs:\n pl_intesected_pairs.append([class1[i], class2[j]])\n elif monotonicity == 2:\n inters_indic = []\n for i in range(len(class1)):\n inters_indic.append([])\n for j in range(len(class2)):\n if d.boxes_intersection(class1[i][:2], class2[j][:2]) != []:\n inters_indic[i] = inters_indic[i] + [j]\n for k in range(len(class1)):\n if len(inters_indic[k]) > 3:\n for j in range(len(inters_indic[k])):\n if [class2[j], class1[k]] not in pl_intesected_pairs:\n pl_intesected_pairs.append([class1[k], class2[j]])\n return pl_intesected_pairs\n\n\ndef solving_fornodes(equations, boxes, B, X, eps=0.1):\n plane_components = detecting_nodes(boxes, B, equations, X, eps)\n g = open(equations, 'r')\n P = [Pi.replace('\\n', '') for Pi in g.readlines()]\n Ball_solutions = []\n for plane_component in plane_components:\n x1 = float(min([ai[0].lower() for ai in plane_component]))\n x2 = float(max([ai[0].upper() for ai in plane_component]))\n y1 = float(min([ai[1].lower() for ai in plane_component]))\n y2 = float(max([ai[1].upper() for ai in plane_component]))\n components = connected_compnants(plane_component)\n r = [[float(ri[0]), float(ri[1])] for ri in estimating_r(components)]\n t = estimating_t(components)\n t = [float(t[0]), float(t[1])]\n B_Ball = [[x1, x2], [y1, y2]] + r + [t]\n Ball_generating_system(P, B_Ball, X)\n solutionsi = ibex_output(P, B_Ball, X)\n Ball_solutions += solutionsi\n return Ball_solutions\n\n\n<function token>\n\n\ndef plane_subdivision(B):\n ft_B2 = d.subdivide([d.ftconstructor(Bi[0], Bi[1]) for Bi in B[:2]])\n normal_B2 = [d.ft_normal(Bi) for Bi in ft_B2]\n return d.cartesian_product(normal_B2, [B[2:]])\n\n\ndef system_generator(f, B, X):\n g = open(f, 'r')\n L = g.readlines()\n g.close()\n f = open('eq.txt', 'w+')\n f.write('Variables \\n')\n for i in range(len(X)):\n f.write(str(X[i]) + ' in ' + str(B[i]) + ' ; \\n')\n f.write('Constraints \\n')\n for Li in L:\n f.write(Li.replace('\\n', '') + '=0; \\n')\n f.write('end ')\n f.close()\n return f\n\n\n<function token>\n\n\ndef computing_boxes():\n if 'infeasible' in open('output.txt', 'r').read():\n return 'Empty'\n content = open('output.txt', 'r').readlines()\n cer = []\n uncer = []\n i = 0\n Answer = []\n for fi in content:\n try:\n a = fi.index('(')\n b = fi.index(')')\n T = fi[a:b + 1].replace('(', '[')\n T = fi[a:b + 1].replace('(', '[')\n T = T.replace(')', ']')\n T = T.split(';')\n E = []\n i = 0\n for Ti in T:\n Ti = Ti.replace('[', '')\n Ti = Ti.replace(']', '')\n Ti = Ti.replace('<', '')\n Ti = Ti.replace('>', '')\n x = Ti.index(',')\n a = float(Ti[:x])\n b = float(Ti[x + 1:])\n E.append([])\n E[i] = [a, b]\n i += 1\n if 'solution n' in fi or 'boundary n' in fi:\n cer.append(E)\n elif 'unknown n' in fi:\n uncer.append(E)\n except ValueError:\n pass\n return [cer, uncer]\n\n\ndef enclosing_curve(system, B, X, eps_min=0.1, eps_max=0.1):\n L = [B]\n certified_boxes = []\n uncertified_boxes = []\n while len(L) != 0:\n system_generator(system, L[0], X)\n os.system('ibexsolve --eps-max=' + str(eps_max) + ' --eps-min=' +\n str(eps_min) + ' -s eq.txt > output.txt')\n content = open('output.txt', 'r').readlines()\n ibex_output = computing_boxes()\n if ibex_output == [[], []] and max([(Bi[1] - Bi[0]) for Bi in L[0]]\n ) < eps_min:\n uncertified_boxes.append(L[0])\n L.remove(L[0])\n elif ibex_output == [[], []]:\n children = plane_subdivision(L[0])\n L.remove(L[0])\n L += children\n elif ibex_output == 'Empty':\n L.remove(L[0])\n else:\n if len(ibex_output[0]) != 0:\n certified_boxes += ibex_output[0]\n if len(ibex_output[1]) != 0:\n uncertified_boxes += ibex_output[1]\n L.remove(L[0])\n return [certified_boxes, uncertified_boxes]\n\n\ndef loopsfree_checker(f, certified_boxes, uncer_boxes, P):\n L = eval_file_gen(f, certified_boxes, X)\n while L.replace('\\n', '') != '[]':\n L = L.replace('[', '')\n L = L.replace(']', '')\n L = L.replace('\\n', '')\n L = L.split(',')\n for i in L:\n children = normal_subdivision(certified_boxes[int(i)])\n certified_boxes.remove(certified_boxes[int(i)])\n for child in children:\n cer_children, uncer_children = enclosing_curve(f, child, X)\n certified_boxes += cer_children\n uncer_boxes += uncer_children\n L = eval_file_gen(f, certified_boxes, X)\n return L\n\n\ndef eval_file_gen(f, boxes, X, special_function=[]):\n functions = ['sin', 'cos', 'tan', 'exp'] + special_function\n if len(boxes[0]) == 0:\n return []\n n = len(boxes[0])\n m = len(boxes)\n g = open(f, 'r')\n P_str = g.readlines()\n P_str = [Pi.replace('\\n', '') for Pi in P_str]\n P_str = [Pi.replace('^', '**') for Pi in P_str]\n P_exp = [parse_expr(Pi) for Pi in P_str]\n jac = sp.Matrix(P_str).jacobian(sp.Matrix(X))\n minor1 = jac[:, 1:].det()\n minor2 = jac[:, [i for i in range(n) if i != 1]].det()\n fil = open('evaluation_file1.py', 'w')\n fil.write('import flint as ft \\n')\n fil.write('import sympy as sp \\n')\n fil.write('import interval_arithmetic as d \\n')\n fil.write('boxes=' + str(boxes) + '\\n')\n fil.write(\n 'ftboxes=[ [d.ftconstructor(Bi[0],Bi[1]) for Bi in B ] for B in boxes ] \\n'\n )\n fil.write('n=len(boxes[0])\\n')\n fil.write('m=len(boxes)\\n')\n fil.write('m1=[]\\n')\n fil.write('m2=[]\\n')\n minor1_str = str(minor1)\n minor2_str = str(minor2)\n for i in range(n):\n minor1_str = minor1_str.replace('x' + str(i + 1), 'B[' + str(i) + ']')\n minor2_str = minor2_str.replace('x' + str(i + 1), 'B[' + str(i) + ']')\n for func in functions:\n minor1_str = minor1_str.replace(func, 'ft.arb.' + func)\n minor2_str = minor2_str.replace(func, 'ft.arb.' + func)\n fil.write('for B in ftboxes: \\n')\n fil.write(' m1.append(ft.arb(' + minor1_str + ')) \\n')\n fil.write(' m2.append( ft.arb(' + minor2_str + ')) \\n')\n fil.write(\n 'innrer_loops=[i for i in range(m) if 0 in m1[i] and 0 in m2[i] ]\\n')\n fil.write('print(innrer_loops)\\n')\n fil.close()\n t = os.popen('python3 evaluation_file1.py ').read()\n return t\n\n\n<function token>\n\n\ndef projection_checker(solutions):\n if len(solutions) == 0:\n return [[], []]\n m = len(solutions[0])\n n = int((m + 1) / 2)\n intersect_in2d = [[]] * len(solutions)\n for i in range(len(solutions) - 1):\n for j in range(i + 1, len(solutions)):\n if solutions[i] == solutions[j]:\n continue\n elif d.boxes_intersection(solutions[i][:2], solutions[j][:2]) != [\n ] and (d.boxes_intersection(solutions[i][n:2 * n - 2], [[-\n Bi[1], -Bi[0]] for Bi in solutions[j][n:2 * n - 2]]) == [] and\n d.boxes_intersection(solutions[i][n:2 * n - 2], [[Bi[0], Bi\n [1]] for Bi in solutions[j][n:2 * n - 2]]) == [] or d.\n boxes_intersection(solutions[i][2:n] + [solutions[i][2 * n -\n 2]], solutions[j][2:n] + [solutions[j][2 * n - 2]]) == []):\n intersect_in2d[i] = intersect_in2d[i] + [j]\n accepted = []\n acc_ind = []\n unaccepted = []\n unacc_ind = []\n for i in range(len(solutions)):\n if len(intersect_in2d[i]) == 0 and i not in unacc_ind + acc_ind:\n accepted.append(solutions[i])\n acc_ind.append(i)\n continue\n elif i not in unacc_ind + acc_ind:\n unaccepted.append(solutions[i])\n unacc_ind.append(i)\n for k in intersect_in2d[i]:\n if k not in unacc_ind:\n unaccepted.append(solutions[k])\n unacc_ind.append(k)\n return [accepted, unaccepted]\n\n\ndef Ball_given_2nboxes(system, X, B1, B2, monotonicity_B1=1, monotonicity_B2=1\n ):\n B1_ft = [d.ftconstructor(Bi[0], Bi[1]) for Bi in B1]\n B2_ft = [d.ftconstructor(Bi[0], Bi[1]) for Bi in B2]\n P = [Pi.replace('\\n', '') for Pi in open(system, 'r').readlines()]\n sol = 'Empty'\n if d.boxes_intersection(B1_ft, B2_ft) == [\n ] and monotonicity_B1 == monotonicity_B2 == 1:\n t = estimating_t([[B1_ft], [B2_ft]])\n y_and_r = estimating_yandr([[B1_ft], [B2_ft]])\n intersec_B1B2_in2d = d.boxes_intersection(B1_ft[:2], B2_ft[:2])\n intersec_B1B2_in2d = [[float(Bi.lower()), float(Bi.upper())] for Bi in\n intersec_B1B2_in2d]\n B_Ball = intersec_B1B2_in2d + y_and_r + [t]\n Ball_node_gen(system, B_Ball, X)\n os.system('ibexsolve --eps-max=0.1 -s eq.txt > output.txt')\n sol = computing_boxes()\n return sol\n\n\n<function token>\n\n\ndef checking_assumptions(curve_data):\n if len(curve_data[0][1]) != 0:\n return 0\n Ball_sols_ft = [[d.ftconstructor(Bi[0], Bi[1]) for Bi in B] for B in\n curve_data[1][0]] + [[d.ftconstructor(Bi[0], Bi[1]) for Bi in B] for\n B in curve_data[1][1]]\n alph3 = assum_alph3_checker(Ball_sols_ft)\n if alph3 == 1:\n return 1\n else:\n return 0\n\n\ndef csv_saver(L, type_L='Ball'):\n dic = []\n if type_L == 'Ball':\n n = int((len(L[0]) + 1) / 2)\n for j in range(len(L)):\n dic.append({})\n for i in range(n):\n dic[j]['x' + str(i + 1)] = L[j][i]\n for i in range(n, 2 * n - 2):\n dic[j]['r' + str(i + 3 - n)] = L[j][i]\n dic[j]['t'] = L[j][2 * n - 2]\n return dic\n\n\ndef dict2csv(dictlist, csvfile):\n \"\"\"\n Takes a list of dictionaries as input and outputs a CSV file.\n \"\"\"\n f = open(csvfile, 'wb')\n fieldnames = dictlist[0].keys()\n csvwriter = csv.DictWriter(f, delimiter=',', fieldnames=fieldnames)\n csvwriter.writerow(dict((fn, fn) for fn in fieldnames))\n for row in dictlist:\n csvwriter.writerow(row)\n fn.close()\n\n\ndef assum_alph3_checker(solutions):\n comparing_list = [[]] * len(solutions)\n for i in range(len(solutions) - 1):\n for j in range(i + 1, len(solutions)):\n if d.boxes_intersection(solutions[i][:2], solutions[j][:2]) != []:\n comparing_list[i].append(j)\n comparing_list[j].append(i)\n matching = [len(T) for T in comparing_list]\n if max(matching) <= 2:\n return 1\n else:\n return 0\n\n\ndef plotting_3D(boxes, Box, var=[0, 1, 2]):\n ax = plt.figure().add_subplot(111, projection='3d')\n ax.set_xlim(Box[0][0], Box[0][1])\n ax.set_ylim(Box[1][0], Box[1][1])\n ax.set_zlim(Box[2][0], Box[2][1])\n ax.set_xlabel('x' + str(var[0] + 1))\n ax.set_ylabel('x' + str(var[1] + 1))\n ax.set_zlabel('x' + str(var[2] + 1))\n for box in boxes:\n V = [[box[j][0] for j in range(3)], [box[j][1] for j in range(3)]]\n points = list(itertools.product(*box))\n faces = [[points[0], points[2], points[6], points[4]], [points[0],\n points[2], points[3], points[1]], [points[0], points[1], points\n [5], points[4]], [points[2], points[3], points[7], points[6]],\n [points[1], points[3], points[7], points[5]]]\n ax.add_collection3d(Poly3DCollection(faces, facecolors='green',\n linewidths=1, edgecolors='green', alpha=0.25))\n plt.show()\n\n\ndef enclosing_singularities(system, boxes, B, X, eps_max=0.1, eps_min=0.01):\n combin = []\n ball = []\n start_combin = time.time()\n n = len(B)\n P = [Pi.replace('\\n', '') for Pi in open(system, 'r').readlines()]\n certified_boxes, uncertified_boxes = boxes\n classes = boxes_classifier(system, boxes, X, special_function=[])\n cer_Solutions = []\n uncer_Solutions = []\n H = []\n mon_mid = [[(0.5 * (Bij[1] + Bij[0])) for Bij in Bi[:2]] for Bi in\n classes[0]]\n mon_rad = [max([(0.5 * (Bij[1] - Bij[0])) for Bij in Bi[:2]]) for Bi in\n classes[0]]\n tree = spatial.KDTree(mon_mid)\n intersting_boxes = [tree.query_ball_point(m, r=math.sqrt(2) * r) for m,\n r in zip(mon_mid, mon_rad)]\n \"\"\"for i in range(len(ball)): \n for j in ball[i]:\n if i not in ball[j]:\n ball[j].append(i)\"\"\"\n intersting_boxes = [indi for indi in intersting_boxes if len(indi) > 3]\n discarded_components = []\n for i in range(len(intersting_boxes) - 1):\n for_i_stop = 0\n boxi_set = set(intersting_boxes[i])\n for j in range(i + 1, len(intersting_boxes)):\n boxj_set = set(intersting_boxes[j])\n if boxj_set.issubset(boxi_set):\n discarded_components.append(j)\n elif boxi_set < boxj_set:\n discarded_components.append(i)\n intersting_boxes = [intersting_boxes[i] for i in range(len(\n intersting_boxes)) if i not in discarded_components]\n interesting_boxes_flattened = []\n for Box_ind in intersting_boxes:\n for j in Box_ind:\n if j not in interesting_boxes_flattened:\n interesting_boxes_flattened.append(j)\n plane_components = planner_connected_compnants([classes[0][i] for i in\n interesting_boxes_flattened])\n end_combin = time.time()\n combin.append(end_combin - start_combin)\n H = []\n for plane_component in plane_components:\n if len(plane_component) > 1:\n start_combin = time.time()\n components = connected_compnants(plane_component)\n pairs_of_branches = all_pairs_oflist(components)\n end_combin = time.time()\n combin.append(end_combin - start_combin)\n for pair_branches in pairs_of_branches:\n start_ball = time.time()\n all_boxes = pair_branches[0] + pair_branches[1]\n uni = []\n for box in all_boxes:\n uni = d.box_union(uni, box)\n t = estimating_t(pair_branches)\n t1 = d.ftconstructor(t[0], t[1])\n t = [float(t1.lower()), float(t1.upper())]\n r = [[float(ri[0]), float(ri[1])] for ri in\n estimating_yandr(pair_branches)]\n B_Ball = uni[:2] + r + [t]\n cusp_Ball_solver(P, B_Ball, X)\n Ball_generating_system(P, B_Ball, X, eps_min)\n os.system('ibexsolve --eps-max=' + str(eps_max) +\n ' --eps-min=' + str(eps_min) + ' -s eq.txt > output.txt'\n )\n Solutions = computing_boxes()\n if Solutions != 'Empty' and Solutions != [[], []]:\n cer_Solutions += Solutions[0]\n uncer_Solutions += Solutions[1]\n if Solutions == [[], []]:\n if d.width(B_Ball[:2]) > eps_min:\n new_B = B_Ball[:2] + B[2:n]\n new_boxes = enclosing_curve(system, new_B, X,\n eps_max=0.1 * eps_max)\n resul = enclosing_singularities(system, new_boxes,\n new_B, X, eps_max=0.1 * eps_max)\n cer_Solutions += resul[0] + resul[1]\n uncer_Solutions += resul[2]\n boxes[1] += new_boxes[1]\n else:\n uncer_Solutions.append(B_Ball)\n end_ball = time.time()\n ball.append(end_ball - start_ball)\n start_combin = time.time()\n checked_boxes = []\n all_boxes = boxes[0] + boxes[1]\n checked_boxes = []\n mon_mid_cusp = [[(0.5 * (Bij[1] + Bij[0])) for Bij in Bi[:2]] for Bi in\n classes[1]]\n mon_rad_cusp = [max([(0.5 * (Bij[1] - Bij[0])) for Bij in Bi[:2]]) for\n Bi in classes[1]]\n potential_cusps = [tree.query_ball_point(m, r=math.sqrt(2) * (r +\n eps_max)) for m, r in zip(mon_mid_cusp, mon_rad_cusp)]\n end_combin = time.time()\n combin.append(end_combin - start_combin)\n for cusp_indx in range(len(classes[1])):\n start_combin = time.time()\n intersecting_boxes = [all_boxes[i] for i in potential_cusps[\n cusp_indx] if d.boxes_intersection(all_boxes[i], classes[1][\n cusp_indx]) != []]\n H = []\n uni = classes[1][cusp_indx][:]\n potential_cusp = classes[1][cusp_indx][:]\n checked_boxes.append(potential_cusp)\n for box in intersecting_boxes:\n if box in checked_boxes:\n continue\n uni = d.box_union(uni, box)\n checked_boxes.append(box)\n end_combin = time.time()\n combin.append(end_combin - start_combin)\n start_ball = time.time()\n t = estimating_t([[potential_cusp], [potential_cusp]])\n \"\"\"if t[1]-t[0] < 1e-07:\n t[0]=t[0]-0.5 * eps_min\n t[1]=t[1]+0.5 * eps_min\"\"\"\n B_Ball = uni + [[-1.01, 1.01]] * (n - 2) + [t]\n H.append(B_Ball)\n sol = cusp_Ball_solver(P, B_Ball, X)\n if sol != 'Empty' and sol != [[], []]:\n cer_Solutions += sol[0]\n uncer_Solutions += sol[1]\n if sol == [[], []]:\n uncer_Solutions.append(B_Ball)\n end_ball = time.time()\n ball.append(end_ball - start_ball)\n start_combin = time.time()\n non_intersecting_boxes = [all_boxes[i] for i in potential_cusps[\n cusp_indx] if d.boxes_intersection(all_boxes[i], classes[1][\n cusp_indx]) == []]\n end_combin = time.time()\n combin.append(end_combin - start_combin)\n for aligned in non_intersecting_boxes:\n start_ball = time.time()\n if aligned in checked_boxes:\n continue\n boxes_intersect_aligned = [B for B in non_intersecting_boxes if\n d.boxes_intersection(aligned, B) != []]\n uni = aligned[:]\n for boxi in boxes_intersect_aligned:\n if boxi in checked_boxes:\n continue\n uni = d.box_union(uni, boxi)\n checked_boxes.append(boxi)\n t = estimating_t([[potential_cusp], [uni]])\n \"\"\"if t[1]-t[0] < 1e-07:\n t[0]=t[0]-0.5 * eps_min\n t[1]=t[1]+0.5 * eps_min\"\"\"\n r = [[float(ri[0]), float(ri[1])] for ri in estimating_yandr([[\n potential_cusp], [uni]])]\n B_Ball = potential_cusp[:2] + r + [t]\n H.append(H)\n Ball_generating_system(P, B_Ball, X)\n os.system('ibexsolve --eps-max=' + str(eps_max) +\n ' --eps-min=' + str(eps_min) + ' -s eq.txt > output.txt')\n Solutions = computing_boxes()\n if Solutions != 'Empty':\n cer_Solutions += Solutions[0]\n uncer_Solutions += Solutions[1]\n elif Solutions == [[], []]:\n uncer_Solutions.append(B_Ball)\n end_ball = time.time()\n ball.append(end_ball - start_ball)\n nodes = []\n cups_or_smallnodes = []\n start_combin = time.time()\n checker = projection_checker(cer_Solutions)\n uncer_Solutions = uncer_Solutions + checker[1]\n cer_Solutions = [Bi for Bi in checker[0] if Bi[2 * n - 2][1] >= 0]\n for solution in cer_Solutions:\n if 0 >= solution[2 * n - 2][0] and 0 <= solution[2 * n - 2][1]:\n cups_or_smallnodes.append(solution)\n else:\n nodes.append(solution)\n end_combin = time.time()\n combin.append(end_combin - start_combin)\n print('KDtree ', sum(combin), 'Ball ', sum(ball))\n return [nodes, cups_or_smallnodes, uncer_Solutions]\n\n\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<docstring token>\n",
"<import token>\n\n\ndef ploting_boxes(boxes, uncer_boxes, var=[0, 1], B=[[-20, 20], [-20, 20]],\n x=0.1, nodes=[], cusps=[], uncer_Solutions=[], Legend=False, color=\n 'green', variabel_name='x'):\n fig, ax = plt.subplots()\n ax.set_xlim(B[0][0], B[0][1])\n ax.set_ylim(B[1][0], B[1][1])\n ax.set_xlabel(variabel_name + str(1))\n ax.set_ylabel(variabel_name + str(2))\n \"\"\"try:\n ax.title(open(\"system.txt\",\"r\").read())\n except:\n pass\"\"\"\n c = 0\n green_patch = mpatches.Patch(color=color, label='smooth part')\n red_patch = mpatches.Patch(color='red', label='unknown part')\n node_patch = mpatches.Patch(color='black', label='Certified nodes',\n fill=None)\n cusp_patch = mpatches.Patch(color='blue', label=\n 'Projection of certified solution with t=0 ', fill=None)\n if Legend == True:\n plt.legend(handles=[green_patch, red_patch, node_patch, cusp_patch])\n for box in boxes:\n rectangle = plt.Rectangle((box[var[0]][0], box[var[1]][0]), box[var\n [0]][1] - box[var[0]][0], box[var[1]][1] - box[var[1]][0],\n color=color)\n plt.gca().add_patch(rectangle)\n for box in uncer_boxes:\n rectangle = plt.Rectangle((box[var[0]][0], box[var[1]][0]), box[var\n [0]][1] - box[var[0]][0], box[var[1]][1] - box[var[1]][0], fc='r')\n plt.gca().add_patch(rectangle)\n for box in nodes:\n rectangle = plt.Rectangle((box[0][0] - x, box[1][0] - x), 2 * x +\n box[0][1] - box[0][0], 2 * x + box[1][1] - box[1][0], fc='y',\n fill=None)\n plt.gca().add_patch(rectangle)\n for box in cusps:\n rectangle = plt.Rectangle((box[0][0] - x, box[1][0] - x), 2 * x +\n box[0][1] - box[0][0], 2 * x + box[1][1] - box[1][0], fc='y',\n color='blue', fill=None)\n plt.gca().add_patch(rectangle)\n for box in uncer_Solutions:\n rectangle = plt.Rectangle((box[0][0] - x, box[1][0] - x), 2 * x +\n box[0][1] - box[0][0], 2 * x + box[1][1] - box[1][0], fc='y',\n color='red', fill=None)\n plt.gca().add_patch(rectangle)\n plt.savefig('fig.jpg', dpi=1000)\n plt.show()\n\n\n<function token>\n\n\ndef Ball_solver(equations, B_Ball, X):\n L = [B_Ball]\n certified_boxes = []\n uncertified_boxes = []\n n = len(X)\n while len(L) != 0:\n solvability = 1\n if B_Ball[2 * n - 2][0] <= 0 <= B_Ball[2 * n - 2][1] and d.width([d\n .ftconstructor(Bi[0], Bi[1]) for Bi in L[0]]) < 0.1:\n Ball_cusp_gen(equations, B_Ball, X)\n elif (B_Ball[2 * n - 2][0] > 0 or 0 > B_Ball[2 * n - 2][1]\n ) and d.width([d.ftconstructor(Bi[0], Bi[1]) for Bi in L[0]]\n ) < 0.1:\n Ball_node_gen(equations, B_Ball, X)\n else:\n children = cb.plane_subdivision(L[0])\n L.remove(L[0])\n L += children\n solvability = 0\n if solvability == 1:\n ibex_output = cb.solving_with_ibex()\n if ibex_output[0] == 'Empty':\n L.remove(L[0])\n elif len(ibex_output[0]) != 0:\n certified_boxes += cb.computing_boxes(ibex_output[0])\n L.remove(L[0])\n elif len(ibex_output[1]) != 0:\n uncertified_boxes += cb.computing_boxes(ibex_output[1])\n L.remove(L[0])\n else:\n children = cb.plane_subdivision(L[0])\n L.remove(L[0])\n L += children\n return [certified_boxes, uncertified_boxes]\n\n\ndef SDP_str(P, X):\n n = len(X)\n P_pluse = P[:]\n P_minus = P[:]\n for i in range(2, n):\n P_pluse = P_pluse.replace('x' + str(i + 1), '(x' + str(i + 1) +\n '+ r' + str(i + 1) + '*sqrt(t))')\n P_minus = P_minus.replace('x' + str(i + 1), '(x' + str(i + 1) +\n '- r' + str(i + 1) + '*sqrt(t))')\n SP = '0.5*(' + P_pluse + '+' + P_minus + ')=0; \\n'\n DP = '0.5*(' + P_pluse + '- (' + P_minus + ') )/(sqrt(t))=0; \\n'\n return [SP, DP]\n\n\ndef Ball_generating_system(P, B_Ball, X, eps_min=0.001):\n n = len(X)\n V = ' Variables \\n '\n for i in range(n):\n if B_Ball[i][0] != B_Ball[i][1]:\n V += 'x' + str(i + 1) + ' in ' + str(B_Ball[i]) + ' ; \\n'\n else:\n V += 'x' + str(i + 1) + ' in ' + str([B_Ball[i][0] - eps_min, \n B_Ball[i][1] + eps_min]) + ' ; \\n'\n for i in range(n, 2 * n - 2):\n V += 'r' + str(i - n + 3) + ' in ' + str(B_Ball[i]) + ' ; \\n'\n V += 't' + ' in ' + str(B_Ball[2 * n - 2]) + ' ; \\n'\n V += 'Constraints \\n'\n for Pi in P:\n V += SDP_str(Pi, X)[0]\n V += SDP_str(Pi, X)[1]\n last_eq = ''\n for i in range(3, n):\n last_eq += 'r' + str(i) + '^2+'\n last_eq += 'r' + str(n) + '^2 -1=0;'\n V += last_eq + '\\n'\n f = open('eq.txt', 'w+')\n f.write(V)\n f.write('end')\n f.close()\n\n\n<function token>\n\n\ndef intersting_boxes(curve, b):\n cer_intersting_boxes = []\n uncer_intersting_boxes = []\n for box in curve[0]:\n if b[0][0] <= box[0][0] <= box[0][1] <= b[0][1] and b[1][0] <= box[1][0\n ] <= box[1][1] <= b[1][1]:\n cer_intersting_boxes.append(box)\n for box in curve[1]:\n if b[0][0] <= box[0][0] <= box[0][1] <= b[0][1] and b[1][0] <= box[1][0\n ] <= box[1][1] <= b[1][1]:\n uncer_intersting_boxes.append(box)\n return [cer_intersting_boxes, uncer_intersting_boxes]\n\n\n<function token>\n\n\ndef estimating_t1(components, upper_bound=200000):\n t1 = upper_bound\n t2 = 0\n for box1 in components[0]:\n for box2 in components[1]:\n a = d.distance(box1, box2).lower()\n b = d.distance(box1, box2).upper()\n if t1 > a:\n t1 = a\n if t2 < b:\n t2 = b\n t = d.ftconstructor(t1, t2)\n t = 0.25 * d.power_interval(t, 2)\n return [float(t.lower()), float(t.upper())]\n\n\ndef estimating_t(components, upper_bound=19000.8):\n t1 = upper_bound\n t2 = 0\n for box1 in components[0]:\n for box2 in components[1]:\n a = d.distance(box1[2:], box2[2:])\n if t1 > a[0]:\n t1 = a[0]\n if t2 < a[1]:\n t2 = a[1]\n t = d.ftconstructor(t1, t2)\n t = 0.25 * d.power_interval(t, 2)\n return [float(t.lower()), float(t.upper())]\n\n\ndef boxes_compare(box1, box2):\n flage = 0\n for i in range(len(box1) - 1, -1, -1):\n if box1[i][0] > box2[i][0]:\n return 1\n if box1[i][0] < box2[i][0]:\n return -1\n return 0\n\n\ndef boxes_sort(boxes):\n sorted_boxes = boxes[:]\n for i in range(len(boxes) - 1):\n for j in range(i + 1, len(boxes)):\n if boxes_compare(sorted_boxes[i], sorted_boxes[j]) == 1:\n sorted_boxes[i], sorted_boxes[j] = sorted_boxes[j\n ], sorted_boxes[i]\n return sorted_boxes\n\n\ndef connected_compnants(boxes):\n ftboxes = boxes[:]\n components = [[ftboxes[0]]]\n for i in range(1, len(ftboxes)):\n boxi_isused = 0\n for j in range(len(components)):\n membership = 0\n for k in range(len(components[j])):\n if d.boxes_intersection(ftboxes[i], components[j][k]) != []:\n components[j].append(ftboxes[i])\n membership = 1\n boxi_isused = 1\n break\n if membership == 1:\n break\n if boxi_isused == 0:\n components.append([ftboxes[i]])\n unused = list(range(len(components)))\n components1 = components[:]\n components2 = []\n while len(components1) != len(components2):\n for i in unused:\n for j in [j for j in list(range(i + 1, len(components))) if j in\n unused]:\n intersection_exists = False\n is_looping = True\n for boxi in components[i]:\n for boxj in components[j]:\n if d.boxes_intersection(boxi, boxj) != []:\n is_looping = False\n intersection_exists = True\n break\n if is_looping == False:\n break\n if intersection_exists == True:\n components[i] += components[j]\n unused.remove(j)\n components2 = components1[:]\n components1 = [components[k] for k in unused]\n return components1\n\n\ndef planner_connected_compnants(boxes):\n if len(boxes) == 0:\n return []\n ftboxes = boxes[:]\n components = [[ftboxes[0]]]\n for i in range(1, len(ftboxes)):\n boxi_isused = 0\n for j in range(len(components)):\n membership = 0\n for k in range(len(components[j])):\n if d.boxes_intersection(ftboxes[i][:2], components[j][k][:2]\n ) != []:\n components[j].append(ftboxes[i])\n membership = 1\n boxi_isused = 1\n break\n if membership == 1:\n break\n if boxi_isused == 0:\n components.append([ftboxes[i]])\n unused = list(range(len(components)))\n components1 = components[:]\n components2 = []\n while len(components1) != len(components2):\n for i in unused:\n for j in [j for j in list(range(i + 1, len(components))) if j in\n unused]:\n intersection_exists = False\n is_looping = True\n for boxi in components[i]:\n for boxj in components[j]:\n if d.boxes_intersection(boxi[:2], boxj[:2]) != []:\n is_looping = False\n intersection_exists = True\n break\n if is_looping == False:\n break\n if intersection_exists == True:\n components[i] += components[j]\n unused.remove(j)\n components2 = components1[:]\n components1 = [components[k] for k in unused]\n return components1\n\n\n<function token>\n<function token>\n\n\ndef intersect_in_2D(class1, class2, monotonicity=1):\n pl_intesected_pairs = []\n if monotonicity == 1:\n for i in range(len(class1)):\n for j in range(len(class2)):\n if d.boxes_intersection(class1[i][:2], class2[j][:2]) != [\n ] and d.boxes_intersection(class1[i], class2[j]) == []:\n if [class2[j], class1[i]] not in pl_intesected_pairs:\n pl_intesected_pairs.append([class1[i], class2[j]])\n elif monotonicity == 0:\n for i in range(len(class1)):\n for j in range(len(class2)):\n if d.boxes_intersection(class1[i][:2], class2[j][:2]) != []:\n if [class2[j], class1[i]] not in pl_intesected_pairs:\n pl_intesected_pairs.append([class1[i], class2[j]])\n elif monotonicity == 2:\n inters_indic = []\n for i in range(len(class1)):\n inters_indic.append([])\n for j in range(len(class2)):\n if d.boxes_intersection(class1[i][:2], class2[j][:2]) != []:\n inters_indic[i] = inters_indic[i] + [j]\n for k in range(len(class1)):\n if len(inters_indic[k]) > 3:\n for j in range(len(inters_indic[k])):\n if [class2[j], class1[k]] not in pl_intesected_pairs:\n pl_intesected_pairs.append([class1[k], class2[j]])\n return pl_intesected_pairs\n\n\ndef solving_fornodes(equations, boxes, B, X, eps=0.1):\n plane_components = detecting_nodes(boxes, B, equations, X, eps)\n g = open(equations, 'r')\n P = [Pi.replace('\\n', '') for Pi in g.readlines()]\n Ball_solutions = []\n for plane_component in plane_components:\n x1 = float(min([ai[0].lower() for ai in plane_component]))\n x2 = float(max([ai[0].upper() for ai in plane_component]))\n y1 = float(min([ai[1].lower() for ai in plane_component]))\n y2 = float(max([ai[1].upper() for ai in plane_component]))\n components = connected_compnants(plane_component)\n r = [[float(ri[0]), float(ri[1])] for ri in estimating_r(components)]\n t = estimating_t(components)\n t = [float(t[0]), float(t[1])]\n B_Ball = [[x1, x2], [y1, y2]] + r + [t]\n Ball_generating_system(P, B_Ball, X)\n solutionsi = ibex_output(P, B_Ball, X)\n Ball_solutions += solutionsi\n return Ball_solutions\n\n\n<function token>\n\n\ndef plane_subdivision(B):\n ft_B2 = d.subdivide([d.ftconstructor(Bi[0], Bi[1]) for Bi in B[:2]])\n normal_B2 = [d.ft_normal(Bi) for Bi in ft_B2]\n return d.cartesian_product(normal_B2, [B[2:]])\n\n\ndef system_generator(f, B, X):\n g = open(f, 'r')\n L = g.readlines()\n g.close()\n f = open('eq.txt', 'w+')\n f.write('Variables \\n')\n for i in range(len(X)):\n f.write(str(X[i]) + ' in ' + str(B[i]) + ' ; \\n')\n f.write('Constraints \\n')\n for Li in L:\n f.write(Li.replace('\\n', '') + '=0; \\n')\n f.write('end ')\n f.close()\n return f\n\n\n<function token>\n\n\ndef computing_boxes():\n if 'infeasible' in open('output.txt', 'r').read():\n return 'Empty'\n content = open('output.txt', 'r').readlines()\n cer = []\n uncer = []\n i = 0\n Answer = []\n for fi in content:\n try:\n a = fi.index('(')\n b = fi.index(')')\n T = fi[a:b + 1].replace('(', '[')\n T = fi[a:b + 1].replace('(', '[')\n T = T.replace(')', ']')\n T = T.split(';')\n E = []\n i = 0\n for Ti in T:\n Ti = Ti.replace('[', '')\n Ti = Ti.replace(']', '')\n Ti = Ti.replace('<', '')\n Ti = Ti.replace('>', '')\n x = Ti.index(',')\n a = float(Ti[:x])\n b = float(Ti[x + 1:])\n E.append([])\n E[i] = [a, b]\n i += 1\n if 'solution n' in fi or 'boundary n' in fi:\n cer.append(E)\n elif 'unknown n' in fi:\n uncer.append(E)\n except ValueError:\n pass\n return [cer, uncer]\n\n\ndef enclosing_curve(system, B, X, eps_min=0.1, eps_max=0.1):\n L = [B]\n certified_boxes = []\n uncertified_boxes = []\n while len(L) != 0:\n system_generator(system, L[0], X)\n os.system('ibexsolve --eps-max=' + str(eps_max) + ' --eps-min=' +\n str(eps_min) + ' -s eq.txt > output.txt')\n content = open('output.txt', 'r').readlines()\n ibex_output = computing_boxes()\n if ibex_output == [[], []] and max([(Bi[1] - Bi[0]) for Bi in L[0]]\n ) < eps_min:\n uncertified_boxes.append(L[0])\n L.remove(L[0])\n elif ibex_output == [[], []]:\n children = plane_subdivision(L[0])\n L.remove(L[0])\n L += children\n elif ibex_output == 'Empty':\n L.remove(L[0])\n else:\n if len(ibex_output[0]) != 0:\n certified_boxes += ibex_output[0]\n if len(ibex_output[1]) != 0:\n uncertified_boxes += ibex_output[1]\n L.remove(L[0])\n return [certified_boxes, uncertified_boxes]\n\n\ndef loopsfree_checker(f, certified_boxes, uncer_boxes, P):\n L = eval_file_gen(f, certified_boxes, X)\n while L.replace('\\n', '') != '[]':\n L = L.replace('[', '')\n L = L.replace(']', '')\n L = L.replace('\\n', '')\n L = L.split(',')\n for i in L:\n children = normal_subdivision(certified_boxes[int(i)])\n certified_boxes.remove(certified_boxes[int(i)])\n for child in children:\n cer_children, uncer_children = enclosing_curve(f, child, X)\n certified_boxes += cer_children\n uncer_boxes += uncer_children\n L = eval_file_gen(f, certified_boxes, X)\n return L\n\n\ndef eval_file_gen(f, boxes, X, special_function=[]):\n functions = ['sin', 'cos', 'tan', 'exp'] + special_function\n if len(boxes[0]) == 0:\n return []\n n = len(boxes[0])\n m = len(boxes)\n g = open(f, 'r')\n P_str = g.readlines()\n P_str = [Pi.replace('\\n', '') for Pi in P_str]\n P_str = [Pi.replace('^', '**') for Pi in P_str]\n P_exp = [parse_expr(Pi) for Pi in P_str]\n jac = sp.Matrix(P_str).jacobian(sp.Matrix(X))\n minor1 = jac[:, 1:].det()\n minor2 = jac[:, [i for i in range(n) if i != 1]].det()\n fil = open('evaluation_file1.py', 'w')\n fil.write('import flint as ft \\n')\n fil.write('import sympy as sp \\n')\n fil.write('import interval_arithmetic as d \\n')\n fil.write('boxes=' + str(boxes) + '\\n')\n fil.write(\n 'ftboxes=[ [d.ftconstructor(Bi[0],Bi[1]) for Bi in B ] for B in boxes ] \\n'\n )\n fil.write('n=len(boxes[0])\\n')\n fil.write('m=len(boxes)\\n')\n fil.write('m1=[]\\n')\n fil.write('m2=[]\\n')\n minor1_str = str(minor1)\n minor2_str = str(minor2)\n for i in range(n):\n minor1_str = minor1_str.replace('x' + str(i + 1), 'B[' + str(i) + ']')\n minor2_str = minor2_str.replace('x' + str(i + 1), 'B[' + str(i) + ']')\n for func in functions:\n minor1_str = minor1_str.replace(func, 'ft.arb.' + func)\n minor2_str = minor2_str.replace(func, 'ft.arb.' + func)\n fil.write('for B in ftboxes: \\n')\n fil.write(' m1.append(ft.arb(' + minor1_str + ')) \\n')\n fil.write(' m2.append( ft.arb(' + minor2_str + ')) \\n')\n fil.write(\n 'innrer_loops=[i for i in range(m) if 0 in m1[i] and 0 in m2[i] ]\\n')\n fil.write('print(innrer_loops)\\n')\n fil.close()\n t = os.popen('python3 evaluation_file1.py ').read()\n return t\n\n\n<function token>\n\n\ndef projection_checker(solutions):\n if len(solutions) == 0:\n return [[], []]\n m = len(solutions[0])\n n = int((m + 1) / 2)\n intersect_in2d = [[]] * len(solutions)\n for i in range(len(solutions) - 1):\n for j in range(i + 1, len(solutions)):\n if solutions[i] == solutions[j]:\n continue\n elif d.boxes_intersection(solutions[i][:2], solutions[j][:2]) != [\n ] and (d.boxes_intersection(solutions[i][n:2 * n - 2], [[-\n Bi[1], -Bi[0]] for Bi in solutions[j][n:2 * n - 2]]) == [] and\n d.boxes_intersection(solutions[i][n:2 * n - 2], [[Bi[0], Bi\n [1]] for Bi in solutions[j][n:2 * n - 2]]) == [] or d.\n boxes_intersection(solutions[i][2:n] + [solutions[i][2 * n -\n 2]], solutions[j][2:n] + [solutions[j][2 * n - 2]]) == []):\n intersect_in2d[i] = intersect_in2d[i] + [j]\n accepted = []\n acc_ind = []\n unaccepted = []\n unacc_ind = []\n for i in range(len(solutions)):\n if len(intersect_in2d[i]) == 0 and i not in unacc_ind + acc_ind:\n accepted.append(solutions[i])\n acc_ind.append(i)\n continue\n elif i not in unacc_ind + acc_ind:\n unaccepted.append(solutions[i])\n unacc_ind.append(i)\n for k in intersect_in2d[i]:\n if k not in unacc_ind:\n unaccepted.append(solutions[k])\n unacc_ind.append(k)\n return [accepted, unaccepted]\n\n\ndef Ball_given_2nboxes(system, X, B1, B2, monotonicity_B1=1, monotonicity_B2=1\n ):\n B1_ft = [d.ftconstructor(Bi[0], Bi[1]) for Bi in B1]\n B2_ft = [d.ftconstructor(Bi[0], Bi[1]) for Bi in B2]\n P = [Pi.replace('\\n', '') for Pi in open(system, 'r').readlines()]\n sol = 'Empty'\n if d.boxes_intersection(B1_ft, B2_ft) == [\n ] and monotonicity_B1 == monotonicity_B2 == 1:\n t = estimating_t([[B1_ft], [B2_ft]])\n y_and_r = estimating_yandr([[B1_ft], [B2_ft]])\n intersec_B1B2_in2d = d.boxes_intersection(B1_ft[:2], B2_ft[:2])\n intersec_B1B2_in2d = [[float(Bi.lower()), float(Bi.upper())] for Bi in\n intersec_B1B2_in2d]\n B_Ball = intersec_B1B2_in2d + y_and_r + [t]\n Ball_node_gen(system, B_Ball, X)\n os.system('ibexsolve --eps-max=0.1 -s eq.txt > output.txt')\n sol = computing_boxes()\n return sol\n\n\n<function token>\n\n\ndef checking_assumptions(curve_data):\n if len(curve_data[0][1]) != 0:\n return 0\n Ball_sols_ft = [[d.ftconstructor(Bi[0], Bi[1]) for Bi in B] for B in\n curve_data[1][0]] + [[d.ftconstructor(Bi[0], Bi[1]) for Bi in B] for\n B in curve_data[1][1]]\n alph3 = assum_alph3_checker(Ball_sols_ft)\n if alph3 == 1:\n return 1\n else:\n return 0\n\n\ndef csv_saver(L, type_L='Ball'):\n dic = []\n if type_L == 'Ball':\n n = int((len(L[0]) + 1) / 2)\n for j in range(len(L)):\n dic.append({})\n for i in range(n):\n dic[j]['x' + str(i + 1)] = L[j][i]\n for i in range(n, 2 * n - 2):\n dic[j]['r' + str(i + 3 - n)] = L[j][i]\n dic[j]['t'] = L[j][2 * n - 2]\n return dic\n\n\n<function token>\n\n\ndef assum_alph3_checker(solutions):\n comparing_list = [[]] * len(solutions)\n for i in range(len(solutions) - 1):\n for j in range(i + 1, len(solutions)):\n if d.boxes_intersection(solutions[i][:2], solutions[j][:2]) != []:\n comparing_list[i].append(j)\n comparing_list[j].append(i)\n matching = [len(T) for T in comparing_list]\n if max(matching) <= 2:\n return 1\n else:\n return 0\n\n\ndef plotting_3D(boxes, Box, var=[0, 1, 2]):\n ax = plt.figure().add_subplot(111, projection='3d')\n ax.set_xlim(Box[0][0], Box[0][1])\n ax.set_ylim(Box[1][0], Box[1][1])\n ax.set_zlim(Box[2][0], Box[2][1])\n ax.set_xlabel('x' + str(var[0] + 1))\n ax.set_ylabel('x' + str(var[1] + 1))\n ax.set_zlabel('x' + str(var[2] + 1))\n for box in boxes:\n V = [[box[j][0] for j in range(3)], [box[j][1] for j in range(3)]]\n points = list(itertools.product(*box))\n faces = [[points[0], points[2], points[6], points[4]], [points[0],\n points[2], points[3], points[1]], [points[0], points[1], points\n [5], points[4]], [points[2], points[3], points[7], points[6]],\n [points[1], points[3], points[7], points[5]]]\n ax.add_collection3d(Poly3DCollection(faces, facecolors='green',\n linewidths=1, edgecolors='green', alpha=0.25))\n plt.show()\n\n\ndef enclosing_singularities(system, boxes, B, X, eps_max=0.1, eps_min=0.01):\n combin = []\n ball = []\n start_combin = time.time()\n n = len(B)\n P = [Pi.replace('\\n', '') for Pi in open(system, 'r').readlines()]\n certified_boxes, uncertified_boxes = boxes\n classes = boxes_classifier(system, boxes, X, special_function=[])\n cer_Solutions = []\n uncer_Solutions = []\n H = []\n mon_mid = [[(0.5 * (Bij[1] + Bij[0])) for Bij in Bi[:2]] for Bi in\n classes[0]]\n mon_rad = [max([(0.5 * (Bij[1] - Bij[0])) for Bij in Bi[:2]]) for Bi in\n classes[0]]\n tree = spatial.KDTree(mon_mid)\n intersting_boxes = [tree.query_ball_point(m, r=math.sqrt(2) * r) for m,\n r in zip(mon_mid, mon_rad)]\n \"\"\"for i in range(len(ball)): \n for j in ball[i]:\n if i not in ball[j]:\n ball[j].append(i)\"\"\"\n intersting_boxes = [indi for indi in intersting_boxes if len(indi) > 3]\n discarded_components = []\n for i in range(len(intersting_boxes) - 1):\n for_i_stop = 0\n boxi_set = set(intersting_boxes[i])\n for j in range(i + 1, len(intersting_boxes)):\n boxj_set = set(intersting_boxes[j])\n if boxj_set.issubset(boxi_set):\n discarded_components.append(j)\n elif boxi_set < boxj_set:\n discarded_components.append(i)\n intersting_boxes = [intersting_boxes[i] for i in range(len(\n intersting_boxes)) if i not in discarded_components]\n interesting_boxes_flattened = []\n for Box_ind in intersting_boxes:\n for j in Box_ind:\n if j not in interesting_boxes_flattened:\n interesting_boxes_flattened.append(j)\n plane_components = planner_connected_compnants([classes[0][i] for i in\n interesting_boxes_flattened])\n end_combin = time.time()\n combin.append(end_combin - start_combin)\n H = []\n for plane_component in plane_components:\n if len(plane_component) > 1:\n start_combin = time.time()\n components = connected_compnants(plane_component)\n pairs_of_branches = all_pairs_oflist(components)\n end_combin = time.time()\n combin.append(end_combin - start_combin)\n for pair_branches in pairs_of_branches:\n start_ball = time.time()\n all_boxes = pair_branches[0] + pair_branches[1]\n uni = []\n for box in all_boxes:\n uni = d.box_union(uni, box)\n t = estimating_t(pair_branches)\n t1 = d.ftconstructor(t[0], t[1])\n t = [float(t1.lower()), float(t1.upper())]\n r = [[float(ri[0]), float(ri[1])] for ri in\n estimating_yandr(pair_branches)]\n B_Ball = uni[:2] + r + [t]\n cusp_Ball_solver(P, B_Ball, X)\n Ball_generating_system(P, B_Ball, X, eps_min)\n os.system('ibexsolve --eps-max=' + str(eps_max) +\n ' --eps-min=' + str(eps_min) + ' -s eq.txt > output.txt'\n )\n Solutions = computing_boxes()\n if Solutions != 'Empty' and Solutions != [[], []]:\n cer_Solutions += Solutions[0]\n uncer_Solutions += Solutions[1]\n if Solutions == [[], []]:\n if d.width(B_Ball[:2]) > eps_min:\n new_B = B_Ball[:2] + B[2:n]\n new_boxes = enclosing_curve(system, new_B, X,\n eps_max=0.1 * eps_max)\n resul = enclosing_singularities(system, new_boxes,\n new_B, X, eps_max=0.1 * eps_max)\n cer_Solutions += resul[0] + resul[1]\n uncer_Solutions += resul[2]\n boxes[1] += new_boxes[1]\n else:\n uncer_Solutions.append(B_Ball)\n end_ball = time.time()\n ball.append(end_ball - start_ball)\n start_combin = time.time()\n checked_boxes = []\n all_boxes = boxes[0] + boxes[1]\n checked_boxes = []\n mon_mid_cusp = [[(0.5 * (Bij[1] + Bij[0])) for Bij in Bi[:2]] for Bi in\n classes[1]]\n mon_rad_cusp = [max([(0.5 * (Bij[1] - Bij[0])) for Bij in Bi[:2]]) for\n Bi in classes[1]]\n potential_cusps = [tree.query_ball_point(m, r=math.sqrt(2) * (r +\n eps_max)) for m, r in zip(mon_mid_cusp, mon_rad_cusp)]\n end_combin = time.time()\n combin.append(end_combin - start_combin)\n for cusp_indx in range(len(classes[1])):\n start_combin = time.time()\n intersecting_boxes = [all_boxes[i] for i in potential_cusps[\n cusp_indx] if d.boxes_intersection(all_boxes[i], classes[1][\n cusp_indx]) != []]\n H = []\n uni = classes[1][cusp_indx][:]\n potential_cusp = classes[1][cusp_indx][:]\n checked_boxes.append(potential_cusp)\n for box in intersecting_boxes:\n if box in checked_boxes:\n continue\n uni = d.box_union(uni, box)\n checked_boxes.append(box)\n end_combin = time.time()\n combin.append(end_combin - start_combin)\n start_ball = time.time()\n t = estimating_t([[potential_cusp], [potential_cusp]])\n \"\"\"if t[1]-t[0] < 1e-07:\n t[0]=t[0]-0.5 * eps_min\n t[1]=t[1]+0.5 * eps_min\"\"\"\n B_Ball = uni + [[-1.01, 1.01]] * (n - 2) + [t]\n H.append(B_Ball)\n sol = cusp_Ball_solver(P, B_Ball, X)\n if sol != 'Empty' and sol != [[], []]:\n cer_Solutions += sol[0]\n uncer_Solutions += sol[1]\n if sol == [[], []]:\n uncer_Solutions.append(B_Ball)\n end_ball = time.time()\n ball.append(end_ball - start_ball)\n start_combin = time.time()\n non_intersecting_boxes = [all_boxes[i] for i in potential_cusps[\n cusp_indx] if d.boxes_intersection(all_boxes[i], classes[1][\n cusp_indx]) == []]\n end_combin = time.time()\n combin.append(end_combin - start_combin)\n for aligned in non_intersecting_boxes:\n start_ball = time.time()\n if aligned in checked_boxes:\n continue\n boxes_intersect_aligned = [B for B in non_intersecting_boxes if\n d.boxes_intersection(aligned, B) != []]\n uni = aligned[:]\n for boxi in boxes_intersect_aligned:\n if boxi in checked_boxes:\n continue\n uni = d.box_union(uni, boxi)\n checked_boxes.append(boxi)\n t = estimating_t([[potential_cusp], [uni]])\n \"\"\"if t[1]-t[0] < 1e-07:\n t[0]=t[0]-0.5 * eps_min\n t[1]=t[1]+0.5 * eps_min\"\"\"\n r = [[float(ri[0]), float(ri[1])] for ri in estimating_yandr([[\n potential_cusp], [uni]])]\n B_Ball = potential_cusp[:2] + r + [t]\n H.append(H)\n Ball_generating_system(P, B_Ball, X)\n os.system('ibexsolve --eps-max=' + str(eps_max) +\n ' --eps-min=' + str(eps_min) + ' -s eq.txt > output.txt')\n Solutions = computing_boxes()\n if Solutions != 'Empty':\n cer_Solutions += Solutions[0]\n uncer_Solutions += Solutions[1]\n elif Solutions == [[], []]:\n uncer_Solutions.append(B_Ball)\n end_ball = time.time()\n ball.append(end_ball - start_ball)\n nodes = []\n cups_or_smallnodes = []\n start_combin = time.time()\n checker = projection_checker(cer_Solutions)\n uncer_Solutions = uncer_Solutions + checker[1]\n cer_Solutions = [Bi for Bi in checker[0] if Bi[2 * n - 2][1] >= 0]\n for solution in cer_Solutions:\n if 0 >= solution[2 * n - 2][0] and 0 <= solution[2 * n - 2][1]:\n cups_or_smallnodes.append(solution)\n else:\n nodes.append(solution)\n end_combin = time.time()\n combin.append(end_combin - start_combin)\n print('KDtree ', sum(combin), 'Ball ', sum(ball))\n return [nodes, cups_or_smallnodes, uncer_Solutions]\n\n\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<docstring token>\n",
"<import token>\n\n\ndef ploting_boxes(boxes, uncer_boxes, var=[0, 1], B=[[-20, 20], [-20, 20]],\n x=0.1, nodes=[], cusps=[], uncer_Solutions=[], Legend=False, color=\n 'green', variabel_name='x'):\n fig, ax = plt.subplots()\n ax.set_xlim(B[0][0], B[0][1])\n ax.set_ylim(B[1][0], B[1][1])\n ax.set_xlabel(variabel_name + str(1))\n ax.set_ylabel(variabel_name + str(2))\n \"\"\"try:\n ax.title(open(\"system.txt\",\"r\").read())\n except:\n pass\"\"\"\n c = 0\n green_patch = mpatches.Patch(color=color, label='smooth part')\n red_patch = mpatches.Patch(color='red', label='unknown part')\n node_patch = mpatches.Patch(color='black', label='Certified nodes',\n fill=None)\n cusp_patch = mpatches.Patch(color='blue', label=\n 'Projection of certified solution with t=0 ', fill=None)\n if Legend == True:\n plt.legend(handles=[green_patch, red_patch, node_patch, cusp_patch])\n for box in boxes:\n rectangle = plt.Rectangle((box[var[0]][0], box[var[1]][0]), box[var\n [0]][1] - box[var[0]][0], box[var[1]][1] - box[var[1]][0],\n color=color)\n plt.gca().add_patch(rectangle)\n for box in uncer_boxes:\n rectangle = plt.Rectangle((box[var[0]][0], box[var[1]][0]), box[var\n [0]][1] - box[var[0]][0], box[var[1]][1] - box[var[1]][0], fc='r')\n plt.gca().add_patch(rectangle)\n for box in nodes:\n rectangle = plt.Rectangle((box[0][0] - x, box[1][0] - x), 2 * x +\n box[0][1] - box[0][0], 2 * x + box[1][1] - box[1][0], fc='y',\n fill=None)\n plt.gca().add_patch(rectangle)\n for box in cusps:\n rectangle = plt.Rectangle((box[0][0] - x, box[1][0] - x), 2 * x +\n box[0][1] - box[0][0], 2 * x + box[1][1] - box[1][0], fc='y',\n color='blue', fill=None)\n plt.gca().add_patch(rectangle)\n for box in uncer_Solutions:\n rectangle = plt.Rectangle((box[0][0] - x, box[1][0] - x), 2 * x +\n box[0][1] - box[0][0], 2 * x + box[1][1] - box[1][0], fc='y',\n color='red', fill=None)\n plt.gca().add_patch(rectangle)\n plt.savefig('fig.jpg', dpi=1000)\n plt.show()\n\n\n<function token>\n\n\ndef Ball_solver(equations, B_Ball, X):\n L = [B_Ball]\n certified_boxes = []\n uncertified_boxes = []\n n = len(X)\n while len(L) != 0:\n solvability = 1\n if B_Ball[2 * n - 2][0] <= 0 <= B_Ball[2 * n - 2][1] and d.width([d\n .ftconstructor(Bi[0], Bi[1]) for Bi in L[0]]) < 0.1:\n Ball_cusp_gen(equations, B_Ball, X)\n elif (B_Ball[2 * n - 2][0] > 0 or 0 > B_Ball[2 * n - 2][1]\n ) and d.width([d.ftconstructor(Bi[0], Bi[1]) for Bi in L[0]]\n ) < 0.1:\n Ball_node_gen(equations, B_Ball, X)\n else:\n children = cb.plane_subdivision(L[0])\n L.remove(L[0])\n L += children\n solvability = 0\n if solvability == 1:\n ibex_output = cb.solving_with_ibex()\n if ibex_output[0] == 'Empty':\n L.remove(L[0])\n elif len(ibex_output[0]) != 0:\n certified_boxes += cb.computing_boxes(ibex_output[0])\n L.remove(L[0])\n elif len(ibex_output[1]) != 0:\n uncertified_boxes += cb.computing_boxes(ibex_output[1])\n L.remove(L[0])\n else:\n children = cb.plane_subdivision(L[0])\n L.remove(L[0])\n L += children\n return [certified_boxes, uncertified_boxes]\n\n\ndef SDP_str(P, X):\n n = len(X)\n P_pluse = P[:]\n P_minus = P[:]\n for i in range(2, n):\n P_pluse = P_pluse.replace('x' + str(i + 1), '(x' + str(i + 1) +\n '+ r' + str(i + 1) + '*sqrt(t))')\n P_minus = P_minus.replace('x' + str(i + 1), '(x' + str(i + 1) +\n '- r' + str(i + 1) + '*sqrt(t))')\n SP = '0.5*(' + P_pluse + '+' + P_minus + ')=0; \\n'\n DP = '0.5*(' + P_pluse + '- (' + P_minus + ') )/(sqrt(t))=0; \\n'\n return [SP, DP]\n\n\ndef Ball_generating_system(P, B_Ball, X, eps_min=0.001):\n n = len(X)\n V = ' Variables \\n '\n for i in range(n):\n if B_Ball[i][0] != B_Ball[i][1]:\n V += 'x' + str(i + 1) + ' in ' + str(B_Ball[i]) + ' ; \\n'\n else:\n V += 'x' + str(i + 1) + ' in ' + str([B_Ball[i][0] - eps_min, \n B_Ball[i][1] + eps_min]) + ' ; \\n'\n for i in range(n, 2 * n - 2):\n V += 'r' + str(i - n + 3) + ' in ' + str(B_Ball[i]) + ' ; \\n'\n V += 't' + ' in ' + str(B_Ball[2 * n - 2]) + ' ; \\n'\n V += 'Constraints \\n'\n for Pi in P:\n V += SDP_str(Pi, X)[0]\n V += SDP_str(Pi, X)[1]\n last_eq = ''\n for i in range(3, n):\n last_eq += 'r' + str(i) + '^2+'\n last_eq += 'r' + str(n) + '^2 -1=0;'\n V += last_eq + '\\n'\n f = open('eq.txt', 'w+')\n f.write(V)\n f.write('end')\n f.close()\n\n\n<function token>\n\n\ndef intersting_boxes(curve, b):\n cer_intersting_boxes = []\n uncer_intersting_boxes = []\n for box in curve[0]:\n if b[0][0] <= box[0][0] <= box[0][1] <= b[0][1] and b[1][0] <= box[1][0\n ] <= box[1][1] <= b[1][1]:\n cer_intersting_boxes.append(box)\n for box in curve[1]:\n if b[0][0] <= box[0][0] <= box[0][1] <= b[0][1] and b[1][0] <= box[1][0\n ] <= box[1][1] <= b[1][1]:\n uncer_intersting_boxes.append(box)\n return [cer_intersting_boxes, uncer_intersting_boxes]\n\n\n<function token>\n\n\ndef estimating_t1(components, upper_bound=200000):\n t1 = upper_bound\n t2 = 0\n for box1 in components[0]:\n for box2 in components[1]:\n a = d.distance(box1, box2).lower()\n b = d.distance(box1, box2).upper()\n if t1 > a:\n t1 = a\n if t2 < b:\n t2 = b\n t = d.ftconstructor(t1, t2)\n t = 0.25 * d.power_interval(t, 2)\n return [float(t.lower()), float(t.upper())]\n\n\ndef estimating_t(components, upper_bound=19000.8):\n t1 = upper_bound\n t2 = 0\n for box1 in components[0]:\n for box2 in components[1]:\n a = d.distance(box1[2:], box2[2:])\n if t1 > a[0]:\n t1 = a[0]\n if t2 < a[1]:\n t2 = a[1]\n t = d.ftconstructor(t1, t2)\n t = 0.25 * d.power_interval(t, 2)\n return [float(t.lower()), float(t.upper())]\n\n\ndef boxes_compare(box1, box2):\n flage = 0\n for i in range(len(box1) - 1, -1, -1):\n if box1[i][0] > box2[i][0]:\n return 1\n if box1[i][0] < box2[i][0]:\n return -1\n return 0\n\n\ndef boxes_sort(boxes):\n sorted_boxes = boxes[:]\n for i in range(len(boxes) - 1):\n for j in range(i + 1, len(boxes)):\n if boxes_compare(sorted_boxes[i], sorted_boxes[j]) == 1:\n sorted_boxes[i], sorted_boxes[j] = sorted_boxes[j\n ], sorted_boxes[i]\n return sorted_boxes\n\n\ndef connected_compnants(boxes):\n ftboxes = boxes[:]\n components = [[ftboxes[0]]]\n for i in range(1, len(ftboxes)):\n boxi_isused = 0\n for j in range(len(components)):\n membership = 0\n for k in range(len(components[j])):\n if d.boxes_intersection(ftboxes[i], components[j][k]) != []:\n components[j].append(ftboxes[i])\n membership = 1\n boxi_isused = 1\n break\n if membership == 1:\n break\n if boxi_isused == 0:\n components.append([ftboxes[i]])\n unused = list(range(len(components)))\n components1 = components[:]\n components2 = []\n while len(components1) != len(components2):\n for i in unused:\n for j in [j for j in list(range(i + 1, len(components))) if j in\n unused]:\n intersection_exists = False\n is_looping = True\n for boxi in components[i]:\n for boxj in components[j]:\n if d.boxes_intersection(boxi, boxj) != []:\n is_looping = False\n intersection_exists = True\n break\n if is_looping == False:\n break\n if intersection_exists == True:\n components[i] += components[j]\n unused.remove(j)\n components2 = components1[:]\n components1 = [components[k] for k in unused]\n return components1\n\n\ndef planner_connected_compnants(boxes):\n if len(boxes) == 0:\n return []\n ftboxes = boxes[:]\n components = [[ftboxes[0]]]\n for i in range(1, len(ftboxes)):\n boxi_isused = 0\n for j in range(len(components)):\n membership = 0\n for k in range(len(components[j])):\n if d.boxes_intersection(ftboxes[i][:2], components[j][k][:2]\n ) != []:\n components[j].append(ftboxes[i])\n membership = 1\n boxi_isused = 1\n break\n if membership == 1:\n break\n if boxi_isused == 0:\n components.append([ftboxes[i]])\n unused = list(range(len(components)))\n components1 = components[:]\n components2 = []\n while len(components1) != len(components2):\n for i in unused:\n for j in [j for j in list(range(i + 1, len(components))) if j in\n unused]:\n intersection_exists = False\n is_looping = True\n for boxi in components[i]:\n for boxj in components[j]:\n if d.boxes_intersection(boxi[:2], boxj[:2]) != []:\n is_looping = False\n intersection_exists = True\n break\n if is_looping == False:\n break\n if intersection_exists == True:\n components[i] += components[j]\n unused.remove(j)\n components2 = components1[:]\n components1 = [components[k] for k in unused]\n return components1\n\n\n<function token>\n<function token>\n\n\ndef intersect_in_2D(class1, class2, monotonicity=1):\n pl_intesected_pairs = []\n if monotonicity == 1:\n for i in range(len(class1)):\n for j in range(len(class2)):\n if d.boxes_intersection(class1[i][:2], class2[j][:2]) != [\n ] and d.boxes_intersection(class1[i], class2[j]) == []:\n if [class2[j], class1[i]] not in pl_intesected_pairs:\n pl_intesected_pairs.append([class1[i], class2[j]])\n elif monotonicity == 0:\n for i in range(len(class1)):\n for j in range(len(class2)):\n if d.boxes_intersection(class1[i][:2], class2[j][:2]) != []:\n if [class2[j], class1[i]] not in pl_intesected_pairs:\n pl_intesected_pairs.append([class1[i], class2[j]])\n elif monotonicity == 2:\n inters_indic = []\n for i in range(len(class1)):\n inters_indic.append([])\n for j in range(len(class2)):\n if d.boxes_intersection(class1[i][:2], class2[j][:2]) != []:\n inters_indic[i] = inters_indic[i] + [j]\n for k in range(len(class1)):\n if len(inters_indic[k]) > 3:\n for j in range(len(inters_indic[k])):\n if [class2[j], class1[k]] not in pl_intesected_pairs:\n pl_intesected_pairs.append([class1[k], class2[j]])\n return pl_intesected_pairs\n\n\ndef solving_fornodes(equations, boxes, B, X, eps=0.1):\n plane_components = detecting_nodes(boxes, B, equations, X, eps)\n g = open(equations, 'r')\n P = [Pi.replace('\\n', '') for Pi in g.readlines()]\n Ball_solutions = []\n for plane_component in plane_components:\n x1 = float(min([ai[0].lower() for ai in plane_component]))\n x2 = float(max([ai[0].upper() for ai in plane_component]))\n y1 = float(min([ai[1].lower() for ai in plane_component]))\n y2 = float(max([ai[1].upper() for ai in plane_component]))\n components = connected_compnants(plane_component)\n r = [[float(ri[0]), float(ri[1])] for ri in estimating_r(components)]\n t = estimating_t(components)\n t = [float(t[0]), float(t[1])]\n B_Ball = [[x1, x2], [y1, y2]] + r + [t]\n Ball_generating_system(P, B_Ball, X)\n solutionsi = ibex_output(P, B_Ball, X)\n Ball_solutions += solutionsi\n return Ball_solutions\n\n\n<function token>\n\n\ndef plane_subdivision(B):\n ft_B2 = d.subdivide([d.ftconstructor(Bi[0], Bi[1]) for Bi in B[:2]])\n normal_B2 = [d.ft_normal(Bi) for Bi in ft_B2]\n return d.cartesian_product(normal_B2, [B[2:]])\n\n\ndef system_generator(f, B, X):\n g = open(f, 'r')\n L = g.readlines()\n g.close()\n f = open('eq.txt', 'w+')\n f.write('Variables \\n')\n for i in range(len(X)):\n f.write(str(X[i]) + ' in ' + str(B[i]) + ' ; \\n')\n f.write('Constraints \\n')\n for Li in L:\n f.write(Li.replace('\\n', '') + '=0; \\n')\n f.write('end ')\n f.close()\n return f\n\n\n<function token>\n\n\ndef computing_boxes():\n if 'infeasible' in open('output.txt', 'r').read():\n return 'Empty'\n content = open('output.txt', 'r').readlines()\n cer = []\n uncer = []\n i = 0\n Answer = []\n for fi in content:\n try:\n a = fi.index('(')\n b = fi.index(')')\n T = fi[a:b + 1].replace('(', '[')\n T = fi[a:b + 1].replace('(', '[')\n T = T.replace(')', ']')\n T = T.split(';')\n E = []\n i = 0\n for Ti in T:\n Ti = Ti.replace('[', '')\n Ti = Ti.replace(']', '')\n Ti = Ti.replace('<', '')\n Ti = Ti.replace('>', '')\n x = Ti.index(',')\n a = float(Ti[:x])\n b = float(Ti[x + 1:])\n E.append([])\n E[i] = [a, b]\n i += 1\n if 'solution n' in fi or 'boundary n' in fi:\n cer.append(E)\n elif 'unknown n' in fi:\n uncer.append(E)\n except ValueError:\n pass\n return [cer, uncer]\n\n\ndef enclosing_curve(system, B, X, eps_min=0.1, eps_max=0.1):\n L = [B]\n certified_boxes = []\n uncertified_boxes = []\n while len(L) != 0:\n system_generator(system, L[0], X)\n os.system('ibexsolve --eps-max=' + str(eps_max) + ' --eps-min=' +\n str(eps_min) + ' -s eq.txt > output.txt')\n content = open('output.txt', 'r').readlines()\n ibex_output = computing_boxes()\n if ibex_output == [[], []] and max([(Bi[1] - Bi[0]) for Bi in L[0]]\n ) < eps_min:\n uncertified_boxes.append(L[0])\n L.remove(L[0])\n elif ibex_output == [[], []]:\n children = plane_subdivision(L[0])\n L.remove(L[0])\n L += children\n elif ibex_output == 'Empty':\n L.remove(L[0])\n else:\n if len(ibex_output[0]) != 0:\n certified_boxes += ibex_output[0]\n if len(ibex_output[1]) != 0:\n uncertified_boxes += ibex_output[1]\n L.remove(L[0])\n return [certified_boxes, uncertified_boxes]\n\n\ndef loopsfree_checker(f, certified_boxes, uncer_boxes, P):\n L = eval_file_gen(f, certified_boxes, X)\n while L.replace('\\n', '') != '[]':\n L = L.replace('[', '')\n L = L.replace(']', '')\n L = L.replace('\\n', '')\n L = L.split(',')\n for i in L:\n children = normal_subdivision(certified_boxes[int(i)])\n certified_boxes.remove(certified_boxes[int(i)])\n for child in children:\n cer_children, uncer_children = enclosing_curve(f, child, X)\n certified_boxes += cer_children\n uncer_boxes += uncer_children\n L = eval_file_gen(f, certified_boxes, X)\n return L\n\n\ndef eval_file_gen(f, boxes, X, special_function=[]):\n functions = ['sin', 'cos', 'tan', 'exp'] + special_function\n if len(boxes[0]) == 0:\n return []\n n = len(boxes[0])\n m = len(boxes)\n g = open(f, 'r')\n P_str = g.readlines()\n P_str = [Pi.replace('\\n', '') for Pi in P_str]\n P_str = [Pi.replace('^', '**') for Pi in P_str]\n P_exp = [parse_expr(Pi) for Pi in P_str]\n jac = sp.Matrix(P_str).jacobian(sp.Matrix(X))\n minor1 = jac[:, 1:].det()\n minor2 = jac[:, [i for i in range(n) if i != 1]].det()\n fil = open('evaluation_file1.py', 'w')\n fil.write('import flint as ft \\n')\n fil.write('import sympy as sp \\n')\n fil.write('import interval_arithmetic as d \\n')\n fil.write('boxes=' + str(boxes) + '\\n')\n fil.write(\n 'ftboxes=[ [d.ftconstructor(Bi[0],Bi[1]) for Bi in B ] for B in boxes ] \\n'\n )\n fil.write('n=len(boxes[0])\\n')\n fil.write('m=len(boxes)\\n')\n fil.write('m1=[]\\n')\n fil.write('m2=[]\\n')\n minor1_str = str(minor1)\n minor2_str = str(minor2)\n for i in range(n):\n minor1_str = minor1_str.replace('x' + str(i + 1), 'B[' + str(i) + ']')\n minor2_str = minor2_str.replace('x' + str(i + 1), 'B[' + str(i) + ']')\n for func in functions:\n minor1_str = minor1_str.replace(func, 'ft.arb.' + func)\n minor2_str = minor2_str.replace(func, 'ft.arb.' + func)\n fil.write('for B in ftboxes: \\n')\n fil.write(' m1.append(ft.arb(' + minor1_str + ')) \\n')\n fil.write(' m2.append( ft.arb(' + minor2_str + ')) \\n')\n fil.write(\n 'innrer_loops=[i for i in range(m) if 0 in m1[i] and 0 in m2[i] ]\\n')\n fil.write('print(innrer_loops)\\n')\n fil.close()\n t = os.popen('python3 evaluation_file1.py ').read()\n return t\n\n\n<function token>\n\n\ndef projection_checker(solutions):\n if len(solutions) == 0:\n return [[], []]\n m = len(solutions[0])\n n = int((m + 1) / 2)\n intersect_in2d = [[]] * len(solutions)\n for i in range(len(solutions) - 1):\n for j in range(i + 1, len(solutions)):\n if solutions[i] == solutions[j]:\n continue\n elif d.boxes_intersection(solutions[i][:2], solutions[j][:2]) != [\n ] and (d.boxes_intersection(solutions[i][n:2 * n - 2], [[-\n Bi[1], -Bi[0]] for Bi in solutions[j][n:2 * n - 2]]) == [] and\n d.boxes_intersection(solutions[i][n:2 * n - 2], [[Bi[0], Bi\n [1]] for Bi in solutions[j][n:2 * n - 2]]) == [] or d.\n boxes_intersection(solutions[i][2:n] + [solutions[i][2 * n -\n 2]], solutions[j][2:n] + [solutions[j][2 * n - 2]]) == []):\n intersect_in2d[i] = intersect_in2d[i] + [j]\n accepted = []\n acc_ind = []\n unaccepted = []\n unacc_ind = []\n for i in range(len(solutions)):\n if len(intersect_in2d[i]) == 0 and i not in unacc_ind + acc_ind:\n accepted.append(solutions[i])\n acc_ind.append(i)\n continue\n elif i not in unacc_ind + acc_ind:\n unaccepted.append(solutions[i])\n unacc_ind.append(i)\n for k in intersect_in2d[i]:\n if k not in unacc_ind:\n unaccepted.append(solutions[k])\n unacc_ind.append(k)\n return [accepted, unaccepted]\n\n\ndef Ball_given_2nboxes(system, X, B1, B2, monotonicity_B1=1, monotonicity_B2=1\n ):\n B1_ft = [d.ftconstructor(Bi[0], Bi[1]) for Bi in B1]\n B2_ft = [d.ftconstructor(Bi[0], Bi[1]) for Bi in B2]\n P = [Pi.replace('\\n', '') for Pi in open(system, 'r').readlines()]\n sol = 'Empty'\n if d.boxes_intersection(B1_ft, B2_ft) == [\n ] and monotonicity_B1 == monotonicity_B2 == 1:\n t = estimating_t([[B1_ft], [B2_ft]])\n y_and_r = estimating_yandr([[B1_ft], [B2_ft]])\n intersec_B1B2_in2d = d.boxes_intersection(B1_ft[:2], B2_ft[:2])\n intersec_B1B2_in2d = [[float(Bi.lower()), float(Bi.upper())] for Bi in\n intersec_B1B2_in2d]\n B_Ball = intersec_B1B2_in2d + y_and_r + [t]\n Ball_node_gen(system, B_Ball, X)\n os.system('ibexsolve --eps-max=0.1 -s eq.txt > output.txt')\n sol = computing_boxes()\n return sol\n\n\n<function token>\n\n\ndef checking_assumptions(curve_data):\n if len(curve_data[0][1]) != 0:\n return 0\n Ball_sols_ft = [[d.ftconstructor(Bi[0], Bi[1]) for Bi in B] for B in\n curve_data[1][0]] + [[d.ftconstructor(Bi[0], Bi[1]) for Bi in B] for\n B in curve_data[1][1]]\n alph3 = assum_alph3_checker(Ball_sols_ft)\n if alph3 == 1:\n return 1\n else:\n return 0\n\n\ndef csv_saver(L, type_L='Ball'):\n dic = []\n if type_L == 'Ball':\n n = int((len(L[0]) + 1) / 2)\n for j in range(len(L)):\n dic.append({})\n for i in range(n):\n dic[j]['x' + str(i + 1)] = L[j][i]\n for i in range(n, 2 * n - 2):\n dic[j]['r' + str(i + 3 - n)] = L[j][i]\n dic[j]['t'] = L[j][2 * n - 2]\n return dic\n\n\n<function token>\n\n\ndef assum_alph3_checker(solutions):\n comparing_list = [[]] * len(solutions)\n for i in range(len(solutions) - 1):\n for j in range(i + 1, len(solutions)):\n if d.boxes_intersection(solutions[i][:2], solutions[j][:2]) != []:\n comparing_list[i].append(j)\n comparing_list[j].append(i)\n matching = [len(T) for T in comparing_list]\n if max(matching) <= 2:\n return 1\n else:\n return 0\n\n\ndef plotting_3D(boxes, Box, var=[0, 1, 2]):\n ax = plt.figure().add_subplot(111, projection='3d')\n ax.set_xlim(Box[0][0], Box[0][1])\n ax.set_ylim(Box[1][0], Box[1][1])\n ax.set_zlim(Box[2][0], Box[2][1])\n ax.set_xlabel('x' + str(var[0] + 1))\n ax.set_ylabel('x' + str(var[1] + 1))\n ax.set_zlabel('x' + str(var[2] + 1))\n for box in boxes:\n V = [[box[j][0] for j in range(3)], [box[j][1] for j in range(3)]]\n points = list(itertools.product(*box))\n faces = [[points[0], points[2], points[6], points[4]], [points[0],\n points[2], points[3], points[1]], [points[0], points[1], points\n [5], points[4]], [points[2], points[3], points[7], points[6]],\n [points[1], points[3], points[7], points[5]]]\n ax.add_collection3d(Poly3DCollection(faces, facecolors='green',\n linewidths=1, edgecolors='green', alpha=0.25))\n plt.show()\n\n\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<docstring token>\n",
"<import token>\n\n\ndef ploting_boxes(boxes, uncer_boxes, var=[0, 1], B=[[-20, 20], [-20, 20]],\n x=0.1, nodes=[], cusps=[], uncer_Solutions=[], Legend=False, color=\n 'green', variabel_name='x'):\n fig, ax = plt.subplots()\n ax.set_xlim(B[0][0], B[0][1])\n ax.set_ylim(B[1][0], B[1][1])\n ax.set_xlabel(variabel_name + str(1))\n ax.set_ylabel(variabel_name + str(2))\n \"\"\"try:\n ax.title(open(\"system.txt\",\"r\").read())\n except:\n pass\"\"\"\n c = 0\n green_patch = mpatches.Patch(color=color, label='smooth part')\n red_patch = mpatches.Patch(color='red', label='unknown part')\n node_patch = mpatches.Patch(color='black', label='Certified nodes',\n fill=None)\n cusp_patch = mpatches.Patch(color='blue', label=\n 'Projection of certified solution with t=0 ', fill=None)\n if Legend == True:\n plt.legend(handles=[green_patch, red_patch, node_patch, cusp_patch])\n for box in boxes:\n rectangle = plt.Rectangle((box[var[0]][0], box[var[1]][0]), box[var\n [0]][1] - box[var[0]][0], box[var[1]][1] - box[var[1]][0],\n color=color)\n plt.gca().add_patch(rectangle)\n for box in uncer_boxes:\n rectangle = plt.Rectangle((box[var[0]][0], box[var[1]][0]), box[var\n [0]][1] - box[var[0]][0], box[var[1]][1] - box[var[1]][0], fc='r')\n plt.gca().add_patch(rectangle)\n for box in nodes:\n rectangle = plt.Rectangle((box[0][0] - x, box[1][0] - x), 2 * x +\n box[0][1] - box[0][0], 2 * x + box[1][1] - box[1][0], fc='y',\n fill=None)\n plt.gca().add_patch(rectangle)\n for box in cusps:\n rectangle = plt.Rectangle((box[0][0] - x, box[1][0] - x), 2 * x +\n box[0][1] - box[0][0], 2 * x + box[1][1] - box[1][0], fc='y',\n color='blue', fill=None)\n plt.gca().add_patch(rectangle)\n for box in uncer_Solutions:\n rectangle = plt.Rectangle((box[0][0] - x, box[1][0] - x), 2 * x +\n box[0][1] - box[0][0], 2 * x + box[1][1] - box[1][0], fc='y',\n color='red', fill=None)\n plt.gca().add_patch(rectangle)\n plt.savefig('fig.jpg', dpi=1000)\n plt.show()\n\n\n<function token>\n\n\ndef Ball_solver(equations, B_Ball, X):\n L = [B_Ball]\n certified_boxes = []\n uncertified_boxes = []\n n = len(X)\n while len(L) != 0:\n solvability = 1\n if B_Ball[2 * n - 2][0] <= 0 <= B_Ball[2 * n - 2][1] and d.width([d\n .ftconstructor(Bi[0], Bi[1]) for Bi in L[0]]) < 0.1:\n Ball_cusp_gen(equations, B_Ball, X)\n elif (B_Ball[2 * n - 2][0] > 0 or 0 > B_Ball[2 * n - 2][1]\n ) and d.width([d.ftconstructor(Bi[0], Bi[1]) for Bi in L[0]]\n ) < 0.1:\n Ball_node_gen(equations, B_Ball, X)\n else:\n children = cb.plane_subdivision(L[0])\n L.remove(L[0])\n L += children\n solvability = 0\n if solvability == 1:\n ibex_output = cb.solving_with_ibex()\n if ibex_output[0] == 'Empty':\n L.remove(L[0])\n elif len(ibex_output[0]) != 0:\n certified_boxes += cb.computing_boxes(ibex_output[0])\n L.remove(L[0])\n elif len(ibex_output[1]) != 0:\n uncertified_boxes += cb.computing_boxes(ibex_output[1])\n L.remove(L[0])\n else:\n children = cb.plane_subdivision(L[0])\n L.remove(L[0])\n L += children\n return [certified_boxes, uncertified_boxes]\n\n\ndef SDP_str(P, X):\n n = len(X)\n P_pluse = P[:]\n P_minus = P[:]\n for i in range(2, n):\n P_pluse = P_pluse.replace('x' + str(i + 1), '(x' + str(i + 1) +\n '+ r' + str(i + 1) + '*sqrt(t))')\n P_minus = P_minus.replace('x' + str(i + 1), '(x' + str(i + 1) +\n '- r' + str(i + 1) + '*sqrt(t))')\n SP = '0.5*(' + P_pluse + '+' + P_minus + ')=0; \\n'\n DP = '0.5*(' + P_pluse + '- (' + P_minus + ') )/(sqrt(t))=0; \\n'\n return [SP, DP]\n\n\ndef Ball_generating_system(P, B_Ball, X, eps_min=0.001):\n n = len(X)\n V = ' Variables \\n '\n for i in range(n):\n if B_Ball[i][0] != B_Ball[i][1]:\n V += 'x' + str(i + 1) + ' in ' + str(B_Ball[i]) + ' ; \\n'\n else:\n V += 'x' + str(i + 1) + ' in ' + str([B_Ball[i][0] - eps_min, \n B_Ball[i][1] + eps_min]) + ' ; \\n'\n for i in range(n, 2 * n - 2):\n V += 'r' + str(i - n + 3) + ' in ' + str(B_Ball[i]) + ' ; \\n'\n V += 't' + ' in ' + str(B_Ball[2 * n - 2]) + ' ; \\n'\n V += 'Constraints \\n'\n for Pi in P:\n V += SDP_str(Pi, X)[0]\n V += SDP_str(Pi, X)[1]\n last_eq = ''\n for i in range(3, n):\n last_eq += 'r' + str(i) + '^2+'\n last_eq += 'r' + str(n) + '^2 -1=0;'\n V += last_eq + '\\n'\n f = open('eq.txt', 'w+')\n f.write(V)\n f.write('end')\n f.close()\n\n\n<function token>\n\n\ndef intersting_boxes(curve, b):\n cer_intersting_boxes = []\n uncer_intersting_boxes = []\n for box in curve[0]:\n if b[0][0] <= box[0][0] <= box[0][1] <= b[0][1] and b[1][0] <= box[1][0\n ] <= box[1][1] <= b[1][1]:\n cer_intersting_boxes.append(box)\n for box in curve[1]:\n if b[0][0] <= box[0][0] <= box[0][1] <= b[0][1] and b[1][0] <= box[1][0\n ] <= box[1][1] <= b[1][1]:\n uncer_intersting_boxes.append(box)\n return [cer_intersting_boxes, uncer_intersting_boxes]\n\n\n<function token>\n\n\ndef estimating_t1(components, upper_bound=200000):\n t1 = upper_bound\n t2 = 0\n for box1 in components[0]:\n for box2 in components[1]:\n a = d.distance(box1, box2).lower()\n b = d.distance(box1, box2).upper()\n if t1 > a:\n t1 = a\n if t2 < b:\n t2 = b\n t = d.ftconstructor(t1, t2)\n t = 0.25 * d.power_interval(t, 2)\n return [float(t.lower()), float(t.upper())]\n\n\ndef estimating_t(components, upper_bound=19000.8):\n t1 = upper_bound\n t2 = 0\n for box1 in components[0]:\n for box2 in components[1]:\n a = d.distance(box1[2:], box2[2:])\n if t1 > a[0]:\n t1 = a[0]\n if t2 < a[1]:\n t2 = a[1]\n t = d.ftconstructor(t1, t2)\n t = 0.25 * d.power_interval(t, 2)\n return [float(t.lower()), float(t.upper())]\n\n\ndef boxes_compare(box1, box2):\n flage = 0\n for i in range(len(box1) - 1, -1, -1):\n if box1[i][0] > box2[i][0]:\n return 1\n if box1[i][0] < box2[i][0]:\n return -1\n return 0\n\n\ndef boxes_sort(boxes):\n sorted_boxes = boxes[:]\n for i in range(len(boxes) - 1):\n for j in range(i + 1, len(boxes)):\n if boxes_compare(sorted_boxes[i], sorted_boxes[j]) == 1:\n sorted_boxes[i], sorted_boxes[j] = sorted_boxes[j\n ], sorted_boxes[i]\n return sorted_boxes\n\n\ndef connected_compnants(boxes):\n ftboxes = boxes[:]\n components = [[ftboxes[0]]]\n for i in range(1, len(ftboxes)):\n boxi_isused = 0\n for j in range(len(components)):\n membership = 0\n for k in range(len(components[j])):\n if d.boxes_intersection(ftboxes[i], components[j][k]) != []:\n components[j].append(ftboxes[i])\n membership = 1\n boxi_isused = 1\n break\n if membership == 1:\n break\n if boxi_isused == 0:\n components.append([ftboxes[i]])\n unused = list(range(len(components)))\n components1 = components[:]\n components2 = []\n while len(components1) != len(components2):\n for i in unused:\n for j in [j for j in list(range(i + 1, len(components))) if j in\n unused]:\n intersection_exists = False\n is_looping = True\n for boxi in components[i]:\n for boxj in components[j]:\n if d.boxes_intersection(boxi, boxj) != []:\n is_looping = False\n intersection_exists = True\n break\n if is_looping == False:\n break\n if intersection_exists == True:\n components[i] += components[j]\n unused.remove(j)\n components2 = components1[:]\n components1 = [components[k] for k in unused]\n return components1\n\n\ndef planner_connected_compnants(boxes):\n if len(boxes) == 0:\n return []\n ftboxes = boxes[:]\n components = [[ftboxes[0]]]\n for i in range(1, len(ftboxes)):\n boxi_isused = 0\n for j in range(len(components)):\n membership = 0\n for k in range(len(components[j])):\n if d.boxes_intersection(ftboxes[i][:2], components[j][k][:2]\n ) != []:\n components[j].append(ftboxes[i])\n membership = 1\n boxi_isused = 1\n break\n if membership == 1:\n break\n if boxi_isused == 0:\n components.append([ftboxes[i]])\n unused = list(range(len(components)))\n components1 = components[:]\n components2 = []\n while len(components1) != len(components2):\n for i in unused:\n for j in [j for j in list(range(i + 1, len(components))) if j in\n unused]:\n intersection_exists = False\n is_looping = True\n for boxi in components[i]:\n for boxj in components[j]:\n if d.boxes_intersection(boxi[:2], boxj[:2]) != []:\n is_looping = False\n intersection_exists = True\n break\n if is_looping == False:\n break\n if intersection_exists == True:\n components[i] += components[j]\n unused.remove(j)\n components2 = components1[:]\n components1 = [components[k] for k in unused]\n return components1\n\n\n<function token>\n<function token>\n\n\ndef intersect_in_2D(class1, class2, monotonicity=1):\n pl_intesected_pairs = []\n if monotonicity == 1:\n for i in range(len(class1)):\n for j in range(len(class2)):\n if d.boxes_intersection(class1[i][:2], class2[j][:2]) != [\n ] and d.boxes_intersection(class1[i], class2[j]) == []:\n if [class2[j], class1[i]] not in pl_intesected_pairs:\n pl_intesected_pairs.append([class1[i], class2[j]])\n elif monotonicity == 0:\n for i in range(len(class1)):\n for j in range(len(class2)):\n if d.boxes_intersection(class1[i][:2], class2[j][:2]) != []:\n if [class2[j], class1[i]] not in pl_intesected_pairs:\n pl_intesected_pairs.append([class1[i], class2[j]])\n elif monotonicity == 2:\n inters_indic = []\n for i in range(len(class1)):\n inters_indic.append([])\n for j in range(len(class2)):\n if d.boxes_intersection(class1[i][:2], class2[j][:2]) != []:\n inters_indic[i] = inters_indic[i] + [j]\n for k in range(len(class1)):\n if len(inters_indic[k]) > 3:\n for j in range(len(inters_indic[k])):\n if [class2[j], class1[k]] not in pl_intesected_pairs:\n pl_intesected_pairs.append([class1[k], class2[j]])\n return pl_intesected_pairs\n\n\ndef solving_fornodes(equations, boxes, B, X, eps=0.1):\n plane_components = detecting_nodes(boxes, B, equations, X, eps)\n g = open(equations, 'r')\n P = [Pi.replace('\\n', '') for Pi in g.readlines()]\n Ball_solutions = []\n for plane_component in plane_components:\n x1 = float(min([ai[0].lower() for ai in plane_component]))\n x2 = float(max([ai[0].upper() for ai in plane_component]))\n y1 = float(min([ai[1].lower() for ai in plane_component]))\n y2 = float(max([ai[1].upper() for ai in plane_component]))\n components = connected_compnants(plane_component)\n r = [[float(ri[0]), float(ri[1])] for ri in estimating_r(components)]\n t = estimating_t(components)\n t = [float(t[0]), float(t[1])]\n B_Ball = [[x1, x2], [y1, y2]] + r + [t]\n Ball_generating_system(P, B_Ball, X)\n solutionsi = ibex_output(P, B_Ball, X)\n Ball_solutions += solutionsi\n return Ball_solutions\n\n\n<function token>\n\n\ndef plane_subdivision(B):\n ft_B2 = d.subdivide([d.ftconstructor(Bi[0], Bi[1]) for Bi in B[:2]])\n normal_B2 = [d.ft_normal(Bi) for Bi in ft_B2]\n return d.cartesian_product(normal_B2, [B[2:]])\n\n\ndef system_generator(f, B, X):\n g = open(f, 'r')\n L = g.readlines()\n g.close()\n f = open('eq.txt', 'w+')\n f.write('Variables \\n')\n for i in range(len(X)):\n f.write(str(X[i]) + ' in ' + str(B[i]) + ' ; \\n')\n f.write('Constraints \\n')\n for Li in L:\n f.write(Li.replace('\\n', '') + '=0; \\n')\n f.write('end ')\n f.close()\n return f\n\n\n<function token>\n\n\ndef computing_boxes():\n if 'infeasible' in open('output.txt', 'r').read():\n return 'Empty'\n content = open('output.txt', 'r').readlines()\n cer = []\n uncer = []\n i = 0\n Answer = []\n for fi in content:\n try:\n a = fi.index('(')\n b = fi.index(')')\n T = fi[a:b + 1].replace('(', '[')\n T = fi[a:b + 1].replace('(', '[')\n T = T.replace(')', ']')\n T = T.split(';')\n E = []\n i = 0\n for Ti in T:\n Ti = Ti.replace('[', '')\n Ti = Ti.replace(']', '')\n Ti = Ti.replace('<', '')\n Ti = Ti.replace('>', '')\n x = Ti.index(',')\n a = float(Ti[:x])\n b = float(Ti[x + 1:])\n E.append([])\n E[i] = [a, b]\n i += 1\n if 'solution n' in fi or 'boundary n' in fi:\n cer.append(E)\n elif 'unknown n' in fi:\n uncer.append(E)\n except ValueError:\n pass\n return [cer, uncer]\n\n\ndef enclosing_curve(system, B, X, eps_min=0.1, eps_max=0.1):\n L = [B]\n certified_boxes = []\n uncertified_boxes = []\n while len(L) != 0:\n system_generator(system, L[0], X)\n os.system('ibexsolve --eps-max=' + str(eps_max) + ' --eps-min=' +\n str(eps_min) + ' -s eq.txt > output.txt')\n content = open('output.txt', 'r').readlines()\n ibex_output = computing_boxes()\n if ibex_output == [[], []] and max([(Bi[1] - Bi[0]) for Bi in L[0]]\n ) < eps_min:\n uncertified_boxes.append(L[0])\n L.remove(L[0])\n elif ibex_output == [[], []]:\n children = plane_subdivision(L[0])\n L.remove(L[0])\n L += children\n elif ibex_output == 'Empty':\n L.remove(L[0])\n else:\n if len(ibex_output[0]) != 0:\n certified_boxes += ibex_output[0]\n if len(ibex_output[1]) != 0:\n uncertified_boxes += ibex_output[1]\n L.remove(L[0])\n return [certified_boxes, uncertified_boxes]\n\n\n<function token>\n\n\ndef eval_file_gen(f, boxes, X, special_function=[]):\n functions = ['sin', 'cos', 'tan', 'exp'] + special_function\n if len(boxes[0]) == 0:\n return []\n n = len(boxes[0])\n m = len(boxes)\n g = open(f, 'r')\n P_str = g.readlines()\n P_str = [Pi.replace('\\n', '') for Pi in P_str]\n P_str = [Pi.replace('^', '**') for Pi in P_str]\n P_exp = [parse_expr(Pi) for Pi in P_str]\n jac = sp.Matrix(P_str).jacobian(sp.Matrix(X))\n minor1 = jac[:, 1:].det()\n minor2 = jac[:, [i for i in range(n) if i != 1]].det()\n fil = open('evaluation_file1.py', 'w')\n fil.write('import flint as ft \\n')\n fil.write('import sympy as sp \\n')\n fil.write('import interval_arithmetic as d \\n')\n fil.write('boxes=' + str(boxes) + '\\n')\n fil.write(\n 'ftboxes=[ [d.ftconstructor(Bi[0],Bi[1]) for Bi in B ] for B in boxes ] \\n'\n )\n fil.write('n=len(boxes[0])\\n')\n fil.write('m=len(boxes)\\n')\n fil.write('m1=[]\\n')\n fil.write('m2=[]\\n')\n minor1_str = str(minor1)\n minor2_str = str(minor2)\n for i in range(n):\n minor1_str = minor1_str.replace('x' + str(i + 1), 'B[' + str(i) + ']')\n minor2_str = minor2_str.replace('x' + str(i + 1), 'B[' + str(i) + ']')\n for func in functions:\n minor1_str = minor1_str.replace(func, 'ft.arb.' + func)\n minor2_str = minor2_str.replace(func, 'ft.arb.' + func)\n fil.write('for B in ftboxes: \\n')\n fil.write(' m1.append(ft.arb(' + minor1_str + ')) \\n')\n fil.write(' m2.append( ft.arb(' + minor2_str + ')) \\n')\n fil.write(\n 'innrer_loops=[i for i in range(m) if 0 in m1[i] and 0 in m2[i] ]\\n')\n fil.write('print(innrer_loops)\\n')\n fil.close()\n t = os.popen('python3 evaluation_file1.py ').read()\n return t\n\n\n<function token>\n\n\ndef projection_checker(solutions):\n if len(solutions) == 0:\n return [[], []]\n m = len(solutions[0])\n n = int((m + 1) / 2)\n intersect_in2d = [[]] * len(solutions)\n for i in range(len(solutions) - 1):\n for j in range(i + 1, len(solutions)):\n if solutions[i] == solutions[j]:\n continue\n elif d.boxes_intersection(solutions[i][:2], solutions[j][:2]) != [\n ] and (d.boxes_intersection(solutions[i][n:2 * n - 2], [[-\n Bi[1], -Bi[0]] for Bi in solutions[j][n:2 * n - 2]]) == [] and\n d.boxes_intersection(solutions[i][n:2 * n - 2], [[Bi[0], Bi\n [1]] for Bi in solutions[j][n:2 * n - 2]]) == [] or d.\n boxes_intersection(solutions[i][2:n] + [solutions[i][2 * n -\n 2]], solutions[j][2:n] + [solutions[j][2 * n - 2]]) == []):\n intersect_in2d[i] = intersect_in2d[i] + [j]\n accepted = []\n acc_ind = []\n unaccepted = []\n unacc_ind = []\n for i in range(len(solutions)):\n if len(intersect_in2d[i]) == 0 and i not in unacc_ind + acc_ind:\n accepted.append(solutions[i])\n acc_ind.append(i)\n continue\n elif i not in unacc_ind + acc_ind:\n unaccepted.append(solutions[i])\n unacc_ind.append(i)\n for k in intersect_in2d[i]:\n if k not in unacc_ind:\n unaccepted.append(solutions[k])\n unacc_ind.append(k)\n return [accepted, unaccepted]\n\n\ndef Ball_given_2nboxes(system, X, B1, B2, monotonicity_B1=1, monotonicity_B2=1\n ):\n B1_ft = [d.ftconstructor(Bi[0], Bi[1]) for Bi in B1]\n B2_ft = [d.ftconstructor(Bi[0], Bi[1]) for Bi in B2]\n P = [Pi.replace('\\n', '') for Pi in open(system, 'r').readlines()]\n sol = 'Empty'\n if d.boxes_intersection(B1_ft, B2_ft) == [\n ] and monotonicity_B1 == monotonicity_B2 == 1:\n t = estimating_t([[B1_ft], [B2_ft]])\n y_and_r = estimating_yandr([[B1_ft], [B2_ft]])\n intersec_B1B2_in2d = d.boxes_intersection(B1_ft[:2], B2_ft[:2])\n intersec_B1B2_in2d = [[float(Bi.lower()), float(Bi.upper())] for Bi in\n intersec_B1B2_in2d]\n B_Ball = intersec_B1B2_in2d + y_and_r + [t]\n Ball_node_gen(system, B_Ball, X)\n os.system('ibexsolve --eps-max=0.1 -s eq.txt > output.txt')\n sol = computing_boxes()\n return sol\n\n\n<function token>\n\n\ndef checking_assumptions(curve_data):\n if len(curve_data[0][1]) != 0:\n return 0\n Ball_sols_ft = [[d.ftconstructor(Bi[0], Bi[1]) for Bi in B] for B in\n curve_data[1][0]] + [[d.ftconstructor(Bi[0], Bi[1]) for Bi in B] for\n B in curve_data[1][1]]\n alph3 = assum_alph3_checker(Ball_sols_ft)\n if alph3 == 1:\n return 1\n else:\n return 0\n\n\ndef csv_saver(L, type_L='Ball'):\n dic = []\n if type_L == 'Ball':\n n = int((len(L[0]) + 1) / 2)\n for j in range(len(L)):\n dic.append({})\n for i in range(n):\n dic[j]['x' + str(i + 1)] = L[j][i]\n for i in range(n, 2 * n - 2):\n dic[j]['r' + str(i + 3 - n)] = L[j][i]\n dic[j]['t'] = L[j][2 * n - 2]\n return dic\n\n\n<function token>\n\n\ndef assum_alph3_checker(solutions):\n comparing_list = [[]] * len(solutions)\n for i in range(len(solutions) - 1):\n for j in range(i + 1, len(solutions)):\n if d.boxes_intersection(solutions[i][:2], solutions[j][:2]) != []:\n comparing_list[i].append(j)\n comparing_list[j].append(i)\n matching = [len(T) for T in comparing_list]\n if max(matching) <= 2:\n return 1\n else:\n return 0\n\n\ndef plotting_3D(boxes, Box, var=[0, 1, 2]):\n ax = plt.figure().add_subplot(111, projection='3d')\n ax.set_xlim(Box[0][0], Box[0][1])\n ax.set_ylim(Box[1][0], Box[1][1])\n ax.set_zlim(Box[2][0], Box[2][1])\n ax.set_xlabel('x' + str(var[0] + 1))\n ax.set_ylabel('x' + str(var[1] + 1))\n ax.set_zlabel('x' + str(var[2] + 1))\n for box in boxes:\n V = [[box[j][0] for j in range(3)], [box[j][1] for j in range(3)]]\n points = list(itertools.product(*box))\n faces = [[points[0], points[2], points[6], points[4]], [points[0],\n points[2], points[3], points[1]], [points[0], points[1], points\n [5], points[4]], [points[2], points[3], points[7], points[6]],\n [points[1], points[3], points[7], points[5]]]\n ax.add_collection3d(Poly3DCollection(faces, facecolors='green',\n linewidths=1, edgecolors='green', alpha=0.25))\n plt.show()\n\n\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<docstring token>\n",
"<import token>\n\n\ndef ploting_boxes(boxes, uncer_boxes, var=[0, 1], B=[[-20, 20], [-20, 20]],\n x=0.1, nodes=[], cusps=[], uncer_Solutions=[], Legend=False, color=\n 'green', variabel_name='x'):\n fig, ax = plt.subplots()\n ax.set_xlim(B[0][0], B[0][1])\n ax.set_ylim(B[1][0], B[1][1])\n ax.set_xlabel(variabel_name + str(1))\n ax.set_ylabel(variabel_name + str(2))\n \"\"\"try:\n ax.title(open(\"system.txt\",\"r\").read())\n except:\n pass\"\"\"\n c = 0\n green_patch = mpatches.Patch(color=color, label='smooth part')\n red_patch = mpatches.Patch(color='red', label='unknown part')\n node_patch = mpatches.Patch(color='black', label='Certified nodes',\n fill=None)\n cusp_patch = mpatches.Patch(color='blue', label=\n 'Projection of certified solution with t=0 ', fill=None)\n if Legend == True:\n plt.legend(handles=[green_patch, red_patch, node_patch, cusp_patch])\n for box in boxes:\n rectangle = plt.Rectangle((box[var[0]][0], box[var[1]][0]), box[var\n [0]][1] - box[var[0]][0], box[var[1]][1] - box[var[1]][0],\n color=color)\n plt.gca().add_patch(rectangle)\n for box in uncer_boxes:\n rectangle = plt.Rectangle((box[var[0]][0], box[var[1]][0]), box[var\n [0]][1] - box[var[0]][0], box[var[1]][1] - box[var[1]][0], fc='r')\n plt.gca().add_patch(rectangle)\n for box in nodes:\n rectangle = plt.Rectangle((box[0][0] - x, box[1][0] - x), 2 * x +\n box[0][1] - box[0][0], 2 * x + box[1][1] - box[1][0], fc='y',\n fill=None)\n plt.gca().add_patch(rectangle)\n for box in cusps:\n rectangle = plt.Rectangle((box[0][0] - x, box[1][0] - x), 2 * x +\n box[0][1] - box[0][0], 2 * x + box[1][1] - box[1][0], fc='y',\n color='blue', fill=None)\n plt.gca().add_patch(rectangle)\n for box in uncer_Solutions:\n rectangle = plt.Rectangle((box[0][0] - x, box[1][0] - x), 2 * x +\n box[0][1] - box[0][0], 2 * x + box[1][1] - box[1][0], fc='y',\n color='red', fill=None)\n plt.gca().add_patch(rectangle)\n plt.savefig('fig.jpg', dpi=1000)\n plt.show()\n\n\n<function token>\n\n\ndef Ball_solver(equations, B_Ball, X):\n L = [B_Ball]\n certified_boxes = []\n uncertified_boxes = []\n n = len(X)\n while len(L) != 0:\n solvability = 1\n if B_Ball[2 * n - 2][0] <= 0 <= B_Ball[2 * n - 2][1] and d.width([d\n .ftconstructor(Bi[0], Bi[1]) for Bi in L[0]]) < 0.1:\n Ball_cusp_gen(equations, B_Ball, X)\n elif (B_Ball[2 * n - 2][0] > 0 or 0 > B_Ball[2 * n - 2][1]\n ) and d.width([d.ftconstructor(Bi[0], Bi[1]) for Bi in L[0]]\n ) < 0.1:\n Ball_node_gen(equations, B_Ball, X)\n else:\n children = cb.plane_subdivision(L[0])\n L.remove(L[0])\n L += children\n solvability = 0\n if solvability == 1:\n ibex_output = cb.solving_with_ibex()\n if ibex_output[0] == 'Empty':\n L.remove(L[0])\n elif len(ibex_output[0]) != 0:\n certified_boxes += cb.computing_boxes(ibex_output[0])\n L.remove(L[0])\n elif len(ibex_output[1]) != 0:\n uncertified_boxes += cb.computing_boxes(ibex_output[1])\n L.remove(L[0])\n else:\n children = cb.plane_subdivision(L[0])\n L.remove(L[0])\n L += children\n return [certified_boxes, uncertified_boxes]\n\n\ndef SDP_str(P, X):\n n = len(X)\n P_pluse = P[:]\n P_minus = P[:]\n for i in range(2, n):\n P_pluse = P_pluse.replace('x' + str(i + 1), '(x' + str(i + 1) +\n '+ r' + str(i + 1) + '*sqrt(t))')\n P_minus = P_minus.replace('x' + str(i + 1), '(x' + str(i + 1) +\n '- r' + str(i + 1) + '*sqrt(t))')\n SP = '0.5*(' + P_pluse + '+' + P_minus + ')=0; \\n'\n DP = '0.5*(' + P_pluse + '- (' + P_minus + ') )/(sqrt(t))=0; \\n'\n return [SP, DP]\n\n\ndef Ball_generating_system(P, B_Ball, X, eps_min=0.001):\n n = len(X)\n V = ' Variables \\n '\n for i in range(n):\n if B_Ball[i][0] != B_Ball[i][1]:\n V += 'x' + str(i + 1) + ' in ' + str(B_Ball[i]) + ' ; \\n'\n else:\n V += 'x' + str(i + 1) + ' in ' + str([B_Ball[i][0] - eps_min, \n B_Ball[i][1] + eps_min]) + ' ; \\n'\n for i in range(n, 2 * n - 2):\n V += 'r' + str(i - n + 3) + ' in ' + str(B_Ball[i]) + ' ; \\n'\n V += 't' + ' in ' + str(B_Ball[2 * n - 2]) + ' ; \\n'\n V += 'Constraints \\n'\n for Pi in P:\n V += SDP_str(Pi, X)[0]\n V += SDP_str(Pi, X)[1]\n last_eq = ''\n for i in range(3, n):\n last_eq += 'r' + str(i) + '^2+'\n last_eq += 'r' + str(n) + '^2 -1=0;'\n V += last_eq + '\\n'\n f = open('eq.txt', 'w+')\n f.write(V)\n f.write('end')\n f.close()\n\n\n<function token>\n\n\ndef intersting_boxes(curve, b):\n cer_intersting_boxes = []\n uncer_intersting_boxes = []\n for box in curve[0]:\n if b[0][0] <= box[0][0] <= box[0][1] <= b[0][1] and b[1][0] <= box[1][0\n ] <= box[1][1] <= b[1][1]:\n cer_intersting_boxes.append(box)\n for box in curve[1]:\n if b[0][0] <= box[0][0] <= box[0][1] <= b[0][1] and b[1][0] <= box[1][0\n ] <= box[1][1] <= b[1][1]:\n uncer_intersting_boxes.append(box)\n return [cer_intersting_boxes, uncer_intersting_boxes]\n\n\n<function token>\n\n\ndef estimating_t1(components, upper_bound=200000):\n t1 = upper_bound\n t2 = 0\n for box1 in components[0]:\n for box2 in components[1]:\n a = d.distance(box1, box2).lower()\n b = d.distance(box1, box2).upper()\n if t1 > a:\n t1 = a\n if t2 < b:\n t2 = b\n t = d.ftconstructor(t1, t2)\n t = 0.25 * d.power_interval(t, 2)\n return [float(t.lower()), float(t.upper())]\n\n\ndef estimating_t(components, upper_bound=19000.8):\n t1 = upper_bound\n t2 = 0\n for box1 in components[0]:\n for box2 in components[1]:\n a = d.distance(box1[2:], box2[2:])\n if t1 > a[0]:\n t1 = a[0]\n if t2 < a[1]:\n t2 = a[1]\n t = d.ftconstructor(t1, t2)\n t = 0.25 * d.power_interval(t, 2)\n return [float(t.lower()), float(t.upper())]\n\n\ndef boxes_compare(box1, box2):\n flage = 0\n for i in range(len(box1) - 1, -1, -1):\n if box1[i][0] > box2[i][0]:\n return 1\n if box1[i][0] < box2[i][0]:\n return -1\n return 0\n\n\ndef boxes_sort(boxes):\n sorted_boxes = boxes[:]\n for i in range(len(boxes) - 1):\n for j in range(i + 1, len(boxes)):\n if boxes_compare(sorted_boxes[i], sorted_boxes[j]) == 1:\n sorted_boxes[i], sorted_boxes[j] = sorted_boxes[j\n ], sorted_boxes[i]\n return sorted_boxes\n\n\ndef connected_compnants(boxes):\n ftboxes = boxes[:]\n components = [[ftboxes[0]]]\n for i in range(1, len(ftboxes)):\n boxi_isused = 0\n for j in range(len(components)):\n membership = 0\n for k in range(len(components[j])):\n if d.boxes_intersection(ftboxes[i], components[j][k]) != []:\n components[j].append(ftboxes[i])\n membership = 1\n boxi_isused = 1\n break\n if membership == 1:\n break\n if boxi_isused == 0:\n components.append([ftboxes[i]])\n unused = list(range(len(components)))\n components1 = components[:]\n components2 = []\n while len(components1) != len(components2):\n for i in unused:\n for j in [j for j in list(range(i + 1, len(components))) if j in\n unused]:\n intersection_exists = False\n is_looping = True\n for boxi in components[i]:\n for boxj in components[j]:\n if d.boxes_intersection(boxi, boxj) != []:\n is_looping = False\n intersection_exists = True\n break\n if is_looping == False:\n break\n if intersection_exists == True:\n components[i] += components[j]\n unused.remove(j)\n components2 = components1[:]\n components1 = [components[k] for k in unused]\n return components1\n\n\ndef planner_connected_compnants(boxes):\n if len(boxes) == 0:\n return []\n ftboxes = boxes[:]\n components = [[ftboxes[0]]]\n for i in range(1, len(ftboxes)):\n boxi_isused = 0\n for j in range(len(components)):\n membership = 0\n for k in range(len(components[j])):\n if d.boxes_intersection(ftboxes[i][:2], components[j][k][:2]\n ) != []:\n components[j].append(ftboxes[i])\n membership = 1\n boxi_isused = 1\n break\n if membership == 1:\n break\n if boxi_isused == 0:\n components.append([ftboxes[i]])\n unused = list(range(len(components)))\n components1 = components[:]\n components2 = []\n while len(components1) != len(components2):\n for i in unused:\n for j in [j for j in list(range(i + 1, len(components))) if j in\n unused]:\n intersection_exists = False\n is_looping = True\n for boxi in components[i]:\n for boxj in components[j]:\n if d.boxes_intersection(boxi[:2], boxj[:2]) != []:\n is_looping = False\n intersection_exists = True\n break\n if is_looping == False:\n break\n if intersection_exists == True:\n components[i] += components[j]\n unused.remove(j)\n components2 = components1[:]\n components1 = [components[k] for k in unused]\n return components1\n\n\n<function token>\n<function token>\n\n\ndef intersect_in_2D(class1, class2, monotonicity=1):\n pl_intesected_pairs = []\n if monotonicity == 1:\n for i in range(len(class1)):\n for j in range(len(class2)):\n if d.boxes_intersection(class1[i][:2], class2[j][:2]) != [\n ] and d.boxes_intersection(class1[i], class2[j]) == []:\n if [class2[j], class1[i]] not in pl_intesected_pairs:\n pl_intesected_pairs.append([class1[i], class2[j]])\n elif monotonicity == 0:\n for i in range(len(class1)):\n for j in range(len(class2)):\n if d.boxes_intersection(class1[i][:2], class2[j][:2]) != []:\n if [class2[j], class1[i]] not in pl_intesected_pairs:\n pl_intesected_pairs.append([class1[i], class2[j]])\n elif monotonicity == 2:\n inters_indic = []\n for i in range(len(class1)):\n inters_indic.append([])\n for j in range(len(class2)):\n if d.boxes_intersection(class1[i][:2], class2[j][:2]) != []:\n inters_indic[i] = inters_indic[i] + [j]\n for k in range(len(class1)):\n if len(inters_indic[k]) > 3:\n for j in range(len(inters_indic[k])):\n if [class2[j], class1[k]] not in pl_intesected_pairs:\n pl_intesected_pairs.append([class1[k], class2[j]])\n return pl_intesected_pairs\n\n\ndef solving_fornodes(equations, boxes, B, X, eps=0.1):\n plane_components = detecting_nodes(boxes, B, equations, X, eps)\n g = open(equations, 'r')\n P = [Pi.replace('\\n', '') for Pi in g.readlines()]\n Ball_solutions = []\n for plane_component in plane_components:\n x1 = float(min([ai[0].lower() for ai in plane_component]))\n x2 = float(max([ai[0].upper() for ai in plane_component]))\n y1 = float(min([ai[1].lower() for ai in plane_component]))\n y2 = float(max([ai[1].upper() for ai in plane_component]))\n components = connected_compnants(plane_component)\n r = [[float(ri[0]), float(ri[1])] for ri in estimating_r(components)]\n t = estimating_t(components)\n t = [float(t[0]), float(t[1])]\n B_Ball = [[x1, x2], [y1, y2]] + r + [t]\n Ball_generating_system(P, B_Ball, X)\n solutionsi = ibex_output(P, B_Ball, X)\n Ball_solutions += solutionsi\n return Ball_solutions\n\n\n<function token>\n\n\ndef plane_subdivision(B):\n ft_B2 = d.subdivide([d.ftconstructor(Bi[0], Bi[1]) for Bi in B[:2]])\n normal_B2 = [d.ft_normal(Bi) for Bi in ft_B2]\n return d.cartesian_product(normal_B2, [B[2:]])\n\n\ndef system_generator(f, B, X):\n g = open(f, 'r')\n L = g.readlines()\n g.close()\n f = open('eq.txt', 'w+')\n f.write('Variables \\n')\n for i in range(len(X)):\n f.write(str(X[i]) + ' in ' + str(B[i]) + ' ; \\n')\n f.write('Constraints \\n')\n for Li in L:\n f.write(Li.replace('\\n', '') + '=0; \\n')\n f.write('end ')\n f.close()\n return f\n\n\n<function token>\n\n\ndef computing_boxes():\n if 'infeasible' in open('output.txt', 'r').read():\n return 'Empty'\n content = open('output.txt', 'r').readlines()\n cer = []\n uncer = []\n i = 0\n Answer = []\n for fi in content:\n try:\n a = fi.index('(')\n b = fi.index(')')\n T = fi[a:b + 1].replace('(', '[')\n T = fi[a:b + 1].replace('(', '[')\n T = T.replace(')', ']')\n T = T.split(';')\n E = []\n i = 0\n for Ti in T:\n Ti = Ti.replace('[', '')\n Ti = Ti.replace(']', '')\n Ti = Ti.replace('<', '')\n Ti = Ti.replace('>', '')\n x = Ti.index(',')\n a = float(Ti[:x])\n b = float(Ti[x + 1:])\n E.append([])\n E[i] = [a, b]\n i += 1\n if 'solution n' in fi or 'boundary n' in fi:\n cer.append(E)\n elif 'unknown n' in fi:\n uncer.append(E)\n except ValueError:\n pass\n return [cer, uncer]\n\n\ndef enclosing_curve(system, B, X, eps_min=0.1, eps_max=0.1):\n L = [B]\n certified_boxes = []\n uncertified_boxes = []\n while len(L) != 0:\n system_generator(system, L[0], X)\n os.system('ibexsolve --eps-max=' + str(eps_max) + ' --eps-min=' +\n str(eps_min) + ' -s eq.txt > output.txt')\n content = open('output.txt', 'r').readlines()\n ibex_output = computing_boxes()\n if ibex_output == [[], []] and max([(Bi[1] - Bi[0]) for Bi in L[0]]\n ) < eps_min:\n uncertified_boxes.append(L[0])\n L.remove(L[0])\n elif ibex_output == [[], []]:\n children = plane_subdivision(L[0])\n L.remove(L[0])\n L += children\n elif ibex_output == 'Empty':\n L.remove(L[0])\n else:\n if len(ibex_output[0]) != 0:\n certified_boxes += ibex_output[0]\n if len(ibex_output[1]) != 0:\n uncertified_boxes += ibex_output[1]\n L.remove(L[0])\n return [certified_boxes, uncertified_boxes]\n\n\n<function token>\n\n\ndef eval_file_gen(f, boxes, X, special_function=[]):\n functions = ['sin', 'cos', 'tan', 'exp'] + special_function\n if len(boxes[0]) == 0:\n return []\n n = len(boxes[0])\n m = len(boxes)\n g = open(f, 'r')\n P_str = g.readlines()\n P_str = [Pi.replace('\\n', '') for Pi in P_str]\n P_str = [Pi.replace('^', '**') for Pi in P_str]\n P_exp = [parse_expr(Pi) for Pi in P_str]\n jac = sp.Matrix(P_str).jacobian(sp.Matrix(X))\n minor1 = jac[:, 1:].det()\n minor2 = jac[:, [i for i in range(n) if i != 1]].det()\n fil = open('evaluation_file1.py', 'w')\n fil.write('import flint as ft \\n')\n fil.write('import sympy as sp \\n')\n fil.write('import interval_arithmetic as d \\n')\n fil.write('boxes=' + str(boxes) + '\\n')\n fil.write(\n 'ftboxes=[ [d.ftconstructor(Bi[0],Bi[1]) for Bi in B ] for B in boxes ] \\n'\n )\n fil.write('n=len(boxes[0])\\n')\n fil.write('m=len(boxes)\\n')\n fil.write('m1=[]\\n')\n fil.write('m2=[]\\n')\n minor1_str = str(minor1)\n minor2_str = str(minor2)\n for i in range(n):\n minor1_str = minor1_str.replace('x' + str(i + 1), 'B[' + str(i) + ']')\n minor2_str = minor2_str.replace('x' + str(i + 1), 'B[' + str(i) + ']')\n for func in functions:\n minor1_str = minor1_str.replace(func, 'ft.arb.' + func)\n minor2_str = minor2_str.replace(func, 'ft.arb.' + func)\n fil.write('for B in ftboxes: \\n')\n fil.write(' m1.append(ft.arb(' + minor1_str + ')) \\n')\n fil.write(' m2.append( ft.arb(' + minor2_str + ')) \\n')\n fil.write(\n 'innrer_loops=[i for i in range(m) if 0 in m1[i] and 0 in m2[i] ]\\n')\n fil.write('print(innrer_loops)\\n')\n fil.close()\n t = os.popen('python3 evaluation_file1.py ').read()\n return t\n\n\n<function token>\n\n\ndef projection_checker(solutions):\n if len(solutions) == 0:\n return [[], []]\n m = len(solutions[0])\n n = int((m + 1) / 2)\n intersect_in2d = [[]] * len(solutions)\n for i in range(len(solutions) - 1):\n for j in range(i + 1, len(solutions)):\n if solutions[i] == solutions[j]:\n continue\n elif d.boxes_intersection(solutions[i][:2], solutions[j][:2]) != [\n ] and (d.boxes_intersection(solutions[i][n:2 * n - 2], [[-\n Bi[1], -Bi[0]] for Bi in solutions[j][n:2 * n - 2]]) == [] and\n d.boxes_intersection(solutions[i][n:2 * n - 2], [[Bi[0], Bi\n [1]] for Bi in solutions[j][n:2 * n - 2]]) == [] or d.\n boxes_intersection(solutions[i][2:n] + [solutions[i][2 * n -\n 2]], solutions[j][2:n] + [solutions[j][2 * n - 2]]) == []):\n intersect_in2d[i] = intersect_in2d[i] + [j]\n accepted = []\n acc_ind = []\n unaccepted = []\n unacc_ind = []\n for i in range(len(solutions)):\n if len(intersect_in2d[i]) == 0 and i not in unacc_ind + acc_ind:\n accepted.append(solutions[i])\n acc_ind.append(i)\n continue\n elif i not in unacc_ind + acc_ind:\n unaccepted.append(solutions[i])\n unacc_ind.append(i)\n for k in intersect_in2d[i]:\n if k not in unacc_ind:\n unaccepted.append(solutions[k])\n unacc_ind.append(k)\n return [accepted, unaccepted]\n\n\ndef Ball_given_2nboxes(system, X, B1, B2, monotonicity_B1=1, monotonicity_B2=1\n ):\n B1_ft = [d.ftconstructor(Bi[0], Bi[1]) for Bi in B1]\n B2_ft = [d.ftconstructor(Bi[0], Bi[1]) for Bi in B2]\n P = [Pi.replace('\\n', '') for Pi in open(system, 'r').readlines()]\n sol = 'Empty'\n if d.boxes_intersection(B1_ft, B2_ft) == [\n ] and monotonicity_B1 == monotonicity_B2 == 1:\n t = estimating_t([[B1_ft], [B2_ft]])\n y_and_r = estimating_yandr([[B1_ft], [B2_ft]])\n intersec_B1B2_in2d = d.boxes_intersection(B1_ft[:2], B2_ft[:2])\n intersec_B1B2_in2d = [[float(Bi.lower()), float(Bi.upper())] for Bi in\n intersec_B1B2_in2d]\n B_Ball = intersec_B1B2_in2d + y_and_r + [t]\n Ball_node_gen(system, B_Ball, X)\n os.system('ibexsolve --eps-max=0.1 -s eq.txt > output.txt')\n sol = computing_boxes()\n return sol\n\n\n<function token>\n\n\ndef checking_assumptions(curve_data):\n if len(curve_data[0][1]) != 0:\n return 0\n Ball_sols_ft = [[d.ftconstructor(Bi[0], Bi[1]) for Bi in B] for B in\n curve_data[1][0]] + [[d.ftconstructor(Bi[0], Bi[1]) for Bi in B] for\n B in curve_data[1][1]]\n alph3 = assum_alph3_checker(Ball_sols_ft)\n if alph3 == 1:\n return 1\n else:\n return 0\n\n\n<function token>\n<function token>\n\n\ndef assum_alph3_checker(solutions):\n comparing_list = [[]] * len(solutions)\n for i in range(len(solutions) - 1):\n for j in range(i + 1, len(solutions)):\n if d.boxes_intersection(solutions[i][:2], solutions[j][:2]) != []:\n comparing_list[i].append(j)\n comparing_list[j].append(i)\n matching = [len(T) for T in comparing_list]\n if max(matching) <= 2:\n return 1\n else:\n return 0\n\n\ndef plotting_3D(boxes, Box, var=[0, 1, 2]):\n ax = plt.figure().add_subplot(111, projection='3d')\n ax.set_xlim(Box[0][0], Box[0][1])\n ax.set_ylim(Box[1][0], Box[1][1])\n ax.set_zlim(Box[2][0], Box[2][1])\n ax.set_xlabel('x' + str(var[0] + 1))\n ax.set_ylabel('x' + str(var[1] + 1))\n ax.set_zlabel('x' + str(var[2] + 1))\n for box in boxes:\n V = [[box[j][0] for j in range(3)], [box[j][1] for j in range(3)]]\n points = list(itertools.product(*box))\n faces = [[points[0], points[2], points[6], points[4]], [points[0],\n points[2], points[3], points[1]], [points[0], points[1], points\n [5], points[4]], [points[2], points[3], points[7], points[6]],\n [points[1], points[3], points[7], points[5]]]\n ax.add_collection3d(Poly3DCollection(faces, facecolors='green',\n linewidths=1, edgecolors='green', alpha=0.25))\n plt.show()\n\n\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<docstring token>\n",
"<import token>\n\n\ndef ploting_boxes(boxes, uncer_boxes, var=[0, 1], B=[[-20, 20], [-20, 20]],\n x=0.1, nodes=[], cusps=[], uncer_Solutions=[], Legend=False, color=\n 'green', variabel_name='x'):\n fig, ax = plt.subplots()\n ax.set_xlim(B[0][0], B[0][1])\n ax.set_ylim(B[1][0], B[1][1])\n ax.set_xlabel(variabel_name + str(1))\n ax.set_ylabel(variabel_name + str(2))\n \"\"\"try:\n ax.title(open(\"system.txt\",\"r\").read())\n except:\n pass\"\"\"\n c = 0\n green_patch = mpatches.Patch(color=color, label='smooth part')\n red_patch = mpatches.Patch(color='red', label='unknown part')\n node_patch = mpatches.Patch(color='black', label='Certified nodes',\n fill=None)\n cusp_patch = mpatches.Patch(color='blue', label=\n 'Projection of certified solution with t=0 ', fill=None)\n if Legend == True:\n plt.legend(handles=[green_patch, red_patch, node_patch, cusp_patch])\n for box in boxes:\n rectangle = plt.Rectangle((box[var[0]][0], box[var[1]][0]), box[var\n [0]][1] - box[var[0]][0], box[var[1]][1] - box[var[1]][0],\n color=color)\n plt.gca().add_patch(rectangle)\n for box in uncer_boxes:\n rectangle = plt.Rectangle((box[var[0]][0], box[var[1]][0]), box[var\n [0]][1] - box[var[0]][0], box[var[1]][1] - box[var[1]][0], fc='r')\n plt.gca().add_patch(rectangle)\n for box in nodes:\n rectangle = plt.Rectangle((box[0][0] - x, box[1][0] - x), 2 * x +\n box[0][1] - box[0][0], 2 * x + box[1][1] - box[1][0], fc='y',\n fill=None)\n plt.gca().add_patch(rectangle)\n for box in cusps:\n rectangle = plt.Rectangle((box[0][0] - x, box[1][0] - x), 2 * x +\n box[0][1] - box[0][0], 2 * x + box[1][1] - box[1][0], fc='y',\n color='blue', fill=None)\n plt.gca().add_patch(rectangle)\n for box in uncer_Solutions:\n rectangle = plt.Rectangle((box[0][0] - x, box[1][0] - x), 2 * x +\n box[0][1] - box[0][0], 2 * x + box[1][1] - box[1][0], fc='y',\n color='red', fill=None)\n plt.gca().add_patch(rectangle)\n plt.savefig('fig.jpg', dpi=1000)\n plt.show()\n\n\n<function token>\n\n\ndef Ball_solver(equations, B_Ball, X):\n L = [B_Ball]\n certified_boxes = []\n uncertified_boxes = []\n n = len(X)\n while len(L) != 0:\n solvability = 1\n if B_Ball[2 * n - 2][0] <= 0 <= B_Ball[2 * n - 2][1] and d.width([d\n .ftconstructor(Bi[0], Bi[1]) for Bi in L[0]]) < 0.1:\n Ball_cusp_gen(equations, B_Ball, X)\n elif (B_Ball[2 * n - 2][0] > 0 or 0 > B_Ball[2 * n - 2][1]\n ) and d.width([d.ftconstructor(Bi[0], Bi[1]) for Bi in L[0]]\n ) < 0.1:\n Ball_node_gen(equations, B_Ball, X)\n else:\n children = cb.plane_subdivision(L[0])\n L.remove(L[0])\n L += children\n solvability = 0\n if solvability == 1:\n ibex_output = cb.solving_with_ibex()\n if ibex_output[0] == 'Empty':\n L.remove(L[0])\n elif len(ibex_output[0]) != 0:\n certified_boxes += cb.computing_boxes(ibex_output[0])\n L.remove(L[0])\n elif len(ibex_output[1]) != 0:\n uncertified_boxes += cb.computing_boxes(ibex_output[1])\n L.remove(L[0])\n else:\n children = cb.plane_subdivision(L[0])\n L.remove(L[0])\n L += children\n return [certified_boxes, uncertified_boxes]\n\n\ndef SDP_str(P, X):\n n = len(X)\n P_pluse = P[:]\n P_minus = P[:]\n for i in range(2, n):\n P_pluse = P_pluse.replace('x' + str(i + 1), '(x' + str(i + 1) +\n '+ r' + str(i + 1) + '*sqrt(t))')\n P_minus = P_minus.replace('x' + str(i + 1), '(x' + str(i + 1) +\n '- r' + str(i + 1) + '*sqrt(t))')\n SP = '0.5*(' + P_pluse + '+' + P_minus + ')=0; \\n'\n DP = '0.5*(' + P_pluse + '- (' + P_minus + ') )/(sqrt(t))=0; \\n'\n return [SP, DP]\n\n\ndef Ball_generating_system(P, B_Ball, X, eps_min=0.001):\n n = len(X)\n V = ' Variables \\n '\n for i in range(n):\n if B_Ball[i][0] != B_Ball[i][1]:\n V += 'x' + str(i + 1) + ' in ' + str(B_Ball[i]) + ' ; \\n'\n else:\n V += 'x' + str(i + 1) + ' in ' + str([B_Ball[i][0] - eps_min, \n B_Ball[i][1] + eps_min]) + ' ; \\n'\n for i in range(n, 2 * n - 2):\n V += 'r' + str(i - n + 3) + ' in ' + str(B_Ball[i]) + ' ; \\n'\n V += 't' + ' in ' + str(B_Ball[2 * n - 2]) + ' ; \\n'\n V += 'Constraints \\n'\n for Pi in P:\n V += SDP_str(Pi, X)[0]\n V += SDP_str(Pi, X)[1]\n last_eq = ''\n for i in range(3, n):\n last_eq += 'r' + str(i) + '^2+'\n last_eq += 'r' + str(n) + '^2 -1=0;'\n V += last_eq + '\\n'\n f = open('eq.txt', 'w+')\n f.write(V)\n f.write('end')\n f.close()\n\n\n<function token>\n\n\ndef intersting_boxes(curve, b):\n cer_intersting_boxes = []\n uncer_intersting_boxes = []\n for box in curve[0]:\n if b[0][0] <= box[0][0] <= box[0][1] <= b[0][1] and b[1][0] <= box[1][0\n ] <= box[1][1] <= b[1][1]:\n cer_intersting_boxes.append(box)\n for box in curve[1]:\n if b[0][0] <= box[0][0] <= box[0][1] <= b[0][1] and b[1][0] <= box[1][0\n ] <= box[1][1] <= b[1][1]:\n uncer_intersting_boxes.append(box)\n return [cer_intersting_boxes, uncer_intersting_boxes]\n\n\n<function token>\n\n\ndef estimating_t1(components, upper_bound=200000):\n t1 = upper_bound\n t2 = 0\n for box1 in components[0]:\n for box2 in components[1]:\n a = d.distance(box1, box2).lower()\n b = d.distance(box1, box2).upper()\n if t1 > a:\n t1 = a\n if t2 < b:\n t2 = b\n t = d.ftconstructor(t1, t2)\n t = 0.25 * d.power_interval(t, 2)\n return [float(t.lower()), float(t.upper())]\n\n\ndef estimating_t(components, upper_bound=19000.8):\n t1 = upper_bound\n t2 = 0\n for box1 in components[0]:\n for box2 in components[1]:\n a = d.distance(box1[2:], box2[2:])\n if t1 > a[0]:\n t1 = a[0]\n if t2 < a[1]:\n t2 = a[1]\n t = d.ftconstructor(t1, t2)\n t = 0.25 * d.power_interval(t, 2)\n return [float(t.lower()), float(t.upper())]\n\n\ndef boxes_compare(box1, box2):\n flage = 0\n for i in range(len(box1) - 1, -1, -1):\n if box1[i][0] > box2[i][0]:\n return 1\n if box1[i][0] < box2[i][0]:\n return -1\n return 0\n\n\ndef boxes_sort(boxes):\n sorted_boxes = boxes[:]\n for i in range(len(boxes) - 1):\n for j in range(i + 1, len(boxes)):\n if boxes_compare(sorted_boxes[i], sorted_boxes[j]) == 1:\n sorted_boxes[i], sorted_boxes[j] = sorted_boxes[j\n ], sorted_boxes[i]\n return sorted_boxes\n\n\ndef connected_compnants(boxes):\n ftboxes = boxes[:]\n components = [[ftboxes[0]]]\n for i in range(1, len(ftboxes)):\n boxi_isused = 0\n for j in range(len(components)):\n membership = 0\n for k in range(len(components[j])):\n if d.boxes_intersection(ftboxes[i], components[j][k]) != []:\n components[j].append(ftboxes[i])\n membership = 1\n boxi_isused = 1\n break\n if membership == 1:\n break\n if boxi_isused == 0:\n components.append([ftboxes[i]])\n unused = list(range(len(components)))\n components1 = components[:]\n components2 = []\n while len(components1) != len(components2):\n for i in unused:\n for j in [j for j in list(range(i + 1, len(components))) if j in\n unused]:\n intersection_exists = False\n is_looping = True\n for boxi in components[i]:\n for boxj in components[j]:\n if d.boxes_intersection(boxi, boxj) != []:\n is_looping = False\n intersection_exists = True\n break\n if is_looping == False:\n break\n if intersection_exists == True:\n components[i] += components[j]\n unused.remove(j)\n components2 = components1[:]\n components1 = [components[k] for k in unused]\n return components1\n\n\ndef planner_connected_compnants(boxes):\n if len(boxes) == 0:\n return []\n ftboxes = boxes[:]\n components = [[ftboxes[0]]]\n for i in range(1, len(ftboxes)):\n boxi_isused = 0\n for j in range(len(components)):\n membership = 0\n for k in range(len(components[j])):\n if d.boxes_intersection(ftboxes[i][:2], components[j][k][:2]\n ) != []:\n components[j].append(ftboxes[i])\n membership = 1\n boxi_isused = 1\n break\n if membership == 1:\n break\n if boxi_isused == 0:\n components.append([ftboxes[i]])\n unused = list(range(len(components)))\n components1 = components[:]\n components2 = []\n while len(components1) != len(components2):\n for i in unused:\n for j in [j for j in list(range(i + 1, len(components))) if j in\n unused]:\n intersection_exists = False\n is_looping = True\n for boxi in components[i]:\n for boxj in components[j]:\n if d.boxes_intersection(boxi[:2], boxj[:2]) != []:\n is_looping = False\n intersection_exists = True\n break\n if is_looping == False:\n break\n if intersection_exists == True:\n components[i] += components[j]\n unused.remove(j)\n components2 = components1[:]\n components1 = [components[k] for k in unused]\n return components1\n\n\n<function token>\n<function token>\n\n\ndef intersect_in_2D(class1, class2, monotonicity=1):\n pl_intesected_pairs = []\n if monotonicity == 1:\n for i in range(len(class1)):\n for j in range(len(class2)):\n if d.boxes_intersection(class1[i][:2], class2[j][:2]) != [\n ] and d.boxes_intersection(class1[i], class2[j]) == []:\n if [class2[j], class1[i]] not in pl_intesected_pairs:\n pl_intesected_pairs.append([class1[i], class2[j]])\n elif monotonicity == 0:\n for i in range(len(class1)):\n for j in range(len(class2)):\n if d.boxes_intersection(class1[i][:2], class2[j][:2]) != []:\n if [class2[j], class1[i]] not in pl_intesected_pairs:\n pl_intesected_pairs.append([class1[i], class2[j]])\n elif monotonicity == 2:\n inters_indic = []\n for i in range(len(class1)):\n inters_indic.append([])\n for j in range(len(class2)):\n if d.boxes_intersection(class1[i][:2], class2[j][:2]) != []:\n inters_indic[i] = inters_indic[i] + [j]\n for k in range(len(class1)):\n if len(inters_indic[k]) > 3:\n for j in range(len(inters_indic[k])):\n if [class2[j], class1[k]] not in pl_intesected_pairs:\n pl_intesected_pairs.append([class1[k], class2[j]])\n return pl_intesected_pairs\n\n\ndef solving_fornodes(equations, boxes, B, X, eps=0.1):\n plane_components = detecting_nodes(boxes, B, equations, X, eps)\n g = open(equations, 'r')\n P = [Pi.replace('\\n', '') for Pi in g.readlines()]\n Ball_solutions = []\n for plane_component in plane_components:\n x1 = float(min([ai[0].lower() for ai in plane_component]))\n x2 = float(max([ai[0].upper() for ai in plane_component]))\n y1 = float(min([ai[1].lower() for ai in plane_component]))\n y2 = float(max([ai[1].upper() for ai in plane_component]))\n components = connected_compnants(plane_component)\n r = [[float(ri[0]), float(ri[1])] for ri in estimating_r(components)]\n t = estimating_t(components)\n t = [float(t[0]), float(t[1])]\n B_Ball = [[x1, x2], [y1, y2]] + r + [t]\n Ball_generating_system(P, B_Ball, X)\n solutionsi = ibex_output(P, B_Ball, X)\n Ball_solutions += solutionsi\n return Ball_solutions\n\n\n<function token>\n\n\ndef plane_subdivision(B):\n ft_B2 = d.subdivide([d.ftconstructor(Bi[0], Bi[1]) for Bi in B[:2]])\n normal_B2 = [d.ft_normal(Bi) for Bi in ft_B2]\n return d.cartesian_product(normal_B2, [B[2:]])\n\n\n<function token>\n<function token>\n\n\ndef computing_boxes():\n if 'infeasible' in open('output.txt', 'r').read():\n return 'Empty'\n content = open('output.txt', 'r').readlines()\n cer = []\n uncer = []\n i = 0\n Answer = []\n for fi in content:\n try:\n a = fi.index('(')\n b = fi.index(')')\n T = fi[a:b + 1].replace('(', '[')\n T = fi[a:b + 1].replace('(', '[')\n T = T.replace(')', ']')\n T = T.split(';')\n E = []\n i = 0\n for Ti in T:\n Ti = Ti.replace('[', '')\n Ti = Ti.replace(']', '')\n Ti = Ti.replace('<', '')\n Ti = Ti.replace('>', '')\n x = Ti.index(',')\n a = float(Ti[:x])\n b = float(Ti[x + 1:])\n E.append([])\n E[i] = [a, b]\n i += 1\n if 'solution n' in fi or 'boundary n' in fi:\n cer.append(E)\n elif 'unknown n' in fi:\n uncer.append(E)\n except ValueError:\n pass\n return [cer, uncer]\n\n\ndef enclosing_curve(system, B, X, eps_min=0.1, eps_max=0.1):\n L = [B]\n certified_boxes = []\n uncertified_boxes = []\n while len(L) != 0:\n system_generator(system, L[0], X)\n os.system('ibexsolve --eps-max=' + str(eps_max) + ' --eps-min=' +\n str(eps_min) + ' -s eq.txt > output.txt')\n content = open('output.txt', 'r').readlines()\n ibex_output = computing_boxes()\n if ibex_output == [[], []] and max([(Bi[1] - Bi[0]) for Bi in L[0]]\n ) < eps_min:\n uncertified_boxes.append(L[0])\n L.remove(L[0])\n elif ibex_output == [[], []]:\n children = plane_subdivision(L[0])\n L.remove(L[0])\n L += children\n elif ibex_output == 'Empty':\n L.remove(L[0])\n else:\n if len(ibex_output[0]) != 0:\n certified_boxes += ibex_output[0]\n if len(ibex_output[1]) != 0:\n uncertified_boxes += ibex_output[1]\n L.remove(L[0])\n return [certified_boxes, uncertified_boxes]\n\n\n<function token>\n\n\ndef eval_file_gen(f, boxes, X, special_function=[]):\n functions = ['sin', 'cos', 'tan', 'exp'] + special_function\n if len(boxes[0]) == 0:\n return []\n n = len(boxes[0])\n m = len(boxes)\n g = open(f, 'r')\n P_str = g.readlines()\n P_str = [Pi.replace('\\n', '') for Pi in P_str]\n P_str = [Pi.replace('^', '**') for Pi in P_str]\n P_exp = [parse_expr(Pi) for Pi in P_str]\n jac = sp.Matrix(P_str).jacobian(sp.Matrix(X))\n minor1 = jac[:, 1:].det()\n minor2 = jac[:, [i for i in range(n) if i != 1]].det()\n fil = open('evaluation_file1.py', 'w')\n fil.write('import flint as ft \\n')\n fil.write('import sympy as sp \\n')\n fil.write('import interval_arithmetic as d \\n')\n fil.write('boxes=' + str(boxes) + '\\n')\n fil.write(\n 'ftboxes=[ [d.ftconstructor(Bi[0],Bi[1]) for Bi in B ] for B in boxes ] \\n'\n )\n fil.write('n=len(boxes[0])\\n')\n fil.write('m=len(boxes)\\n')\n fil.write('m1=[]\\n')\n fil.write('m2=[]\\n')\n minor1_str = str(minor1)\n minor2_str = str(minor2)\n for i in range(n):\n minor1_str = minor1_str.replace('x' + str(i + 1), 'B[' + str(i) + ']')\n minor2_str = minor2_str.replace('x' + str(i + 1), 'B[' + str(i) + ']')\n for func in functions:\n minor1_str = minor1_str.replace(func, 'ft.arb.' + func)\n minor2_str = minor2_str.replace(func, 'ft.arb.' + func)\n fil.write('for B in ftboxes: \\n')\n fil.write(' m1.append(ft.arb(' + minor1_str + ')) \\n')\n fil.write(' m2.append( ft.arb(' + minor2_str + ')) \\n')\n fil.write(\n 'innrer_loops=[i for i in range(m) if 0 in m1[i] and 0 in m2[i] ]\\n')\n fil.write('print(innrer_loops)\\n')\n fil.close()\n t = os.popen('python3 evaluation_file1.py ').read()\n return t\n\n\n<function token>\n\n\ndef projection_checker(solutions):\n if len(solutions) == 0:\n return [[], []]\n m = len(solutions[0])\n n = int((m + 1) / 2)\n intersect_in2d = [[]] * len(solutions)\n for i in range(len(solutions) - 1):\n for j in range(i + 1, len(solutions)):\n if solutions[i] == solutions[j]:\n continue\n elif d.boxes_intersection(solutions[i][:2], solutions[j][:2]) != [\n ] and (d.boxes_intersection(solutions[i][n:2 * n - 2], [[-\n Bi[1], -Bi[0]] for Bi in solutions[j][n:2 * n - 2]]) == [] and\n d.boxes_intersection(solutions[i][n:2 * n - 2], [[Bi[0], Bi\n [1]] for Bi in solutions[j][n:2 * n - 2]]) == [] or d.\n boxes_intersection(solutions[i][2:n] + [solutions[i][2 * n -\n 2]], solutions[j][2:n] + [solutions[j][2 * n - 2]]) == []):\n intersect_in2d[i] = intersect_in2d[i] + [j]\n accepted = []\n acc_ind = []\n unaccepted = []\n unacc_ind = []\n for i in range(len(solutions)):\n if len(intersect_in2d[i]) == 0 and i not in unacc_ind + acc_ind:\n accepted.append(solutions[i])\n acc_ind.append(i)\n continue\n elif i not in unacc_ind + acc_ind:\n unaccepted.append(solutions[i])\n unacc_ind.append(i)\n for k in intersect_in2d[i]:\n if k not in unacc_ind:\n unaccepted.append(solutions[k])\n unacc_ind.append(k)\n return [accepted, unaccepted]\n\n\ndef Ball_given_2nboxes(system, X, B1, B2, monotonicity_B1=1, monotonicity_B2=1\n ):\n B1_ft = [d.ftconstructor(Bi[0], Bi[1]) for Bi in B1]\n B2_ft = [d.ftconstructor(Bi[0], Bi[1]) for Bi in B2]\n P = [Pi.replace('\\n', '') for Pi in open(system, 'r').readlines()]\n sol = 'Empty'\n if d.boxes_intersection(B1_ft, B2_ft) == [\n ] and monotonicity_B1 == monotonicity_B2 == 1:\n t = estimating_t([[B1_ft], [B2_ft]])\n y_and_r = estimating_yandr([[B1_ft], [B2_ft]])\n intersec_B1B2_in2d = d.boxes_intersection(B1_ft[:2], B2_ft[:2])\n intersec_B1B2_in2d = [[float(Bi.lower()), float(Bi.upper())] for Bi in\n intersec_B1B2_in2d]\n B_Ball = intersec_B1B2_in2d + y_and_r + [t]\n Ball_node_gen(system, B_Ball, X)\n os.system('ibexsolve --eps-max=0.1 -s eq.txt > output.txt')\n sol = computing_boxes()\n return sol\n\n\n<function token>\n\n\ndef checking_assumptions(curve_data):\n if len(curve_data[0][1]) != 0:\n return 0\n Ball_sols_ft = [[d.ftconstructor(Bi[0], Bi[1]) for Bi in B] for B in\n curve_data[1][0]] + [[d.ftconstructor(Bi[0], Bi[1]) for Bi in B] for\n B in curve_data[1][1]]\n alph3 = assum_alph3_checker(Ball_sols_ft)\n if alph3 == 1:\n return 1\n else:\n return 0\n\n\n<function token>\n<function token>\n\n\ndef assum_alph3_checker(solutions):\n comparing_list = [[]] * len(solutions)\n for i in range(len(solutions) - 1):\n for j in range(i + 1, len(solutions)):\n if d.boxes_intersection(solutions[i][:2], solutions[j][:2]) != []:\n comparing_list[i].append(j)\n comparing_list[j].append(i)\n matching = [len(T) for T in comparing_list]\n if max(matching) <= 2:\n return 1\n else:\n return 0\n\n\ndef plotting_3D(boxes, Box, var=[0, 1, 2]):\n ax = plt.figure().add_subplot(111, projection='3d')\n ax.set_xlim(Box[0][0], Box[0][1])\n ax.set_ylim(Box[1][0], Box[1][1])\n ax.set_zlim(Box[2][0], Box[2][1])\n ax.set_xlabel('x' + str(var[0] + 1))\n ax.set_ylabel('x' + str(var[1] + 1))\n ax.set_zlabel('x' + str(var[2] + 1))\n for box in boxes:\n V = [[box[j][0] for j in range(3)], [box[j][1] for j in range(3)]]\n points = list(itertools.product(*box))\n faces = [[points[0], points[2], points[6], points[4]], [points[0],\n points[2], points[3], points[1]], [points[0], points[1], points\n [5], points[4]], [points[2], points[3], points[7], points[6]],\n [points[1], points[3], points[7], points[5]]]\n ax.add_collection3d(Poly3DCollection(faces, facecolors='green',\n linewidths=1, edgecolors='green', alpha=0.25))\n plt.show()\n\n\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<docstring token>\n",
"<import token>\n\n\ndef ploting_boxes(boxes, uncer_boxes, var=[0, 1], B=[[-20, 20], [-20, 20]],\n x=0.1, nodes=[], cusps=[], uncer_Solutions=[], Legend=False, color=\n 'green', variabel_name='x'):\n fig, ax = plt.subplots()\n ax.set_xlim(B[0][0], B[0][1])\n ax.set_ylim(B[1][0], B[1][1])\n ax.set_xlabel(variabel_name + str(1))\n ax.set_ylabel(variabel_name + str(2))\n \"\"\"try:\n ax.title(open(\"system.txt\",\"r\").read())\n except:\n pass\"\"\"\n c = 0\n green_patch = mpatches.Patch(color=color, label='smooth part')\n red_patch = mpatches.Patch(color='red', label='unknown part')\n node_patch = mpatches.Patch(color='black', label='Certified nodes',\n fill=None)\n cusp_patch = mpatches.Patch(color='blue', label=\n 'Projection of certified solution with t=0 ', fill=None)\n if Legend == True:\n plt.legend(handles=[green_patch, red_patch, node_patch, cusp_patch])\n for box in boxes:\n rectangle = plt.Rectangle((box[var[0]][0], box[var[1]][0]), box[var\n [0]][1] - box[var[0]][0], box[var[1]][1] - box[var[1]][0],\n color=color)\n plt.gca().add_patch(rectangle)\n for box in uncer_boxes:\n rectangle = plt.Rectangle((box[var[0]][0], box[var[1]][0]), box[var\n [0]][1] - box[var[0]][0], box[var[1]][1] - box[var[1]][0], fc='r')\n plt.gca().add_patch(rectangle)\n for box in nodes:\n rectangle = plt.Rectangle((box[0][0] - x, box[1][0] - x), 2 * x +\n box[0][1] - box[0][0], 2 * x + box[1][1] - box[1][0], fc='y',\n fill=None)\n plt.gca().add_patch(rectangle)\n for box in cusps:\n rectangle = plt.Rectangle((box[0][0] - x, box[1][0] - x), 2 * x +\n box[0][1] - box[0][0], 2 * x + box[1][1] - box[1][0], fc='y',\n color='blue', fill=None)\n plt.gca().add_patch(rectangle)\n for box in uncer_Solutions:\n rectangle = plt.Rectangle((box[0][0] - x, box[1][0] - x), 2 * x +\n box[0][1] - box[0][0], 2 * x + box[1][1] - box[1][0], fc='y',\n color='red', fill=None)\n plt.gca().add_patch(rectangle)\n plt.savefig('fig.jpg', dpi=1000)\n plt.show()\n\n\n<function token>\n\n\ndef Ball_solver(equations, B_Ball, X):\n L = [B_Ball]\n certified_boxes = []\n uncertified_boxes = []\n n = len(X)\n while len(L) != 0:\n solvability = 1\n if B_Ball[2 * n - 2][0] <= 0 <= B_Ball[2 * n - 2][1] and d.width([d\n .ftconstructor(Bi[0], Bi[1]) for Bi in L[0]]) < 0.1:\n Ball_cusp_gen(equations, B_Ball, X)\n elif (B_Ball[2 * n - 2][0] > 0 or 0 > B_Ball[2 * n - 2][1]\n ) and d.width([d.ftconstructor(Bi[0], Bi[1]) for Bi in L[0]]\n ) < 0.1:\n Ball_node_gen(equations, B_Ball, X)\n else:\n children = cb.plane_subdivision(L[0])\n L.remove(L[0])\n L += children\n solvability = 0\n if solvability == 1:\n ibex_output = cb.solving_with_ibex()\n if ibex_output[0] == 'Empty':\n L.remove(L[0])\n elif len(ibex_output[0]) != 0:\n certified_boxes += cb.computing_boxes(ibex_output[0])\n L.remove(L[0])\n elif len(ibex_output[1]) != 0:\n uncertified_boxes += cb.computing_boxes(ibex_output[1])\n L.remove(L[0])\n else:\n children = cb.plane_subdivision(L[0])\n L.remove(L[0])\n L += children\n return [certified_boxes, uncertified_boxes]\n\n\ndef SDP_str(P, X):\n n = len(X)\n P_pluse = P[:]\n P_minus = P[:]\n for i in range(2, n):\n P_pluse = P_pluse.replace('x' + str(i + 1), '(x' + str(i + 1) +\n '+ r' + str(i + 1) + '*sqrt(t))')\n P_minus = P_minus.replace('x' + str(i + 1), '(x' + str(i + 1) +\n '- r' + str(i + 1) + '*sqrt(t))')\n SP = '0.5*(' + P_pluse + '+' + P_minus + ')=0; \\n'\n DP = '0.5*(' + P_pluse + '- (' + P_minus + ') )/(sqrt(t))=0; \\n'\n return [SP, DP]\n\n\ndef Ball_generating_system(P, B_Ball, X, eps_min=0.001):\n n = len(X)\n V = ' Variables \\n '\n for i in range(n):\n if B_Ball[i][0] != B_Ball[i][1]:\n V += 'x' + str(i + 1) + ' in ' + str(B_Ball[i]) + ' ; \\n'\n else:\n V += 'x' + str(i + 1) + ' in ' + str([B_Ball[i][0] - eps_min, \n B_Ball[i][1] + eps_min]) + ' ; \\n'\n for i in range(n, 2 * n - 2):\n V += 'r' + str(i - n + 3) + ' in ' + str(B_Ball[i]) + ' ; \\n'\n V += 't' + ' in ' + str(B_Ball[2 * n - 2]) + ' ; \\n'\n V += 'Constraints \\n'\n for Pi in P:\n V += SDP_str(Pi, X)[0]\n V += SDP_str(Pi, X)[1]\n last_eq = ''\n for i in range(3, n):\n last_eq += 'r' + str(i) + '^2+'\n last_eq += 'r' + str(n) + '^2 -1=0;'\n V += last_eq + '\\n'\n f = open('eq.txt', 'w+')\n f.write(V)\n f.write('end')\n f.close()\n\n\n<function token>\n\n\ndef intersting_boxes(curve, b):\n cer_intersting_boxes = []\n uncer_intersting_boxes = []\n for box in curve[0]:\n if b[0][0] <= box[0][0] <= box[0][1] <= b[0][1] and b[1][0] <= box[1][0\n ] <= box[1][1] <= b[1][1]:\n cer_intersting_boxes.append(box)\n for box in curve[1]:\n if b[0][0] <= box[0][0] <= box[0][1] <= b[0][1] and b[1][0] <= box[1][0\n ] <= box[1][1] <= b[1][1]:\n uncer_intersting_boxes.append(box)\n return [cer_intersting_boxes, uncer_intersting_boxes]\n\n\n<function token>\n\n\ndef estimating_t1(components, upper_bound=200000):\n t1 = upper_bound\n t2 = 0\n for box1 in components[0]:\n for box2 in components[1]:\n a = d.distance(box1, box2).lower()\n b = d.distance(box1, box2).upper()\n if t1 > a:\n t1 = a\n if t2 < b:\n t2 = b\n t = d.ftconstructor(t1, t2)\n t = 0.25 * d.power_interval(t, 2)\n return [float(t.lower()), float(t.upper())]\n\n\ndef estimating_t(components, upper_bound=19000.8):\n t1 = upper_bound\n t2 = 0\n for box1 in components[0]:\n for box2 in components[1]:\n a = d.distance(box1[2:], box2[2:])\n if t1 > a[0]:\n t1 = a[0]\n if t2 < a[1]:\n t2 = a[1]\n t = d.ftconstructor(t1, t2)\n t = 0.25 * d.power_interval(t, 2)\n return [float(t.lower()), float(t.upper())]\n\n\ndef boxes_compare(box1, box2):\n flage = 0\n for i in range(len(box1) - 1, -1, -1):\n if box1[i][0] > box2[i][0]:\n return 1\n if box1[i][0] < box2[i][0]:\n return -1\n return 0\n\n\ndef boxes_sort(boxes):\n sorted_boxes = boxes[:]\n for i in range(len(boxes) - 1):\n for j in range(i + 1, len(boxes)):\n if boxes_compare(sorted_boxes[i], sorted_boxes[j]) == 1:\n sorted_boxes[i], sorted_boxes[j] = sorted_boxes[j\n ], sorted_boxes[i]\n return sorted_boxes\n\n\ndef connected_compnants(boxes):\n ftboxes = boxes[:]\n components = [[ftboxes[0]]]\n for i in range(1, len(ftboxes)):\n boxi_isused = 0\n for j in range(len(components)):\n membership = 0\n for k in range(len(components[j])):\n if d.boxes_intersection(ftboxes[i], components[j][k]) != []:\n components[j].append(ftboxes[i])\n membership = 1\n boxi_isused = 1\n break\n if membership == 1:\n break\n if boxi_isused == 0:\n components.append([ftboxes[i]])\n unused = list(range(len(components)))\n components1 = components[:]\n components2 = []\n while len(components1) != len(components2):\n for i in unused:\n for j in [j for j in list(range(i + 1, len(components))) if j in\n unused]:\n intersection_exists = False\n is_looping = True\n for boxi in components[i]:\n for boxj in components[j]:\n if d.boxes_intersection(boxi, boxj) != []:\n is_looping = False\n intersection_exists = True\n break\n if is_looping == False:\n break\n if intersection_exists == True:\n components[i] += components[j]\n unused.remove(j)\n components2 = components1[:]\n components1 = [components[k] for k in unused]\n return components1\n\n\ndef planner_connected_compnants(boxes):\n if len(boxes) == 0:\n return []\n ftboxes = boxes[:]\n components = [[ftboxes[0]]]\n for i in range(1, len(ftboxes)):\n boxi_isused = 0\n for j in range(len(components)):\n membership = 0\n for k in range(len(components[j])):\n if d.boxes_intersection(ftboxes[i][:2], components[j][k][:2]\n ) != []:\n components[j].append(ftboxes[i])\n membership = 1\n boxi_isused = 1\n break\n if membership == 1:\n break\n if boxi_isused == 0:\n components.append([ftboxes[i]])\n unused = list(range(len(components)))\n components1 = components[:]\n components2 = []\n while len(components1) != len(components2):\n for i in unused:\n for j in [j for j in list(range(i + 1, len(components))) if j in\n unused]:\n intersection_exists = False\n is_looping = True\n for boxi in components[i]:\n for boxj in components[j]:\n if d.boxes_intersection(boxi[:2], boxj[:2]) != []:\n is_looping = False\n intersection_exists = True\n break\n if is_looping == False:\n break\n if intersection_exists == True:\n components[i] += components[j]\n unused.remove(j)\n components2 = components1[:]\n components1 = [components[k] for k in unused]\n return components1\n\n\n<function token>\n<function token>\n\n\ndef intersect_in_2D(class1, class2, monotonicity=1):\n pl_intesected_pairs = []\n if monotonicity == 1:\n for i in range(len(class1)):\n for j in range(len(class2)):\n if d.boxes_intersection(class1[i][:2], class2[j][:2]) != [\n ] and d.boxes_intersection(class1[i], class2[j]) == []:\n if [class2[j], class1[i]] not in pl_intesected_pairs:\n pl_intesected_pairs.append([class1[i], class2[j]])\n elif monotonicity == 0:\n for i in range(len(class1)):\n for j in range(len(class2)):\n if d.boxes_intersection(class1[i][:2], class2[j][:2]) != []:\n if [class2[j], class1[i]] not in pl_intesected_pairs:\n pl_intesected_pairs.append([class1[i], class2[j]])\n elif monotonicity == 2:\n inters_indic = []\n for i in range(len(class1)):\n inters_indic.append([])\n for j in range(len(class2)):\n if d.boxes_intersection(class1[i][:2], class2[j][:2]) != []:\n inters_indic[i] = inters_indic[i] + [j]\n for k in range(len(class1)):\n if len(inters_indic[k]) > 3:\n for j in range(len(inters_indic[k])):\n if [class2[j], class1[k]] not in pl_intesected_pairs:\n pl_intesected_pairs.append([class1[k], class2[j]])\n return pl_intesected_pairs\n\n\ndef solving_fornodes(equations, boxes, B, X, eps=0.1):\n plane_components = detecting_nodes(boxes, B, equations, X, eps)\n g = open(equations, 'r')\n P = [Pi.replace('\\n', '') for Pi in g.readlines()]\n Ball_solutions = []\n for plane_component in plane_components:\n x1 = float(min([ai[0].lower() for ai in plane_component]))\n x2 = float(max([ai[0].upper() for ai in plane_component]))\n y1 = float(min([ai[1].lower() for ai in plane_component]))\n y2 = float(max([ai[1].upper() for ai in plane_component]))\n components = connected_compnants(plane_component)\n r = [[float(ri[0]), float(ri[1])] for ri in estimating_r(components)]\n t = estimating_t(components)\n t = [float(t[0]), float(t[1])]\n B_Ball = [[x1, x2], [y1, y2]] + r + [t]\n Ball_generating_system(P, B_Ball, X)\n solutionsi = ibex_output(P, B_Ball, X)\n Ball_solutions += solutionsi\n return Ball_solutions\n\n\n<function token>\n\n\ndef plane_subdivision(B):\n ft_B2 = d.subdivide([d.ftconstructor(Bi[0], Bi[1]) for Bi in B[:2]])\n normal_B2 = [d.ft_normal(Bi) for Bi in ft_B2]\n return d.cartesian_product(normal_B2, [B[2:]])\n\n\n<function token>\n<function token>\n\n\ndef computing_boxes():\n if 'infeasible' in open('output.txt', 'r').read():\n return 'Empty'\n content = open('output.txt', 'r').readlines()\n cer = []\n uncer = []\n i = 0\n Answer = []\n for fi in content:\n try:\n a = fi.index('(')\n b = fi.index(')')\n T = fi[a:b + 1].replace('(', '[')\n T = fi[a:b + 1].replace('(', '[')\n T = T.replace(')', ']')\n T = T.split(';')\n E = []\n i = 0\n for Ti in T:\n Ti = Ti.replace('[', '')\n Ti = Ti.replace(']', '')\n Ti = Ti.replace('<', '')\n Ti = Ti.replace('>', '')\n x = Ti.index(',')\n a = float(Ti[:x])\n b = float(Ti[x + 1:])\n E.append([])\n E[i] = [a, b]\n i += 1\n if 'solution n' in fi or 'boundary n' in fi:\n cer.append(E)\n elif 'unknown n' in fi:\n uncer.append(E)\n except ValueError:\n pass\n return [cer, uncer]\n\n\ndef enclosing_curve(system, B, X, eps_min=0.1, eps_max=0.1):\n L = [B]\n certified_boxes = []\n uncertified_boxes = []\n while len(L) != 0:\n system_generator(system, L[0], X)\n os.system('ibexsolve --eps-max=' + str(eps_max) + ' --eps-min=' +\n str(eps_min) + ' -s eq.txt > output.txt')\n content = open('output.txt', 'r').readlines()\n ibex_output = computing_boxes()\n if ibex_output == [[], []] and max([(Bi[1] - Bi[0]) for Bi in L[0]]\n ) < eps_min:\n uncertified_boxes.append(L[0])\n L.remove(L[0])\n elif ibex_output == [[], []]:\n children = plane_subdivision(L[0])\n L.remove(L[0])\n L += children\n elif ibex_output == 'Empty':\n L.remove(L[0])\n else:\n if len(ibex_output[0]) != 0:\n certified_boxes += ibex_output[0]\n if len(ibex_output[1]) != 0:\n uncertified_boxes += ibex_output[1]\n L.remove(L[0])\n return [certified_boxes, uncertified_boxes]\n\n\n<function token>\n\n\ndef eval_file_gen(f, boxes, X, special_function=[]):\n functions = ['sin', 'cos', 'tan', 'exp'] + special_function\n if len(boxes[0]) == 0:\n return []\n n = len(boxes[0])\n m = len(boxes)\n g = open(f, 'r')\n P_str = g.readlines()\n P_str = [Pi.replace('\\n', '') for Pi in P_str]\n P_str = [Pi.replace('^', '**') for Pi in P_str]\n P_exp = [parse_expr(Pi) for Pi in P_str]\n jac = sp.Matrix(P_str).jacobian(sp.Matrix(X))\n minor1 = jac[:, 1:].det()\n minor2 = jac[:, [i for i in range(n) if i != 1]].det()\n fil = open('evaluation_file1.py', 'w')\n fil.write('import flint as ft \\n')\n fil.write('import sympy as sp \\n')\n fil.write('import interval_arithmetic as d \\n')\n fil.write('boxes=' + str(boxes) + '\\n')\n fil.write(\n 'ftboxes=[ [d.ftconstructor(Bi[0],Bi[1]) for Bi in B ] for B in boxes ] \\n'\n )\n fil.write('n=len(boxes[0])\\n')\n fil.write('m=len(boxes)\\n')\n fil.write('m1=[]\\n')\n fil.write('m2=[]\\n')\n minor1_str = str(minor1)\n minor2_str = str(minor2)\n for i in range(n):\n minor1_str = minor1_str.replace('x' + str(i + 1), 'B[' + str(i) + ']')\n minor2_str = minor2_str.replace('x' + str(i + 1), 'B[' + str(i) + ']')\n for func in functions:\n minor1_str = minor1_str.replace(func, 'ft.arb.' + func)\n minor2_str = minor2_str.replace(func, 'ft.arb.' + func)\n fil.write('for B in ftboxes: \\n')\n fil.write(' m1.append(ft.arb(' + minor1_str + ')) \\n')\n fil.write(' m2.append( ft.arb(' + minor2_str + ')) \\n')\n fil.write(\n 'innrer_loops=[i for i in range(m) if 0 in m1[i] and 0 in m2[i] ]\\n')\n fil.write('print(innrer_loops)\\n')\n fil.close()\n t = os.popen('python3 evaluation_file1.py ').read()\n return t\n\n\n<function token>\n\n\ndef projection_checker(solutions):\n if len(solutions) == 0:\n return [[], []]\n m = len(solutions[0])\n n = int((m + 1) / 2)\n intersect_in2d = [[]] * len(solutions)\n for i in range(len(solutions) - 1):\n for j in range(i + 1, len(solutions)):\n if solutions[i] == solutions[j]:\n continue\n elif d.boxes_intersection(solutions[i][:2], solutions[j][:2]) != [\n ] and (d.boxes_intersection(solutions[i][n:2 * n - 2], [[-\n Bi[1], -Bi[0]] for Bi in solutions[j][n:2 * n - 2]]) == [] and\n d.boxes_intersection(solutions[i][n:2 * n - 2], [[Bi[0], Bi\n [1]] for Bi in solutions[j][n:2 * n - 2]]) == [] or d.\n boxes_intersection(solutions[i][2:n] + [solutions[i][2 * n -\n 2]], solutions[j][2:n] + [solutions[j][2 * n - 2]]) == []):\n intersect_in2d[i] = intersect_in2d[i] + [j]\n accepted = []\n acc_ind = []\n unaccepted = []\n unacc_ind = []\n for i in range(len(solutions)):\n if len(intersect_in2d[i]) == 0 and i not in unacc_ind + acc_ind:\n accepted.append(solutions[i])\n acc_ind.append(i)\n continue\n elif i not in unacc_ind + acc_ind:\n unaccepted.append(solutions[i])\n unacc_ind.append(i)\n for k in intersect_in2d[i]:\n if k not in unacc_ind:\n unaccepted.append(solutions[k])\n unacc_ind.append(k)\n return [accepted, unaccepted]\n\n\n<function token>\n<function token>\n\n\ndef checking_assumptions(curve_data):\n if len(curve_data[0][1]) != 0:\n return 0\n Ball_sols_ft = [[d.ftconstructor(Bi[0], Bi[1]) for Bi in B] for B in\n curve_data[1][0]] + [[d.ftconstructor(Bi[0], Bi[1]) for Bi in B] for\n B in curve_data[1][1]]\n alph3 = assum_alph3_checker(Ball_sols_ft)\n if alph3 == 1:\n return 1\n else:\n return 0\n\n\n<function token>\n<function token>\n\n\ndef assum_alph3_checker(solutions):\n comparing_list = [[]] * len(solutions)\n for i in range(len(solutions) - 1):\n for j in range(i + 1, len(solutions)):\n if d.boxes_intersection(solutions[i][:2], solutions[j][:2]) != []:\n comparing_list[i].append(j)\n comparing_list[j].append(i)\n matching = [len(T) for T in comparing_list]\n if max(matching) <= 2:\n return 1\n else:\n return 0\n\n\ndef plotting_3D(boxes, Box, var=[0, 1, 2]):\n ax = plt.figure().add_subplot(111, projection='3d')\n ax.set_xlim(Box[0][0], Box[0][1])\n ax.set_ylim(Box[1][0], Box[1][1])\n ax.set_zlim(Box[2][0], Box[2][1])\n ax.set_xlabel('x' + str(var[0] + 1))\n ax.set_ylabel('x' + str(var[1] + 1))\n ax.set_zlabel('x' + str(var[2] + 1))\n for box in boxes:\n V = [[box[j][0] for j in range(3)], [box[j][1] for j in range(3)]]\n points = list(itertools.product(*box))\n faces = [[points[0], points[2], points[6], points[4]], [points[0],\n points[2], points[3], points[1]], [points[0], points[1], points\n [5], points[4]], [points[2], points[3], points[7], points[6]],\n [points[1], points[3], points[7], points[5]]]\n ax.add_collection3d(Poly3DCollection(faces, facecolors='green',\n linewidths=1, edgecolors='green', alpha=0.25))\n plt.show()\n\n\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<docstring token>\n",
"<import token>\n\n\ndef ploting_boxes(boxes, uncer_boxes, var=[0, 1], B=[[-20, 20], [-20, 20]],\n x=0.1, nodes=[], cusps=[], uncer_Solutions=[], Legend=False, color=\n 'green', variabel_name='x'):\n fig, ax = plt.subplots()\n ax.set_xlim(B[0][0], B[0][1])\n ax.set_ylim(B[1][0], B[1][1])\n ax.set_xlabel(variabel_name + str(1))\n ax.set_ylabel(variabel_name + str(2))\n \"\"\"try:\n ax.title(open(\"system.txt\",\"r\").read())\n except:\n pass\"\"\"\n c = 0\n green_patch = mpatches.Patch(color=color, label='smooth part')\n red_patch = mpatches.Patch(color='red', label='unknown part')\n node_patch = mpatches.Patch(color='black', label='Certified nodes',\n fill=None)\n cusp_patch = mpatches.Patch(color='blue', label=\n 'Projection of certified solution with t=0 ', fill=None)\n if Legend == True:\n plt.legend(handles=[green_patch, red_patch, node_patch, cusp_patch])\n for box in boxes:\n rectangle = plt.Rectangle((box[var[0]][0], box[var[1]][0]), box[var\n [0]][1] - box[var[0]][0], box[var[1]][1] - box[var[1]][0],\n color=color)\n plt.gca().add_patch(rectangle)\n for box in uncer_boxes:\n rectangle = plt.Rectangle((box[var[0]][0], box[var[1]][0]), box[var\n [0]][1] - box[var[0]][0], box[var[1]][1] - box[var[1]][0], fc='r')\n plt.gca().add_patch(rectangle)\n for box in nodes:\n rectangle = plt.Rectangle((box[0][0] - x, box[1][0] - x), 2 * x +\n box[0][1] - box[0][0], 2 * x + box[1][1] - box[1][0], fc='y',\n fill=None)\n plt.gca().add_patch(rectangle)\n for box in cusps:\n rectangle = plt.Rectangle((box[0][0] - x, box[1][0] - x), 2 * x +\n box[0][1] - box[0][0], 2 * x + box[1][1] - box[1][0], fc='y',\n color='blue', fill=None)\n plt.gca().add_patch(rectangle)\n for box in uncer_Solutions:\n rectangle = plt.Rectangle((box[0][0] - x, box[1][0] - x), 2 * x +\n box[0][1] - box[0][0], 2 * x + box[1][1] - box[1][0], fc='y',\n color='red', fill=None)\n plt.gca().add_patch(rectangle)\n plt.savefig('fig.jpg', dpi=1000)\n plt.show()\n\n\n<function token>\n\n\ndef Ball_solver(equations, B_Ball, X):\n L = [B_Ball]\n certified_boxes = []\n uncertified_boxes = []\n n = len(X)\n while len(L) != 0:\n solvability = 1\n if B_Ball[2 * n - 2][0] <= 0 <= B_Ball[2 * n - 2][1] and d.width([d\n .ftconstructor(Bi[0], Bi[1]) for Bi in L[0]]) < 0.1:\n Ball_cusp_gen(equations, B_Ball, X)\n elif (B_Ball[2 * n - 2][0] > 0 or 0 > B_Ball[2 * n - 2][1]\n ) and d.width([d.ftconstructor(Bi[0], Bi[1]) for Bi in L[0]]\n ) < 0.1:\n Ball_node_gen(equations, B_Ball, X)\n else:\n children = cb.plane_subdivision(L[0])\n L.remove(L[0])\n L += children\n solvability = 0\n if solvability == 1:\n ibex_output = cb.solving_with_ibex()\n if ibex_output[0] == 'Empty':\n L.remove(L[0])\n elif len(ibex_output[0]) != 0:\n certified_boxes += cb.computing_boxes(ibex_output[0])\n L.remove(L[0])\n elif len(ibex_output[1]) != 0:\n uncertified_boxes += cb.computing_boxes(ibex_output[1])\n L.remove(L[0])\n else:\n children = cb.plane_subdivision(L[0])\n L.remove(L[0])\n L += children\n return [certified_boxes, uncertified_boxes]\n\n\ndef SDP_str(P, X):\n n = len(X)\n P_pluse = P[:]\n P_minus = P[:]\n for i in range(2, n):\n P_pluse = P_pluse.replace('x' + str(i + 1), '(x' + str(i + 1) +\n '+ r' + str(i + 1) + '*sqrt(t))')\n P_minus = P_minus.replace('x' + str(i + 1), '(x' + str(i + 1) +\n '- r' + str(i + 1) + '*sqrt(t))')\n SP = '0.5*(' + P_pluse + '+' + P_minus + ')=0; \\n'\n DP = '0.5*(' + P_pluse + '- (' + P_minus + ') )/(sqrt(t))=0; \\n'\n return [SP, DP]\n\n\ndef Ball_generating_system(P, B_Ball, X, eps_min=0.001):\n n = len(X)\n V = ' Variables \\n '\n for i in range(n):\n if B_Ball[i][0] != B_Ball[i][1]:\n V += 'x' + str(i + 1) + ' in ' + str(B_Ball[i]) + ' ; \\n'\n else:\n V += 'x' + str(i + 1) + ' in ' + str([B_Ball[i][0] - eps_min, \n B_Ball[i][1] + eps_min]) + ' ; \\n'\n for i in range(n, 2 * n - 2):\n V += 'r' + str(i - n + 3) + ' in ' + str(B_Ball[i]) + ' ; \\n'\n V += 't' + ' in ' + str(B_Ball[2 * n - 2]) + ' ; \\n'\n V += 'Constraints \\n'\n for Pi in P:\n V += SDP_str(Pi, X)[0]\n V += SDP_str(Pi, X)[1]\n last_eq = ''\n for i in range(3, n):\n last_eq += 'r' + str(i) + '^2+'\n last_eq += 'r' + str(n) + '^2 -1=0;'\n V += last_eq + '\\n'\n f = open('eq.txt', 'w+')\n f.write(V)\n f.write('end')\n f.close()\n\n\n<function token>\n\n\ndef intersting_boxes(curve, b):\n cer_intersting_boxes = []\n uncer_intersting_boxes = []\n for box in curve[0]:\n if b[0][0] <= box[0][0] <= box[0][1] <= b[0][1] and b[1][0] <= box[1][0\n ] <= box[1][1] <= b[1][1]:\n cer_intersting_boxes.append(box)\n for box in curve[1]:\n if b[0][0] <= box[0][0] <= box[0][1] <= b[0][1] and b[1][0] <= box[1][0\n ] <= box[1][1] <= b[1][1]:\n uncer_intersting_boxes.append(box)\n return [cer_intersting_boxes, uncer_intersting_boxes]\n\n\n<function token>\n\n\ndef estimating_t1(components, upper_bound=200000):\n t1 = upper_bound\n t2 = 0\n for box1 in components[0]:\n for box2 in components[1]:\n a = d.distance(box1, box2).lower()\n b = d.distance(box1, box2).upper()\n if t1 > a:\n t1 = a\n if t2 < b:\n t2 = b\n t = d.ftconstructor(t1, t2)\n t = 0.25 * d.power_interval(t, 2)\n return [float(t.lower()), float(t.upper())]\n\n\n<function token>\n\n\ndef boxes_compare(box1, box2):\n flage = 0\n for i in range(len(box1) - 1, -1, -1):\n if box1[i][0] > box2[i][0]:\n return 1\n if box1[i][0] < box2[i][0]:\n return -1\n return 0\n\n\ndef boxes_sort(boxes):\n sorted_boxes = boxes[:]\n for i in range(len(boxes) - 1):\n for j in range(i + 1, len(boxes)):\n if boxes_compare(sorted_boxes[i], sorted_boxes[j]) == 1:\n sorted_boxes[i], sorted_boxes[j] = sorted_boxes[j\n ], sorted_boxes[i]\n return sorted_boxes\n\n\ndef connected_compnants(boxes):\n ftboxes = boxes[:]\n components = [[ftboxes[0]]]\n for i in range(1, len(ftboxes)):\n boxi_isused = 0\n for j in range(len(components)):\n membership = 0\n for k in range(len(components[j])):\n if d.boxes_intersection(ftboxes[i], components[j][k]) != []:\n components[j].append(ftboxes[i])\n membership = 1\n boxi_isused = 1\n break\n if membership == 1:\n break\n if boxi_isused == 0:\n components.append([ftboxes[i]])\n unused = list(range(len(components)))\n components1 = components[:]\n components2 = []\n while len(components1) != len(components2):\n for i in unused:\n for j in [j for j in list(range(i + 1, len(components))) if j in\n unused]:\n intersection_exists = False\n is_looping = True\n for boxi in components[i]:\n for boxj in components[j]:\n if d.boxes_intersection(boxi, boxj) != []:\n is_looping = False\n intersection_exists = True\n break\n if is_looping == False:\n break\n if intersection_exists == True:\n components[i] += components[j]\n unused.remove(j)\n components2 = components1[:]\n components1 = [components[k] for k in unused]\n return components1\n\n\ndef planner_connected_compnants(boxes):\n if len(boxes) == 0:\n return []\n ftboxes = boxes[:]\n components = [[ftboxes[0]]]\n for i in range(1, len(ftboxes)):\n boxi_isused = 0\n for j in range(len(components)):\n membership = 0\n for k in range(len(components[j])):\n if d.boxes_intersection(ftboxes[i][:2], components[j][k][:2]\n ) != []:\n components[j].append(ftboxes[i])\n membership = 1\n boxi_isused = 1\n break\n if membership == 1:\n break\n if boxi_isused == 0:\n components.append([ftboxes[i]])\n unused = list(range(len(components)))\n components1 = components[:]\n components2 = []\n while len(components1) != len(components2):\n for i in unused:\n for j in [j for j in list(range(i + 1, len(components))) if j in\n unused]:\n intersection_exists = False\n is_looping = True\n for boxi in components[i]:\n for boxj in components[j]:\n if d.boxes_intersection(boxi[:2], boxj[:2]) != []:\n is_looping = False\n intersection_exists = True\n break\n if is_looping == False:\n break\n if intersection_exists == True:\n components[i] += components[j]\n unused.remove(j)\n components2 = components1[:]\n components1 = [components[k] for k in unused]\n return components1\n\n\n<function token>\n<function token>\n\n\ndef intersect_in_2D(class1, class2, monotonicity=1):\n pl_intesected_pairs = []\n if monotonicity == 1:\n for i in range(len(class1)):\n for j in range(len(class2)):\n if d.boxes_intersection(class1[i][:2], class2[j][:2]) != [\n ] and d.boxes_intersection(class1[i], class2[j]) == []:\n if [class2[j], class1[i]] not in pl_intesected_pairs:\n pl_intesected_pairs.append([class1[i], class2[j]])\n elif monotonicity == 0:\n for i in range(len(class1)):\n for j in range(len(class2)):\n if d.boxes_intersection(class1[i][:2], class2[j][:2]) != []:\n if [class2[j], class1[i]] not in pl_intesected_pairs:\n pl_intesected_pairs.append([class1[i], class2[j]])\n elif monotonicity == 2:\n inters_indic = []\n for i in range(len(class1)):\n inters_indic.append([])\n for j in range(len(class2)):\n if d.boxes_intersection(class1[i][:2], class2[j][:2]) != []:\n inters_indic[i] = inters_indic[i] + [j]\n for k in range(len(class1)):\n if len(inters_indic[k]) > 3:\n for j in range(len(inters_indic[k])):\n if [class2[j], class1[k]] not in pl_intesected_pairs:\n pl_intesected_pairs.append([class1[k], class2[j]])\n return pl_intesected_pairs\n\n\ndef solving_fornodes(equations, boxes, B, X, eps=0.1):\n plane_components = detecting_nodes(boxes, B, equations, X, eps)\n g = open(equations, 'r')\n P = [Pi.replace('\\n', '') for Pi in g.readlines()]\n Ball_solutions = []\n for plane_component in plane_components:\n x1 = float(min([ai[0].lower() for ai in plane_component]))\n x2 = float(max([ai[0].upper() for ai in plane_component]))\n y1 = float(min([ai[1].lower() for ai in plane_component]))\n y2 = float(max([ai[1].upper() for ai in plane_component]))\n components = connected_compnants(plane_component)\n r = [[float(ri[0]), float(ri[1])] for ri in estimating_r(components)]\n t = estimating_t(components)\n t = [float(t[0]), float(t[1])]\n B_Ball = [[x1, x2], [y1, y2]] + r + [t]\n Ball_generating_system(P, B_Ball, X)\n solutionsi = ibex_output(P, B_Ball, X)\n Ball_solutions += solutionsi\n return Ball_solutions\n\n\n<function token>\n\n\ndef plane_subdivision(B):\n ft_B2 = d.subdivide([d.ftconstructor(Bi[0], Bi[1]) for Bi in B[:2]])\n normal_B2 = [d.ft_normal(Bi) for Bi in ft_B2]\n return d.cartesian_product(normal_B2, [B[2:]])\n\n\n<function token>\n<function token>\n\n\ndef computing_boxes():\n if 'infeasible' in open('output.txt', 'r').read():\n return 'Empty'\n content = open('output.txt', 'r').readlines()\n cer = []\n uncer = []\n i = 0\n Answer = []\n for fi in content:\n try:\n a = fi.index('(')\n b = fi.index(')')\n T = fi[a:b + 1].replace('(', '[')\n T = fi[a:b + 1].replace('(', '[')\n T = T.replace(')', ']')\n T = T.split(';')\n E = []\n i = 0\n for Ti in T:\n Ti = Ti.replace('[', '')\n Ti = Ti.replace(']', '')\n Ti = Ti.replace('<', '')\n Ti = Ti.replace('>', '')\n x = Ti.index(',')\n a = float(Ti[:x])\n b = float(Ti[x + 1:])\n E.append([])\n E[i] = [a, b]\n i += 1\n if 'solution n' in fi or 'boundary n' in fi:\n cer.append(E)\n elif 'unknown n' in fi:\n uncer.append(E)\n except ValueError:\n pass\n return [cer, uncer]\n\n\ndef enclosing_curve(system, B, X, eps_min=0.1, eps_max=0.1):\n L = [B]\n certified_boxes = []\n uncertified_boxes = []\n while len(L) != 0:\n system_generator(system, L[0], X)\n os.system('ibexsolve --eps-max=' + str(eps_max) + ' --eps-min=' +\n str(eps_min) + ' -s eq.txt > output.txt')\n content = open('output.txt', 'r').readlines()\n ibex_output = computing_boxes()\n if ibex_output == [[], []] and max([(Bi[1] - Bi[0]) for Bi in L[0]]\n ) < eps_min:\n uncertified_boxes.append(L[0])\n L.remove(L[0])\n elif ibex_output == [[], []]:\n children = plane_subdivision(L[0])\n L.remove(L[0])\n L += children\n elif ibex_output == 'Empty':\n L.remove(L[0])\n else:\n if len(ibex_output[0]) != 0:\n certified_boxes += ibex_output[0]\n if len(ibex_output[1]) != 0:\n uncertified_boxes += ibex_output[1]\n L.remove(L[0])\n return [certified_boxes, uncertified_boxes]\n\n\n<function token>\n\n\ndef eval_file_gen(f, boxes, X, special_function=[]):\n functions = ['sin', 'cos', 'tan', 'exp'] + special_function\n if len(boxes[0]) == 0:\n return []\n n = len(boxes[0])\n m = len(boxes)\n g = open(f, 'r')\n P_str = g.readlines()\n P_str = [Pi.replace('\\n', '') for Pi in P_str]\n P_str = [Pi.replace('^', '**') for Pi in P_str]\n P_exp = [parse_expr(Pi) for Pi in P_str]\n jac = sp.Matrix(P_str).jacobian(sp.Matrix(X))\n minor1 = jac[:, 1:].det()\n minor2 = jac[:, [i for i in range(n) if i != 1]].det()\n fil = open('evaluation_file1.py', 'w')\n fil.write('import flint as ft \\n')\n fil.write('import sympy as sp \\n')\n fil.write('import interval_arithmetic as d \\n')\n fil.write('boxes=' + str(boxes) + '\\n')\n fil.write(\n 'ftboxes=[ [d.ftconstructor(Bi[0],Bi[1]) for Bi in B ] for B in boxes ] \\n'\n )\n fil.write('n=len(boxes[0])\\n')\n fil.write('m=len(boxes)\\n')\n fil.write('m1=[]\\n')\n fil.write('m2=[]\\n')\n minor1_str = str(minor1)\n minor2_str = str(minor2)\n for i in range(n):\n minor1_str = minor1_str.replace('x' + str(i + 1), 'B[' + str(i) + ']')\n minor2_str = minor2_str.replace('x' + str(i + 1), 'B[' + str(i) + ']')\n for func in functions:\n minor1_str = minor1_str.replace(func, 'ft.arb.' + func)\n minor2_str = minor2_str.replace(func, 'ft.arb.' + func)\n fil.write('for B in ftboxes: \\n')\n fil.write(' m1.append(ft.arb(' + minor1_str + ')) \\n')\n fil.write(' m2.append( ft.arb(' + minor2_str + ')) \\n')\n fil.write(\n 'innrer_loops=[i for i in range(m) if 0 in m1[i] and 0 in m2[i] ]\\n')\n fil.write('print(innrer_loops)\\n')\n fil.close()\n t = os.popen('python3 evaluation_file1.py ').read()\n return t\n\n\n<function token>\n\n\ndef projection_checker(solutions):\n if len(solutions) == 0:\n return [[], []]\n m = len(solutions[0])\n n = int((m + 1) / 2)\n intersect_in2d = [[]] * len(solutions)\n for i in range(len(solutions) - 1):\n for j in range(i + 1, len(solutions)):\n if solutions[i] == solutions[j]:\n continue\n elif d.boxes_intersection(solutions[i][:2], solutions[j][:2]) != [\n ] and (d.boxes_intersection(solutions[i][n:2 * n - 2], [[-\n Bi[1], -Bi[0]] for Bi in solutions[j][n:2 * n - 2]]) == [] and\n d.boxes_intersection(solutions[i][n:2 * n - 2], [[Bi[0], Bi\n [1]] for Bi in solutions[j][n:2 * n - 2]]) == [] or d.\n boxes_intersection(solutions[i][2:n] + [solutions[i][2 * n -\n 2]], solutions[j][2:n] + [solutions[j][2 * n - 2]]) == []):\n intersect_in2d[i] = intersect_in2d[i] + [j]\n accepted = []\n acc_ind = []\n unaccepted = []\n unacc_ind = []\n for i in range(len(solutions)):\n if len(intersect_in2d[i]) == 0 and i not in unacc_ind + acc_ind:\n accepted.append(solutions[i])\n acc_ind.append(i)\n continue\n elif i not in unacc_ind + acc_ind:\n unaccepted.append(solutions[i])\n unacc_ind.append(i)\n for k in intersect_in2d[i]:\n if k not in unacc_ind:\n unaccepted.append(solutions[k])\n unacc_ind.append(k)\n return [accepted, unaccepted]\n\n\n<function token>\n<function token>\n\n\ndef checking_assumptions(curve_data):\n if len(curve_data[0][1]) != 0:\n return 0\n Ball_sols_ft = [[d.ftconstructor(Bi[0], Bi[1]) for Bi in B] for B in\n curve_data[1][0]] + [[d.ftconstructor(Bi[0], Bi[1]) for Bi in B] for\n B in curve_data[1][1]]\n alph3 = assum_alph3_checker(Ball_sols_ft)\n if alph3 == 1:\n return 1\n else:\n return 0\n\n\n<function token>\n<function token>\n\n\ndef assum_alph3_checker(solutions):\n comparing_list = [[]] * len(solutions)\n for i in range(len(solutions) - 1):\n for j in range(i + 1, len(solutions)):\n if d.boxes_intersection(solutions[i][:2], solutions[j][:2]) != []:\n comparing_list[i].append(j)\n comparing_list[j].append(i)\n matching = [len(T) for T in comparing_list]\n if max(matching) <= 2:\n return 1\n else:\n return 0\n\n\ndef plotting_3D(boxes, Box, var=[0, 1, 2]):\n ax = plt.figure().add_subplot(111, projection='3d')\n ax.set_xlim(Box[0][0], Box[0][1])\n ax.set_ylim(Box[1][0], Box[1][1])\n ax.set_zlim(Box[2][0], Box[2][1])\n ax.set_xlabel('x' + str(var[0] + 1))\n ax.set_ylabel('x' + str(var[1] + 1))\n ax.set_zlabel('x' + str(var[2] + 1))\n for box in boxes:\n V = [[box[j][0] for j in range(3)], [box[j][1] for j in range(3)]]\n points = list(itertools.product(*box))\n faces = [[points[0], points[2], points[6], points[4]], [points[0],\n points[2], points[3], points[1]], [points[0], points[1], points\n [5], points[4]], [points[2], points[3], points[7], points[6]],\n [points[1], points[3], points[7], points[5]]]\n ax.add_collection3d(Poly3DCollection(faces, facecolors='green',\n linewidths=1, edgecolors='green', alpha=0.25))\n plt.show()\n\n\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<docstring token>\n",
"<import token>\n\n\ndef ploting_boxes(boxes, uncer_boxes, var=[0, 1], B=[[-20, 20], [-20, 20]],\n x=0.1, nodes=[], cusps=[], uncer_Solutions=[], Legend=False, color=\n 'green', variabel_name='x'):\n fig, ax = plt.subplots()\n ax.set_xlim(B[0][0], B[0][1])\n ax.set_ylim(B[1][0], B[1][1])\n ax.set_xlabel(variabel_name + str(1))\n ax.set_ylabel(variabel_name + str(2))\n \"\"\"try:\n ax.title(open(\"system.txt\",\"r\").read())\n except:\n pass\"\"\"\n c = 0\n green_patch = mpatches.Patch(color=color, label='smooth part')\n red_patch = mpatches.Patch(color='red', label='unknown part')\n node_patch = mpatches.Patch(color='black', label='Certified nodes',\n fill=None)\n cusp_patch = mpatches.Patch(color='blue', label=\n 'Projection of certified solution with t=0 ', fill=None)\n if Legend == True:\n plt.legend(handles=[green_patch, red_patch, node_patch, cusp_patch])\n for box in boxes:\n rectangle = plt.Rectangle((box[var[0]][0], box[var[1]][0]), box[var\n [0]][1] - box[var[0]][0], box[var[1]][1] - box[var[1]][0],\n color=color)\n plt.gca().add_patch(rectangle)\n for box in uncer_boxes:\n rectangle = plt.Rectangle((box[var[0]][0], box[var[1]][0]), box[var\n [0]][1] - box[var[0]][0], box[var[1]][1] - box[var[1]][0], fc='r')\n plt.gca().add_patch(rectangle)\n for box in nodes:\n rectangle = plt.Rectangle((box[0][0] - x, box[1][0] - x), 2 * x +\n box[0][1] - box[0][0], 2 * x + box[1][1] - box[1][0], fc='y',\n fill=None)\n plt.gca().add_patch(rectangle)\n for box in cusps:\n rectangle = plt.Rectangle((box[0][0] - x, box[1][0] - x), 2 * x +\n box[0][1] - box[0][0], 2 * x + box[1][1] - box[1][0], fc='y',\n color='blue', fill=None)\n plt.gca().add_patch(rectangle)\n for box in uncer_Solutions:\n rectangle = plt.Rectangle((box[0][0] - x, box[1][0] - x), 2 * x +\n box[0][1] - box[0][0], 2 * x + box[1][1] - box[1][0], fc='y',\n color='red', fill=None)\n plt.gca().add_patch(rectangle)\n plt.savefig('fig.jpg', dpi=1000)\n plt.show()\n\n\n<function token>\n\n\ndef Ball_solver(equations, B_Ball, X):\n L = [B_Ball]\n certified_boxes = []\n uncertified_boxes = []\n n = len(X)\n while len(L) != 0:\n solvability = 1\n if B_Ball[2 * n - 2][0] <= 0 <= B_Ball[2 * n - 2][1] and d.width([d\n .ftconstructor(Bi[0], Bi[1]) for Bi in L[0]]) < 0.1:\n Ball_cusp_gen(equations, B_Ball, X)\n elif (B_Ball[2 * n - 2][0] > 0 or 0 > B_Ball[2 * n - 2][1]\n ) and d.width([d.ftconstructor(Bi[0], Bi[1]) for Bi in L[0]]\n ) < 0.1:\n Ball_node_gen(equations, B_Ball, X)\n else:\n children = cb.plane_subdivision(L[0])\n L.remove(L[0])\n L += children\n solvability = 0\n if solvability == 1:\n ibex_output = cb.solving_with_ibex()\n if ibex_output[0] == 'Empty':\n L.remove(L[0])\n elif len(ibex_output[0]) != 0:\n certified_boxes += cb.computing_boxes(ibex_output[0])\n L.remove(L[0])\n elif len(ibex_output[1]) != 0:\n uncertified_boxes += cb.computing_boxes(ibex_output[1])\n L.remove(L[0])\n else:\n children = cb.plane_subdivision(L[0])\n L.remove(L[0])\n L += children\n return [certified_boxes, uncertified_boxes]\n\n\ndef SDP_str(P, X):\n n = len(X)\n P_pluse = P[:]\n P_minus = P[:]\n for i in range(2, n):\n P_pluse = P_pluse.replace('x' + str(i + 1), '(x' + str(i + 1) +\n '+ r' + str(i + 1) + '*sqrt(t))')\n P_minus = P_minus.replace('x' + str(i + 1), '(x' + str(i + 1) +\n '- r' + str(i + 1) + '*sqrt(t))')\n SP = '0.5*(' + P_pluse + '+' + P_minus + ')=0; \\n'\n DP = '0.5*(' + P_pluse + '- (' + P_minus + ') )/(sqrt(t))=0; \\n'\n return [SP, DP]\n\n\ndef Ball_generating_system(P, B_Ball, X, eps_min=0.001):\n n = len(X)\n V = ' Variables \\n '\n for i in range(n):\n if B_Ball[i][0] != B_Ball[i][1]:\n V += 'x' + str(i + 1) + ' in ' + str(B_Ball[i]) + ' ; \\n'\n else:\n V += 'x' + str(i + 1) + ' in ' + str([B_Ball[i][0] - eps_min, \n B_Ball[i][1] + eps_min]) + ' ; \\n'\n for i in range(n, 2 * n - 2):\n V += 'r' + str(i - n + 3) + ' in ' + str(B_Ball[i]) + ' ; \\n'\n V += 't' + ' in ' + str(B_Ball[2 * n - 2]) + ' ; \\n'\n V += 'Constraints \\n'\n for Pi in P:\n V += SDP_str(Pi, X)[0]\n V += SDP_str(Pi, X)[1]\n last_eq = ''\n for i in range(3, n):\n last_eq += 'r' + str(i) + '^2+'\n last_eq += 'r' + str(n) + '^2 -1=0;'\n V += last_eq + '\\n'\n f = open('eq.txt', 'w+')\n f.write(V)\n f.write('end')\n f.close()\n\n\n<function token>\n\n\ndef intersting_boxes(curve, b):\n cer_intersting_boxes = []\n uncer_intersting_boxes = []\n for box in curve[0]:\n if b[0][0] <= box[0][0] <= box[0][1] <= b[0][1] and b[1][0] <= box[1][0\n ] <= box[1][1] <= b[1][1]:\n cer_intersting_boxes.append(box)\n for box in curve[1]:\n if b[0][0] <= box[0][0] <= box[0][1] <= b[0][1] and b[1][0] <= box[1][0\n ] <= box[1][1] <= b[1][1]:\n uncer_intersting_boxes.append(box)\n return [cer_intersting_boxes, uncer_intersting_boxes]\n\n\n<function token>\n\n\ndef estimating_t1(components, upper_bound=200000):\n t1 = upper_bound\n t2 = 0\n for box1 in components[0]:\n for box2 in components[1]:\n a = d.distance(box1, box2).lower()\n b = d.distance(box1, box2).upper()\n if t1 > a:\n t1 = a\n if t2 < b:\n t2 = b\n t = d.ftconstructor(t1, t2)\n t = 0.25 * d.power_interval(t, 2)\n return [float(t.lower()), float(t.upper())]\n\n\n<function token>\n\n\ndef boxes_compare(box1, box2):\n flage = 0\n for i in range(len(box1) - 1, -1, -1):\n if box1[i][0] > box2[i][0]:\n return 1\n if box1[i][0] < box2[i][0]:\n return -1\n return 0\n\n\ndef boxes_sort(boxes):\n sorted_boxes = boxes[:]\n for i in range(len(boxes) - 1):\n for j in range(i + 1, len(boxes)):\n if boxes_compare(sorted_boxes[i], sorted_boxes[j]) == 1:\n sorted_boxes[i], sorted_boxes[j] = sorted_boxes[j\n ], sorted_boxes[i]\n return sorted_boxes\n\n\ndef connected_compnants(boxes):\n ftboxes = boxes[:]\n components = [[ftboxes[0]]]\n for i in range(1, len(ftboxes)):\n boxi_isused = 0\n for j in range(len(components)):\n membership = 0\n for k in range(len(components[j])):\n if d.boxes_intersection(ftboxes[i], components[j][k]) != []:\n components[j].append(ftboxes[i])\n membership = 1\n boxi_isused = 1\n break\n if membership == 1:\n break\n if boxi_isused == 0:\n components.append([ftboxes[i]])\n unused = list(range(len(components)))\n components1 = components[:]\n components2 = []\n while len(components1) != len(components2):\n for i in unused:\n for j in [j for j in list(range(i + 1, len(components))) if j in\n unused]:\n intersection_exists = False\n is_looping = True\n for boxi in components[i]:\n for boxj in components[j]:\n if d.boxes_intersection(boxi, boxj) != []:\n is_looping = False\n intersection_exists = True\n break\n if is_looping == False:\n break\n if intersection_exists == True:\n components[i] += components[j]\n unused.remove(j)\n components2 = components1[:]\n components1 = [components[k] for k in unused]\n return components1\n\n\ndef planner_connected_compnants(boxes):\n if len(boxes) == 0:\n return []\n ftboxes = boxes[:]\n components = [[ftboxes[0]]]\n for i in range(1, len(ftboxes)):\n boxi_isused = 0\n for j in range(len(components)):\n membership = 0\n for k in range(len(components[j])):\n if d.boxes_intersection(ftboxes[i][:2], components[j][k][:2]\n ) != []:\n components[j].append(ftboxes[i])\n membership = 1\n boxi_isused = 1\n break\n if membership == 1:\n break\n if boxi_isused == 0:\n components.append([ftboxes[i]])\n unused = list(range(len(components)))\n components1 = components[:]\n components2 = []\n while len(components1) != len(components2):\n for i in unused:\n for j in [j for j in list(range(i + 1, len(components))) if j in\n unused]:\n intersection_exists = False\n is_looping = True\n for boxi in components[i]:\n for boxj in components[j]:\n if d.boxes_intersection(boxi[:2], boxj[:2]) != []:\n is_looping = False\n intersection_exists = True\n break\n if is_looping == False:\n break\n if intersection_exists == True:\n components[i] += components[j]\n unused.remove(j)\n components2 = components1[:]\n components1 = [components[k] for k in unused]\n return components1\n\n\n<function token>\n<function token>\n\n\ndef intersect_in_2D(class1, class2, monotonicity=1):\n pl_intesected_pairs = []\n if monotonicity == 1:\n for i in range(len(class1)):\n for j in range(len(class2)):\n if d.boxes_intersection(class1[i][:2], class2[j][:2]) != [\n ] and d.boxes_intersection(class1[i], class2[j]) == []:\n if [class2[j], class1[i]] not in pl_intesected_pairs:\n pl_intesected_pairs.append([class1[i], class2[j]])\n elif monotonicity == 0:\n for i in range(len(class1)):\n for j in range(len(class2)):\n if d.boxes_intersection(class1[i][:2], class2[j][:2]) != []:\n if [class2[j], class1[i]] not in pl_intesected_pairs:\n pl_intesected_pairs.append([class1[i], class2[j]])\n elif monotonicity == 2:\n inters_indic = []\n for i in range(len(class1)):\n inters_indic.append([])\n for j in range(len(class2)):\n if d.boxes_intersection(class1[i][:2], class2[j][:2]) != []:\n inters_indic[i] = inters_indic[i] + [j]\n for k in range(len(class1)):\n if len(inters_indic[k]) > 3:\n for j in range(len(inters_indic[k])):\n if [class2[j], class1[k]] not in pl_intesected_pairs:\n pl_intesected_pairs.append([class1[k], class2[j]])\n return pl_intesected_pairs\n\n\ndef solving_fornodes(equations, boxes, B, X, eps=0.1):\n plane_components = detecting_nodes(boxes, B, equations, X, eps)\n g = open(equations, 'r')\n P = [Pi.replace('\\n', '') for Pi in g.readlines()]\n Ball_solutions = []\n for plane_component in plane_components:\n x1 = float(min([ai[0].lower() for ai in plane_component]))\n x2 = float(max([ai[0].upper() for ai in plane_component]))\n y1 = float(min([ai[1].lower() for ai in plane_component]))\n y2 = float(max([ai[1].upper() for ai in plane_component]))\n components = connected_compnants(plane_component)\n r = [[float(ri[0]), float(ri[1])] for ri in estimating_r(components)]\n t = estimating_t(components)\n t = [float(t[0]), float(t[1])]\n B_Ball = [[x1, x2], [y1, y2]] + r + [t]\n Ball_generating_system(P, B_Ball, X)\n solutionsi = ibex_output(P, B_Ball, X)\n Ball_solutions += solutionsi\n return Ball_solutions\n\n\n<function token>\n\n\ndef plane_subdivision(B):\n ft_B2 = d.subdivide([d.ftconstructor(Bi[0], Bi[1]) for Bi in B[:2]])\n normal_B2 = [d.ft_normal(Bi) for Bi in ft_B2]\n return d.cartesian_product(normal_B2, [B[2:]])\n\n\n<function token>\n<function token>\n\n\ndef computing_boxes():\n if 'infeasible' in open('output.txt', 'r').read():\n return 'Empty'\n content = open('output.txt', 'r').readlines()\n cer = []\n uncer = []\n i = 0\n Answer = []\n for fi in content:\n try:\n a = fi.index('(')\n b = fi.index(')')\n T = fi[a:b + 1].replace('(', '[')\n T = fi[a:b + 1].replace('(', '[')\n T = T.replace(')', ']')\n T = T.split(';')\n E = []\n i = 0\n for Ti in T:\n Ti = Ti.replace('[', '')\n Ti = Ti.replace(']', '')\n Ti = Ti.replace('<', '')\n Ti = Ti.replace('>', '')\n x = Ti.index(',')\n a = float(Ti[:x])\n b = float(Ti[x + 1:])\n E.append([])\n E[i] = [a, b]\n i += 1\n if 'solution n' in fi or 'boundary n' in fi:\n cer.append(E)\n elif 'unknown n' in fi:\n uncer.append(E)\n except ValueError:\n pass\n return [cer, uncer]\n\n\ndef enclosing_curve(system, B, X, eps_min=0.1, eps_max=0.1):\n L = [B]\n certified_boxes = []\n uncertified_boxes = []\n while len(L) != 0:\n system_generator(system, L[0], X)\n os.system('ibexsolve --eps-max=' + str(eps_max) + ' --eps-min=' +\n str(eps_min) + ' -s eq.txt > output.txt')\n content = open('output.txt', 'r').readlines()\n ibex_output = computing_boxes()\n if ibex_output == [[], []] and max([(Bi[1] - Bi[0]) for Bi in L[0]]\n ) < eps_min:\n uncertified_boxes.append(L[0])\n L.remove(L[0])\n elif ibex_output == [[], []]:\n children = plane_subdivision(L[0])\n L.remove(L[0])\n L += children\n elif ibex_output == 'Empty':\n L.remove(L[0])\n else:\n if len(ibex_output[0]) != 0:\n certified_boxes += ibex_output[0]\n if len(ibex_output[1]) != 0:\n uncertified_boxes += ibex_output[1]\n L.remove(L[0])\n return [certified_boxes, uncertified_boxes]\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef projection_checker(solutions):\n if len(solutions) == 0:\n return [[], []]\n m = len(solutions[0])\n n = int((m + 1) / 2)\n intersect_in2d = [[]] * len(solutions)\n for i in range(len(solutions) - 1):\n for j in range(i + 1, len(solutions)):\n if solutions[i] == solutions[j]:\n continue\n elif d.boxes_intersection(solutions[i][:2], solutions[j][:2]) != [\n ] and (d.boxes_intersection(solutions[i][n:2 * n - 2], [[-\n Bi[1], -Bi[0]] for Bi in solutions[j][n:2 * n - 2]]) == [] and\n d.boxes_intersection(solutions[i][n:2 * n - 2], [[Bi[0], Bi\n [1]] for Bi in solutions[j][n:2 * n - 2]]) == [] or d.\n boxes_intersection(solutions[i][2:n] + [solutions[i][2 * n -\n 2]], solutions[j][2:n] + [solutions[j][2 * n - 2]]) == []):\n intersect_in2d[i] = intersect_in2d[i] + [j]\n accepted = []\n acc_ind = []\n unaccepted = []\n unacc_ind = []\n for i in range(len(solutions)):\n if len(intersect_in2d[i]) == 0 and i not in unacc_ind + acc_ind:\n accepted.append(solutions[i])\n acc_ind.append(i)\n continue\n elif i not in unacc_ind + acc_ind:\n unaccepted.append(solutions[i])\n unacc_ind.append(i)\n for k in intersect_in2d[i]:\n if k not in unacc_ind:\n unaccepted.append(solutions[k])\n unacc_ind.append(k)\n return [accepted, unaccepted]\n\n\n<function token>\n<function token>\n\n\ndef checking_assumptions(curve_data):\n if len(curve_data[0][1]) != 0:\n return 0\n Ball_sols_ft = [[d.ftconstructor(Bi[0], Bi[1]) for Bi in B] for B in\n curve_data[1][0]] + [[d.ftconstructor(Bi[0], Bi[1]) for Bi in B] for\n B in curve_data[1][1]]\n alph3 = assum_alph3_checker(Ball_sols_ft)\n if alph3 == 1:\n return 1\n else:\n return 0\n\n\n<function token>\n<function token>\n\n\ndef assum_alph3_checker(solutions):\n comparing_list = [[]] * len(solutions)\n for i in range(len(solutions) - 1):\n for j in range(i + 1, len(solutions)):\n if d.boxes_intersection(solutions[i][:2], solutions[j][:2]) != []:\n comparing_list[i].append(j)\n comparing_list[j].append(i)\n matching = [len(T) for T in comparing_list]\n if max(matching) <= 2:\n return 1\n else:\n return 0\n\n\ndef plotting_3D(boxes, Box, var=[0, 1, 2]):\n ax = plt.figure().add_subplot(111, projection='3d')\n ax.set_xlim(Box[0][0], Box[0][1])\n ax.set_ylim(Box[1][0], Box[1][1])\n ax.set_zlim(Box[2][0], Box[2][1])\n ax.set_xlabel('x' + str(var[0] + 1))\n ax.set_ylabel('x' + str(var[1] + 1))\n ax.set_zlabel('x' + str(var[2] + 1))\n for box in boxes:\n V = [[box[j][0] for j in range(3)], [box[j][1] for j in range(3)]]\n points = list(itertools.product(*box))\n faces = [[points[0], points[2], points[6], points[4]], [points[0],\n points[2], points[3], points[1]], [points[0], points[1], points\n [5], points[4]], [points[2], points[3], points[7], points[6]],\n [points[1], points[3], points[7], points[5]]]\n ax.add_collection3d(Poly3DCollection(faces, facecolors='green',\n linewidths=1, edgecolors='green', alpha=0.25))\n plt.show()\n\n\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<docstring token>\n",
"<import token>\n\n\ndef ploting_boxes(boxes, uncer_boxes, var=[0, 1], B=[[-20, 20], [-20, 20]],\n x=0.1, nodes=[], cusps=[], uncer_Solutions=[], Legend=False, color=\n 'green', variabel_name='x'):\n fig, ax = plt.subplots()\n ax.set_xlim(B[0][0], B[0][1])\n ax.set_ylim(B[1][0], B[1][1])\n ax.set_xlabel(variabel_name + str(1))\n ax.set_ylabel(variabel_name + str(2))\n \"\"\"try:\n ax.title(open(\"system.txt\",\"r\").read())\n except:\n pass\"\"\"\n c = 0\n green_patch = mpatches.Patch(color=color, label='smooth part')\n red_patch = mpatches.Patch(color='red', label='unknown part')\n node_patch = mpatches.Patch(color='black', label='Certified nodes',\n fill=None)\n cusp_patch = mpatches.Patch(color='blue', label=\n 'Projection of certified solution with t=0 ', fill=None)\n if Legend == True:\n plt.legend(handles=[green_patch, red_patch, node_patch, cusp_patch])\n for box in boxes:\n rectangle = plt.Rectangle((box[var[0]][0], box[var[1]][0]), box[var\n [0]][1] - box[var[0]][0], box[var[1]][1] - box[var[1]][0],\n color=color)\n plt.gca().add_patch(rectangle)\n for box in uncer_boxes:\n rectangle = plt.Rectangle((box[var[0]][0], box[var[1]][0]), box[var\n [0]][1] - box[var[0]][0], box[var[1]][1] - box[var[1]][0], fc='r')\n plt.gca().add_patch(rectangle)\n for box in nodes:\n rectangle = plt.Rectangle((box[0][0] - x, box[1][0] - x), 2 * x +\n box[0][1] - box[0][0], 2 * x + box[1][1] - box[1][0], fc='y',\n fill=None)\n plt.gca().add_patch(rectangle)\n for box in cusps:\n rectangle = plt.Rectangle((box[0][0] - x, box[1][0] - x), 2 * x +\n box[0][1] - box[0][0], 2 * x + box[1][1] - box[1][0], fc='y',\n color='blue', fill=None)\n plt.gca().add_patch(rectangle)\n for box in uncer_Solutions:\n rectangle = plt.Rectangle((box[0][0] - x, box[1][0] - x), 2 * x +\n box[0][1] - box[0][0], 2 * x + box[1][1] - box[1][0], fc='y',\n color='red', fill=None)\n plt.gca().add_patch(rectangle)\n plt.savefig('fig.jpg', dpi=1000)\n plt.show()\n\n\n<function token>\n\n\ndef Ball_solver(equations, B_Ball, X):\n L = [B_Ball]\n certified_boxes = []\n uncertified_boxes = []\n n = len(X)\n while len(L) != 0:\n solvability = 1\n if B_Ball[2 * n - 2][0] <= 0 <= B_Ball[2 * n - 2][1] and d.width([d\n .ftconstructor(Bi[0], Bi[1]) for Bi in L[0]]) < 0.1:\n Ball_cusp_gen(equations, B_Ball, X)\n elif (B_Ball[2 * n - 2][0] > 0 or 0 > B_Ball[2 * n - 2][1]\n ) and d.width([d.ftconstructor(Bi[0], Bi[1]) for Bi in L[0]]\n ) < 0.1:\n Ball_node_gen(equations, B_Ball, X)\n else:\n children = cb.plane_subdivision(L[0])\n L.remove(L[0])\n L += children\n solvability = 0\n if solvability == 1:\n ibex_output = cb.solving_with_ibex()\n if ibex_output[0] == 'Empty':\n L.remove(L[0])\n elif len(ibex_output[0]) != 0:\n certified_boxes += cb.computing_boxes(ibex_output[0])\n L.remove(L[0])\n elif len(ibex_output[1]) != 0:\n uncertified_boxes += cb.computing_boxes(ibex_output[1])\n L.remove(L[0])\n else:\n children = cb.plane_subdivision(L[0])\n L.remove(L[0])\n L += children\n return [certified_boxes, uncertified_boxes]\n\n\ndef SDP_str(P, X):\n n = len(X)\n P_pluse = P[:]\n P_minus = P[:]\n for i in range(2, n):\n P_pluse = P_pluse.replace('x' + str(i + 1), '(x' + str(i + 1) +\n '+ r' + str(i + 1) + '*sqrt(t))')\n P_minus = P_minus.replace('x' + str(i + 1), '(x' + str(i + 1) +\n '- r' + str(i + 1) + '*sqrt(t))')\n SP = '0.5*(' + P_pluse + '+' + P_minus + ')=0; \\n'\n DP = '0.5*(' + P_pluse + '- (' + P_minus + ') )/(sqrt(t))=0; \\n'\n return [SP, DP]\n\n\ndef Ball_generating_system(P, B_Ball, X, eps_min=0.001):\n n = len(X)\n V = ' Variables \\n '\n for i in range(n):\n if B_Ball[i][0] != B_Ball[i][1]:\n V += 'x' + str(i + 1) + ' in ' + str(B_Ball[i]) + ' ; \\n'\n else:\n V += 'x' + str(i + 1) + ' in ' + str([B_Ball[i][0] - eps_min, \n B_Ball[i][1] + eps_min]) + ' ; \\n'\n for i in range(n, 2 * n - 2):\n V += 'r' + str(i - n + 3) + ' in ' + str(B_Ball[i]) + ' ; \\n'\n V += 't' + ' in ' + str(B_Ball[2 * n - 2]) + ' ; \\n'\n V += 'Constraints \\n'\n for Pi in P:\n V += SDP_str(Pi, X)[0]\n V += SDP_str(Pi, X)[1]\n last_eq = ''\n for i in range(3, n):\n last_eq += 'r' + str(i) + '^2+'\n last_eq += 'r' + str(n) + '^2 -1=0;'\n V += last_eq + '\\n'\n f = open('eq.txt', 'w+')\n f.write(V)\n f.write('end')\n f.close()\n\n\n<function token>\n\n\ndef intersting_boxes(curve, b):\n cer_intersting_boxes = []\n uncer_intersting_boxes = []\n for box in curve[0]:\n if b[0][0] <= box[0][0] <= box[0][1] <= b[0][1] and b[1][0] <= box[1][0\n ] <= box[1][1] <= b[1][1]:\n cer_intersting_boxes.append(box)\n for box in curve[1]:\n if b[0][0] <= box[0][0] <= box[0][1] <= b[0][1] and b[1][0] <= box[1][0\n ] <= box[1][1] <= b[1][1]:\n uncer_intersting_boxes.append(box)\n return [cer_intersting_boxes, uncer_intersting_boxes]\n\n\n<function token>\n\n\ndef estimating_t1(components, upper_bound=200000):\n t1 = upper_bound\n t2 = 0\n for box1 in components[0]:\n for box2 in components[1]:\n a = d.distance(box1, box2).lower()\n b = d.distance(box1, box2).upper()\n if t1 > a:\n t1 = a\n if t2 < b:\n t2 = b\n t = d.ftconstructor(t1, t2)\n t = 0.25 * d.power_interval(t, 2)\n return [float(t.lower()), float(t.upper())]\n\n\n<function token>\n\n\ndef boxes_compare(box1, box2):\n flage = 0\n for i in range(len(box1) - 1, -1, -1):\n if box1[i][0] > box2[i][0]:\n return 1\n if box1[i][0] < box2[i][0]:\n return -1\n return 0\n\n\ndef boxes_sort(boxes):\n sorted_boxes = boxes[:]\n for i in range(len(boxes) - 1):\n for j in range(i + 1, len(boxes)):\n if boxes_compare(sorted_boxes[i], sorted_boxes[j]) == 1:\n sorted_boxes[i], sorted_boxes[j] = sorted_boxes[j\n ], sorted_boxes[i]\n return sorted_boxes\n\n\ndef connected_compnants(boxes):\n ftboxes = boxes[:]\n components = [[ftboxes[0]]]\n for i in range(1, len(ftboxes)):\n boxi_isused = 0\n for j in range(len(components)):\n membership = 0\n for k in range(len(components[j])):\n if d.boxes_intersection(ftboxes[i], components[j][k]) != []:\n components[j].append(ftboxes[i])\n membership = 1\n boxi_isused = 1\n break\n if membership == 1:\n break\n if boxi_isused == 0:\n components.append([ftboxes[i]])\n unused = list(range(len(components)))\n components1 = components[:]\n components2 = []\n while len(components1) != len(components2):\n for i in unused:\n for j in [j for j in list(range(i + 1, len(components))) if j in\n unused]:\n intersection_exists = False\n is_looping = True\n for boxi in components[i]:\n for boxj in components[j]:\n if d.boxes_intersection(boxi, boxj) != []:\n is_looping = False\n intersection_exists = True\n break\n if is_looping == False:\n break\n if intersection_exists == True:\n components[i] += components[j]\n unused.remove(j)\n components2 = components1[:]\n components1 = [components[k] for k in unused]\n return components1\n\n\ndef planner_connected_compnants(boxes):\n if len(boxes) == 0:\n return []\n ftboxes = boxes[:]\n components = [[ftboxes[0]]]\n for i in range(1, len(ftboxes)):\n boxi_isused = 0\n for j in range(len(components)):\n membership = 0\n for k in range(len(components[j])):\n if d.boxes_intersection(ftboxes[i][:2], components[j][k][:2]\n ) != []:\n components[j].append(ftboxes[i])\n membership = 1\n boxi_isused = 1\n break\n if membership == 1:\n break\n if boxi_isused == 0:\n components.append([ftboxes[i]])\n unused = list(range(len(components)))\n components1 = components[:]\n components2 = []\n while len(components1) != len(components2):\n for i in unused:\n for j in [j for j in list(range(i + 1, len(components))) if j in\n unused]:\n intersection_exists = False\n is_looping = True\n for boxi in components[i]:\n for boxj in components[j]:\n if d.boxes_intersection(boxi[:2], boxj[:2]) != []:\n is_looping = False\n intersection_exists = True\n break\n if is_looping == False:\n break\n if intersection_exists == True:\n components[i] += components[j]\n unused.remove(j)\n components2 = components1[:]\n components1 = [components[k] for k in unused]\n return components1\n\n\n<function token>\n<function token>\n\n\ndef intersect_in_2D(class1, class2, monotonicity=1):\n pl_intesected_pairs = []\n if monotonicity == 1:\n for i in range(len(class1)):\n for j in range(len(class2)):\n if d.boxes_intersection(class1[i][:2], class2[j][:2]) != [\n ] and d.boxes_intersection(class1[i], class2[j]) == []:\n if [class2[j], class1[i]] not in pl_intesected_pairs:\n pl_intesected_pairs.append([class1[i], class2[j]])\n elif monotonicity == 0:\n for i in range(len(class1)):\n for j in range(len(class2)):\n if d.boxes_intersection(class1[i][:2], class2[j][:2]) != []:\n if [class2[j], class1[i]] not in pl_intesected_pairs:\n pl_intesected_pairs.append([class1[i], class2[j]])\n elif monotonicity == 2:\n inters_indic = []\n for i in range(len(class1)):\n inters_indic.append([])\n for j in range(len(class2)):\n if d.boxes_intersection(class1[i][:2], class2[j][:2]) != []:\n inters_indic[i] = inters_indic[i] + [j]\n for k in range(len(class1)):\n if len(inters_indic[k]) > 3:\n for j in range(len(inters_indic[k])):\n if [class2[j], class1[k]] not in pl_intesected_pairs:\n pl_intesected_pairs.append([class1[k], class2[j]])\n return pl_intesected_pairs\n\n\ndef solving_fornodes(equations, boxes, B, X, eps=0.1):\n plane_components = detecting_nodes(boxes, B, equations, X, eps)\n g = open(equations, 'r')\n P = [Pi.replace('\\n', '') for Pi in g.readlines()]\n Ball_solutions = []\n for plane_component in plane_components:\n x1 = float(min([ai[0].lower() for ai in plane_component]))\n x2 = float(max([ai[0].upper() for ai in plane_component]))\n y1 = float(min([ai[1].lower() for ai in plane_component]))\n y2 = float(max([ai[1].upper() for ai in plane_component]))\n components = connected_compnants(plane_component)\n r = [[float(ri[0]), float(ri[1])] for ri in estimating_r(components)]\n t = estimating_t(components)\n t = [float(t[0]), float(t[1])]\n B_Ball = [[x1, x2], [y1, y2]] + r + [t]\n Ball_generating_system(P, B_Ball, X)\n solutionsi = ibex_output(P, B_Ball, X)\n Ball_solutions += solutionsi\n return Ball_solutions\n\n\n<function token>\n\n\ndef plane_subdivision(B):\n ft_B2 = d.subdivide([d.ftconstructor(Bi[0], Bi[1]) for Bi in B[:2]])\n normal_B2 = [d.ft_normal(Bi) for Bi in ft_B2]\n return d.cartesian_product(normal_B2, [B[2:]])\n\n\n<function token>\n<function token>\n\n\ndef computing_boxes():\n if 'infeasible' in open('output.txt', 'r').read():\n return 'Empty'\n content = open('output.txt', 'r').readlines()\n cer = []\n uncer = []\n i = 0\n Answer = []\n for fi in content:\n try:\n a = fi.index('(')\n b = fi.index(')')\n T = fi[a:b + 1].replace('(', '[')\n T = fi[a:b + 1].replace('(', '[')\n T = T.replace(')', ']')\n T = T.split(';')\n E = []\n i = 0\n for Ti in T:\n Ti = Ti.replace('[', '')\n Ti = Ti.replace(']', '')\n Ti = Ti.replace('<', '')\n Ti = Ti.replace('>', '')\n x = Ti.index(',')\n a = float(Ti[:x])\n b = float(Ti[x + 1:])\n E.append([])\n E[i] = [a, b]\n i += 1\n if 'solution n' in fi or 'boundary n' in fi:\n cer.append(E)\n elif 'unknown n' in fi:\n uncer.append(E)\n except ValueError:\n pass\n return [cer, uncer]\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef projection_checker(solutions):\n if len(solutions) == 0:\n return [[], []]\n m = len(solutions[0])\n n = int((m + 1) / 2)\n intersect_in2d = [[]] * len(solutions)\n for i in range(len(solutions) - 1):\n for j in range(i + 1, len(solutions)):\n if solutions[i] == solutions[j]:\n continue\n elif d.boxes_intersection(solutions[i][:2], solutions[j][:2]) != [\n ] and (d.boxes_intersection(solutions[i][n:2 * n - 2], [[-\n Bi[1], -Bi[0]] for Bi in solutions[j][n:2 * n - 2]]) == [] and\n d.boxes_intersection(solutions[i][n:2 * n - 2], [[Bi[0], Bi\n [1]] for Bi in solutions[j][n:2 * n - 2]]) == [] or d.\n boxes_intersection(solutions[i][2:n] + [solutions[i][2 * n -\n 2]], solutions[j][2:n] + [solutions[j][2 * n - 2]]) == []):\n intersect_in2d[i] = intersect_in2d[i] + [j]\n accepted = []\n acc_ind = []\n unaccepted = []\n unacc_ind = []\n for i in range(len(solutions)):\n if len(intersect_in2d[i]) == 0 and i not in unacc_ind + acc_ind:\n accepted.append(solutions[i])\n acc_ind.append(i)\n continue\n elif i not in unacc_ind + acc_ind:\n unaccepted.append(solutions[i])\n unacc_ind.append(i)\n for k in intersect_in2d[i]:\n if k not in unacc_ind:\n unaccepted.append(solutions[k])\n unacc_ind.append(k)\n return [accepted, unaccepted]\n\n\n<function token>\n<function token>\n\n\ndef checking_assumptions(curve_data):\n if len(curve_data[0][1]) != 0:\n return 0\n Ball_sols_ft = [[d.ftconstructor(Bi[0], Bi[1]) for Bi in B] for B in\n curve_data[1][0]] + [[d.ftconstructor(Bi[0], Bi[1]) for Bi in B] for\n B in curve_data[1][1]]\n alph3 = assum_alph3_checker(Ball_sols_ft)\n if alph3 == 1:\n return 1\n else:\n return 0\n\n\n<function token>\n<function token>\n\n\ndef assum_alph3_checker(solutions):\n comparing_list = [[]] * len(solutions)\n for i in range(len(solutions) - 1):\n for j in range(i + 1, len(solutions)):\n if d.boxes_intersection(solutions[i][:2], solutions[j][:2]) != []:\n comparing_list[i].append(j)\n comparing_list[j].append(i)\n matching = [len(T) for T in comparing_list]\n if max(matching) <= 2:\n return 1\n else:\n return 0\n\n\ndef plotting_3D(boxes, Box, var=[0, 1, 2]):\n ax = plt.figure().add_subplot(111, projection='3d')\n ax.set_xlim(Box[0][0], Box[0][1])\n ax.set_ylim(Box[1][0], Box[1][1])\n ax.set_zlim(Box[2][0], Box[2][1])\n ax.set_xlabel('x' + str(var[0] + 1))\n ax.set_ylabel('x' + str(var[1] + 1))\n ax.set_zlabel('x' + str(var[2] + 1))\n for box in boxes:\n V = [[box[j][0] for j in range(3)], [box[j][1] for j in range(3)]]\n points = list(itertools.product(*box))\n faces = [[points[0], points[2], points[6], points[4]], [points[0],\n points[2], points[3], points[1]], [points[0], points[1], points\n [5], points[4]], [points[2], points[3], points[7], points[6]],\n [points[1], points[3], points[7], points[5]]]\n ax.add_collection3d(Poly3DCollection(faces, facecolors='green',\n linewidths=1, edgecolors='green', alpha=0.25))\n plt.show()\n\n\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<docstring token>\n",
"<import token>\n\n\ndef ploting_boxes(boxes, uncer_boxes, var=[0, 1], B=[[-20, 20], [-20, 20]],\n x=0.1, nodes=[], cusps=[], uncer_Solutions=[], Legend=False, color=\n 'green', variabel_name='x'):\n fig, ax = plt.subplots()\n ax.set_xlim(B[0][0], B[0][1])\n ax.set_ylim(B[1][0], B[1][1])\n ax.set_xlabel(variabel_name + str(1))\n ax.set_ylabel(variabel_name + str(2))\n \"\"\"try:\n ax.title(open(\"system.txt\",\"r\").read())\n except:\n pass\"\"\"\n c = 0\n green_patch = mpatches.Patch(color=color, label='smooth part')\n red_patch = mpatches.Patch(color='red', label='unknown part')\n node_patch = mpatches.Patch(color='black', label='Certified nodes',\n fill=None)\n cusp_patch = mpatches.Patch(color='blue', label=\n 'Projection of certified solution with t=0 ', fill=None)\n if Legend == True:\n plt.legend(handles=[green_patch, red_patch, node_patch, cusp_patch])\n for box in boxes:\n rectangle = plt.Rectangle((box[var[0]][0], box[var[1]][0]), box[var\n [0]][1] - box[var[0]][0], box[var[1]][1] - box[var[1]][0],\n color=color)\n plt.gca().add_patch(rectangle)\n for box in uncer_boxes:\n rectangle = plt.Rectangle((box[var[0]][0], box[var[1]][0]), box[var\n [0]][1] - box[var[0]][0], box[var[1]][1] - box[var[1]][0], fc='r')\n plt.gca().add_patch(rectangle)\n for box in nodes:\n rectangle = plt.Rectangle((box[0][0] - x, box[1][0] - x), 2 * x +\n box[0][1] - box[0][0], 2 * x + box[1][1] - box[1][0], fc='y',\n fill=None)\n plt.gca().add_patch(rectangle)\n for box in cusps:\n rectangle = plt.Rectangle((box[0][0] - x, box[1][0] - x), 2 * x +\n box[0][1] - box[0][0], 2 * x + box[1][1] - box[1][0], fc='y',\n color='blue', fill=None)\n plt.gca().add_patch(rectangle)\n for box in uncer_Solutions:\n rectangle = plt.Rectangle((box[0][0] - x, box[1][0] - x), 2 * x +\n box[0][1] - box[0][0], 2 * x + box[1][1] - box[1][0], fc='y',\n color='red', fill=None)\n plt.gca().add_patch(rectangle)\n plt.savefig('fig.jpg', dpi=1000)\n plt.show()\n\n\n<function token>\n\n\ndef Ball_solver(equations, B_Ball, X):\n L = [B_Ball]\n certified_boxes = []\n uncertified_boxes = []\n n = len(X)\n while len(L) != 0:\n solvability = 1\n if B_Ball[2 * n - 2][0] <= 0 <= B_Ball[2 * n - 2][1] and d.width([d\n .ftconstructor(Bi[0], Bi[1]) for Bi in L[0]]) < 0.1:\n Ball_cusp_gen(equations, B_Ball, X)\n elif (B_Ball[2 * n - 2][0] > 0 or 0 > B_Ball[2 * n - 2][1]\n ) and d.width([d.ftconstructor(Bi[0], Bi[1]) for Bi in L[0]]\n ) < 0.1:\n Ball_node_gen(equations, B_Ball, X)\n else:\n children = cb.plane_subdivision(L[0])\n L.remove(L[0])\n L += children\n solvability = 0\n if solvability == 1:\n ibex_output = cb.solving_with_ibex()\n if ibex_output[0] == 'Empty':\n L.remove(L[0])\n elif len(ibex_output[0]) != 0:\n certified_boxes += cb.computing_boxes(ibex_output[0])\n L.remove(L[0])\n elif len(ibex_output[1]) != 0:\n uncertified_boxes += cb.computing_boxes(ibex_output[1])\n L.remove(L[0])\n else:\n children = cb.plane_subdivision(L[0])\n L.remove(L[0])\n L += children\n return [certified_boxes, uncertified_boxes]\n\n\ndef SDP_str(P, X):\n n = len(X)\n P_pluse = P[:]\n P_minus = P[:]\n for i in range(2, n):\n P_pluse = P_pluse.replace('x' + str(i + 1), '(x' + str(i + 1) +\n '+ r' + str(i + 1) + '*sqrt(t))')\n P_minus = P_minus.replace('x' + str(i + 1), '(x' + str(i + 1) +\n '- r' + str(i + 1) + '*sqrt(t))')\n SP = '0.5*(' + P_pluse + '+' + P_minus + ')=0; \\n'\n DP = '0.5*(' + P_pluse + '- (' + P_minus + ') )/(sqrt(t))=0; \\n'\n return [SP, DP]\n\n\ndef Ball_generating_system(P, B_Ball, X, eps_min=0.001):\n n = len(X)\n V = ' Variables \\n '\n for i in range(n):\n if B_Ball[i][0] != B_Ball[i][1]:\n V += 'x' + str(i + 1) + ' in ' + str(B_Ball[i]) + ' ; \\n'\n else:\n V += 'x' + str(i + 1) + ' in ' + str([B_Ball[i][0] - eps_min, \n B_Ball[i][1] + eps_min]) + ' ; \\n'\n for i in range(n, 2 * n - 2):\n V += 'r' + str(i - n + 3) + ' in ' + str(B_Ball[i]) + ' ; \\n'\n V += 't' + ' in ' + str(B_Ball[2 * n - 2]) + ' ; \\n'\n V += 'Constraints \\n'\n for Pi in P:\n V += SDP_str(Pi, X)[0]\n V += SDP_str(Pi, X)[1]\n last_eq = ''\n for i in range(3, n):\n last_eq += 'r' + str(i) + '^2+'\n last_eq += 'r' + str(n) + '^2 -1=0;'\n V += last_eq + '\\n'\n f = open('eq.txt', 'w+')\n f.write(V)\n f.write('end')\n f.close()\n\n\n<function token>\n\n\ndef intersting_boxes(curve, b):\n cer_intersting_boxes = []\n uncer_intersting_boxes = []\n for box in curve[0]:\n if b[0][0] <= box[0][0] <= box[0][1] <= b[0][1] and b[1][0] <= box[1][0\n ] <= box[1][1] <= b[1][1]:\n cer_intersting_boxes.append(box)\n for box in curve[1]:\n if b[0][0] <= box[0][0] <= box[0][1] <= b[0][1] and b[1][0] <= box[1][0\n ] <= box[1][1] <= b[1][1]:\n uncer_intersting_boxes.append(box)\n return [cer_intersting_boxes, uncer_intersting_boxes]\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef boxes_compare(box1, box2):\n flage = 0\n for i in range(len(box1) - 1, -1, -1):\n if box1[i][0] > box2[i][0]:\n return 1\n if box1[i][0] < box2[i][0]:\n return -1\n return 0\n\n\ndef boxes_sort(boxes):\n sorted_boxes = boxes[:]\n for i in range(len(boxes) - 1):\n for j in range(i + 1, len(boxes)):\n if boxes_compare(sorted_boxes[i], sorted_boxes[j]) == 1:\n sorted_boxes[i], sorted_boxes[j] = sorted_boxes[j\n ], sorted_boxes[i]\n return sorted_boxes\n\n\ndef connected_compnants(boxes):\n ftboxes = boxes[:]\n components = [[ftboxes[0]]]\n for i in range(1, len(ftboxes)):\n boxi_isused = 0\n for j in range(len(components)):\n membership = 0\n for k in range(len(components[j])):\n if d.boxes_intersection(ftboxes[i], components[j][k]) != []:\n components[j].append(ftboxes[i])\n membership = 1\n boxi_isused = 1\n break\n if membership == 1:\n break\n if boxi_isused == 0:\n components.append([ftboxes[i]])\n unused = list(range(len(components)))\n components1 = components[:]\n components2 = []\n while len(components1) != len(components2):\n for i in unused:\n for j in [j for j in list(range(i + 1, len(components))) if j in\n unused]:\n intersection_exists = False\n is_looping = True\n for boxi in components[i]:\n for boxj in components[j]:\n if d.boxes_intersection(boxi, boxj) != []:\n is_looping = False\n intersection_exists = True\n break\n if is_looping == False:\n break\n if intersection_exists == True:\n components[i] += components[j]\n unused.remove(j)\n components2 = components1[:]\n components1 = [components[k] for k in unused]\n return components1\n\n\ndef planner_connected_compnants(boxes):\n if len(boxes) == 0:\n return []\n ftboxes = boxes[:]\n components = [[ftboxes[0]]]\n for i in range(1, len(ftboxes)):\n boxi_isused = 0\n for j in range(len(components)):\n membership = 0\n for k in range(len(components[j])):\n if d.boxes_intersection(ftboxes[i][:2], components[j][k][:2]\n ) != []:\n components[j].append(ftboxes[i])\n membership = 1\n boxi_isused = 1\n break\n if membership == 1:\n break\n if boxi_isused == 0:\n components.append([ftboxes[i]])\n unused = list(range(len(components)))\n components1 = components[:]\n components2 = []\n while len(components1) != len(components2):\n for i in unused:\n for j in [j for j in list(range(i + 1, len(components))) if j in\n unused]:\n intersection_exists = False\n is_looping = True\n for boxi in components[i]:\n for boxj in components[j]:\n if d.boxes_intersection(boxi[:2], boxj[:2]) != []:\n is_looping = False\n intersection_exists = True\n break\n if is_looping == False:\n break\n if intersection_exists == True:\n components[i] += components[j]\n unused.remove(j)\n components2 = components1[:]\n components1 = [components[k] for k in unused]\n return components1\n\n\n<function token>\n<function token>\n\n\ndef intersect_in_2D(class1, class2, monotonicity=1):\n pl_intesected_pairs = []\n if monotonicity == 1:\n for i in range(len(class1)):\n for j in range(len(class2)):\n if d.boxes_intersection(class1[i][:2], class2[j][:2]) != [\n ] and d.boxes_intersection(class1[i], class2[j]) == []:\n if [class2[j], class1[i]] not in pl_intesected_pairs:\n pl_intesected_pairs.append([class1[i], class2[j]])\n elif monotonicity == 0:\n for i in range(len(class1)):\n for j in range(len(class2)):\n if d.boxes_intersection(class1[i][:2], class2[j][:2]) != []:\n if [class2[j], class1[i]] not in pl_intesected_pairs:\n pl_intesected_pairs.append([class1[i], class2[j]])\n elif monotonicity == 2:\n inters_indic = []\n for i in range(len(class1)):\n inters_indic.append([])\n for j in range(len(class2)):\n if d.boxes_intersection(class1[i][:2], class2[j][:2]) != []:\n inters_indic[i] = inters_indic[i] + [j]\n for k in range(len(class1)):\n if len(inters_indic[k]) > 3:\n for j in range(len(inters_indic[k])):\n if [class2[j], class1[k]] not in pl_intesected_pairs:\n pl_intesected_pairs.append([class1[k], class2[j]])\n return pl_intesected_pairs\n\n\ndef solving_fornodes(equations, boxes, B, X, eps=0.1):\n plane_components = detecting_nodes(boxes, B, equations, X, eps)\n g = open(equations, 'r')\n P = [Pi.replace('\\n', '') for Pi in g.readlines()]\n Ball_solutions = []\n for plane_component in plane_components:\n x1 = float(min([ai[0].lower() for ai in plane_component]))\n x2 = float(max([ai[0].upper() for ai in plane_component]))\n y1 = float(min([ai[1].lower() for ai in plane_component]))\n y2 = float(max([ai[1].upper() for ai in plane_component]))\n components = connected_compnants(plane_component)\n r = [[float(ri[0]), float(ri[1])] for ri in estimating_r(components)]\n t = estimating_t(components)\n t = [float(t[0]), float(t[1])]\n B_Ball = [[x1, x2], [y1, y2]] + r + [t]\n Ball_generating_system(P, B_Ball, X)\n solutionsi = ibex_output(P, B_Ball, X)\n Ball_solutions += solutionsi\n return Ball_solutions\n\n\n<function token>\n\n\ndef plane_subdivision(B):\n ft_B2 = d.subdivide([d.ftconstructor(Bi[0], Bi[1]) for Bi in B[:2]])\n normal_B2 = [d.ft_normal(Bi) for Bi in ft_B2]\n return d.cartesian_product(normal_B2, [B[2:]])\n\n\n<function token>\n<function token>\n\n\ndef computing_boxes():\n if 'infeasible' in open('output.txt', 'r').read():\n return 'Empty'\n content = open('output.txt', 'r').readlines()\n cer = []\n uncer = []\n i = 0\n Answer = []\n for fi in content:\n try:\n a = fi.index('(')\n b = fi.index(')')\n T = fi[a:b + 1].replace('(', '[')\n T = fi[a:b + 1].replace('(', '[')\n T = T.replace(')', ']')\n T = T.split(';')\n E = []\n i = 0\n for Ti in T:\n Ti = Ti.replace('[', '')\n Ti = Ti.replace(']', '')\n Ti = Ti.replace('<', '')\n Ti = Ti.replace('>', '')\n x = Ti.index(',')\n a = float(Ti[:x])\n b = float(Ti[x + 1:])\n E.append([])\n E[i] = [a, b]\n i += 1\n if 'solution n' in fi or 'boundary n' in fi:\n cer.append(E)\n elif 'unknown n' in fi:\n uncer.append(E)\n except ValueError:\n pass\n return [cer, uncer]\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef projection_checker(solutions):\n if len(solutions) == 0:\n return [[], []]\n m = len(solutions[0])\n n = int((m + 1) / 2)\n intersect_in2d = [[]] * len(solutions)\n for i in range(len(solutions) - 1):\n for j in range(i + 1, len(solutions)):\n if solutions[i] == solutions[j]:\n continue\n elif d.boxes_intersection(solutions[i][:2], solutions[j][:2]) != [\n ] and (d.boxes_intersection(solutions[i][n:2 * n - 2], [[-\n Bi[1], -Bi[0]] for Bi in solutions[j][n:2 * n - 2]]) == [] and\n d.boxes_intersection(solutions[i][n:2 * n - 2], [[Bi[0], Bi\n [1]] for Bi in solutions[j][n:2 * n - 2]]) == [] or d.\n boxes_intersection(solutions[i][2:n] + [solutions[i][2 * n -\n 2]], solutions[j][2:n] + [solutions[j][2 * n - 2]]) == []):\n intersect_in2d[i] = intersect_in2d[i] + [j]\n accepted = []\n acc_ind = []\n unaccepted = []\n unacc_ind = []\n for i in range(len(solutions)):\n if len(intersect_in2d[i]) == 0 and i not in unacc_ind + acc_ind:\n accepted.append(solutions[i])\n acc_ind.append(i)\n continue\n elif i not in unacc_ind + acc_ind:\n unaccepted.append(solutions[i])\n unacc_ind.append(i)\n for k in intersect_in2d[i]:\n if k not in unacc_ind:\n unaccepted.append(solutions[k])\n unacc_ind.append(k)\n return [accepted, unaccepted]\n\n\n<function token>\n<function token>\n\n\ndef checking_assumptions(curve_data):\n if len(curve_data[0][1]) != 0:\n return 0\n Ball_sols_ft = [[d.ftconstructor(Bi[0], Bi[1]) for Bi in B] for B in\n curve_data[1][0]] + [[d.ftconstructor(Bi[0], Bi[1]) for Bi in B] for\n B in curve_data[1][1]]\n alph3 = assum_alph3_checker(Ball_sols_ft)\n if alph3 == 1:\n return 1\n else:\n return 0\n\n\n<function token>\n<function token>\n\n\ndef assum_alph3_checker(solutions):\n comparing_list = [[]] * len(solutions)\n for i in range(len(solutions) - 1):\n for j in range(i + 1, len(solutions)):\n if d.boxes_intersection(solutions[i][:2], solutions[j][:2]) != []:\n comparing_list[i].append(j)\n comparing_list[j].append(i)\n matching = [len(T) for T in comparing_list]\n if max(matching) <= 2:\n return 1\n else:\n return 0\n\n\ndef plotting_3D(boxes, Box, var=[0, 1, 2]):\n ax = plt.figure().add_subplot(111, projection='3d')\n ax.set_xlim(Box[0][0], Box[0][1])\n ax.set_ylim(Box[1][0], Box[1][1])\n ax.set_zlim(Box[2][0], Box[2][1])\n ax.set_xlabel('x' + str(var[0] + 1))\n ax.set_ylabel('x' + str(var[1] + 1))\n ax.set_zlabel('x' + str(var[2] + 1))\n for box in boxes:\n V = [[box[j][0] for j in range(3)], [box[j][1] for j in range(3)]]\n points = list(itertools.product(*box))\n faces = [[points[0], points[2], points[6], points[4]], [points[0],\n points[2], points[3], points[1]], [points[0], points[1], points\n [5], points[4]], [points[2], points[3], points[7], points[6]],\n [points[1], points[3], points[7], points[5]]]\n ax.add_collection3d(Poly3DCollection(faces, facecolors='green',\n linewidths=1, edgecolors='green', alpha=0.25))\n plt.show()\n\n\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<docstring token>\n",
"<import token>\n<function token>\n<function token>\n\n\ndef Ball_solver(equations, B_Ball, X):\n L = [B_Ball]\n certified_boxes = []\n uncertified_boxes = []\n n = len(X)\n while len(L) != 0:\n solvability = 1\n if B_Ball[2 * n - 2][0] <= 0 <= B_Ball[2 * n - 2][1] and d.width([d\n .ftconstructor(Bi[0], Bi[1]) for Bi in L[0]]) < 0.1:\n Ball_cusp_gen(equations, B_Ball, X)\n elif (B_Ball[2 * n - 2][0] > 0 or 0 > B_Ball[2 * n - 2][1]\n ) and d.width([d.ftconstructor(Bi[0], Bi[1]) for Bi in L[0]]\n ) < 0.1:\n Ball_node_gen(equations, B_Ball, X)\n else:\n children = cb.plane_subdivision(L[0])\n L.remove(L[0])\n L += children\n solvability = 0\n if solvability == 1:\n ibex_output = cb.solving_with_ibex()\n if ibex_output[0] == 'Empty':\n L.remove(L[0])\n elif len(ibex_output[0]) != 0:\n certified_boxes += cb.computing_boxes(ibex_output[0])\n L.remove(L[0])\n elif len(ibex_output[1]) != 0:\n uncertified_boxes += cb.computing_boxes(ibex_output[1])\n L.remove(L[0])\n else:\n children = cb.plane_subdivision(L[0])\n L.remove(L[0])\n L += children\n return [certified_boxes, uncertified_boxes]\n\n\ndef SDP_str(P, X):\n n = len(X)\n P_pluse = P[:]\n P_minus = P[:]\n for i in range(2, n):\n P_pluse = P_pluse.replace('x' + str(i + 1), '(x' + str(i + 1) +\n '+ r' + str(i + 1) + '*sqrt(t))')\n P_minus = P_minus.replace('x' + str(i + 1), '(x' + str(i + 1) +\n '- r' + str(i + 1) + '*sqrt(t))')\n SP = '0.5*(' + P_pluse + '+' + P_minus + ')=0; \\n'\n DP = '0.5*(' + P_pluse + '- (' + P_minus + ') )/(sqrt(t))=0; \\n'\n return [SP, DP]\n\n\ndef Ball_generating_system(P, B_Ball, X, eps_min=0.001):\n n = len(X)\n V = ' Variables \\n '\n for i in range(n):\n if B_Ball[i][0] != B_Ball[i][1]:\n V += 'x' + str(i + 1) + ' in ' + str(B_Ball[i]) + ' ; \\n'\n else:\n V += 'x' + str(i + 1) + ' in ' + str([B_Ball[i][0] - eps_min, \n B_Ball[i][1] + eps_min]) + ' ; \\n'\n for i in range(n, 2 * n - 2):\n V += 'r' + str(i - n + 3) + ' in ' + str(B_Ball[i]) + ' ; \\n'\n V += 't' + ' in ' + str(B_Ball[2 * n - 2]) + ' ; \\n'\n V += 'Constraints \\n'\n for Pi in P:\n V += SDP_str(Pi, X)[0]\n V += SDP_str(Pi, X)[1]\n last_eq = ''\n for i in range(3, n):\n last_eq += 'r' + str(i) + '^2+'\n last_eq += 'r' + str(n) + '^2 -1=0;'\n V += last_eq + '\\n'\n f = open('eq.txt', 'w+')\n f.write(V)\n f.write('end')\n f.close()\n\n\n<function token>\n\n\ndef intersting_boxes(curve, b):\n cer_intersting_boxes = []\n uncer_intersting_boxes = []\n for box in curve[0]:\n if b[0][0] <= box[0][0] <= box[0][1] <= b[0][1] and b[1][0] <= box[1][0\n ] <= box[1][1] <= b[1][1]:\n cer_intersting_boxes.append(box)\n for box in curve[1]:\n if b[0][0] <= box[0][0] <= box[0][1] <= b[0][1] and b[1][0] <= box[1][0\n ] <= box[1][1] <= b[1][1]:\n uncer_intersting_boxes.append(box)\n return [cer_intersting_boxes, uncer_intersting_boxes]\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef boxes_compare(box1, box2):\n flage = 0\n for i in range(len(box1) - 1, -1, -1):\n if box1[i][0] > box2[i][0]:\n return 1\n if box1[i][0] < box2[i][0]:\n return -1\n return 0\n\n\ndef boxes_sort(boxes):\n sorted_boxes = boxes[:]\n for i in range(len(boxes) - 1):\n for j in range(i + 1, len(boxes)):\n if boxes_compare(sorted_boxes[i], sorted_boxes[j]) == 1:\n sorted_boxes[i], sorted_boxes[j] = sorted_boxes[j\n ], sorted_boxes[i]\n return sorted_boxes\n\n\ndef connected_compnants(boxes):\n ftboxes = boxes[:]\n components = [[ftboxes[0]]]\n for i in range(1, len(ftboxes)):\n boxi_isused = 0\n for j in range(len(components)):\n membership = 0\n for k in range(len(components[j])):\n if d.boxes_intersection(ftboxes[i], components[j][k]) != []:\n components[j].append(ftboxes[i])\n membership = 1\n boxi_isused = 1\n break\n if membership == 1:\n break\n if boxi_isused == 0:\n components.append([ftboxes[i]])\n unused = list(range(len(components)))\n components1 = components[:]\n components2 = []\n while len(components1) != len(components2):\n for i in unused:\n for j in [j for j in list(range(i + 1, len(components))) if j in\n unused]:\n intersection_exists = False\n is_looping = True\n for boxi in components[i]:\n for boxj in components[j]:\n if d.boxes_intersection(boxi, boxj) != []:\n is_looping = False\n intersection_exists = True\n break\n if is_looping == False:\n break\n if intersection_exists == True:\n components[i] += components[j]\n unused.remove(j)\n components2 = components1[:]\n components1 = [components[k] for k in unused]\n return components1\n\n\ndef planner_connected_compnants(boxes):\n if len(boxes) == 0:\n return []\n ftboxes = boxes[:]\n components = [[ftboxes[0]]]\n for i in range(1, len(ftboxes)):\n boxi_isused = 0\n for j in range(len(components)):\n membership = 0\n for k in range(len(components[j])):\n if d.boxes_intersection(ftboxes[i][:2], components[j][k][:2]\n ) != []:\n components[j].append(ftboxes[i])\n membership = 1\n boxi_isused = 1\n break\n if membership == 1:\n break\n if boxi_isused == 0:\n components.append([ftboxes[i]])\n unused = list(range(len(components)))\n components1 = components[:]\n components2 = []\n while len(components1) != len(components2):\n for i in unused:\n for j in [j for j in list(range(i + 1, len(components))) if j in\n unused]:\n intersection_exists = False\n is_looping = True\n for boxi in components[i]:\n for boxj in components[j]:\n if d.boxes_intersection(boxi[:2], boxj[:2]) != []:\n is_looping = False\n intersection_exists = True\n break\n if is_looping == False:\n break\n if intersection_exists == True:\n components[i] += components[j]\n unused.remove(j)\n components2 = components1[:]\n components1 = [components[k] for k in unused]\n return components1\n\n\n<function token>\n<function token>\n\n\ndef intersect_in_2D(class1, class2, monotonicity=1):\n pl_intesected_pairs = []\n if monotonicity == 1:\n for i in range(len(class1)):\n for j in range(len(class2)):\n if d.boxes_intersection(class1[i][:2], class2[j][:2]) != [\n ] and d.boxes_intersection(class1[i], class2[j]) == []:\n if [class2[j], class1[i]] not in pl_intesected_pairs:\n pl_intesected_pairs.append([class1[i], class2[j]])\n elif monotonicity == 0:\n for i in range(len(class1)):\n for j in range(len(class2)):\n if d.boxes_intersection(class1[i][:2], class2[j][:2]) != []:\n if [class2[j], class1[i]] not in pl_intesected_pairs:\n pl_intesected_pairs.append([class1[i], class2[j]])\n elif monotonicity == 2:\n inters_indic = []\n for i in range(len(class1)):\n inters_indic.append([])\n for j in range(len(class2)):\n if d.boxes_intersection(class1[i][:2], class2[j][:2]) != []:\n inters_indic[i] = inters_indic[i] + [j]\n for k in range(len(class1)):\n if len(inters_indic[k]) > 3:\n for j in range(len(inters_indic[k])):\n if [class2[j], class1[k]] not in pl_intesected_pairs:\n pl_intesected_pairs.append([class1[k], class2[j]])\n return pl_intesected_pairs\n\n\ndef solving_fornodes(equations, boxes, B, X, eps=0.1):\n plane_components = detecting_nodes(boxes, B, equations, X, eps)\n g = open(equations, 'r')\n P = [Pi.replace('\\n', '') for Pi in g.readlines()]\n Ball_solutions = []\n for plane_component in plane_components:\n x1 = float(min([ai[0].lower() for ai in plane_component]))\n x2 = float(max([ai[0].upper() for ai in plane_component]))\n y1 = float(min([ai[1].lower() for ai in plane_component]))\n y2 = float(max([ai[1].upper() for ai in plane_component]))\n components = connected_compnants(plane_component)\n r = [[float(ri[0]), float(ri[1])] for ri in estimating_r(components)]\n t = estimating_t(components)\n t = [float(t[0]), float(t[1])]\n B_Ball = [[x1, x2], [y1, y2]] + r + [t]\n Ball_generating_system(P, B_Ball, X)\n solutionsi = ibex_output(P, B_Ball, X)\n Ball_solutions += solutionsi\n return Ball_solutions\n\n\n<function token>\n\n\ndef plane_subdivision(B):\n ft_B2 = d.subdivide([d.ftconstructor(Bi[0], Bi[1]) for Bi in B[:2]])\n normal_B2 = [d.ft_normal(Bi) for Bi in ft_B2]\n return d.cartesian_product(normal_B2, [B[2:]])\n\n\n<function token>\n<function token>\n\n\ndef computing_boxes():\n if 'infeasible' in open('output.txt', 'r').read():\n return 'Empty'\n content = open('output.txt', 'r').readlines()\n cer = []\n uncer = []\n i = 0\n Answer = []\n for fi in content:\n try:\n a = fi.index('(')\n b = fi.index(')')\n T = fi[a:b + 1].replace('(', '[')\n T = fi[a:b + 1].replace('(', '[')\n T = T.replace(')', ']')\n T = T.split(';')\n E = []\n i = 0\n for Ti in T:\n Ti = Ti.replace('[', '')\n Ti = Ti.replace(']', '')\n Ti = Ti.replace('<', '')\n Ti = Ti.replace('>', '')\n x = Ti.index(',')\n a = float(Ti[:x])\n b = float(Ti[x + 1:])\n E.append([])\n E[i] = [a, b]\n i += 1\n if 'solution n' in fi or 'boundary n' in fi:\n cer.append(E)\n elif 'unknown n' in fi:\n uncer.append(E)\n except ValueError:\n pass\n return [cer, uncer]\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef projection_checker(solutions):\n if len(solutions) == 0:\n return [[], []]\n m = len(solutions[0])\n n = int((m + 1) / 2)\n intersect_in2d = [[]] * len(solutions)\n for i in range(len(solutions) - 1):\n for j in range(i + 1, len(solutions)):\n if solutions[i] == solutions[j]:\n continue\n elif d.boxes_intersection(solutions[i][:2], solutions[j][:2]) != [\n ] and (d.boxes_intersection(solutions[i][n:2 * n - 2], [[-\n Bi[1], -Bi[0]] for Bi in solutions[j][n:2 * n - 2]]) == [] and\n d.boxes_intersection(solutions[i][n:2 * n - 2], [[Bi[0], Bi\n [1]] for Bi in solutions[j][n:2 * n - 2]]) == [] or d.\n boxes_intersection(solutions[i][2:n] + [solutions[i][2 * n -\n 2]], solutions[j][2:n] + [solutions[j][2 * n - 2]]) == []):\n intersect_in2d[i] = intersect_in2d[i] + [j]\n accepted = []\n acc_ind = []\n unaccepted = []\n unacc_ind = []\n for i in range(len(solutions)):\n if len(intersect_in2d[i]) == 0 and i not in unacc_ind + acc_ind:\n accepted.append(solutions[i])\n acc_ind.append(i)\n continue\n elif i not in unacc_ind + acc_ind:\n unaccepted.append(solutions[i])\n unacc_ind.append(i)\n for k in intersect_in2d[i]:\n if k not in unacc_ind:\n unaccepted.append(solutions[k])\n unacc_ind.append(k)\n return [accepted, unaccepted]\n\n\n<function token>\n<function token>\n\n\ndef checking_assumptions(curve_data):\n if len(curve_data[0][1]) != 0:\n return 0\n Ball_sols_ft = [[d.ftconstructor(Bi[0], Bi[1]) for Bi in B] for B in\n curve_data[1][0]] + [[d.ftconstructor(Bi[0], Bi[1]) for Bi in B] for\n B in curve_data[1][1]]\n alph3 = assum_alph3_checker(Ball_sols_ft)\n if alph3 == 1:\n return 1\n else:\n return 0\n\n\n<function token>\n<function token>\n\n\ndef assum_alph3_checker(solutions):\n comparing_list = [[]] * len(solutions)\n for i in range(len(solutions) - 1):\n for j in range(i + 1, len(solutions)):\n if d.boxes_intersection(solutions[i][:2], solutions[j][:2]) != []:\n comparing_list[i].append(j)\n comparing_list[j].append(i)\n matching = [len(T) for T in comparing_list]\n if max(matching) <= 2:\n return 1\n else:\n return 0\n\n\ndef plotting_3D(boxes, Box, var=[0, 1, 2]):\n ax = plt.figure().add_subplot(111, projection='3d')\n ax.set_xlim(Box[0][0], Box[0][1])\n ax.set_ylim(Box[1][0], Box[1][1])\n ax.set_zlim(Box[2][0], Box[2][1])\n ax.set_xlabel('x' + str(var[0] + 1))\n ax.set_ylabel('x' + str(var[1] + 1))\n ax.set_zlabel('x' + str(var[2] + 1))\n for box in boxes:\n V = [[box[j][0] for j in range(3)], [box[j][1] for j in range(3)]]\n points = list(itertools.product(*box))\n faces = [[points[0], points[2], points[6], points[4]], [points[0],\n points[2], points[3], points[1]], [points[0], points[1], points\n [5], points[4]], [points[2], points[3], points[7], points[6]],\n [points[1], points[3], points[7], points[5]]]\n ax.add_collection3d(Poly3DCollection(faces, facecolors='green',\n linewidths=1, edgecolors='green', alpha=0.25))\n plt.show()\n\n\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<docstring token>\n",
"<import token>\n<function token>\n<function token>\n\n\ndef Ball_solver(equations, B_Ball, X):\n L = [B_Ball]\n certified_boxes = []\n uncertified_boxes = []\n n = len(X)\n while len(L) != 0:\n solvability = 1\n if B_Ball[2 * n - 2][0] <= 0 <= B_Ball[2 * n - 2][1] and d.width([d\n .ftconstructor(Bi[0], Bi[1]) for Bi in L[0]]) < 0.1:\n Ball_cusp_gen(equations, B_Ball, X)\n elif (B_Ball[2 * n - 2][0] > 0 or 0 > B_Ball[2 * n - 2][1]\n ) and d.width([d.ftconstructor(Bi[0], Bi[1]) for Bi in L[0]]\n ) < 0.1:\n Ball_node_gen(equations, B_Ball, X)\n else:\n children = cb.plane_subdivision(L[0])\n L.remove(L[0])\n L += children\n solvability = 0\n if solvability == 1:\n ibex_output = cb.solving_with_ibex()\n if ibex_output[0] == 'Empty':\n L.remove(L[0])\n elif len(ibex_output[0]) != 0:\n certified_boxes += cb.computing_boxes(ibex_output[0])\n L.remove(L[0])\n elif len(ibex_output[1]) != 0:\n uncertified_boxes += cb.computing_boxes(ibex_output[1])\n L.remove(L[0])\n else:\n children = cb.plane_subdivision(L[0])\n L.remove(L[0])\n L += children\n return [certified_boxes, uncertified_boxes]\n\n\n<function token>\n\n\ndef Ball_generating_system(P, B_Ball, X, eps_min=0.001):\n n = len(X)\n V = ' Variables \\n '\n for i in range(n):\n if B_Ball[i][0] != B_Ball[i][1]:\n V += 'x' + str(i + 1) + ' in ' + str(B_Ball[i]) + ' ; \\n'\n else:\n V += 'x' + str(i + 1) + ' in ' + str([B_Ball[i][0] - eps_min, \n B_Ball[i][1] + eps_min]) + ' ; \\n'\n for i in range(n, 2 * n - 2):\n V += 'r' + str(i - n + 3) + ' in ' + str(B_Ball[i]) + ' ; \\n'\n V += 't' + ' in ' + str(B_Ball[2 * n - 2]) + ' ; \\n'\n V += 'Constraints \\n'\n for Pi in P:\n V += SDP_str(Pi, X)[0]\n V += SDP_str(Pi, X)[1]\n last_eq = ''\n for i in range(3, n):\n last_eq += 'r' + str(i) + '^2+'\n last_eq += 'r' + str(n) + '^2 -1=0;'\n V += last_eq + '\\n'\n f = open('eq.txt', 'w+')\n f.write(V)\n f.write('end')\n f.close()\n\n\n<function token>\n\n\ndef intersting_boxes(curve, b):\n cer_intersting_boxes = []\n uncer_intersting_boxes = []\n for box in curve[0]:\n if b[0][0] <= box[0][0] <= box[0][1] <= b[0][1] and b[1][0] <= box[1][0\n ] <= box[1][1] <= b[1][1]:\n cer_intersting_boxes.append(box)\n for box in curve[1]:\n if b[0][0] <= box[0][0] <= box[0][1] <= b[0][1] and b[1][0] <= box[1][0\n ] <= box[1][1] <= b[1][1]:\n uncer_intersting_boxes.append(box)\n return [cer_intersting_boxes, uncer_intersting_boxes]\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef boxes_compare(box1, box2):\n flage = 0\n for i in range(len(box1) - 1, -1, -1):\n if box1[i][0] > box2[i][0]:\n return 1\n if box1[i][0] < box2[i][0]:\n return -1\n return 0\n\n\ndef boxes_sort(boxes):\n sorted_boxes = boxes[:]\n for i in range(len(boxes) - 1):\n for j in range(i + 1, len(boxes)):\n if boxes_compare(sorted_boxes[i], sorted_boxes[j]) == 1:\n sorted_boxes[i], sorted_boxes[j] = sorted_boxes[j\n ], sorted_boxes[i]\n return sorted_boxes\n\n\ndef connected_compnants(boxes):\n ftboxes = boxes[:]\n components = [[ftboxes[0]]]\n for i in range(1, len(ftboxes)):\n boxi_isused = 0\n for j in range(len(components)):\n membership = 0\n for k in range(len(components[j])):\n if d.boxes_intersection(ftboxes[i], components[j][k]) != []:\n components[j].append(ftboxes[i])\n membership = 1\n boxi_isused = 1\n break\n if membership == 1:\n break\n if boxi_isused == 0:\n components.append([ftboxes[i]])\n unused = list(range(len(components)))\n components1 = components[:]\n components2 = []\n while len(components1) != len(components2):\n for i in unused:\n for j in [j for j in list(range(i + 1, len(components))) if j in\n unused]:\n intersection_exists = False\n is_looping = True\n for boxi in components[i]:\n for boxj in components[j]:\n if d.boxes_intersection(boxi, boxj) != []:\n is_looping = False\n intersection_exists = True\n break\n if is_looping == False:\n break\n if intersection_exists == True:\n components[i] += components[j]\n unused.remove(j)\n components2 = components1[:]\n components1 = [components[k] for k in unused]\n return components1\n\n\ndef planner_connected_compnants(boxes):\n if len(boxes) == 0:\n return []\n ftboxes = boxes[:]\n components = [[ftboxes[0]]]\n for i in range(1, len(ftboxes)):\n boxi_isused = 0\n for j in range(len(components)):\n membership = 0\n for k in range(len(components[j])):\n if d.boxes_intersection(ftboxes[i][:2], components[j][k][:2]\n ) != []:\n components[j].append(ftboxes[i])\n membership = 1\n boxi_isused = 1\n break\n if membership == 1:\n break\n if boxi_isused == 0:\n components.append([ftboxes[i]])\n unused = list(range(len(components)))\n components1 = components[:]\n components2 = []\n while len(components1) != len(components2):\n for i in unused:\n for j in [j for j in list(range(i + 1, len(components))) if j in\n unused]:\n intersection_exists = False\n is_looping = True\n for boxi in components[i]:\n for boxj in components[j]:\n if d.boxes_intersection(boxi[:2], boxj[:2]) != []:\n is_looping = False\n intersection_exists = True\n break\n if is_looping == False:\n break\n if intersection_exists == True:\n components[i] += components[j]\n unused.remove(j)\n components2 = components1[:]\n components1 = [components[k] for k in unused]\n return components1\n\n\n<function token>\n<function token>\n\n\ndef intersect_in_2D(class1, class2, monotonicity=1):\n pl_intesected_pairs = []\n if monotonicity == 1:\n for i in range(len(class1)):\n for j in range(len(class2)):\n if d.boxes_intersection(class1[i][:2], class2[j][:2]) != [\n ] and d.boxes_intersection(class1[i], class2[j]) == []:\n if [class2[j], class1[i]] not in pl_intesected_pairs:\n pl_intesected_pairs.append([class1[i], class2[j]])\n elif monotonicity == 0:\n for i in range(len(class1)):\n for j in range(len(class2)):\n if d.boxes_intersection(class1[i][:2], class2[j][:2]) != []:\n if [class2[j], class1[i]] not in pl_intesected_pairs:\n pl_intesected_pairs.append([class1[i], class2[j]])\n elif monotonicity == 2:\n inters_indic = []\n for i in range(len(class1)):\n inters_indic.append([])\n for j in range(len(class2)):\n if d.boxes_intersection(class1[i][:2], class2[j][:2]) != []:\n inters_indic[i] = inters_indic[i] + [j]\n for k in range(len(class1)):\n if len(inters_indic[k]) > 3:\n for j in range(len(inters_indic[k])):\n if [class2[j], class1[k]] not in pl_intesected_pairs:\n pl_intesected_pairs.append([class1[k], class2[j]])\n return pl_intesected_pairs\n\n\ndef solving_fornodes(equations, boxes, B, X, eps=0.1):\n plane_components = detecting_nodes(boxes, B, equations, X, eps)\n g = open(equations, 'r')\n P = [Pi.replace('\\n', '') for Pi in g.readlines()]\n Ball_solutions = []\n for plane_component in plane_components:\n x1 = float(min([ai[0].lower() for ai in plane_component]))\n x2 = float(max([ai[0].upper() for ai in plane_component]))\n y1 = float(min([ai[1].lower() for ai in plane_component]))\n y2 = float(max([ai[1].upper() for ai in plane_component]))\n components = connected_compnants(plane_component)\n r = [[float(ri[0]), float(ri[1])] for ri in estimating_r(components)]\n t = estimating_t(components)\n t = [float(t[0]), float(t[1])]\n B_Ball = [[x1, x2], [y1, y2]] + r + [t]\n Ball_generating_system(P, B_Ball, X)\n solutionsi = ibex_output(P, B_Ball, X)\n Ball_solutions += solutionsi\n return Ball_solutions\n\n\n<function token>\n\n\ndef plane_subdivision(B):\n ft_B2 = d.subdivide([d.ftconstructor(Bi[0], Bi[1]) for Bi in B[:2]])\n normal_B2 = [d.ft_normal(Bi) for Bi in ft_B2]\n return d.cartesian_product(normal_B2, [B[2:]])\n\n\n<function token>\n<function token>\n\n\ndef computing_boxes():\n if 'infeasible' in open('output.txt', 'r').read():\n return 'Empty'\n content = open('output.txt', 'r').readlines()\n cer = []\n uncer = []\n i = 0\n Answer = []\n for fi in content:\n try:\n a = fi.index('(')\n b = fi.index(')')\n T = fi[a:b + 1].replace('(', '[')\n T = fi[a:b + 1].replace('(', '[')\n T = T.replace(')', ']')\n T = T.split(';')\n E = []\n i = 0\n for Ti in T:\n Ti = Ti.replace('[', '')\n Ti = Ti.replace(']', '')\n Ti = Ti.replace('<', '')\n Ti = Ti.replace('>', '')\n x = Ti.index(',')\n a = float(Ti[:x])\n b = float(Ti[x + 1:])\n E.append([])\n E[i] = [a, b]\n i += 1\n if 'solution n' in fi or 'boundary n' in fi:\n cer.append(E)\n elif 'unknown n' in fi:\n uncer.append(E)\n except ValueError:\n pass\n return [cer, uncer]\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef projection_checker(solutions):\n if len(solutions) == 0:\n return [[], []]\n m = len(solutions[0])\n n = int((m + 1) / 2)\n intersect_in2d = [[]] * len(solutions)\n for i in range(len(solutions) - 1):\n for j in range(i + 1, len(solutions)):\n if solutions[i] == solutions[j]:\n continue\n elif d.boxes_intersection(solutions[i][:2], solutions[j][:2]) != [\n ] and (d.boxes_intersection(solutions[i][n:2 * n - 2], [[-\n Bi[1], -Bi[0]] for Bi in solutions[j][n:2 * n - 2]]) == [] and\n d.boxes_intersection(solutions[i][n:2 * n - 2], [[Bi[0], Bi\n [1]] for Bi in solutions[j][n:2 * n - 2]]) == [] or d.\n boxes_intersection(solutions[i][2:n] + [solutions[i][2 * n -\n 2]], solutions[j][2:n] + [solutions[j][2 * n - 2]]) == []):\n intersect_in2d[i] = intersect_in2d[i] + [j]\n accepted = []\n acc_ind = []\n unaccepted = []\n unacc_ind = []\n for i in range(len(solutions)):\n if len(intersect_in2d[i]) == 0 and i not in unacc_ind + acc_ind:\n accepted.append(solutions[i])\n acc_ind.append(i)\n continue\n elif i not in unacc_ind + acc_ind:\n unaccepted.append(solutions[i])\n unacc_ind.append(i)\n for k in intersect_in2d[i]:\n if k not in unacc_ind:\n unaccepted.append(solutions[k])\n unacc_ind.append(k)\n return [accepted, unaccepted]\n\n\n<function token>\n<function token>\n\n\ndef checking_assumptions(curve_data):\n if len(curve_data[0][1]) != 0:\n return 0\n Ball_sols_ft = [[d.ftconstructor(Bi[0], Bi[1]) for Bi in B] for B in\n curve_data[1][0]] + [[d.ftconstructor(Bi[0], Bi[1]) for Bi in B] for\n B in curve_data[1][1]]\n alph3 = assum_alph3_checker(Ball_sols_ft)\n if alph3 == 1:\n return 1\n else:\n return 0\n\n\n<function token>\n<function token>\n\n\ndef assum_alph3_checker(solutions):\n comparing_list = [[]] * len(solutions)\n for i in range(len(solutions) - 1):\n for j in range(i + 1, len(solutions)):\n if d.boxes_intersection(solutions[i][:2], solutions[j][:2]) != []:\n comparing_list[i].append(j)\n comparing_list[j].append(i)\n matching = [len(T) for T in comparing_list]\n if max(matching) <= 2:\n return 1\n else:\n return 0\n\n\ndef plotting_3D(boxes, Box, var=[0, 1, 2]):\n ax = plt.figure().add_subplot(111, projection='3d')\n ax.set_xlim(Box[0][0], Box[0][1])\n ax.set_ylim(Box[1][0], Box[1][1])\n ax.set_zlim(Box[2][0], Box[2][1])\n ax.set_xlabel('x' + str(var[0] + 1))\n ax.set_ylabel('x' + str(var[1] + 1))\n ax.set_zlabel('x' + str(var[2] + 1))\n for box in boxes:\n V = [[box[j][0] for j in range(3)], [box[j][1] for j in range(3)]]\n points = list(itertools.product(*box))\n faces = [[points[0], points[2], points[6], points[4]], [points[0],\n points[2], points[3], points[1]], [points[0], points[1], points\n [5], points[4]], [points[2], points[3], points[7], points[6]],\n [points[1], points[3], points[7], points[5]]]\n ax.add_collection3d(Poly3DCollection(faces, facecolors='green',\n linewidths=1, edgecolors='green', alpha=0.25))\n plt.show()\n\n\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<docstring token>\n",
"<import token>\n<function token>\n<function token>\n\n\ndef Ball_solver(equations, B_Ball, X):\n L = [B_Ball]\n certified_boxes = []\n uncertified_boxes = []\n n = len(X)\n while len(L) != 0:\n solvability = 1\n if B_Ball[2 * n - 2][0] <= 0 <= B_Ball[2 * n - 2][1] and d.width([d\n .ftconstructor(Bi[0], Bi[1]) for Bi in L[0]]) < 0.1:\n Ball_cusp_gen(equations, B_Ball, X)\n elif (B_Ball[2 * n - 2][0] > 0 or 0 > B_Ball[2 * n - 2][1]\n ) and d.width([d.ftconstructor(Bi[0], Bi[1]) for Bi in L[0]]\n ) < 0.1:\n Ball_node_gen(equations, B_Ball, X)\n else:\n children = cb.plane_subdivision(L[0])\n L.remove(L[0])\n L += children\n solvability = 0\n if solvability == 1:\n ibex_output = cb.solving_with_ibex()\n if ibex_output[0] == 'Empty':\n L.remove(L[0])\n elif len(ibex_output[0]) != 0:\n certified_boxes += cb.computing_boxes(ibex_output[0])\n L.remove(L[0])\n elif len(ibex_output[1]) != 0:\n uncertified_boxes += cb.computing_boxes(ibex_output[1])\n L.remove(L[0])\n else:\n children = cb.plane_subdivision(L[0])\n L.remove(L[0])\n L += children\n return [certified_boxes, uncertified_boxes]\n\n\n<function token>\n\n\ndef Ball_generating_system(P, B_Ball, X, eps_min=0.001):\n n = len(X)\n V = ' Variables \\n '\n for i in range(n):\n if B_Ball[i][0] != B_Ball[i][1]:\n V += 'x' + str(i + 1) + ' in ' + str(B_Ball[i]) + ' ; \\n'\n else:\n V += 'x' + str(i + 1) + ' in ' + str([B_Ball[i][0] - eps_min, \n B_Ball[i][1] + eps_min]) + ' ; \\n'\n for i in range(n, 2 * n - 2):\n V += 'r' + str(i - n + 3) + ' in ' + str(B_Ball[i]) + ' ; \\n'\n V += 't' + ' in ' + str(B_Ball[2 * n - 2]) + ' ; \\n'\n V += 'Constraints \\n'\n for Pi in P:\n V += SDP_str(Pi, X)[0]\n V += SDP_str(Pi, X)[1]\n last_eq = ''\n for i in range(3, n):\n last_eq += 'r' + str(i) + '^2+'\n last_eq += 'r' + str(n) + '^2 -1=0;'\n V += last_eq + '\\n'\n f = open('eq.txt', 'w+')\n f.write(V)\n f.write('end')\n f.close()\n\n\n<function token>\n\n\ndef intersting_boxes(curve, b):\n cer_intersting_boxes = []\n uncer_intersting_boxes = []\n for box in curve[0]:\n if b[0][0] <= box[0][0] <= box[0][1] <= b[0][1] and b[1][0] <= box[1][0\n ] <= box[1][1] <= b[1][1]:\n cer_intersting_boxes.append(box)\n for box in curve[1]:\n if b[0][0] <= box[0][0] <= box[0][1] <= b[0][1] and b[1][0] <= box[1][0\n ] <= box[1][1] <= b[1][1]:\n uncer_intersting_boxes.append(box)\n return [cer_intersting_boxes, uncer_intersting_boxes]\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef boxes_compare(box1, box2):\n flage = 0\n for i in range(len(box1) - 1, -1, -1):\n if box1[i][0] > box2[i][0]:\n return 1\n if box1[i][0] < box2[i][0]:\n return -1\n return 0\n\n\ndef boxes_sort(boxes):\n sorted_boxes = boxes[:]\n for i in range(len(boxes) - 1):\n for j in range(i + 1, len(boxes)):\n if boxes_compare(sorted_boxes[i], sorted_boxes[j]) == 1:\n sorted_boxes[i], sorted_boxes[j] = sorted_boxes[j\n ], sorted_boxes[i]\n return sorted_boxes\n\n\ndef connected_compnants(boxes):\n ftboxes = boxes[:]\n components = [[ftboxes[0]]]\n for i in range(1, len(ftboxes)):\n boxi_isused = 0\n for j in range(len(components)):\n membership = 0\n for k in range(len(components[j])):\n if d.boxes_intersection(ftboxes[i], components[j][k]) != []:\n components[j].append(ftboxes[i])\n membership = 1\n boxi_isused = 1\n break\n if membership == 1:\n break\n if boxi_isused == 0:\n components.append([ftboxes[i]])\n unused = list(range(len(components)))\n components1 = components[:]\n components2 = []\n while len(components1) != len(components2):\n for i in unused:\n for j in [j for j in list(range(i + 1, len(components))) if j in\n unused]:\n intersection_exists = False\n is_looping = True\n for boxi in components[i]:\n for boxj in components[j]:\n if d.boxes_intersection(boxi, boxj) != []:\n is_looping = False\n intersection_exists = True\n break\n if is_looping == False:\n break\n if intersection_exists == True:\n components[i] += components[j]\n unused.remove(j)\n components2 = components1[:]\n components1 = [components[k] for k in unused]\n return components1\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef intersect_in_2D(class1, class2, monotonicity=1):\n pl_intesected_pairs = []\n if monotonicity == 1:\n for i in range(len(class1)):\n for j in range(len(class2)):\n if d.boxes_intersection(class1[i][:2], class2[j][:2]) != [\n ] and d.boxes_intersection(class1[i], class2[j]) == []:\n if [class2[j], class1[i]] not in pl_intesected_pairs:\n pl_intesected_pairs.append([class1[i], class2[j]])\n elif monotonicity == 0:\n for i in range(len(class1)):\n for j in range(len(class2)):\n if d.boxes_intersection(class1[i][:2], class2[j][:2]) != []:\n if [class2[j], class1[i]] not in pl_intesected_pairs:\n pl_intesected_pairs.append([class1[i], class2[j]])\n elif monotonicity == 2:\n inters_indic = []\n for i in range(len(class1)):\n inters_indic.append([])\n for j in range(len(class2)):\n if d.boxes_intersection(class1[i][:2], class2[j][:2]) != []:\n inters_indic[i] = inters_indic[i] + [j]\n for k in range(len(class1)):\n if len(inters_indic[k]) > 3:\n for j in range(len(inters_indic[k])):\n if [class2[j], class1[k]] not in pl_intesected_pairs:\n pl_intesected_pairs.append([class1[k], class2[j]])\n return pl_intesected_pairs\n\n\ndef solving_fornodes(equations, boxes, B, X, eps=0.1):\n plane_components = detecting_nodes(boxes, B, equations, X, eps)\n g = open(equations, 'r')\n P = [Pi.replace('\\n', '') for Pi in g.readlines()]\n Ball_solutions = []\n for plane_component in plane_components:\n x1 = float(min([ai[0].lower() for ai in plane_component]))\n x2 = float(max([ai[0].upper() for ai in plane_component]))\n y1 = float(min([ai[1].lower() for ai in plane_component]))\n y2 = float(max([ai[1].upper() for ai in plane_component]))\n components = connected_compnants(plane_component)\n r = [[float(ri[0]), float(ri[1])] for ri in estimating_r(components)]\n t = estimating_t(components)\n t = [float(t[0]), float(t[1])]\n B_Ball = [[x1, x2], [y1, y2]] + r + [t]\n Ball_generating_system(P, B_Ball, X)\n solutionsi = ibex_output(P, B_Ball, X)\n Ball_solutions += solutionsi\n return Ball_solutions\n\n\n<function token>\n\n\ndef plane_subdivision(B):\n ft_B2 = d.subdivide([d.ftconstructor(Bi[0], Bi[1]) for Bi in B[:2]])\n normal_B2 = [d.ft_normal(Bi) for Bi in ft_B2]\n return d.cartesian_product(normal_B2, [B[2:]])\n\n\n<function token>\n<function token>\n\n\ndef computing_boxes():\n if 'infeasible' in open('output.txt', 'r').read():\n return 'Empty'\n content = open('output.txt', 'r').readlines()\n cer = []\n uncer = []\n i = 0\n Answer = []\n for fi in content:\n try:\n a = fi.index('(')\n b = fi.index(')')\n T = fi[a:b + 1].replace('(', '[')\n T = fi[a:b + 1].replace('(', '[')\n T = T.replace(')', ']')\n T = T.split(';')\n E = []\n i = 0\n for Ti in T:\n Ti = Ti.replace('[', '')\n Ti = Ti.replace(']', '')\n Ti = Ti.replace('<', '')\n Ti = Ti.replace('>', '')\n x = Ti.index(',')\n a = float(Ti[:x])\n b = float(Ti[x + 1:])\n E.append([])\n E[i] = [a, b]\n i += 1\n if 'solution n' in fi or 'boundary n' in fi:\n cer.append(E)\n elif 'unknown n' in fi:\n uncer.append(E)\n except ValueError:\n pass\n return [cer, uncer]\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef projection_checker(solutions):\n if len(solutions) == 0:\n return [[], []]\n m = len(solutions[0])\n n = int((m + 1) / 2)\n intersect_in2d = [[]] * len(solutions)\n for i in range(len(solutions) - 1):\n for j in range(i + 1, len(solutions)):\n if solutions[i] == solutions[j]:\n continue\n elif d.boxes_intersection(solutions[i][:2], solutions[j][:2]) != [\n ] and (d.boxes_intersection(solutions[i][n:2 * n - 2], [[-\n Bi[1], -Bi[0]] for Bi in solutions[j][n:2 * n - 2]]) == [] and\n d.boxes_intersection(solutions[i][n:2 * n - 2], [[Bi[0], Bi\n [1]] for Bi in solutions[j][n:2 * n - 2]]) == [] or d.\n boxes_intersection(solutions[i][2:n] + [solutions[i][2 * n -\n 2]], solutions[j][2:n] + [solutions[j][2 * n - 2]]) == []):\n intersect_in2d[i] = intersect_in2d[i] + [j]\n accepted = []\n acc_ind = []\n unaccepted = []\n unacc_ind = []\n for i in range(len(solutions)):\n if len(intersect_in2d[i]) == 0 and i not in unacc_ind + acc_ind:\n accepted.append(solutions[i])\n acc_ind.append(i)\n continue\n elif i not in unacc_ind + acc_ind:\n unaccepted.append(solutions[i])\n unacc_ind.append(i)\n for k in intersect_in2d[i]:\n if k not in unacc_ind:\n unaccepted.append(solutions[k])\n unacc_ind.append(k)\n return [accepted, unaccepted]\n\n\n<function token>\n<function token>\n\n\ndef checking_assumptions(curve_data):\n if len(curve_data[0][1]) != 0:\n return 0\n Ball_sols_ft = [[d.ftconstructor(Bi[0], Bi[1]) for Bi in B] for B in\n curve_data[1][0]] + [[d.ftconstructor(Bi[0], Bi[1]) for Bi in B] for\n B in curve_data[1][1]]\n alph3 = assum_alph3_checker(Ball_sols_ft)\n if alph3 == 1:\n return 1\n else:\n return 0\n\n\n<function token>\n<function token>\n\n\ndef assum_alph3_checker(solutions):\n comparing_list = [[]] * len(solutions)\n for i in range(len(solutions) - 1):\n for j in range(i + 1, len(solutions)):\n if d.boxes_intersection(solutions[i][:2], solutions[j][:2]) != []:\n comparing_list[i].append(j)\n comparing_list[j].append(i)\n matching = [len(T) for T in comparing_list]\n if max(matching) <= 2:\n return 1\n else:\n return 0\n\n\ndef plotting_3D(boxes, Box, var=[0, 1, 2]):\n ax = plt.figure().add_subplot(111, projection='3d')\n ax.set_xlim(Box[0][0], Box[0][1])\n ax.set_ylim(Box[1][0], Box[1][1])\n ax.set_zlim(Box[2][0], Box[2][1])\n ax.set_xlabel('x' + str(var[0] + 1))\n ax.set_ylabel('x' + str(var[1] + 1))\n ax.set_zlabel('x' + str(var[2] + 1))\n for box in boxes:\n V = [[box[j][0] for j in range(3)], [box[j][1] for j in range(3)]]\n points = list(itertools.product(*box))\n faces = [[points[0], points[2], points[6], points[4]], [points[0],\n points[2], points[3], points[1]], [points[0], points[1], points\n [5], points[4]], [points[2], points[3], points[7], points[6]],\n [points[1], points[3], points[7], points[5]]]\n ax.add_collection3d(Poly3DCollection(faces, facecolors='green',\n linewidths=1, edgecolors='green', alpha=0.25))\n plt.show()\n\n\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<docstring token>\n",
"<import token>\n<function token>\n<function token>\n\n\ndef Ball_solver(equations, B_Ball, X):\n L = [B_Ball]\n certified_boxes = []\n uncertified_boxes = []\n n = len(X)\n while len(L) != 0:\n solvability = 1\n if B_Ball[2 * n - 2][0] <= 0 <= B_Ball[2 * n - 2][1] and d.width([d\n .ftconstructor(Bi[0], Bi[1]) for Bi in L[0]]) < 0.1:\n Ball_cusp_gen(equations, B_Ball, X)\n elif (B_Ball[2 * n - 2][0] > 0 or 0 > B_Ball[2 * n - 2][1]\n ) and d.width([d.ftconstructor(Bi[0], Bi[1]) for Bi in L[0]]\n ) < 0.1:\n Ball_node_gen(equations, B_Ball, X)\n else:\n children = cb.plane_subdivision(L[0])\n L.remove(L[0])\n L += children\n solvability = 0\n if solvability == 1:\n ibex_output = cb.solving_with_ibex()\n if ibex_output[0] == 'Empty':\n L.remove(L[0])\n elif len(ibex_output[0]) != 0:\n certified_boxes += cb.computing_boxes(ibex_output[0])\n L.remove(L[0])\n elif len(ibex_output[1]) != 0:\n uncertified_boxes += cb.computing_boxes(ibex_output[1])\n L.remove(L[0])\n else:\n children = cb.plane_subdivision(L[0])\n L.remove(L[0])\n L += children\n return [certified_boxes, uncertified_boxes]\n\n\n<function token>\n\n\ndef Ball_generating_system(P, B_Ball, X, eps_min=0.001):\n n = len(X)\n V = ' Variables \\n '\n for i in range(n):\n if B_Ball[i][0] != B_Ball[i][1]:\n V += 'x' + str(i + 1) + ' in ' + str(B_Ball[i]) + ' ; \\n'\n else:\n V += 'x' + str(i + 1) + ' in ' + str([B_Ball[i][0] - eps_min, \n B_Ball[i][1] + eps_min]) + ' ; \\n'\n for i in range(n, 2 * n - 2):\n V += 'r' + str(i - n + 3) + ' in ' + str(B_Ball[i]) + ' ; \\n'\n V += 't' + ' in ' + str(B_Ball[2 * n - 2]) + ' ; \\n'\n V += 'Constraints \\n'\n for Pi in P:\n V += SDP_str(Pi, X)[0]\n V += SDP_str(Pi, X)[1]\n last_eq = ''\n for i in range(3, n):\n last_eq += 'r' + str(i) + '^2+'\n last_eq += 'r' + str(n) + '^2 -1=0;'\n V += last_eq + '\\n'\n f = open('eq.txt', 'w+')\n f.write(V)\n f.write('end')\n f.close()\n\n\n<function token>\n\n\ndef intersting_boxes(curve, b):\n cer_intersting_boxes = []\n uncer_intersting_boxes = []\n for box in curve[0]:\n if b[0][0] <= box[0][0] <= box[0][1] <= b[0][1] and b[1][0] <= box[1][0\n ] <= box[1][1] <= b[1][1]:\n cer_intersting_boxes.append(box)\n for box in curve[1]:\n if b[0][0] <= box[0][0] <= box[0][1] <= b[0][1] and b[1][0] <= box[1][0\n ] <= box[1][1] <= b[1][1]:\n uncer_intersting_boxes.append(box)\n return [cer_intersting_boxes, uncer_intersting_boxes]\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef boxes_compare(box1, box2):\n flage = 0\n for i in range(len(box1) - 1, -1, -1):\n if box1[i][0] > box2[i][0]:\n return 1\n if box1[i][0] < box2[i][0]:\n return -1\n return 0\n\n\ndef boxes_sort(boxes):\n sorted_boxes = boxes[:]\n for i in range(len(boxes) - 1):\n for j in range(i + 1, len(boxes)):\n if boxes_compare(sorted_boxes[i], sorted_boxes[j]) == 1:\n sorted_boxes[i], sorted_boxes[j] = sorted_boxes[j\n ], sorted_boxes[i]\n return sorted_boxes\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef intersect_in_2D(class1, class2, monotonicity=1):\n pl_intesected_pairs = []\n if monotonicity == 1:\n for i in range(len(class1)):\n for j in range(len(class2)):\n if d.boxes_intersection(class1[i][:2], class2[j][:2]) != [\n ] and d.boxes_intersection(class1[i], class2[j]) == []:\n if [class2[j], class1[i]] not in pl_intesected_pairs:\n pl_intesected_pairs.append([class1[i], class2[j]])\n elif monotonicity == 0:\n for i in range(len(class1)):\n for j in range(len(class2)):\n if d.boxes_intersection(class1[i][:2], class2[j][:2]) != []:\n if [class2[j], class1[i]] not in pl_intesected_pairs:\n pl_intesected_pairs.append([class1[i], class2[j]])\n elif monotonicity == 2:\n inters_indic = []\n for i in range(len(class1)):\n inters_indic.append([])\n for j in range(len(class2)):\n if d.boxes_intersection(class1[i][:2], class2[j][:2]) != []:\n inters_indic[i] = inters_indic[i] + [j]\n for k in range(len(class1)):\n if len(inters_indic[k]) > 3:\n for j in range(len(inters_indic[k])):\n if [class2[j], class1[k]] not in pl_intesected_pairs:\n pl_intesected_pairs.append([class1[k], class2[j]])\n return pl_intesected_pairs\n\n\ndef solving_fornodes(equations, boxes, B, X, eps=0.1):\n plane_components = detecting_nodes(boxes, B, equations, X, eps)\n g = open(equations, 'r')\n P = [Pi.replace('\\n', '') for Pi in g.readlines()]\n Ball_solutions = []\n for plane_component in plane_components:\n x1 = float(min([ai[0].lower() for ai in plane_component]))\n x2 = float(max([ai[0].upper() for ai in plane_component]))\n y1 = float(min([ai[1].lower() for ai in plane_component]))\n y2 = float(max([ai[1].upper() for ai in plane_component]))\n components = connected_compnants(plane_component)\n r = [[float(ri[0]), float(ri[1])] for ri in estimating_r(components)]\n t = estimating_t(components)\n t = [float(t[0]), float(t[1])]\n B_Ball = [[x1, x2], [y1, y2]] + r + [t]\n Ball_generating_system(P, B_Ball, X)\n solutionsi = ibex_output(P, B_Ball, X)\n Ball_solutions += solutionsi\n return Ball_solutions\n\n\n<function token>\n\n\ndef plane_subdivision(B):\n ft_B2 = d.subdivide([d.ftconstructor(Bi[0], Bi[1]) for Bi in B[:2]])\n normal_B2 = [d.ft_normal(Bi) for Bi in ft_B2]\n return d.cartesian_product(normal_B2, [B[2:]])\n\n\n<function token>\n<function token>\n\n\ndef computing_boxes():\n if 'infeasible' in open('output.txt', 'r').read():\n return 'Empty'\n content = open('output.txt', 'r').readlines()\n cer = []\n uncer = []\n i = 0\n Answer = []\n for fi in content:\n try:\n a = fi.index('(')\n b = fi.index(')')\n T = fi[a:b + 1].replace('(', '[')\n T = fi[a:b + 1].replace('(', '[')\n T = T.replace(')', ']')\n T = T.split(';')\n E = []\n i = 0\n for Ti in T:\n Ti = Ti.replace('[', '')\n Ti = Ti.replace(']', '')\n Ti = Ti.replace('<', '')\n Ti = Ti.replace('>', '')\n x = Ti.index(',')\n a = float(Ti[:x])\n b = float(Ti[x + 1:])\n E.append([])\n E[i] = [a, b]\n i += 1\n if 'solution n' in fi or 'boundary n' in fi:\n cer.append(E)\n elif 'unknown n' in fi:\n uncer.append(E)\n except ValueError:\n pass\n return [cer, uncer]\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef projection_checker(solutions):\n if len(solutions) == 0:\n return [[], []]\n m = len(solutions[0])\n n = int((m + 1) / 2)\n intersect_in2d = [[]] * len(solutions)\n for i in range(len(solutions) - 1):\n for j in range(i + 1, len(solutions)):\n if solutions[i] == solutions[j]:\n continue\n elif d.boxes_intersection(solutions[i][:2], solutions[j][:2]) != [\n ] and (d.boxes_intersection(solutions[i][n:2 * n - 2], [[-\n Bi[1], -Bi[0]] for Bi in solutions[j][n:2 * n - 2]]) == [] and\n d.boxes_intersection(solutions[i][n:2 * n - 2], [[Bi[0], Bi\n [1]] for Bi in solutions[j][n:2 * n - 2]]) == [] or d.\n boxes_intersection(solutions[i][2:n] + [solutions[i][2 * n -\n 2]], solutions[j][2:n] + [solutions[j][2 * n - 2]]) == []):\n intersect_in2d[i] = intersect_in2d[i] + [j]\n accepted = []\n acc_ind = []\n unaccepted = []\n unacc_ind = []\n for i in range(len(solutions)):\n if len(intersect_in2d[i]) == 0 and i not in unacc_ind + acc_ind:\n accepted.append(solutions[i])\n acc_ind.append(i)\n continue\n elif i not in unacc_ind + acc_ind:\n unaccepted.append(solutions[i])\n unacc_ind.append(i)\n for k in intersect_in2d[i]:\n if k not in unacc_ind:\n unaccepted.append(solutions[k])\n unacc_ind.append(k)\n return [accepted, unaccepted]\n\n\n<function token>\n<function token>\n\n\ndef checking_assumptions(curve_data):\n if len(curve_data[0][1]) != 0:\n return 0\n Ball_sols_ft = [[d.ftconstructor(Bi[0], Bi[1]) for Bi in B] for B in\n curve_data[1][0]] + [[d.ftconstructor(Bi[0], Bi[1]) for Bi in B] for\n B in curve_data[1][1]]\n alph3 = assum_alph3_checker(Ball_sols_ft)\n if alph3 == 1:\n return 1\n else:\n return 0\n\n\n<function token>\n<function token>\n\n\ndef assum_alph3_checker(solutions):\n comparing_list = [[]] * len(solutions)\n for i in range(len(solutions) - 1):\n for j in range(i + 1, len(solutions)):\n if d.boxes_intersection(solutions[i][:2], solutions[j][:2]) != []:\n comparing_list[i].append(j)\n comparing_list[j].append(i)\n matching = [len(T) for T in comparing_list]\n if max(matching) <= 2:\n return 1\n else:\n return 0\n\n\ndef plotting_3D(boxes, Box, var=[0, 1, 2]):\n ax = plt.figure().add_subplot(111, projection='3d')\n ax.set_xlim(Box[0][0], Box[0][1])\n ax.set_ylim(Box[1][0], Box[1][1])\n ax.set_zlim(Box[2][0], Box[2][1])\n ax.set_xlabel('x' + str(var[0] + 1))\n ax.set_ylabel('x' + str(var[1] + 1))\n ax.set_zlabel('x' + str(var[2] + 1))\n for box in boxes:\n V = [[box[j][0] for j in range(3)], [box[j][1] for j in range(3)]]\n points = list(itertools.product(*box))\n faces = [[points[0], points[2], points[6], points[4]], [points[0],\n points[2], points[3], points[1]], [points[0], points[1], points\n [5], points[4]], [points[2], points[3], points[7], points[6]],\n [points[1], points[3], points[7], points[5]]]\n ax.add_collection3d(Poly3DCollection(faces, facecolors='green',\n linewidths=1, edgecolors='green', alpha=0.25))\n plt.show()\n\n\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<docstring token>\n",
"<import token>\n<function token>\n<function token>\n\n\ndef Ball_solver(equations, B_Ball, X):\n L = [B_Ball]\n certified_boxes = []\n uncertified_boxes = []\n n = len(X)\n while len(L) != 0:\n solvability = 1\n if B_Ball[2 * n - 2][0] <= 0 <= B_Ball[2 * n - 2][1] and d.width([d\n .ftconstructor(Bi[0], Bi[1]) for Bi in L[0]]) < 0.1:\n Ball_cusp_gen(equations, B_Ball, X)\n elif (B_Ball[2 * n - 2][0] > 0 or 0 > B_Ball[2 * n - 2][1]\n ) and d.width([d.ftconstructor(Bi[0], Bi[1]) for Bi in L[0]]\n ) < 0.1:\n Ball_node_gen(equations, B_Ball, X)\n else:\n children = cb.plane_subdivision(L[0])\n L.remove(L[0])\n L += children\n solvability = 0\n if solvability == 1:\n ibex_output = cb.solving_with_ibex()\n if ibex_output[0] == 'Empty':\n L.remove(L[0])\n elif len(ibex_output[0]) != 0:\n certified_boxes += cb.computing_boxes(ibex_output[0])\n L.remove(L[0])\n elif len(ibex_output[1]) != 0:\n uncertified_boxes += cb.computing_boxes(ibex_output[1])\n L.remove(L[0])\n else:\n children = cb.plane_subdivision(L[0])\n L.remove(L[0])\n L += children\n return [certified_boxes, uncertified_boxes]\n\n\n<function token>\n\n\ndef Ball_generating_system(P, B_Ball, X, eps_min=0.001):\n n = len(X)\n V = ' Variables \\n '\n for i in range(n):\n if B_Ball[i][0] != B_Ball[i][1]:\n V += 'x' + str(i + 1) + ' in ' + str(B_Ball[i]) + ' ; \\n'\n else:\n V += 'x' + str(i + 1) + ' in ' + str([B_Ball[i][0] - eps_min, \n B_Ball[i][1] + eps_min]) + ' ; \\n'\n for i in range(n, 2 * n - 2):\n V += 'r' + str(i - n + 3) + ' in ' + str(B_Ball[i]) + ' ; \\n'\n V += 't' + ' in ' + str(B_Ball[2 * n - 2]) + ' ; \\n'\n V += 'Constraints \\n'\n for Pi in P:\n V += SDP_str(Pi, X)[0]\n V += SDP_str(Pi, X)[1]\n last_eq = ''\n for i in range(3, n):\n last_eq += 'r' + str(i) + '^2+'\n last_eq += 'r' + str(n) + '^2 -1=0;'\n V += last_eq + '\\n'\n f = open('eq.txt', 'w+')\n f.write(V)\n f.write('end')\n f.close()\n\n\n<function token>\n\n\ndef intersting_boxes(curve, b):\n cer_intersting_boxes = []\n uncer_intersting_boxes = []\n for box in curve[0]:\n if b[0][0] <= box[0][0] <= box[0][1] <= b[0][1] and b[1][0] <= box[1][0\n ] <= box[1][1] <= b[1][1]:\n cer_intersting_boxes.append(box)\n for box in curve[1]:\n if b[0][0] <= box[0][0] <= box[0][1] <= b[0][1] and b[1][0] <= box[1][0\n ] <= box[1][1] <= b[1][1]:\n uncer_intersting_boxes.append(box)\n return [cer_intersting_boxes, uncer_intersting_boxes]\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef boxes_compare(box1, box2):\n flage = 0\n for i in range(len(box1) - 1, -1, -1):\n if box1[i][0] > box2[i][0]:\n return 1\n if box1[i][0] < box2[i][0]:\n return -1\n return 0\n\n\ndef boxes_sort(boxes):\n sorted_boxes = boxes[:]\n for i in range(len(boxes) - 1):\n for j in range(i + 1, len(boxes)):\n if boxes_compare(sorted_boxes[i], sorted_boxes[j]) == 1:\n sorted_boxes[i], sorted_boxes[j] = sorted_boxes[j\n ], sorted_boxes[i]\n return sorted_boxes\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef intersect_in_2D(class1, class2, monotonicity=1):\n pl_intesected_pairs = []\n if monotonicity == 1:\n for i in range(len(class1)):\n for j in range(len(class2)):\n if d.boxes_intersection(class1[i][:2], class2[j][:2]) != [\n ] and d.boxes_intersection(class1[i], class2[j]) == []:\n if [class2[j], class1[i]] not in pl_intesected_pairs:\n pl_intesected_pairs.append([class1[i], class2[j]])\n elif monotonicity == 0:\n for i in range(len(class1)):\n for j in range(len(class2)):\n if d.boxes_intersection(class1[i][:2], class2[j][:2]) != []:\n if [class2[j], class1[i]] not in pl_intesected_pairs:\n pl_intesected_pairs.append([class1[i], class2[j]])\n elif monotonicity == 2:\n inters_indic = []\n for i in range(len(class1)):\n inters_indic.append([])\n for j in range(len(class2)):\n if d.boxes_intersection(class1[i][:2], class2[j][:2]) != []:\n inters_indic[i] = inters_indic[i] + [j]\n for k in range(len(class1)):\n if len(inters_indic[k]) > 3:\n for j in range(len(inters_indic[k])):\n if [class2[j], class1[k]] not in pl_intesected_pairs:\n pl_intesected_pairs.append([class1[k], class2[j]])\n return pl_intesected_pairs\n\n\ndef solving_fornodes(equations, boxes, B, X, eps=0.1):\n plane_components = detecting_nodes(boxes, B, equations, X, eps)\n g = open(equations, 'r')\n P = [Pi.replace('\\n', '') for Pi in g.readlines()]\n Ball_solutions = []\n for plane_component in plane_components:\n x1 = float(min([ai[0].lower() for ai in plane_component]))\n x2 = float(max([ai[0].upper() for ai in plane_component]))\n y1 = float(min([ai[1].lower() for ai in plane_component]))\n y2 = float(max([ai[1].upper() for ai in plane_component]))\n components = connected_compnants(plane_component)\n r = [[float(ri[0]), float(ri[1])] for ri in estimating_r(components)]\n t = estimating_t(components)\n t = [float(t[0]), float(t[1])]\n B_Ball = [[x1, x2], [y1, y2]] + r + [t]\n Ball_generating_system(P, B_Ball, X)\n solutionsi = ibex_output(P, B_Ball, X)\n Ball_solutions += solutionsi\n return Ball_solutions\n\n\n<function token>\n\n\ndef plane_subdivision(B):\n ft_B2 = d.subdivide([d.ftconstructor(Bi[0], Bi[1]) for Bi in B[:2]])\n normal_B2 = [d.ft_normal(Bi) for Bi in ft_B2]\n return d.cartesian_product(normal_B2, [B[2:]])\n\n\n<function token>\n<function token>\n\n\ndef computing_boxes():\n if 'infeasible' in open('output.txt', 'r').read():\n return 'Empty'\n content = open('output.txt', 'r').readlines()\n cer = []\n uncer = []\n i = 0\n Answer = []\n for fi in content:\n try:\n a = fi.index('(')\n b = fi.index(')')\n T = fi[a:b + 1].replace('(', '[')\n T = fi[a:b + 1].replace('(', '[')\n T = T.replace(')', ']')\n T = T.split(';')\n E = []\n i = 0\n for Ti in T:\n Ti = Ti.replace('[', '')\n Ti = Ti.replace(']', '')\n Ti = Ti.replace('<', '')\n Ti = Ti.replace('>', '')\n x = Ti.index(',')\n a = float(Ti[:x])\n b = float(Ti[x + 1:])\n E.append([])\n E[i] = [a, b]\n i += 1\n if 'solution n' in fi or 'boundary n' in fi:\n cer.append(E)\n elif 'unknown n' in fi:\n uncer.append(E)\n except ValueError:\n pass\n return [cer, uncer]\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef projection_checker(solutions):\n if len(solutions) == 0:\n return [[], []]\n m = len(solutions[0])\n n = int((m + 1) / 2)\n intersect_in2d = [[]] * len(solutions)\n for i in range(len(solutions) - 1):\n for j in range(i + 1, len(solutions)):\n if solutions[i] == solutions[j]:\n continue\n elif d.boxes_intersection(solutions[i][:2], solutions[j][:2]) != [\n ] and (d.boxes_intersection(solutions[i][n:2 * n - 2], [[-\n Bi[1], -Bi[0]] for Bi in solutions[j][n:2 * n - 2]]) == [] and\n d.boxes_intersection(solutions[i][n:2 * n - 2], [[Bi[0], Bi\n [1]] for Bi in solutions[j][n:2 * n - 2]]) == [] or d.\n boxes_intersection(solutions[i][2:n] + [solutions[i][2 * n -\n 2]], solutions[j][2:n] + [solutions[j][2 * n - 2]]) == []):\n intersect_in2d[i] = intersect_in2d[i] + [j]\n accepted = []\n acc_ind = []\n unaccepted = []\n unacc_ind = []\n for i in range(len(solutions)):\n if len(intersect_in2d[i]) == 0 and i not in unacc_ind + acc_ind:\n accepted.append(solutions[i])\n acc_ind.append(i)\n continue\n elif i not in unacc_ind + acc_ind:\n unaccepted.append(solutions[i])\n unacc_ind.append(i)\n for k in intersect_in2d[i]:\n if k not in unacc_ind:\n unaccepted.append(solutions[k])\n unacc_ind.append(k)\n return [accepted, unaccepted]\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef assum_alph3_checker(solutions):\n comparing_list = [[]] * len(solutions)\n for i in range(len(solutions) - 1):\n for j in range(i + 1, len(solutions)):\n if d.boxes_intersection(solutions[i][:2], solutions[j][:2]) != []:\n comparing_list[i].append(j)\n comparing_list[j].append(i)\n matching = [len(T) for T in comparing_list]\n if max(matching) <= 2:\n return 1\n else:\n return 0\n\n\ndef plotting_3D(boxes, Box, var=[0, 1, 2]):\n ax = plt.figure().add_subplot(111, projection='3d')\n ax.set_xlim(Box[0][0], Box[0][1])\n ax.set_ylim(Box[1][0], Box[1][1])\n ax.set_zlim(Box[2][0], Box[2][1])\n ax.set_xlabel('x' + str(var[0] + 1))\n ax.set_ylabel('x' + str(var[1] + 1))\n ax.set_zlabel('x' + str(var[2] + 1))\n for box in boxes:\n V = [[box[j][0] for j in range(3)], [box[j][1] for j in range(3)]]\n points = list(itertools.product(*box))\n faces = [[points[0], points[2], points[6], points[4]], [points[0],\n points[2], points[3], points[1]], [points[0], points[1], points\n [5], points[4]], [points[2], points[3], points[7], points[6]],\n [points[1], points[3], points[7], points[5]]]\n ax.add_collection3d(Poly3DCollection(faces, facecolors='green',\n linewidths=1, edgecolors='green', alpha=0.25))\n plt.show()\n\n\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<docstring token>\n",
"<import token>\n<function token>\n<function token>\n\n\ndef Ball_solver(equations, B_Ball, X):\n L = [B_Ball]\n certified_boxes = []\n uncertified_boxes = []\n n = len(X)\n while len(L) != 0:\n solvability = 1\n if B_Ball[2 * n - 2][0] <= 0 <= B_Ball[2 * n - 2][1] and d.width([d\n .ftconstructor(Bi[0], Bi[1]) for Bi in L[0]]) < 0.1:\n Ball_cusp_gen(equations, B_Ball, X)\n elif (B_Ball[2 * n - 2][0] > 0 or 0 > B_Ball[2 * n - 2][1]\n ) and d.width([d.ftconstructor(Bi[0], Bi[1]) for Bi in L[0]]\n ) < 0.1:\n Ball_node_gen(equations, B_Ball, X)\n else:\n children = cb.plane_subdivision(L[0])\n L.remove(L[0])\n L += children\n solvability = 0\n if solvability == 1:\n ibex_output = cb.solving_with_ibex()\n if ibex_output[0] == 'Empty':\n L.remove(L[0])\n elif len(ibex_output[0]) != 0:\n certified_boxes += cb.computing_boxes(ibex_output[0])\n L.remove(L[0])\n elif len(ibex_output[1]) != 0:\n uncertified_boxes += cb.computing_boxes(ibex_output[1])\n L.remove(L[0])\n else:\n children = cb.plane_subdivision(L[0])\n L.remove(L[0])\n L += children\n return [certified_boxes, uncertified_boxes]\n\n\n<function token>\n\n\ndef Ball_generating_system(P, B_Ball, X, eps_min=0.001):\n n = len(X)\n V = ' Variables \\n '\n for i in range(n):\n if B_Ball[i][0] != B_Ball[i][1]:\n V += 'x' + str(i + 1) + ' in ' + str(B_Ball[i]) + ' ; \\n'\n else:\n V += 'x' + str(i + 1) + ' in ' + str([B_Ball[i][0] - eps_min, \n B_Ball[i][1] + eps_min]) + ' ; \\n'\n for i in range(n, 2 * n - 2):\n V += 'r' + str(i - n + 3) + ' in ' + str(B_Ball[i]) + ' ; \\n'\n V += 't' + ' in ' + str(B_Ball[2 * n - 2]) + ' ; \\n'\n V += 'Constraints \\n'\n for Pi in P:\n V += SDP_str(Pi, X)[0]\n V += SDP_str(Pi, X)[1]\n last_eq = ''\n for i in range(3, n):\n last_eq += 'r' + str(i) + '^2+'\n last_eq += 'r' + str(n) + '^2 -1=0;'\n V += last_eq + '\\n'\n f = open('eq.txt', 'w+')\n f.write(V)\n f.write('end')\n f.close()\n\n\n<function token>\n\n\ndef intersting_boxes(curve, b):\n cer_intersting_boxes = []\n uncer_intersting_boxes = []\n for box in curve[0]:\n if b[0][0] <= box[0][0] <= box[0][1] <= b[0][1] and b[1][0] <= box[1][0\n ] <= box[1][1] <= b[1][1]:\n cer_intersting_boxes.append(box)\n for box in curve[1]:\n if b[0][0] <= box[0][0] <= box[0][1] <= b[0][1] and b[1][0] <= box[1][0\n ] <= box[1][1] <= b[1][1]:\n uncer_intersting_boxes.append(box)\n return [cer_intersting_boxes, uncer_intersting_boxes]\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef boxes_compare(box1, box2):\n flage = 0\n for i in range(len(box1) - 1, -1, -1):\n if box1[i][0] > box2[i][0]:\n return 1\n if box1[i][0] < box2[i][0]:\n return -1\n return 0\n\n\ndef boxes_sort(boxes):\n sorted_boxes = boxes[:]\n for i in range(len(boxes) - 1):\n for j in range(i + 1, len(boxes)):\n if boxes_compare(sorted_boxes[i], sorted_boxes[j]) == 1:\n sorted_boxes[i], sorted_boxes[j] = sorted_boxes[j\n ], sorted_boxes[i]\n return sorted_boxes\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef intersect_in_2D(class1, class2, monotonicity=1):\n pl_intesected_pairs = []\n if monotonicity == 1:\n for i in range(len(class1)):\n for j in range(len(class2)):\n if d.boxes_intersection(class1[i][:2], class2[j][:2]) != [\n ] and d.boxes_intersection(class1[i], class2[j]) == []:\n if [class2[j], class1[i]] not in pl_intesected_pairs:\n pl_intesected_pairs.append([class1[i], class2[j]])\n elif monotonicity == 0:\n for i in range(len(class1)):\n for j in range(len(class2)):\n if d.boxes_intersection(class1[i][:2], class2[j][:2]) != []:\n if [class2[j], class1[i]] not in pl_intesected_pairs:\n pl_intesected_pairs.append([class1[i], class2[j]])\n elif monotonicity == 2:\n inters_indic = []\n for i in range(len(class1)):\n inters_indic.append([])\n for j in range(len(class2)):\n if d.boxes_intersection(class1[i][:2], class2[j][:2]) != []:\n inters_indic[i] = inters_indic[i] + [j]\n for k in range(len(class1)):\n if len(inters_indic[k]) > 3:\n for j in range(len(inters_indic[k])):\n if [class2[j], class1[k]] not in pl_intesected_pairs:\n pl_intesected_pairs.append([class1[k], class2[j]])\n return pl_intesected_pairs\n\n\ndef solving_fornodes(equations, boxes, B, X, eps=0.1):\n plane_components = detecting_nodes(boxes, B, equations, X, eps)\n g = open(equations, 'r')\n P = [Pi.replace('\\n', '') for Pi in g.readlines()]\n Ball_solutions = []\n for plane_component in plane_components:\n x1 = float(min([ai[0].lower() for ai in plane_component]))\n x2 = float(max([ai[0].upper() for ai in plane_component]))\n y1 = float(min([ai[1].lower() for ai in plane_component]))\n y2 = float(max([ai[1].upper() for ai in plane_component]))\n components = connected_compnants(plane_component)\n r = [[float(ri[0]), float(ri[1])] for ri in estimating_r(components)]\n t = estimating_t(components)\n t = [float(t[0]), float(t[1])]\n B_Ball = [[x1, x2], [y1, y2]] + r + [t]\n Ball_generating_system(P, B_Ball, X)\n solutionsi = ibex_output(P, B_Ball, X)\n Ball_solutions += solutionsi\n return Ball_solutions\n\n\n<function token>\n\n\ndef plane_subdivision(B):\n ft_B2 = d.subdivide([d.ftconstructor(Bi[0], Bi[1]) for Bi in B[:2]])\n normal_B2 = [d.ft_normal(Bi) for Bi in ft_B2]\n return d.cartesian_product(normal_B2, [B[2:]])\n\n\n<function token>\n<function token>\n\n\ndef computing_boxes():\n if 'infeasible' in open('output.txt', 'r').read():\n return 'Empty'\n content = open('output.txt', 'r').readlines()\n cer = []\n uncer = []\n i = 0\n Answer = []\n for fi in content:\n try:\n a = fi.index('(')\n b = fi.index(')')\n T = fi[a:b + 1].replace('(', '[')\n T = fi[a:b + 1].replace('(', '[')\n T = T.replace(')', ']')\n T = T.split(';')\n E = []\n i = 0\n for Ti in T:\n Ti = Ti.replace('[', '')\n Ti = Ti.replace(']', '')\n Ti = Ti.replace('<', '')\n Ti = Ti.replace('>', '')\n x = Ti.index(',')\n a = float(Ti[:x])\n b = float(Ti[x + 1:])\n E.append([])\n E[i] = [a, b]\n i += 1\n if 'solution n' in fi or 'boundary n' in fi:\n cer.append(E)\n elif 'unknown n' in fi:\n uncer.append(E)\n except ValueError:\n pass\n return [cer, uncer]\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef projection_checker(solutions):\n if len(solutions) == 0:\n return [[], []]\n m = len(solutions[0])\n n = int((m + 1) / 2)\n intersect_in2d = [[]] * len(solutions)\n for i in range(len(solutions) - 1):\n for j in range(i + 1, len(solutions)):\n if solutions[i] == solutions[j]:\n continue\n elif d.boxes_intersection(solutions[i][:2], solutions[j][:2]) != [\n ] and (d.boxes_intersection(solutions[i][n:2 * n - 2], [[-\n Bi[1], -Bi[0]] for Bi in solutions[j][n:2 * n - 2]]) == [] and\n d.boxes_intersection(solutions[i][n:2 * n - 2], [[Bi[0], Bi\n [1]] for Bi in solutions[j][n:2 * n - 2]]) == [] or d.\n boxes_intersection(solutions[i][2:n] + [solutions[i][2 * n -\n 2]], solutions[j][2:n] + [solutions[j][2 * n - 2]]) == []):\n intersect_in2d[i] = intersect_in2d[i] + [j]\n accepted = []\n acc_ind = []\n unaccepted = []\n unacc_ind = []\n for i in range(len(solutions)):\n if len(intersect_in2d[i]) == 0 and i not in unacc_ind + acc_ind:\n accepted.append(solutions[i])\n acc_ind.append(i)\n continue\n elif i not in unacc_ind + acc_ind:\n unaccepted.append(solutions[i])\n unacc_ind.append(i)\n for k in intersect_in2d[i]:\n if k not in unacc_ind:\n unaccepted.append(solutions[k])\n unacc_ind.append(k)\n return [accepted, unaccepted]\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef assum_alph3_checker(solutions):\n comparing_list = [[]] * len(solutions)\n for i in range(len(solutions) - 1):\n for j in range(i + 1, len(solutions)):\n if d.boxes_intersection(solutions[i][:2], solutions[j][:2]) != []:\n comparing_list[i].append(j)\n comparing_list[j].append(i)\n matching = [len(T) for T in comparing_list]\n if max(matching) <= 2:\n return 1\n else:\n return 0\n\n\n<function token>\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<docstring token>\n",
"<import token>\n<function token>\n<function token>\n\n\ndef Ball_solver(equations, B_Ball, X):\n L = [B_Ball]\n certified_boxes = []\n uncertified_boxes = []\n n = len(X)\n while len(L) != 0:\n solvability = 1\n if B_Ball[2 * n - 2][0] <= 0 <= B_Ball[2 * n - 2][1] and d.width([d\n .ftconstructor(Bi[0], Bi[1]) for Bi in L[0]]) < 0.1:\n Ball_cusp_gen(equations, B_Ball, X)\n elif (B_Ball[2 * n - 2][0] > 0 or 0 > B_Ball[2 * n - 2][1]\n ) and d.width([d.ftconstructor(Bi[0], Bi[1]) for Bi in L[0]]\n ) < 0.1:\n Ball_node_gen(equations, B_Ball, X)\n else:\n children = cb.plane_subdivision(L[0])\n L.remove(L[0])\n L += children\n solvability = 0\n if solvability == 1:\n ibex_output = cb.solving_with_ibex()\n if ibex_output[0] == 'Empty':\n L.remove(L[0])\n elif len(ibex_output[0]) != 0:\n certified_boxes += cb.computing_boxes(ibex_output[0])\n L.remove(L[0])\n elif len(ibex_output[1]) != 0:\n uncertified_boxes += cb.computing_boxes(ibex_output[1])\n L.remove(L[0])\n else:\n children = cb.plane_subdivision(L[0])\n L.remove(L[0])\n L += children\n return [certified_boxes, uncertified_boxes]\n\n\n<function token>\n\n\ndef Ball_generating_system(P, B_Ball, X, eps_min=0.001):\n n = len(X)\n V = ' Variables \\n '\n for i in range(n):\n if B_Ball[i][0] != B_Ball[i][1]:\n V += 'x' + str(i + 1) + ' in ' + str(B_Ball[i]) + ' ; \\n'\n else:\n V += 'x' + str(i + 1) + ' in ' + str([B_Ball[i][0] - eps_min, \n B_Ball[i][1] + eps_min]) + ' ; \\n'\n for i in range(n, 2 * n - 2):\n V += 'r' + str(i - n + 3) + ' in ' + str(B_Ball[i]) + ' ; \\n'\n V += 't' + ' in ' + str(B_Ball[2 * n - 2]) + ' ; \\n'\n V += 'Constraints \\n'\n for Pi in P:\n V += SDP_str(Pi, X)[0]\n V += SDP_str(Pi, X)[1]\n last_eq = ''\n for i in range(3, n):\n last_eq += 'r' + str(i) + '^2+'\n last_eq += 'r' + str(n) + '^2 -1=0;'\n V += last_eq + '\\n'\n f = open('eq.txt', 'w+')\n f.write(V)\n f.write('end')\n f.close()\n\n\n<function token>\n\n\ndef intersting_boxes(curve, b):\n cer_intersting_boxes = []\n uncer_intersting_boxes = []\n for box in curve[0]:\n if b[0][0] <= box[0][0] <= box[0][1] <= b[0][1] and b[1][0] <= box[1][0\n ] <= box[1][1] <= b[1][1]:\n cer_intersting_boxes.append(box)\n for box in curve[1]:\n if b[0][0] <= box[0][0] <= box[0][1] <= b[0][1] and b[1][0] <= box[1][0\n ] <= box[1][1] <= b[1][1]:\n uncer_intersting_boxes.append(box)\n return [cer_intersting_boxes, uncer_intersting_boxes]\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef boxes_compare(box1, box2):\n flage = 0\n for i in range(len(box1) - 1, -1, -1):\n if box1[i][0] > box2[i][0]:\n return 1\n if box1[i][0] < box2[i][0]:\n return -1\n return 0\n\n\ndef boxes_sort(boxes):\n sorted_boxes = boxes[:]\n for i in range(len(boxes) - 1):\n for j in range(i + 1, len(boxes)):\n if boxes_compare(sorted_boxes[i], sorted_boxes[j]) == 1:\n sorted_boxes[i], sorted_boxes[j] = sorted_boxes[j\n ], sorted_boxes[i]\n return sorted_boxes\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef solving_fornodes(equations, boxes, B, X, eps=0.1):\n plane_components = detecting_nodes(boxes, B, equations, X, eps)\n g = open(equations, 'r')\n P = [Pi.replace('\\n', '') for Pi in g.readlines()]\n Ball_solutions = []\n for plane_component in plane_components:\n x1 = float(min([ai[0].lower() for ai in plane_component]))\n x2 = float(max([ai[0].upper() for ai in plane_component]))\n y1 = float(min([ai[1].lower() for ai in plane_component]))\n y2 = float(max([ai[1].upper() for ai in plane_component]))\n components = connected_compnants(plane_component)\n r = [[float(ri[0]), float(ri[1])] for ri in estimating_r(components)]\n t = estimating_t(components)\n t = [float(t[0]), float(t[1])]\n B_Ball = [[x1, x2], [y1, y2]] + r + [t]\n Ball_generating_system(P, B_Ball, X)\n solutionsi = ibex_output(P, B_Ball, X)\n Ball_solutions += solutionsi\n return Ball_solutions\n\n\n<function token>\n\n\ndef plane_subdivision(B):\n ft_B2 = d.subdivide([d.ftconstructor(Bi[0], Bi[1]) for Bi in B[:2]])\n normal_B2 = [d.ft_normal(Bi) for Bi in ft_B2]\n return d.cartesian_product(normal_B2, [B[2:]])\n\n\n<function token>\n<function token>\n\n\ndef computing_boxes():\n if 'infeasible' in open('output.txt', 'r').read():\n return 'Empty'\n content = open('output.txt', 'r').readlines()\n cer = []\n uncer = []\n i = 0\n Answer = []\n for fi in content:\n try:\n a = fi.index('(')\n b = fi.index(')')\n T = fi[a:b + 1].replace('(', '[')\n T = fi[a:b + 1].replace('(', '[')\n T = T.replace(')', ']')\n T = T.split(';')\n E = []\n i = 0\n for Ti in T:\n Ti = Ti.replace('[', '')\n Ti = Ti.replace(']', '')\n Ti = Ti.replace('<', '')\n Ti = Ti.replace('>', '')\n x = Ti.index(',')\n a = float(Ti[:x])\n b = float(Ti[x + 1:])\n E.append([])\n E[i] = [a, b]\n i += 1\n if 'solution n' in fi or 'boundary n' in fi:\n cer.append(E)\n elif 'unknown n' in fi:\n uncer.append(E)\n except ValueError:\n pass\n return [cer, uncer]\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef projection_checker(solutions):\n if len(solutions) == 0:\n return [[], []]\n m = len(solutions[0])\n n = int((m + 1) / 2)\n intersect_in2d = [[]] * len(solutions)\n for i in range(len(solutions) - 1):\n for j in range(i + 1, len(solutions)):\n if solutions[i] == solutions[j]:\n continue\n elif d.boxes_intersection(solutions[i][:2], solutions[j][:2]) != [\n ] and (d.boxes_intersection(solutions[i][n:2 * n - 2], [[-\n Bi[1], -Bi[0]] for Bi in solutions[j][n:2 * n - 2]]) == [] and\n d.boxes_intersection(solutions[i][n:2 * n - 2], [[Bi[0], Bi\n [1]] for Bi in solutions[j][n:2 * n - 2]]) == [] or d.\n boxes_intersection(solutions[i][2:n] + [solutions[i][2 * n -\n 2]], solutions[j][2:n] + [solutions[j][2 * n - 2]]) == []):\n intersect_in2d[i] = intersect_in2d[i] + [j]\n accepted = []\n acc_ind = []\n unaccepted = []\n unacc_ind = []\n for i in range(len(solutions)):\n if len(intersect_in2d[i]) == 0 and i not in unacc_ind + acc_ind:\n accepted.append(solutions[i])\n acc_ind.append(i)\n continue\n elif i not in unacc_ind + acc_ind:\n unaccepted.append(solutions[i])\n unacc_ind.append(i)\n for k in intersect_in2d[i]:\n if k not in unacc_ind:\n unaccepted.append(solutions[k])\n unacc_ind.append(k)\n return [accepted, unaccepted]\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef assum_alph3_checker(solutions):\n comparing_list = [[]] * len(solutions)\n for i in range(len(solutions) - 1):\n for j in range(i + 1, len(solutions)):\n if d.boxes_intersection(solutions[i][:2], solutions[j][:2]) != []:\n comparing_list[i].append(j)\n comparing_list[j].append(i)\n matching = [len(T) for T in comparing_list]\n if max(matching) <= 2:\n return 1\n else:\n return 0\n\n\n<function token>\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<docstring token>\n",
"<import token>\n<function token>\n<function token>\n\n\ndef Ball_solver(equations, B_Ball, X):\n L = [B_Ball]\n certified_boxes = []\n uncertified_boxes = []\n n = len(X)\n while len(L) != 0:\n solvability = 1\n if B_Ball[2 * n - 2][0] <= 0 <= B_Ball[2 * n - 2][1] and d.width([d\n .ftconstructor(Bi[0], Bi[1]) for Bi in L[0]]) < 0.1:\n Ball_cusp_gen(equations, B_Ball, X)\n elif (B_Ball[2 * n - 2][0] > 0 or 0 > B_Ball[2 * n - 2][1]\n ) and d.width([d.ftconstructor(Bi[0], Bi[1]) for Bi in L[0]]\n ) < 0.1:\n Ball_node_gen(equations, B_Ball, X)\n else:\n children = cb.plane_subdivision(L[0])\n L.remove(L[0])\n L += children\n solvability = 0\n if solvability == 1:\n ibex_output = cb.solving_with_ibex()\n if ibex_output[0] == 'Empty':\n L.remove(L[0])\n elif len(ibex_output[0]) != 0:\n certified_boxes += cb.computing_boxes(ibex_output[0])\n L.remove(L[0])\n elif len(ibex_output[1]) != 0:\n uncertified_boxes += cb.computing_boxes(ibex_output[1])\n L.remove(L[0])\n else:\n children = cb.plane_subdivision(L[0])\n L.remove(L[0])\n L += children\n return [certified_boxes, uncertified_boxes]\n\n\n<function token>\n\n\ndef Ball_generating_system(P, B_Ball, X, eps_min=0.001):\n n = len(X)\n V = ' Variables \\n '\n for i in range(n):\n if B_Ball[i][0] != B_Ball[i][1]:\n V += 'x' + str(i + 1) + ' in ' + str(B_Ball[i]) + ' ; \\n'\n else:\n V += 'x' + str(i + 1) + ' in ' + str([B_Ball[i][0] - eps_min, \n B_Ball[i][1] + eps_min]) + ' ; \\n'\n for i in range(n, 2 * n - 2):\n V += 'r' + str(i - n + 3) + ' in ' + str(B_Ball[i]) + ' ; \\n'\n V += 't' + ' in ' + str(B_Ball[2 * n - 2]) + ' ; \\n'\n V += 'Constraints \\n'\n for Pi in P:\n V += SDP_str(Pi, X)[0]\n V += SDP_str(Pi, X)[1]\n last_eq = ''\n for i in range(3, n):\n last_eq += 'r' + str(i) + '^2+'\n last_eq += 'r' + str(n) + '^2 -1=0;'\n V += last_eq + '\\n'\n f = open('eq.txt', 'w+')\n f.write(V)\n f.write('end')\n f.close()\n\n\n<function token>\n\n\ndef intersting_boxes(curve, b):\n cer_intersting_boxes = []\n uncer_intersting_boxes = []\n for box in curve[0]:\n if b[0][0] <= box[0][0] <= box[0][1] <= b[0][1] and b[1][0] <= box[1][0\n ] <= box[1][1] <= b[1][1]:\n cer_intersting_boxes.append(box)\n for box in curve[1]:\n if b[0][0] <= box[0][0] <= box[0][1] <= b[0][1] and b[1][0] <= box[1][0\n ] <= box[1][1] <= b[1][1]:\n uncer_intersting_boxes.append(box)\n return [cer_intersting_boxes, uncer_intersting_boxes]\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef boxes_compare(box1, box2):\n flage = 0\n for i in range(len(box1) - 1, -1, -1):\n if box1[i][0] > box2[i][0]:\n return 1\n if box1[i][0] < box2[i][0]:\n return -1\n return 0\n\n\ndef boxes_sort(boxes):\n sorted_boxes = boxes[:]\n for i in range(len(boxes) - 1):\n for j in range(i + 1, len(boxes)):\n if boxes_compare(sorted_boxes[i], sorted_boxes[j]) == 1:\n sorted_boxes[i], sorted_boxes[j] = sorted_boxes[j\n ], sorted_boxes[i]\n return sorted_boxes\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef solving_fornodes(equations, boxes, B, X, eps=0.1):\n plane_components = detecting_nodes(boxes, B, equations, X, eps)\n g = open(equations, 'r')\n P = [Pi.replace('\\n', '') for Pi in g.readlines()]\n Ball_solutions = []\n for plane_component in plane_components:\n x1 = float(min([ai[0].lower() for ai in plane_component]))\n x2 = float(max([ai[0].upper() for ai in plane_component]))\n y1 = float(min([ai[1].lower() for ai in plane_component]))\n y2 = float(max([ai[1].upper() for ai in plane_component]))\n components = connected_compnants(plane_component)\n r = [[float(ri[0]), float(ri[1])] for ri in estimating_r(components)]\n t = estimating_t(components)\n t = [float(t[0]), float(t[1])]\n B_Ball = [[x1, x2], [y1, y2]] + r + [t]\n Ball_generating_system(P, B_Ball, X)\n solutionsi = ibex_output(P, B_Ball, X)\n Ball_solutions += solutionsi\n return Ball_solutions\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef computing_boxes():\n if 'infeasible' in open('output.txt', 'r').read():\n return 'Empty'\n content = open('output.txt', 'r').readlines()\n cer = []\n uncer = []\n i = 0\n Answer = []\n for fi in content:\n try:\n a = fi.index('(')\n b = fi.index(')')\n T = fi[a:b + 1].replace('(', '[')\n T = fi[a:b + 1].replace('(', '[')\n T = T.replace(')', ']')\n T = T.split(';')\n E = []\n i = 0\n for Ti in T:\n Ti = Ti.replace('[', '')\n Ti = Ti.replace(']', '')\n Ti = Ti.replace('<', '')\n Ti = Ti.replace('>', '')\n x = Ti.index(',')\n a = float(Ti[:x])\n b = float(Ti[x + 1:])\n E.append([])\n E[i] = [a, b]\n i += 1\n if 'solution n' in fi or 'boundary n' in fi:\n cer.append(E)\n elif 'unknown n' in fi:\n uncer.append(E)\n except ValueError:\n pass\n return [cer, uncer]\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef projection_checker(solutions):\n if len(solutions) == 0:\n return [[], []]\n m = len(solutions[0])\n n = int((m + 1) / 2)\n intersect_in2d = [[]] * len(solutions)\n for i in range(len(solutions) - 1):\n for j in range(i + 1, len(solutions)):\n if solutions[i] == solutions[j]:\n continue\n elif d.boxes_intersection(solutions[i][:2], solutions[j][:2]) != [\n ] and (d.boxes_intersection(solutions[i][n:2 * n - 2], [[-\n Bi[1], -Bi[0]] for Bi in solutions[j][n:2 * n - 2]]) == [] and\n d.boxes_intersection(solutions[i][n:2 * n - 2], [[Bi[0], Bi\n [1]] for Bi in solutions[j][n:2 * n - 2]]) == [] or d.\n boxes_intersection(solutions[i][2:n] + [solutions[i][2 * n -\n 2]], solutions[j][2:n] + [solutions[j][2 * n - 2]]) == []):\n intersect_in2d[i] = intersect_in2d[i] + [j]\n accepted = []\n acc_ind = []\n unaccepted = []\n unacc_ind = []\n for i in range(len(solutions)):\n if len(intersect_in2d[i]) == 0 and i not in unacc_ind + acc_ind:\n accepted.append(solutions[i])\n acc_ind.append(i)\n continue\n elif i not in unacc_ind + acc_ind:\n unaccepted.append(solutions[i])\n unacc_ind.append(i)\n for k in intersect_in2d[i]:\n if k not in unacc_ind:\n unaccepted.append(solutions[k])\n unacc_ind.append(k)\n return [accepted, unaccepted]\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef assum_alph3_checker(solutions):\n comparing_list = [[]] * len(solutions)\n for i in range(len(solutions) - 1):\n for j in range(i + 1, len(solutions)):\n if d.boxes_intersection(solutions[i][:2], solutions[j][:2]) != []:\n comparing_list[i].append(j)\n comparing_list[j].append(i)\n matching = [len(T) for T in comparing_list]\n if max(matching) <= 2:\n return 1\n else:\n return 0\n\n\n<function token>\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<docstring token>\n",
"<import token>\n<function token>\n<function token>\n\n\ndef Ball_solver(equations, B_Ball, X):\n L = [B_Ball]\n certified_boxes = []\n uncertified_boxes = []\n n = len(X)\n while len(L) != 0:\n solvability = 1\n if B_Ball[2 * n - 2][0] <= 0 <= B_Ball[2 * n - 2][1] and d.width([d\n .ftconstructor(Bi[0], Bi[1]) for Bi in L[0]]) < 0.1:\n Ball_cusp_gen(equations, B_Ball, X)\n elif (B_Ball[2 * n - 2][0] > 0 or 0 > B_Ball[2 * n - 2][1]\n ) and d.width([d.ftconstructor(Bi[0], Bi[1]) for Bi in L[0]]\n ) < 0.1:\n Ball_node_gen(equations, B_Ball, X)\n else:\n children = cb.plane_subdivision(L[0])\n L.remove(L[0])\n L += children\n solvability = 0\n if solvability == 1:\n ibex_output = cb.solving_with_ibex()\n if ibex_output[0] == 'Empty':\n L.remove(L[0])\n elif len(ibex_output[0]) != 0:\n certified_boxes += cb.computing_boxes(ibex_output[0])\n L.remove(L[0])\n elif len(ibex_output[1]) != 0:\n uncertified_boxes += cb.computing_boxes(ibex_output[1])\n L.remove(L[0])\n else:\n children = cb.plane_subdivision(L[0])\n L.remove(L[0])\n L += children\n return [certified_boxes, uncertified_boxes]\n\n\n<function token>\n\n\ndef Ball_generating_system(P, B_Ball, X, eps_min=0.001):\n n = len(X)\n V = ' Variables \\n '\n for i in range(n):\n if B_Ball[i][0] != B_Ball[i][1]:\n V += 'x' + str(i + 1) + ' in ' + str(B_Ball[i]) + ' ; \\n'\n else:\n V += 'x' + str(i + 1) + ' in ' + str([B_Ball[i][0] - eps_min, \n B_Ball[i][1] + eps_min]) + ' ; \\n'\n for i in range(n, 2 * n - 2):\n V += 'r' + str(i - n + 3) + ' in ' + str(B_Ball[i]) + ' ; \\n'\n V += 't' + ' in ' + str(B_Ball[2 * n - 2]) + ' ; \\n'\n V += 'Constraints \\n'\n for Pi in P:\n V += SDP_str(Pi, X)[0]\n V += SDP_str(Pi, X)[1]\n last_eq = ''\n for i in range(3, n):\n last_eq += 'r' + str(i) + '^2+'\n last_eq += 'r' + str(n) + '^2 -1=0;'\n V += last_eq + '\\n'\n f = open('eq.txt', 'w+')\n f.write(V)\n f.write('end')\n f.close()\n\n\n<function token>\n\n\ndef intersting_boxes(curve, b):\n cer_intersting_boxes = []\n uncer_intersting_boxes = []\n for box in curve[0]:\n if b[0][0] <= box[0][0] <= box[0][1] <= b[0][1] and b[1][0] <= box[1][0\n ] <= box[1][1] <= b[1][1]:\n cer_intersting_boxes.append(box)\n for box in curve[1]:\n if b[0][0] <= box[0][0] <= box[0][1] <= b[0][1] and b[1][0] <= box[1][0\n ] <= box[1][1] <= b[1][1]:\n uncer_intersting_boxes.append(box)\n return [cer_intersting_boxes, uncer_intersting_boxes]\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef boxes_compare(box1, box2):\n flage = 0\n for i in range(len(box1) - 1, -1, -1):\n if box1[i][0] > box2[i][0]:\n return 1\n if box1[i][0] < box2[i][0]:\n return -1\n return 0\n\n\ndef boxes_sort(boxes):\n sorted_boxes = boxes[:]\n for i in range(len(boxes) - 1):\n for j in range(i + 1, len(boxes)):\n if boxes_compare(sorted_boxes[i], sorted_boxes[j]) == 1:\n sorted_boxes[i], sorted_boxes[j] = sorted_boxes[j\n ], sorted_boxes[i]\n return sorted_boxes\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef computing_boxes():\n if 'infeasible' in open('output.txt', 'r').read():\n return 'Empty'\n content = open('output.txt', 'r').readlines()\n cer = []\n uncer = []\n i = 0\n Answer = []\n for fi in content:\n try:\n a = fi.index('(')\n b = fi.index(')')\n T = fi[a:b + 1].replace('(', '[')\n T = fi[a:b + 1].replace('(', '[')\n T = T.replace(')', ']')\n T = T.split(';')\n E = []\n i = 0\n for Ti in T:\n Ti = Ti.replace('[', '')\n Ti = Ti.replace(']', '')\n Ti = Ti.replace('<', '')\n Ti = Ti.replace('>', '')\n x = Ti.index(',')\n a = float(Ti[:x])\n b = float(Ti[x + 1:])\n E.append([])\n E[i] = [a, b]\n i += 1\n if 'solution n' in fi or 'boundary n' in fi:\n cer.append(E)\n elif 'unknown n' in fi:\n uncer.append(E)\n except ValueError:\n pass\n return [cer, uncer]\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef projection_checker(solutions):\n if len(solutions) == 0:\n return [[], []]\n m = len(solutions[0])\n n = int((m + 1) / 2)\n intersect_in2d = [[]] * len(solutions)\n for i in range(len(solutions) - 1):\n for j in range(i + 1, len(solutions)):\n if solutions[i] == solutions[j]:\n continue\n elif d.boxes_intersection(solutions[i][:2], solutions[j][:2]) != [\n ] and (d.boxes_intersection(solutions[i][n:2 * n - 2], [[-\n Bi[1], -Bi[0]] for Bi in solutions[j][n:2 * n - 2]]) == [] and\n d.boxes_intersection(solutions[i][n:2 * n - 2], [[Bi[0], Bi\n [1]] for Bi in solutions[j][n:2 * n - 2]]) == [] or d.\n boxes_intersection(solutions[i][2:n] + [solutions[i][2 * n -\n 2]], solutions[j][2:n] + [solutions[j][2 * n - 2]]) == []):\n intersect_in2d[i] = intersect_in2d[i] + [j]\n accepted = []\n acc_ind = []\n unaccepted = []\n unacc_ind = []\n for i in range(len(solutions)):\n if len(intersect_in2d[i]) == 0 and i not in unacc_ind + acc_ind:\n accepted.append(solutions[i])\n acc_ind.append(i)\n continue\n elif i not in unacc_ind + acc_ind:\n unaccepted.append(solutions[i])\n unacc_ind.append(i)\n for k in intersect_in2d[i]:\n if k not in unacc_ind:\n unaccepted.append(solutions[k])\n unacc_ind.append(k)\n return [accepted, unaccepted]\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef assum_alph3_checker(solutions):\n comparing_list = [[]] * len(solutions)\n for i in range(len(solutions) - 1):\n for j in range(i + 1, len(solutions)):\n if d.boxes_intersection(solutions[i][:2], solutions[j][:2]) != []:\n comparing_list[i].append(j)\n comparing_list[j].append(i)\n matching = [len(T) for T in comparing_list]\n if max(matching) <= 2:\n return 1\n else:\n return 0\n\n\n<function token>\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<docstring token>\n",
"<import token>\n<function token>\n<function token>\n\n\ndef Ball_solver(equations, B_Ball, X):\n L = [B_Ball]\n certified_boxes = []\n uncertified_boxes = []\n n = len(X)\n while len(L) != 0:\n solvability = 1\n if B_Ball[2 * n - 2][0] <= 0 <= B_Ball[2 * n - 2][1] and d.width([d\n .ftconstructor(Bi[0], Bi[1]) for Bi in L[0]]) < 0.1:\n Ball_cusp_gen(equations, B_Ball, X)\n elif (B_Ball[2 * n - 2][0] > 0 or 0 > B_Ball[2 * n - 2][1]\n ) and d.width([d.ftconstructor(Bi[0], Bi[1]) for Bi in L[0]]\n ) < 0.1:\n Ball_node_gen(equations, B_Ball, X)\n else:\n children = cb.plane_subdivision(L[0])\n L.remove(L[0])\n L += children\n solvability = 0\n if solvability == 1:\n ibex_output = cb.solving_with_ibex()\n if ibex_output[0] == 'Empty':\n L.remove(L[0])\n elif len(ibex_output[0]) != 0:\n certified_boxes += cb.computing_boxes(ibex_output[0])\n L.remove(L[0])\n elif len(ibex_output[1]) != 0:\n uncertified_boxes += cb.computing_boxes(ibex_output[1])\n L.remove(L[0])\n else:\n children = cb.plane_subdivision(L[0])\n L.remove(L[0])\n L += children\n return [certified_boxes, uncertified_boxes]\n\n\n<function token>\n\n\ndef Ball_generating_system(P, B_Ball, X, eps_min=0.001):\n n = len(X)\n V = ' Variables \\n '\n for i in range(n):\n if B_Ball[i][0] != B_Ball[i][1]:\n V += 'x' + str(i + 1) + ' in ' + str(B_Ball[i]) + ' ; \\n'\n else:\n V += 'x' + str(i + 1) + ' in ' + str([B_Ball[i][0] - eps_min, \n B_Ball[i][1] + eps_min]) + ' ; \\n'\n for i in range(n, 2 * n - 2):\n V += 'r' + str(i - n + 3) + ' in ' + str(B_Ball[i]) + ' ; \\n'\n V += 't' + ' in ' + str(B_Ball[2 * n - 2]) + ' ; \\n'\n V += 'Constraints \\n'\n for Pi in P:\n V += SDP_str(Pi, X)[0]\n V += SDP_str(Pi, X)[1]\n last_eq = ''\n for i in range(3, n):\n last_eq += 'r' + str(i) + '^2+'\n last_eq += 'r' + str(n) + '^2 -1=0;'\n V += last_eq + '\\n'\n f = open('eq.txt', 'w+')\n f.write(V)\n f.write('end')\n f.close()\n\n\n<function token>\n\n\ndef intersting_boxes(curve, b):\n cer_intersting_boxes = []\n uncer_intersting_boxes = []\n for box in curve[0]:\n if b[0][0] <= box[0][0] <= box[0][1] <= b[0][1] and b[1][0] <= box[1][0\n ] <= box[1][1] <= b[1][1]:\n cer_intersting_boxes.append(box)\n for box in curve[1]:\n if b[0][0] <= box[0][0] <= box[0][1] <= b[0][1] and b[1][0] <= box[1][0\n ] <= box[1][1] <= b[1][1]:\n uncer_intersting_boxes.append(box)\n return [cer_intersting_boxes, uncer_intersting_boxes]\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef boxes_compare(box1, box2):\n flage = 0\n for i in range(len(box1) - 1, -1, -1):\n if box1[i][0] > box2[i][0]:\n return 1\n if box1[i][0] < box2[i][0]:\n return -1\n return 0\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef computing_boxes():\n if 'infeasible' in open('output.txt', 'r').read():\n return 'Empty'\n content = open('output.txt', 'r').readlines()\n cer = []\n uncer = []\n i = 0\n Answer = []\n for fi in content:\n try:\n a = fi.index('(')\n b = fi.index(')')\n T = fi[a:b + 1].replace('(', '[')\n T = fi[a:b + 1].replace('(', '[')\n T = T.replace(')', ']')\n T = T.split(';')\n E = []\n i = 0\n for Ti in T:\n Ti = Ti.replace('[', '')\n Ti = Ti.replace(']', '')\n Ti = Ti.replace('<', '')\n Ti = Ti.replace('>', '')\n x = Ti.index(',')\n a = float(Ti[:x])\n b = float(Ti[x + 1:])\n E.append([])\n E[i] = [a, b]\n i += 1\n if 'solution n' in fi or 'boundary n' in fi:\n cer.append(E)\n elif 'unknown n' in fi:\n uncer.append(E)\n except ValueError:\n pass\n return [cer, uncer]\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef projection_checker(solutions):\n if len(solutions) == 0:\n return [[], []]\n m = len(solutions[0])\n n = int((m + 1) / 2)\n intersect_in2d = [[]] * len(solutions)\n for i in range(len(solutions) - 1):\n for j in range(i + 1, len(solutions)):\n if solutions[i] == solutions[j]:\n continue\n elif d.boxes_intersection(solutions[i][:2], solutions[j][:2]) != [\n ] and (d.boxes_intersection(solutions[i][n:2 * n - 2], [[-\n Bi[1], -Bi[0]] for Bi in solutions[j][n:2 * n - 2]]) == [] and\n d.boxes_intersection(solutions[i][n:2 * n - 2], [[Bi[0], Bi\n [1]] for Bi in solutions[j][n:2 * n - 2]]) == [] or d.\n boxes_intersection(solutions[i][2:n] + [solutions[i][2 * n -\n 2]], solutions[j][2:n] + [solutions[j][2 * n - 2]]) == []):\n intersect_in2d[i] = intersect_in2d[i] + [j]\n accepted = []\n acc_ind = []\n unaccepted = []\n unacc_ind = []\n for i in range(len(solutions)):\n if len(intersect_in2d[i]) == 0 and i not in unacc_ind + acc_ind:\n accepted.append(solutions[i])\n acc_ind.append(i)\n continue\n elif i not in unacc_ind + acc_ind:\n unaccepted.append(solutions[i])\n unacc_ind.append(i)\n for k in intersect_in2d[i]:\n if k not in unacc_ind:\n unaccepted.append(solutions[k])\n unacc_ind.append(k)\n return [accepted, unaccepted]\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef assum_alph3_checker(solutions):\n comparing_list = [[]] * len(solutions)\n for i in range(len(solutions) - 1):\n for j in range(i + 1, len(solutions)):\n if d.boxes_intersection(solutions[i][:2], solutions[j][:2]) != []:\n comparing_list[i].append(j)\n comparing_list[j].append(i)\n matching = [len(T) for T in comparing_list]\n if max(matching) <= 2:\n return 1\n else:\n return 0\n\n\n<function token>\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<docstring token>\n",
"<import token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef Ball_generating_system(P, B_Ball, X, eps_min=0.001):\n n = len(X)\n V = ' Variables \\n '\n for i in range(n):\n if B_Ball[i][0] != B_Ball[i][1]:\n V += 'x' + str(i + 1) + ' in ' + str(B_Ball[i]) + ' ; \\n'\n else:\n V += 'x' + str(i + 1) + ' in ' + str([B_Ball[i][0] - eps_min, \n B_Ball[i][1] + eps_min]) + ' ; \\n'\n for i in range(n, 2 * n - 2):\n V += 'r' + str(i - n + 3) + ' in ' + str(B_Ball[i]) + ' ; \\n'\n V += 't' + ' in ' + str(B_Ball[2 * n - 2]) + ' ; \\n'\n V += 'Constraints \\n'\n for Pi in P:\n V += SDP_str(Pi, X)[0]\n V += SDP_str(Pi, X)[1]\n last_eq = ''\n for i in range(3, n):\n last_eq += 'r' + str(i) + '^2+'\n last_eq += 'r' + str(n) + '^2 -1=0;'\n V += last_eq + '\\n'\n f = open('eq.txt', 'w+')\n f.write(V)\n f.write('end')\n f.close()\n\n\n<function token>\n\n\ndef intersting_boxes(curve, b):\n cer_intersting_boxes = []\n uncer_intersting_boxes = []\n for box in curve[0]:\n if b[0][0] <= box[0][0] <= box[0][1] <= b[0][1] and b[1][0] <= box[1][0\n ] <= box[1][1] <= b[1][1]:\n cer_intersting_boxes.append(box)\n for box in curve[1]:\n if b[0][0] <= box[0][0] <= box[0][1] <= b[0][1] and b[1][0] <= box[1][0\n ] <= box[1][1] <= b[1][1]:\n uncer_intersting_boxes.append(box)\n return [cer_intersting_boxes, uncer_intersting_boxes]\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef boxes_compare(box1, box2):\n flage = 0\n for i in range(len(box1) - 1, -1, -1):\n if box1[i][0] > box2[i][0]:\n return 1\n if box1[i][0] < box2[i][0]:\n return -1\n return 0\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef computing_boxes():\n if 'infeasible' in open('output.txt', 'r').read():\n return 'Empty'\n content = open('output.txt', 'r').readlines()\n cer = []\n uncer = []\n i = 0\n Answer = []\n for fi in content:\n try:\n a = fi.index('(')\n b = fi.index(')')\n T = fi[a:b + 1].replace('(', '[')\n T = fi[a:b + 1].replace('(', '[')\n T = T.replace(')', ']')\n T = T.split(';')\n E = []\n i = 0\n for Ti in T:\n Ti = Ti.replace('[', '')\n Ti = Ti.replace(']', '')\n Ti = Ti.replace('<', '')\n Ti = Ti.replace('>', '')\n x = Ti.index(',')\n a = float(Ti[:x])\n b = float(Ti[x + 1:])\n E.append([])\n E[i] = [a, b]\n i += 1\n if 'solution n' in fi or 'boundary n' in fi:\n cer.append(E)\n elif 'unknown n' in fi:\n uncer.append(E)\n except ValueError:\n pass\n return [cer, uncer]\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef projection_checker(solutions):\n if len(solutions) == 0:\n return [[], []]\n m = len(solutions[0])\n n = int((m + 1) / 2)\n intersect_in2d = [[]] * len(solutions)\n for i in range(len(solutions) - 1):\n for j in range(i + 1, len(solutions)):\n if solutions[i] == solutions[j]:\n continue\n elif d.boxes_intersection(solutions[i][:2], solutions[j][:2]) != [\n ] and (d.boxes_intersection(solutions[i][n:2 * n - 2], [[-\n Bi[1], -Bi[0]] for Bi in solutions[j][n:2 * n - 2]]) == [] and\n d.boxes_intersection(solutions[i][n:2 * n - 2], [[Bi[0], Bi\n [1]] for Bi in solutions[j][n:2 * n - 2]]) == [] or d.\n boxes_intersection(solutions[i][2:n] + [solutions[i][2 * n -\n 2]], solutions[j][2:n] + [solutions[j][2 * n - 2]]) == []):\n intersect_in2d[i] = intersect_in2d[i] + [j]\n accepted = []\n acc_ind = []\n unaccepted = []\n unacc_ind = []\n for i in range(len(solutions)):\n if len(intersect_in2d[i]) == 0 and i not in unacc_ind + acc_ind:\n accepted.append(solutions[i])\n acc_ind.append(i)\n continue\n elif i not in unacc_ind + acc_ind:\n unaccepted.append(solutions[i])\n unacc_ind.append(i)\n for k in intersect_in2d[i]:\n if k not in unacc_ind:\n unaccepted.append(solutions[k])\n unacc_ind.append(k)\n return [accepted, unaccepted]\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef assum_alph3_checker(solutions):\n comparing_list = [[]] * len(solutions)\n for i in range(len(solutions) - 1):\n for j in range(i + 1, len(solutions)):\n if d.boxes_intersection(solutions[i][:2], solutions[j][:2]) != []:\n comparing_list[i].append(j)\n comparing_list[j].append(i)\n matching = [len(T) for T in comparing_list]\n if max(matching) <= 2:\n return 1\n else:\n return 0\n\n\n<function token>\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<docstring token>\n",
"<import token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef Ball_generating_system(P, B_Ball, X, eps_min=0.001):\n n = len(X)\n V = ' Variables \\n '\n for i in range(n):\n if B_Ball[i][0] != B_Ball[i][1]:\n V += 'x' + str(i + 1) + ' in ' + str(B_Ball[i]) + ' ; \\n'\n else:\n V += 'x' + str(i + 1) + ' in ' + str([B_Ball[i][0] - eps_min, \n B_Ball[i][1] + eps_min]) + ' ; \\n'\n for i in range(n, 2 * n - 2):\n V += 'r' + str(i - n + 3) + ' in ' + str(B_Ball[i]) + ' ; \\n'\n V += 't' + ' in ' + str(B_Ball[2 * n - 2]) + ' ; \\n'\n V += 'Constraints \\n'\n for Pi in P:\n V += SDP_str(Pi, X)[0]\n V += SDP_str(Pi, X)[1]\n last_eq = ''\n for i in range(3, n):\n last_eq += 'r' + str(i) + '^2+'\n last_eq += 'r' + str(n) + '^2 -1=0;'\n V += last_eq + '\\n'\n f = open('eq.txt', 'w+')\n f.write(V)\n f.write('end')\n f.close()\n\n\n<function token>\n\n\ndef intersting_boxes(curve, b):\n cer_intersting_boxes = []\n uncer_intersting_boxes = []\n for box in curve[0]:\n if b[0][0] <= box[0][0] <= box[0][1] <= b[0][1] and b[1][0] <= box[1][0\n ] <= box[1][1] <= b[1][1]:\n cer_intersting_boxes.append(box)\n for box in curve[1]:\n if b[0][0] <= box[0][0] <= box[0][1] <= b[0][1] and b[1][0] <= box[1][0\n ] <= box[1][1] <= b[1][1]:\n uncer_intersting_boxes.append(box)\n return [cer_intersting_boxes, uncer_intersting_boxes]\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef computing_boxes():\n if 'infeasible' in open('output.txt', 'r').read():\n return 'Empty'\n content = open('output.txt', 'r').readlines()\n cer = []\n uncer = []\n i = 0\n Answer = []\n for fi in content:\n try:\n a = fi.index('(')\n b = fi.index(')')\n T = fi[a:b + 1].replace('(', '[')\n T = fi[a:b + 1].replace('(', '[')\n T = T.replace(')', ']')\n T = T.split(';')\n E = []\n i = 0\n for Ti in T:\n Ti = Ti.replace('[', '')\n Ti = Ti.replace(']', '')\n Ti = Ti.replace('<', '')\n Ti = Ti.replace('>', '')\n x = Ti.index(',')\n a = float(Ti[:x])\n b = float(Ti[x + 1:])\n E.append([])\n E[i] = [a, b]\n i += 1\n if 'solution n' in fi or 'boundary n' in fi:\n cer.append(E)\n elif 'unknown n' in fi:\n uncer.append(E)\n except ValueError:\n pass\n return [cer, uncer]\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef projection_checker(solutions):\n if len(solutions) == 0:\n return [[], []]\n m = len(solutions[0])\n n = int((m + 1) / 2)\n intersect_in2d = [[]] * len(solutions)\n for i in range(len(solutions) - 1):\n for j in range(i + 1, len(solutions)):\n if solutions[i] == solutions[j]:\n continue\n elif d.boxes_intersection(solutions[i][:2], solutions[j][:2]) != [\n ] and (d.boxes_intersection(solutions[i][n:2 * n - 2], [[-\n Bi[1], -Bi[0]] for Bi in solutions[j][n:2 * n - 2]]) == [] and\n d.boxes_intersection(solutions[i][n:2 * n - 2], [[Bi[0], Bi\n [1]] for Bi in solutions[j][n:2 * n - 2]]) == [] or d.\n boxes_intersection(solutions[i][2:n] + [solutions[i][2 * n -\n 2]], solutions[j][2:n] + [solutions[j][2 * n - 2]]) == []):\n intersect_in2d[i] = intersect_in2d[i] + [j]\n accepted = []\n acc_ind = []\n unaccepted = []\n unacc_ind = []\n for i in range(len(solutions)):\n if len(intersect_in2d[i]) == 0 and i not in unacc_ind + acc_ind:\n accepted.append(solutions[i])\n acc_ind.append(i)\n continue\n elif i not in unacc_ind + acc_ind:\n unaccepted.append(solutions[i])\n unacc_ind.append(i)\n for k in intersect_in2d[i]:\n if k not in unacc_ind:\n unaccepted.append(solutions[k])\n unacc_ind.append(k)\n return [accepted, unaccepted]\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef assum_alph3_checker(solutions):\n comparing_list = [[]] * len(solutions)\n for i in range(len(solutions) - 1):\n for j in range(i + 1, len(solutions)):\n if d.boxes_intersection(solutions[i][:2], solutions[j][:2]) != []:\n comparing_list[i].append(j)\n comparing_list[j].append(i)\n matching = [len(T) for T in comparing_list]\n if max(matching) <= 2:\n return 1\n else:\n return 0\n\n\n<function token>\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<docstring token>\n",
"<import token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef Ball_generating_system(P, B_Ball, X, eps_min=0.001):\n n = len(X)\n V = ' Variables \\n '\n for i in range(n):\n if B_Ball[i][0] != B_Ball[i][1]:\n V += 'x' + str(i + 1) + ' in ' + str(B_Ball[i]) + ' ; \\n'\n else:\n V += 'x' + str(i + 1) + ' in ' + str([B_Ball[i][0] - eps_min, \n B_Ball[i][1] + eps_min]) + ' ; \\n'\n for i in range(n, 2 * n - 2):\n V += 'r' + str(i - n + 3) + ' in ' + str(B_Ball[i]) + ' ; \\n'\n V += 't' + ' in ' + str(B_Ball[2 * n - 2]) + ' ; \\n'\n V += 'Constraints \\n'\n for Pi in P:\n V += SDP_str(Pi, X)[0]\n V += SDP_str(Pi, X)[1]\n last_eq = ''\n for i in range(3, n):\n last_eq += 'r' + str(i) + '^2+'\n last_eq += 'r' + str(n) + '^2 -1=0;'\n V += last_eq + '\\n'\n f = open('eq.txt', 'w+')\n f.write(V)\n f.write('end')\n f.close()\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef computing_boxes():\n if 'infeasible' in open('output.txt', 'r').read():\n return 'Empty'\n content = open('output.txt', 'r').readlines()\n cer = []\n uncer = []\n i = 0\n Answer = []\n for fi in content:\n try:\n a = fi.index('(')\n b = fi.index(')')\n T = fi[a:b + 1].replace('(', '[')\n T = fi[a:b + 1].replace('(', '[')\n T = T.replace(')', ']')\n T = T.split(';')\n E = []\n i = 0\n for Ti in T:\n Ti = Ti.replace('[', '')\n Ti = Ti.replace(']', '')\n Ti = Ti.replace('<', '')\n Ti = Ti.replace('>', '')\n x = Ti.index(',')\n a = float(Ti[:x])\n b = float(Ti[x + 1:])\n E.append([])\n E[i] = [a, b]\n i += 1\n if 'solution n' in fi or 'boundary n' in fi:\n cer.append(E)\n elif 'unknown n' in fi:\n uncer.append(E)\n except ValueError:\n pass\n return [cer, uncer]\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef projection_checker(solutions):\n if len(solutions) == 0:\n return [[], []]\n m = len(solutions[0])\n n = int((m + 1) / 2)\n intersect_in2d = [[]] * len(solutions)\n for i in range(len(solutions) - 1):\n for j in range(i + 1, len(solutions)):\n if solutions[i] == solutions[j]:\n continue\n elif d.boxes_intersection(solutions[i][:2], solutions[j][:2]) != [\n ] and (d.boxes_intersection(solutions[i][n:2 * n - 2], [[-\n Bi[1], -Bi[0]] for Bi in solutions[j][n:2 * n - 2]]) == [] and\n d.boxes_intersection(solutions[i][n:2 * n - 2], [[Bi[0], Bi\n [1]] for Bi in solutions[j][n:2 * n - 2]]) == [] or d.\n boxes_intersection(solutions[i][2:n] + [solutions[i][2 * n -\n 2]], solutions[j][2:n] + [solutions[j][2 * n - 2]]) == []):\n intersect_in2d[i] = intersect_in2d[i] + [j]\n accepted = []\n acc_ind = []\n unaccepted = []\n unacc_ind = []\n for i in range(len(solutions)):\n if len(intersect_in2d[i]) == 0 and i not in unacc_ind + acc_ind:\n accepted.append(solutions[i])\n acc_ind.append(i)\n continue\n elif i not in unacc_ind + acc_ind:\n unaccepted.append(solutions[i])\n unacc_ind.append(i)\n for k in intersect_in2d[i]:\n if k not in unacc_ind:\n unaccepted.append(solutions[k])\n unacc_ind.append(k)\n return [accepted, unaccepted]\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef assum_alph3_checker(solutions):\n comparing_list = [[]] * len(solutions)\n for i in range(len(solutions) - 1):\n for j in range(i + 1, len(solutions)):\n if d.boxes_intersection(solutions[i][:2], solutions[j][:2]) != []:\n comparing_list[i].append(j)\n comparing_list[j].append(i)\n matching = [len(T) for T in comparing_list]\n if max(matching) <= 2:\n return 1\n else:\n return 0\n\n\n<function token>\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<docstring token>\n",
"<import token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef Ball_generating_system(P, B_Ball, X, eps_min=0.001):\n n = len(X)\n V = ' Variables \\n '\n for i in range(n):\n if B_Ball[i][0] != B_Ball[i][1]:\n V += 'x' + str(i + 1) + ' in ' + str(B_Ball[i]) + ' ; \\n'\n else:\n V += 'x' + str(i + 1) + ' in ' + str([B_Ball[i][0] - eps_min, \n B_Ball[i][1] + eps_min]) + ' ; \\n'\n for i in range(n, 2 * n - 2):\n V += 'r' + str(i - n + 3) + ' in ' + str(B_Ball[i]) + ' ; \\n'\n V += 't' + ' in ' + str(B_Ball[2 * n - 2]) + ' ; \\n'\n V += 'Constraints \\n'\n for Pi in P:\n V += SDP_str(Pi, X)[0]\n V += SDP_str(Pi, X)[1]\n last_eq = ''\n for i in range(3, n):\n last_eq += 'r' + str(i) + '^2+'\n last_eq += 'r' + str(n) + '^2 -1=0;'\n V += last_eq + '\\n'\n f = open('eq.txt', 'w+')\n f.write(V)\n f.write('end')\n f.close()\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef projection_checker(solutions):\n if len(solutions) == 0:\n return [[], []]\n m = len(solutions[0])\n n = int((m + 1) / 2)\n intersect_in2d = [[]] * len(solutions)\n for i in range(len(solutions) - 1):\n for j in range(i + 1, len(solutions)):\n if solutions[i] == solutions[j]:\n continue\n elif d.boxes_intersection(solutions[i][:2], solutions[j][:2]) != [\n ] and (d.boxes_intersection(solutions[i][n:2 * n - 2], [[-\n Bi[1], -Bi[0]] for Bi in solutions[j][n:2 * n - 2]]) == [] and\n d.boxes_intersection(solutions[i][n:2 * n - 2], [[Bi[0], Bi\n [1]] for Bi in solutions[j][n:2 * n - 2]]) == [] or d.\n boxes_intersection(solutions[i][2:n] + [solutions[i][2 * n -\n 2]], solutions[j][2:n] + [solutions[j][2 * n - 2]]) == []):\n intersect_in2d[i] = intersect_in2d[i] + [j]\n accepted = []\n acc_ind = []\n unaccepted = []\n unacc_ind = []\n for i in range(len(solutions)):\n if len(intersect_in2d[i]) == 0 and i not in unacc_ind + acc_ind:\n accepted.append(solutions[i])\n acc_ind.append(i)\n continue\n elif i not in unacc_ind + acc_ind:\n unaccepted.append(solutions[i])\n unacc_ind.append(i)\n for k in intersect_in2d[i]:\n if k not in unacc_ind:\n unaccepted.append(solutions[k])\n unacc_ind.append(k)\n return [accepted, unaccepted]\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef assum_alph3_checker(solutions):\n comparing_list = [[]] * len(solutions)\n for i in range(len(solutions) - 1):\n for j in range(i + 1, len(solutions)):\n if d.boxes_intersection(solutions[i][:2], solutions[j][:2]) != []:\n comparing_list[i].append(j)\n comparing_list[j].append(i)\n matching = [len(T) for T in comparing_list]\n if max(matching) <= 2:\n return 1\n else:\n return 0\n\n\n<function token>\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<docstring token>\n",
"<import token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef Ball_generating_system(P, B_Ball, X, eps_min=0.001):\n n = len(X)\n V = ' Variables \\n '\n for i in range(n):\n if B_Ball[i][0] != B_Ball[i][1]:\n V += 'x' + str(i + 1) + ' in ' + str(B_Ball[i]) + ' ; \\n'\n else:\n V += 'x' + str(i + 1) + ' in ' + str([B_Ball[i][0] - eps_min, \n B_Ball[i][1] + eps_min]) + ' ; \\n'\n for i in range(n, 2 * n - 2):\n V += 'r' + str(i - n + 3) + ' in ' + str(B_Ball[i]) + ' ; \\n'\n V += 't' + ' in ' + str(B_Ball[2 * n - 2]) + ' ; \\n'\n V += 'Constraints \\n'\n for Pi in P:\n V += SDP_str(Pi, X)[0]\n V += SDP_str(Pi, X)[1]\n last_eq = ''\n for i in range(3, n):\n last_eq += 'r' + str(i) + '^2+'\n last_eq += 'r' + str(n) + '^2 -1=0;'\n V += last_eq + '\\n'\n f = open('eq.txt', 'w+')\n f.write(V)\n f.write('end')\n f.close()\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef projection_checker(solutions):\n if len(solutions) == 0:\n return [[], []]\n m = len(solutions[0])\n n = int((m + 1) / 2)\n intersect_in2d = [[]] * len(solutions)\n for i in range(len(solutions) - 1):\n for j in range(i + 1, len(solutions)):\n if solutions[i] == solutions[j]:\n continue\n elif d.boxes_intersection(solutions[i][:2], solutions[j][:2]) != [\n ] and (d.boxes_intersection(solutions[i][n:2 * n - 2], [[-\n Bi[1], -Bi[0]] for Bi in solutions[j][n:2 * n - 2]]) == [] and\n d.boxes_intersection(solutions[i][n:2 * n - 2], [[Bi[0], Bi\n [1]] for Bi in solutions[j][n:2 * n - 2]]) == [] or d.\n boxes_intersection(solutions[i][2:n] + [solutions[i][2 * n -\n 2]], solutions[j][2:n] + [solutions[j][2 * n - 2]]) == []):\n intersect_in2d[i] = intersect_in2d[i] + [j]\n accepted = []\n acc_ind = []\n unaccepted = []\n unacc_ind = []\n for i in range(len(solutions)):\n if len(intersect_in2d[i]) == 0 and i not in unacc_ind + acc_ind:\n accepted.append(solutions[i])\n acc_ind.append(i)\n continue\n elif i not in unacc_ind + acc_ind:\n unaccepted.append(solutions[i])\n unacc_ind.append(i)\n for k in intersect_in2d[i]:\n if k not in unacc_ind:\n unaccepted.append(solutions[k])\n unacc_ind.append(k)\n return [accepted, unaccepted]\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<docstring token>\n",
"<import token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef projection_checker(solutions):\n if len(solutions) == 0:\n return [[], []]\n m = len(solutions[0])\n n = int((m + 1) / 2)\n intersect_in2d = [[]] * len(solutions)\n for i in range(len(solutions) - 1):\n for j in range(i + 1, len(solutions)):\n if solutions[i] == solutions[j]:\n continue\n elif d.boxes_intersection(solutions[i][:2], solutions[j][:2]) != [\n ] and (d.boxes_intersection(solutions[i][n:2 * n - 2], [[-\n Bi[1], -Bi[0]] for Bi in solutions[j][n:2 * n - 2]]) == [] and\n d.boxes_intersection(solutions[i][n:2 * n - 2], [[Bi[0], Bi\n [1]] for Bi in solutions[j][n:2 * n - 2]]) == [] or d.\n boxes_intersection(solutions[i][2:n] + [solutions[i][2 * n -\n 2]], solutions[j][2:n] + [solutions[j][2 * n - 2]]) == []):\n intersect_in2d[i] = intersect_in2d[i] + [j]\n accepted = []\n acc_ind = []\n unaccepted = []\n unacc_ind = []\n for i in range(len(solutions)):\n if len(intersect_in2d[i]) == 0 and i not in unacc_ind + acc_ind:\n accepted.append(solutions[i])\n acc_ind.append(i)\n continue\n elif i not in unacc_ind + acc_ind:\n unaccepted.append(solutions[i])\n unacc_ind.append(i)\n for k in intersect_in2d[i]:\n if k not in unacc_ind:\n unaccepted.append(solutions[k])\n unacc_ind.append(k)\n return [accepted, unaccepted]\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<docstring token>\n",
"<import token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<docstring token>\n"
] | false |
98,384 |
4bf8dcc96cea641c3522ca1504713db71005a3fd
|
from timer import Timer
from sys import argv
timer = Timer()
try:
timer.pomodore(int(argv[1]), int(argv[2]))
except:
timer.timer(int(argv[1]))
|
[
"from timer import Timer\nfrom sys import argv\n\ntimer = Timer()\ntry:\n timer.pomodore(int(argv[1]), int(argv[2]))\nexcept: \n timer.timer(int(argv[1]))\n",
"from timer import Timer\nfrom sys import argv\ntimer = Timer()\ntry:\n timer.pomodore(int(argv[1]), int(argv[2]))\nexcept:\n timer.timer(int(argv[1]))\n",
"<import token>\ntimer = Timer()\ntry:\n timer.pomodore(int(argv[1]), int(argv[2]))\nexcept:\n timer.timer(int(argv[1]))\n",
"<import token>\n<assignment token>\ntry:\n timer.pomodore(int(argv[1]), int(argv[2]))\nexcept:\n timer.timer(int(argv[1]))\n",
"<import token>\n<assignment token>\n<code token>\n"
] | false |
98,385 |
579360a0fc56706ceaf3a2b0322fe136fbdbba13
|
#!/usr/bin/env python
from Mech_arm import Mech_arm
from PCA9685 import PCA9685
from time import sleep
if __name__ == '__main__':
pca = PCA9685()
motor_set_dic = {
"stage": 15,
"shoulder": 14,
"elbow": 13,
"wrist": 12
}
arm = Mech_arm(pca, motor_set_dic)
for i in range(5):
sleep(0.5)
arm.moveMotorByDutyCycleRelative('stage', 0.5);
arm.moveMotorByDutyCycleRelative('shoulder', 0.5);
arm.moveMotorByDutyCycleRelative('elbow', 0.5);
arm.moveMotorByDutyCycleRelative('wrist', 0.5);
for i in range(5):
sleep(0.5)
arm.moveMotorByDutyCycleRelative('stage', -0.5);
arm.moveMotorByDutyCycleRelative('shoulder', -0.5);
arm.moveMotorByDutyCycleRelative('elbow', -0.5);
arm.moveMotorByDutyCycleRelative('wrist', -0.5);
arm.moveMotorHome()
|
[
"#!/usr/bin/env python\n\nfrom Mech_arm import Mech_arm\nfrom PCA9685 import PCA9685\nfrom time import sleep\n\nif __name__ == '__main__':\n pca = PCA9685()\n motor_set_dic = {\n \"stage\": 15,\n \"shoulder\": 14,\n \"elbow\": 13,\n \"wrist\": 12\n }\n arm = Mech_arm(pca, motor_set_dic)\n for i in range(5):\n sleep(0.5)\n arm.moveMotorByDutyCycleRelative('stage', 0.5);\n arm.moveMotorByDutyCycleRelative('shoulder', 0.5);\n arm.moveMotorByDutyCycleRelative('elbow', 0.5);\n arm.moveMotorByDutyCycleRelative('wrist', 0.5);\n\n for i in range(5):\n sleep(0.5)\n arm.moveMotorByDutyCycleRelative('stage', -0.5);\n arm.moveMotorByDutyCycleRelative('shoulder', -0.5);\n arm.moveMotorByDutyCycleRelative('elbow', -0.5);\n arm.moveMotorByDutyCycleRelative('wrist', -0.5);\n\n arm.moveMotorHome()\n",
"from Mech_arm import Mech_arm\nfrom PCA9685 import PCA9685\nfrom time import sleep\nif __name__ == '__main__':\n pca = PCA9685()\n motor_set_dic = {'stage': 15, 'shoulder': 14, 'elbow': 13, 'wrist': 12}\n arm = Mech_arm(pca, motor_set_dic)\n for i in range(5):\n sleep(0.5)\n arm.moveMotorByDutyCycleRelative('stage', 0.5)\n arm.moveMotorByDutyCycleRelative('shoulder', 0.5)\n arm.moveMotorByDutyCycleRelative('elbow', 0.5)\n arm.moveMotorByDutyCycleRelative('wrist', 0.5)\n for i in range(5):\n sleep(0.5)\n arm.moveMotorByDutyCycleRelative('stage', -0.5)\n arm.moveMotorByDutyCycleRelative('shoulder', -0.5)\n arm.moveMotorByDutyCycleRelative('elbow', -0.5)\n arm.moveMotorByDutyCycleRelative('wrist', -0.5)\n arm.moveMotorHome()\n",
"<import token>\nif __name__ == '__main__':\n pca = PCA9685()\n motor_set_dic = {'stage': 15, 'shoulder': 14, 'elbow': 13, 'wrist': 12}\n arm = Mech_arm(pca, motor_set_dic)\n for i in range(5):\n sleep(0.5)\n arm.moveMotorByDutyCycleRelative('stage', 0.5)\n arm.moveMotorByDutyCycleRelative('shoulder', 0.5)\n arm.moveMotorByDutyCycleRelative('elbow', 0.5)\n arm.moveMotorByDutyCycleRelative('wrist', 0.5)\n for i in range(5):\n sleep(0.5)\n arm.moveMotorByDutyCycleRelative('stage', -0.5)\n arm.moveMotorByDutyCycleRelative('shoulder', -0.5)\n arm.moveMotorByDutyCycleRelative('elbow', -0.5)\n arm.moveMotorByDutyCycleRelative('wrist', -0.5)\n arm.moveMotorHome()\n",
"<import token>\n<code token>\n"
] | false |
98,386 |
92ddf5ab6b6bd57c44b0e3c3ea96882e95f3d647
|
#-----------------------------------------------------------------------------
# Title : PyRogue Cryo Amc Core
#-----------------------------------------------------------------------------
# File : _hmc305.py
# Created : 2017-04-03
#-----------------------------------------------------------------------------
# Description:
# PyRogue Cryo Amc Core
#-----------------------------------------------------------------------------
# This file is part of the rogue software platform. It is subject to
# the license terms in the LICENSE.txt file found in the top-level directory
# of this distribution and at:
# https://confluence.slac.stanford.edu/display/ppareg/LICENSE.html.
# No part of the rogue software platform, including this file, may be
# copied, modified, propagated, or distributed except according to the terms
# contained in the LICENSE.txt file.
#-----------------------------------------------------------------------------
import pyrogue as pr
class Hmc305(pr.Device):
def __init__( self,
name = "Hmc305",
description = "Hmc305 module",
**kwargs):
super().__init__(name=name, description=description, **kwargs)
devConfig = [
['DC[1]', 0x1C],
['DC[2]', 0x08],
['DC[3]', 0x04],
['DC[4]', 0x00],
['UC[1]', 0x18],
['UC[2]', 0x14],
['UC[3]', 0x10],
['UC[4]', 0x0C],
]
for i in range(8):
self.add(pr.RemoteVariable(
name = devConfig[i][0],
description = 'Hmc305 Device: Note that firmware does an invert and bit order swap to make the software interface with a LSB of 0.5dB',
offset = devConfig[i][1],
bitSize = 5,
mode = 'RW',
units = '0.5dB',
))
|
[
"#-----------------------------------------------------------------------------\n# Title : PyRogue Cryo Amc Core\n#-----------------------------------------------------------------------------\n# File : _hmc305.py\n# Created : 2017-04-03\n#-----------------------------------------------------------------------------\n# Description:\n# PyRogue Cryo Amc Core\n#-----------------------------------------------------------------------------\n# This file is part of the rogue software platform. It is subject to\n# the license terms in the LICENSE.txt file found in the top-level directory\n# of this distribution and at:\n# https://confluence.slac.stanford.edu/display/ppareg/LICENSE.html.\n# No part of the rogue software platform, including this file, may be\n# copied, modified, propagated, or distributed except according to the terms\n# contained in the LICENSE.txt file.\n#-----------------------------------------------------------------------------\n\nimport pyrogue as pr\n\nclass Hmc305(pr.Device):\n def __init__( self,\n name = \"Hmc305\",\n description = \"Hmc305 module\",\n **kwargs):\n super().__init__(name=name, description=description, **kwargs)\n\n devConfig = [\n ['DC[1]', 0x1C],\n ['DC[2]', 0x08],\n ['DC[3]', 0x04],\n ['DC[4]', 0x00],\n ['UC[1]', 0x18],\n ['UC[2]', 0x14],\n ['UC[3]', 0x10],\n ['UC[4]', 0x0C],\n ]\n\n for i in range(8):\n self.add(pr.RemoteVariable(\n name = devConfig[i][0],\n description = 'Hmc305 Device: Note that firmware does an invert and bit order swap to make the software interface with a LSB of 0.5dB',\n offset = devConfig[i][1],\n bitSize = 5,\n mode = 'RW',\n units = '0.5dB',\n ))\n",
"import pyrogue as pr\n\n\nclass Hmc305(pr.Device):\n\n def __init__(self, name='Hmc305', description='Hmc305 module', **kwargs):\n super().__init__(name=name, description=description, **kwargs)\n devConfig = [['DC[1]', 28], ['DC[2]', 8], ['DC[3]', 4], ['DC[4]', 0\n ], ['UC[1]', 24], ['UC[2]', 20], ['UC[3]', 16], ['UC[4]', 12]]\n for i in range(8):\n self.add(pr.RemoteVariable(name=devConfig[i][0], description=\n 'Hmc305 Device: Note that firmware does an invert and bit order swap to make the software interface with a LSB of 0.5dB'\n , offset=devConfig[i][1], bitSize=5, mode='RW', units='0.5dB'))\n",
"<import token>\n\n\nclass Hmc305(pr.Device):\n\n def __init__(self, name='Hmc305', description='Hmc305 module', **kwargs):\n super().__init__(name=name, description=description, **kwargs)\n devConfig = [['DC[1]', 28], ['DC[2]', 8], ['DC[3]', 4], ['DC[4]', 0\n ], ['UC[1]', 24], ['UC[2]', 20], ['UC[3]', 16], ['UC[4]', 12]]\n for i in range(8):\n self.add(pr.RemoteVariable(name=devConfig[i][0], description=\n 'Hmc305 Device: Note that firmware does an invert and bit order swap to make the software interface with a LSB of 0.5dB'\n , offset=devConfig[i][1], bitSize=5, mode='RW', units='0.5dB'))\n",
"<import token>\n\n\nclass Hmc305(pr.Device):\n <function token>\n",
"<import token>\n<class token>\n"
] | false |
98,387 |
f3491cbc3026443920f4a6d3a51430249e0e945f
|
#!/usr/bin/env python3
# Cálculo do IMC
familia = [
['Fabio', 1.82, 82],
['Juliana', 1.78,80],
['Taíssa', 1.77, 78],
['Erick', 1.20, 45],
['Gigi', 1.00, 25]
]
for linha in familia:
nome = linha[0]
altura = linha[1]
peso = linha[2]
imc = round(altura / peso**2, 5)
print('Nome:{}, Altura:{}, Peso:{}, IMC:{}'.format(nome, altura, peso, imc))
print()
# from numpy import array
#
# np_fam = array(familia)
#
# print(np_fam)
|
[
"#!/usr/bin/env python3\n\n# Cálculo do IMC\n\nfamilia = [\n ['Fabio', 1.82, 82],\n ['Juliana', 1.78,80],\n ['Taíssa', 1.77, 78],\n ['Erick', 1.20, 45],\n ['Gigi', 1.00, 25]\n]\n\nfor linha in familia:\n nome = linha[0]\n altura = linha[1]\n peso = linha[2]\n imc = round(altura / peso**2, 5)\n print('Nome:{}, Altura:{}, Peso:{}, IMC:{}'.format(nome, altura, peso, imc))\n\nprint()\n\n# from numpy import array\n#\n# np_fam = array(familia)\n#\n# print(np_fam)",
"familia = [['Fabio', 1.82, 82], ['Juliana', 1.78, 80], ['Taíssa', 1.77, 78],\n ['Erick', 1.2, 45], ['Gigi', 1.0, 25]]\nfor linha in familia:\n nome = linha[0]\n altura = linha[1]\n peso = linha[2]\n imc = round(altura / peso ** 2, 5)\n print('Nome:{}, Altura:{}, Peso:{}, IMC:{}'.format(nome, altura, peso, imc)\n )\nprint()\n",
"<assignment token>\nfor linha in familia:\n nome = linha[0]\n altura = linha[1]\n peso = linha[2]\n imc = round(altura / peso ** 2, 5)\n print('Nome:{}, Altura:{}, Peso:{}, IMC:{}'.format(nome, altura, peso, imc)\n )\nprint()\n",
"<assignment token>\n<code token>\n"
] | false |
98,388 |
0df2106ff73adc19ee205e1e058889071ff43211
|
print "3.0/.11 = "
print 3.0/.11
|
[
"print \"3.0/.11 = \"\n\nprint 3.0/.11\n\n"
] | true |
98,389 |
0c14feb71967204cfa09d01ed5ac8e2f9ebeaeff
|
def insertionSort(alist):
for i in range(1, len(alist)):
curr_val = alist[i]
j = i-1
while(j>=0 and alist[j]>curr_val):
alist[j+1] = alist[j]
j = j-1
alist[j+1] = curr_val
a = [2, 1, 9, 78, 4]
insertionSort(a)
print(a)
|
[
"def insertionSort(alist):\r\n for i in range(1, len(alist)):\r\n curr_val = alist[i]\r\n j = i-1\r\n while(j>=0 and alist[j]>curr_val):\r\n alist[j+1] = alist[j]\r\n j = j-1\r\n alist[j+1] = curr_val\r\n\r\na = [2, 1, 9, 78, 4]\r\ninsertionSort(a)\r\nprint(a)",
"def insertionSort(alist):\n for i in range(1, len(alist)):\n curr_val = alist[i]\n j = i - 1\n while j >= 0 and alist[j] > curr_val:\n alist[j + 1] = alist[j]\n j = j - 1\n alist[j + 1] = curr_val\n\n\na = [2, 1, 9, 78, 4]\ninsertionSort(a)\nprint(a)\n",
"def insertionSort(alist):\n for i in range(1, len(alist)):\n curr_val = alist[i]\n j = i - 1\n while j >= 0 and alist[j] > curr_val:\n alist[j + 1] = alist[j]\n j = j - 1\n alist[j + 1] = curr_val\n\n\n<assignment token>\ninsertionSort(a)\nprint(a)\n",
"def insertionSort(alist):\n for i in range(1, len(alist)):\n curr_val = alist[i]\n j = i - 1\n while j >= 0 and alist[j] > curr_val:\n alist[j + 1] = alist[j]\n j = j - 1\n alist[j + 1] = curr_val\n\n\n<assignment token>\n<code token>\n",
"<function token>\n<assignment token>\n<code token>\n"
] | false |
98,390 |
a053300548b0611e189fc33250da13a65d5f9bd7
|
from django.db import models
from django.core.validators import MaxValueValidator, MinValueValidator
class Project(models.Model):
name = models.CharField(max_length = 100)
def __str__(self):
return self.name
class Domain(models.Model):
quarter = models.IntegerField(default = 1, validators=[MaxValueValidator(3), MinValueValidator(1)])
name = models.CharField(max_length = 100)
department = models.CharField(max_length = 10)
project = models.ForeignKey(Project, on_delete=models.CASCADE)
def __str__(self):
return self.name
class Bug(models.Model):
#below fields are the main attributes
title = models.CharField(max_length = 200, blank = False)
risk = models.CharField(max_length = 10)
abstract = models.CharField(max_length = 300, blank = False)
impact = models.CharField(max_length = 400)
ease_of_exploitation = models.CharField(max_length = 10)
owasp_category = models.CharField(max_length = 100)
cvss = models.FloatField(blank = False)
cwe = models.IntegerField(blank = False)
domain = models.ForeignKey(Domain, on_delete=models.CASCADE)
recommendation = models.TextField(blank = False)
reference = models.TextField(blank = False)
poc = models.ImageField(upload_to = 'uploads/')
status = models.CharField(max_length = 10, choices = [('OPEN', 'OPEN'), ('CLOSE', 'CLOSE')], default = 'OPEN')
date = models.DateField(auto_now_add = True)
#below fields are the attributes required only for exporting in tracker (csv)
host_ip = models.CharField(max_length = 100)
port = models.CharField(max_length = 20)
def __str__(self):
return self.title
|
[
"from django.db import models\nfrom django.core.validators import MaxValueValidator, MinValueValidator\n\nclass Project(models.Model):\n name = models.CharField(max_length = 100)\n\n def __str__(self):\n return self.name\n\nclass Domain(models.Model):\n quarter = models.IntegerField(default = 1, validators=[MaxValueValidator(3), MinValueValidator(1)])\n name = models.CharField(max_length = 100)\n department = models.CharField(max_length = 10)\n project = models.ForeignKey(Project, on_delete=models.CASCADE)\n\n def __str__(self):\n return self.name\n\nclass Bug(models.Model):\n #below fields are the main attributes\n title = models.CharField(max_length = 200, blank = False)\n risk = models.CharField(max_length = 10)\n abstract = models.CharField(max_length = 300, blank = False)\n impact = models.CharField(max_length = 400)\n ease_of_exploitation = models.CharField(max_length = 10)\n owasp_category = models.CharField(max_length = 100)\n cvss = models.FloatField(blank = False)\n cwe = models.IntegerField(blank = False)\n domain = models.ForeignKey(Domain, on_delete=models.CASCADE)\n recommendation = models.TextField(blank = False)\n reference = models.TextField(blank = False)\n poc = models.ImageField(upload_to = 'uploads/')\n status = models.CharField(max_length = 10, choices = [('OPEN', 'OPEN'), ('CLOSE', 'CLOSE')], default = 'OPEN')\n date = models.DateField(auto_now_add = True)\n\n #below fields are the attributes required only for exporting in tracker (csv)\n host_ip = models.CharField(max_length = 100)\n port = models.CharField(max_length = 20)\n\n def __str__(self):\n return self.title",
"from django.db import models\nfrom django.core.validators import MaxValueValidator, MinValueValidator\n\n\nclass Project(models.Model):\n name = models.CharField(max_length=100)\n\n def __str__(self):\n return self.name\n\n\nclass Domain(models.Model):\n quarter = models.IntegerField(default=1, validators=[MaxValueValidator(\n 3), MinValueValidator(1)])\n name = models.CharField(max_length=100)\n department = models.CharField(max_length=10)\n project = models.ForeignKey(Project, on_delete=models.CASCADE)\n\n def __str__(self):\n return self.name\n\n\nclass Bug(models.Model):\n title = models.CharField(max_length=200, blank=False)\n risk = models.CharField(max_length=10)\n abstract = models.CharField(max_length=300, blank=False)\n impact = models.CharField(max_length=400)\n ease_of_exploitation = models.CharField(max_length=10)\n owasp_category = models.CharField(max_length=100)\n cvss = models.FloatField(blank=False)\n cwe = models.IntegerField(blank=False)\n domain = models.ForeignKey(Domain, on_delete=models.CASCADE)\n recommendation = models.TextField(blank=False)\n reference = models.TextField(blank=False)\n poc = models.ImageField(upload_to='uploads/')\n status = models.CharField(max_length=10, choices=[('OPEN', 'OPEN'), (\n 'CLOSE', 'CLOSE')], default='OPEN')\n date = models.DateField(auto_now_add=True)\n host_ip = models.CharField(max_length=100)\n port = models.CharField(max_length=20)\n\n def __str__(self):\n return self.title\n",
"<import token>\n\n\nclass Project(models.Model):\n name = models.CharField(max_length=100)\n\n def __str__(self):\n return self.name\n\n\nclass Domain(models.Model):\n quarter = models.IntegerField(default=1, validators=[MaxValueValidator(\n 3), MinValueValidator(1)])\n name = models.CharField(max_length=100)\n department = models.CharField(max_length=10)\n project = models.ForeignKey(Project, on_delete=models.CASCADE)\n\n def __str__(self):\n return self.name\n\n\nclass Bug(models.Model):\n title = models.CharField(max_length=200, blank=False)\n risk = models.CharField(max_length=10)\n abstract = models.CharField(max_length=300, blank=False)\n impact = models.CharField(max_length=400)\n ease_of_exploitation = models.CharField(max_length=10)\n owasp_category = models.CharField(max_length=100)\n cvss = models.FloatField(blank=False)\n cwe = models.IntegerField(blank=False)\n domain = models.ForeignKey(Domain, on_delete=models.CASCADE)\n recommendation = models.TextField(blank=False)\n reference = models.TextField(blank=False)\n poc = models.ImageField(upload_to='uploads/')\n status = models.CharField(max_length=10, choices=[('OPEN', 'OPEN'), (\n 'CLOSE', 'CLOSE')], default='OPEN')\n date = models.DateField(auto_now_add=True)\n host_ip = models.CharField(max_length=100)\n port = models.CharField(max_length=20)\n\n def __str__(self):\n return self.title\n",
"<import token>\n\n\nclass Project(models.Model):\n <assignment token>\n\n def __str__(self):\n return self.name\n\n\nclass Domain(models.Model):\n quarter = models.IntegerField(default=1, validators=[MaxValueValidator(\n 3), MinValueValidator(1)])\n name = models.CharField(max_length=100)\n department = models.CharField(max_length=10)\n project = models.ForeignKey(Project, on_delete=models.CASCADE)\n\n def __str__(self):\n return self.name\n\n\nclass Bug(models.Model):\n title = models.CharField(max_length=200, blank=False)\n risk = models.CharField(max_length=10)\n abstract = models.CharField(max_length=300, blank=False)\n impact = models.CharField(max_length=400)\n ease_of_exploitation = models.CharField(max_length=10)\n owasp_category = models.CharField(max_length=100)\n cvss = models.FloatField(blank=False)\n cwe = models.IntegerField(blank=False)\n domain = models.ForeignKey(Domain, on_delete=models.CASCADE)\n recommendation = models.TextField(blank=False)\n reference = models.TextField(blank=False)\n poc = models.ImageField(upload_to='uploads/')\n status = models.CharField(max_length=10, choices=[('OPEN', 'OPEN'), (\n 'CLOSE', 'CLOSE')], default='OPEN')\n date = models.DateField(auto_now_add=True)\n host_ip = models.CharField(max_length=100)\n port = models.CharField(max_length=20)\n\n def __str__(self):\n return self.title\n",
"<import token>\n\n\nclass Project(models.Model):\n <assignment token>\n <function token>\n\n\nclass Domain(models.Model):\n quarter = models.IntegerField(default=1, validators=[MaxValueValidator(\n 3), MinValueValidator(1)])\n name = models.CharField(max_length=100)\n department = models.CharField(max_length=10)\n project = models.ForeignKey(Project, on_delete=models.CASCADE)\n\n def __str__(self):\n return self.name\n\n\nclass Bug(models.Model):\n title = models.CharField(max_length=200, blank=False)\n risk = models.CharField(max_length=10)\n abstract = models.CharField(max_length=300, blank=False)\n impact = models.CharField(max_length=400)\n ease_of_exploitation = models.CharField(max_length=10)\n owasp_category = models.CharField(max_length=100)\n cvss = models.FloatField(blank=False)\n cwe = models.IntegerField(blank=False)\n domain = models.ForeignKey(Domain, on_delete=models.CASCADE)\n recommendation = models.TextField(blank=False)\n reference = models.TextField(blank=False)\n poc = models.ImageField(upload_to='uploads/')\n status = models.CharField(max_length=10, choices=[('OPEN', 'OPEN'), (\n 'CLOSE', 'CLOSE')], default='OPEN')\n date = models.DateField(auto_now_add=True)\n host_ip = models.CharField(max_length=100)\n port = models.CharField(max_length=20)\n\n def __str__(self):\n return self.title\n",
"<import token>\n<class token>\n\n\nclass Domain(models.Model):\n quarter = models.IntegerField(default=1, validators=[MaxValueValidator(\n 3), MinValueValidator(1)])\n name = models.CharField(max_length=100)\n department = models.CharField(max_length=10)\n project = models.ForeignKey(Project, on_delete=models.CASCADE)\n\n def __str__(self):\n return self.name\n\n\nclass Bug(models.Model):\n title = models.CharField(max_length=200, blank=False)\n risk = models.CharField(max_length=10)\n abstract = models.CharField(max_length=300, blank=False)\n impact = models.CharField(max_length=400)\n ease_of_exploitation = models.CharField(max_length=10)\n owasp_category = models.CharField(max_length=100)\n cvss = models.FloatField(blank=False)\n cwe = models.IntegerField(blank=False)\n domain = models.ForeignKey(Domain, on_delete=models.CASCADE)\n recommendation = models.TextField(blank=False)\n reference = models.TextField(blank=False)\n poc = models.ImageField(upload_to='uploads/')\n status = models.CharField(max_length=10, choices=[('OPEN', 'OPEN'), (\n 'CLOSE', 'CLOSE')], default='OPEN')\n date = models.DateField(auto_now_add=True)\n host_ip = models.CharField(max_length=100)\n port = models.CharField(max_length=20)\n\n def __str__(self):\n return self.title\n",
"<import token>\n<class token>\n\n\nclass Domain(models.Model):\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n\n def __str__(self):\n return self.name\n\n\nclass Bug(models.Model):\n title = models.CharField(max_length=200, blank=False)\n risk = models.CharField(max_length=10)\n abstract = models.CharField(max_length=300, blank=False)\n impact = models.CharField(max_length=400)\n ease_of_exploitation = models.CharField(max_length=10)\n owasp_category = models.CharField(max_length=100)\n cvss = models.FloatField(blank=False)\n cwe = models.IntegerField(blank=False)\n domain = models.ForeignKey(Domain, on_delete=models.CASCADE)\n recommendation = models.TextField(blank=False)\n reference = models.TextField(blank=False)\n poc = models.ImageField(upload_to='uploads/')\n status = models.CharField(max_length=10, choices=[('OPEN', 'OPEN'), (\n 'CLOSE', 'CLOSE')], default='OPEN')\n date = models.DateField(auto_now_add=True)\n host_ip = models.CharField(max_length=100)\n port = models.CharField(max_length=20)\n\n def __str__(self):\n return self.title\n",
"<import token>\n<class token>\n\n\nclass Domain(models.Model):\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n\n\nclass Bug(models.Model):\n title = models.CharField(max_length=200, blank=False)\n risk = models.CharField(max_length=10)\n abstract = models.CharField(max_length=300, blank=False)\n impact = models.CharField(max_length=400)\n ease_of_exploitation = models.CharField(max_length=10)\n owasp_category = models.CharField(max_length=100)\n cvss = models.FloatField(blank=False)\n cwe = models.IntegerField(blank=False)\n domain = models.ForeignKey(Domain, on_delete=models.CASCADE)\n recommendation = models.TextField(blank=False)\n reference = models.TextField(blank=False)\n poc = models.ImageField(upload_to='uploads/')\n status = models.CharField(max_length=10, choices=[('OPEN', 'OPEN'), (\n 'CLOSE', 'CLOSE')], default='OPEN')\n date = models.DateField(auto_now_add=True)\n host_ip = models.CharField(max_length=100)\n port = models.CharField(max_length=20)\n\n def __str__(self):\n return self.title\n",
"<import token>\n<class token>\n<class token>\n\n\nclass Bug(models.Model):\n title = models.CharField(max_length=200, blank=False)\n risk = models.CharField(max_length=10)\n abstract = models.CharField(max_length=300, blank=False)\n impact = models.CharField(max_length=400)\n ease_of_exploitation = models.CharField(max_length=10)\n owasp_category = models.CharField(max_length=100)\n cvss = models.FloatField(blank=False)\n cwe = models.IntegerField(blank=False)\n domain = models.ForeignKey(Domain, on_delete=models.CASCADE)\n recommendation = models.TextField(blank=False)\n reference = models.TextField(blank=False)\n poc = models.ImageField(upload_to='uploads/')\n status = models.CharField(max_length=10, choices=[('OPEN', 'OPEN'), (\n 'CLOSE', 'CLOSE')], default='OPEN')\n date = models.DateField(auto_now_add=True)\n host_ip = models.CharField(max_length=100)\n port = models.CharField(max_length=20)\n\n def __str__(self):\n return self.title\n",
"<import token>\n<class token>\n<class token>\n\n\nclass Bug(models.Model):\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n\n def __str__(self):\n return self.title\n",
"<import token>\n<class token>\n<class token>\n\n\nclass Bug(models.Model):\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n",
"<import token>\n<class token>\n<class token>\n<class token>\n"
] | false |
98,391 |
2a1e99b596418c1934de7c16b5aee64b77a7dcff
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-07-05 14:54
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Profile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('photo', models.ImageField(blank=True, upload_to='%Y/%m/%d/profiles/')),
('date_of_birth', models.DateTimeField(blank=True)),
('bio', models.CharField(help_text='350 characters only. Make it short ^_^', max_length=350)),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
|
[
"# -*- coding: utf-8 -*-\n# Generated by Django 1.11.3 on 2017-07-05 14:54\nfrom __future__ import unicode_literals\n\nfrom django.conf import settings\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Profile',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('photo', models.ImageField(blank=True, upload_to='%Y/%m/%d/profiles/')),\n ('date_of_birth', models.DateTimeField(blank=True)),\n ('bio', models.CharField(help_text='350 characters only. Make it short ^_^', max_length=350)),\n ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),\n ],\n ),\n ]\n",
"from __future__ import unicode_literals\nfrom django.conf import settings\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n initial = True\n dependencies = [migrations.swappable_dependency(settings.AUTH_USER_MODEL)]\n operations = [migrations.CreateModel(name='Profile', fields=[('id',\n models.AutoField(auto_created=True, primary_key=True, serialize=\n False, verbose_name='ID')), ('photo', models.ImageField(blank=True,\n upload_to='%Y/%m/%d/profiles/')), ('date_of_birth', models.\n DateTimeField(blank=True)), ('bio', models.CharField(help_text=\n '350 characters only. Make it short ^_^', max_length=350)), ('user',\n models.OneToOneField(on_delete=django.db.models.deletion.CASCADE,\n to=settings.AUTH_USER_MODEL))])]\n",
"<import token>\n\n\nclass Migration(migrations.Migration):\n initial = True\n dependencies = [migrations.swappable_dependency(settings.AUTH_USER_MODEL)]\n operations = [migrations.CreateModel(name='Profile', fields=[('id',\n models.AutoField(auto_created=True, primary_key=True, serialize=\n False, verbose_name='ID')), ('photo', models.ImageField(blank=True,\n upload_to='%Y/%m/%d/profiles/')), ('date_of_birth', models.\n DateTimeField(blank=True)), ('bio', models.CharField(help_text=\n '350 characters only. Make it short ^_^', max_length=350)), ('user',\n models.OneToOneField(on_delete=django.db.models.deletion.CASCADE,\n to=settings.AUTH_USER_MODEL))])]\n",
"<import token>\n\n\nclass Migration(migrations.Migration):\n <assignment token>\n <assignment token>\n <assignment token>\n",
"<import token>\n<class token>\n"
] | false |
98,392 |
ce59ded71259a36e4137e9f5c8597318eca8c7db
|
class Solution:
def hammingDistance(self, x: int, y: int) -> int:
a = '{0:032b}'.format(x)
b = '{0:032b}'.format(y)
count=0
i=0
while i<len(a):
if a[i]!=b[i]:
count+=1
i+=1
return count
|
[
"class Solution:\n def hammingDistance(self, x: int, y: int) -> int:\n a = '{0:032b}'.format(x)\n b = '{0:032b}'.format(y)\n count=0\n i=0\n while i<len(a):\n if a[i]!=b[i]:\n count+=1\n i+=1\n return count ",
"class Solution:\n\n def hammingDistance(self, x: int, y: int) ->int:\n a = '{0:032b}'.format(x)\n b = '{0:032b}'.format(y)\n count = 0\n i = 0\n while i < len(a):\n if a[i] != b[i]:\n count += 1\n i += 1\n return count\n",
"class Solution:\n <function token>\n",
"<class token>\n"
] | false |
98,393 |
1b71eac094e4bd63f754a1254f57b6ad04db935c
|
#!/usr/bin/env python3
import rospy
from geometry_msgs.msg import Twist
def main():
pub = rospy.Publisher('my_diff_drive/cmd_vel', Twist, queue_size=10)
rospy.init_node('circler', anonymous=True)
rate = rospy.Rate(2) # 2hz
msg = Twist()
msg.linear.x = 10
msg.angular.z = 0
while not rospy.is_shutdown():
msg.linear.x += .02
pub.publish(msg)
rate.sleep()
if __name__ == '__main__':
try:
main()
except rospy.ROSInterruptException:
pass
|
[
"#!/usr/bin/env python3\nimport rospy\nfrom geometry_msgs.msg import Twist\n\n\ndef main():\n pub = rospy.Publisher('my_diff_drive/cmd_vel', Twist, queue_size=10)\n rospy.init_node('circler', anonymous=True)\n\n rate = rospy.Rate(2) # 2hz\n msg = Twist()\n msg.linear.x = 10\n msg.angular.z = 0\n\n while not rospy.is_shutdown():\n msg.linear.x += .02\n pub.publish(msg)\n rate.sleep()\n\n\nif __name__ == '__main__':\n try:\n main()\n except rospy.ROSInterruptException:\n pass",
"import rospy\nfrom geometry_msgs.msg import Twist\n\n\ndef main():\n pub = rospy.Publisher('my_diff_drive/cmd_vel', Twist, queue_size=10)\n rospy.init_node('circler', anonymous=True)\n rate = rospy.Rate(2)\n msg = Twist()\n msg.linear.x = 10\n msg.angular.z = 0\n while not rospy.is_shutdown():\n msg.linear.x += 0.02\n pub.publish(msg)\n rate.sleep()\n\n\nif __name__ == '__main__':\n try:\n main()\n except rospy.ROSInterruptException:\n pass\n",
"<import token>\n\n\ndef main():\n pub = rospy.Publisher('my_diff_drive/cmd_vel', Twist, queue_size=10)\n rospy.init_node('circler', anonymous=True)\n rate = rospy.Rate(2)\n msg = Twist()\n msg.linear.x = 10\n msg.angular.z = 0\n while not rospy.is_shutdown():\n msg.linear.x += 0.02\n pub.publish(msg)\n rate.sleep()\n\n\nif __name__ == '__main__':\n try:\n main()\n except rospy.ROSInterruptException:\n pass\n",
"<import token>\n\n\ndef main():\n pub = rospy.Publisher('my_diff_drive/cmd_vel', Twist, queue_size=10)\n rospy.init_node('circler', anonymous=True)\n rate = rospy.Rate(2)\n msg = Twist()\n msg.linear.x = 10\n msg.angular.z = 0\n while not rospy.is_shutdown():\n msg.linear.x += 0.02\n pub.publish(msg)\n rate.sleep()\n\n\n<code token>\n",
"<import token>\n<function token>\n<code token>\n"
] | false |
98,394 |
2622b1ec9d1fb5b2321d7d1c4b5a86b9551606b9
|
import time
###############################
### AWS FUNCTIONS ###
###############################
import aws
###############################
### Publish Messages ###
###############################
def publish_messages(count_of_messages_to_send):
# connect to SNS
sns = aws.get_sns()
# set the topic to Job Request where scale-out alarm sits
topic = aws.SNS_JOB_REQUESTS_TOPIC
# the message doesn't matter:
# job listener is not running during testing,
# so message will not be picked up for processing
message_data = {'job_id': 'job_for_test_msg',
'user_id': 'user_for_test_msg',
'user_name': 'john doe',
'user_email': '[email protected]',
'user_role': 'free_user',
'input_file_name': 'test.vcf',
's3_inputs_bucket': aws.S3_INPUTS_BUCKET,
's3_key_input_file': 'ramonlrodriguez/annotator_testing/test.vcf',
'submit_time': int(time.time()),
'job_status': 'TEST_JOB'
}
# publish the messages
for message in range(count_of_messages_to_send):
aws.publish_message(sns, topic, message_data)
###############################
### Send Test Messages ###
###############################
messages_per_blast = 5
seconds_between_blasts = 1
print("-----------------------")
print("Blasting messages until program stopped:")
print("ctrl+c to stop\n")
print("-----------------------")
print("Messages per blast: " + str(messages_per_blast))
print("Seconds between blasts: " + str(seconds_between_blasts))
print("-----------------------\n\n")
while True:
print("-----------------------")
print("\nBlasting messages...")
publish_messages(messages_per_blast)
print("\nSleeping...")
time.sleep(seconds_between_blasts)
|
[
"import time\n\n\n###############################\n### AWS FUNCTIONS ###\n###############################\n\nimport aws\n\n###############################\n### Publish Messages ###\n###############################\n\ndef publish_messages(count_of_messages_to_send):\n # connect to SNS\n sns = aws.get_sns()\n\n # set the topic to Job Request where scale-out alarm sits\n topic = aws.SNS_JOB_REQUESTS_TOPIC\n\n # the message doesn't matter:\n # job listener is not running during testing,\n # so message will not be picked up for processing\n message_data = {'job_id': 'job_for_test_msg',\n 'user_id': 'user_for_test_msg',\n 'user_name': 'john doe',\n 'user_email': '[email protected]',\n 'user_role': 'free_user',\n 'input_file_name': 'test.vcf',\n 's3_inputs_bucket': aws.S3_INPUTS_BUCKET,\n 's3_key_input_file': 'ramonlrodriguez/annotator_testing/test.vcf',\n 'submit_time': int(time.time()),\n 'job_status': 'TEST_JOB'\n }\n\n # publish the messages\n for message in range(count_of_messages_to_send):\n aws.publish_message(sns, topic, message_data)\n\n###############################\n### Send Test Messages ###\n###############################\n\nmessages_per_blast = 5\nseconds_between_blasts = 1\n\nprint(\"-----------------------\")\nprint(\"Blasting messages until program stopped:\")\nprint(\"ctrl+c to stop\\n\")\n\nprint(\"-----------------------\")\nprint(\"Messages per blast: \" + str(messages_per_blast))\nprint(\"Seconds between blasts: \" + str(seconds_between_blasts))\nprint(\"-----------------------\\n\\n\")\n\nwhile True:\n print(\"-----------------------\")\n print(\"\\nBlasting messages...\")\n publish_messages(messages_per_blast)\n print(\"\\nSleeping...\")\n time.sleep(seconds_between_blasts)\n",
"import time\nimport aws\n\n\ndef publish_messages(count_of_messages_to_send):\n sns = aws.get_sns()\n topic = aws.SNS_JOB_REQUESTS_TOPIC\n message_data = {'job_id': 'job_for_test_msg', 'user_id':\n 'user_for_test_msg', 'user_name': 'john doe', 'user_email':\n '[email protected]', 'user_role': 'free_user', 'input_file_name':\n 'test.vcf', 's3_inputs_bucket': aws.S3_INPUTS_BUCKET,\n 's3_key_input_file': 'ramonlrodriguez/annotator_testing/test.vcf',\n 'submit_time': int(time.time()), 'job_status': 'TEST_JOB'}\n for message in range(count_of_messages_to_send):\n aws.publish_message(sns, topic, message_data)\n\n\nmessages_per_blast = 5\nseconds_between_blasts = 1\nprint('-----------------------')\nprint('Blasting messages until program stopped:')\nprint('ctrl+c to stop\\n')\nprint('-----------------------')\nprint('Messages per blast: ' + str(messages_per_blast))\nprint('Seconds between blasts: ' + str(seconds_between_blasts))\nprint('-----------------------\\n\\n')\nwhile True:\n print('-----------------------')\n print('\\nBlasting messages...')\n publish_messages(messages_per_blast)\n print('\\nSleeping...')\n time.sleep(seconds_between_blasts)\n",
"<import token>\n\n\ndef publish_messages(count_of_messages_to_send):\n sns = aws.get_sns()\n topic = aws.SNS_JOB_REQUESTS_TOPIC\n message_data = {'job_id': 'job_for_test_msg', 'user_id':\n 'user_for_test_msg', 'user_name': 'john doe', 'user_email':\n '[email protected]', 'user_role': 'free_user', 'input_file_name':\n 'test.vcf', 's3_inputs_bucket': aws.S3_INPUTS_BUCKET,\n 's3_key_input_file': 'ramonlrodriguez/annotator_testing/test.vcf',\n 'submit_time': int(time.time()), 'job_status': 'TEST_JOB'}\n for message in range(count_of_messages_to_send):\n aws.publish_message(sns, topic, message_data)\n\n\nmessages_per_blast = 5\nseconds_between_blasts = 1\nprint('-----------------------')\nprint('Blasting messages until program stopped:')\nprint('ctrl+c to stop\\n')\nprint('-----------------------')\nprint('Messages per blast: ' + str(messages_per_blast))\nprint('Seconds between blasts: ' + str(seconds_between_blasts))\nprint('-----------------------\\n\\n')\nwhile True:\n print('-----------------------')\n print('\\nBlasting messages...')\n publish_messages(messages_per_blast)\n print('\\nSleeping...')\n time.sleep(seconds_between_blasts)\n",
"<import token>\n\n\ndef publish_messages(count_of_messages_to_send):\n sns = aws.get_sns()\n topic = aws.SNS_JOB_REQUESTS_TOPIC\n message_data = {'job_id': 'job_for_test_msg', 'user_id':\n 'user_for_test_msg', 'user_name': 'john doe', 'user_email':\n '[email protected]', 'user_role': 'free_user', 'input_file_name':\n 'test.vcf', 's3_inputs_bucket': aws.S3_INPUTS_BUCKET,\n 's3_key_input_file': 'ramonlrodriguez/annotator_testing/test.vcf',\n 'submit_time': int(time.time()), 'job_status': 'TEST_JOB'}\n for message in range(count_of_messages_to_send):\n aws.publish_message(sns, topic, message_data)\n\n\n<assignment token>\nprint('-----------------------')\nprint('Blasting messages until program stopped:')\nprint('ctrl+c to stop\\n')\nprint('-----------------------')\nprint('Messages per blast: ' + str(messages_per_blast))\nprint('Seconds between blasts: ' + str(seconds_between_blasts))\nprint('-----------------------\\n\\n')\nwhile True:\n print('-----------------------')\n print('\\nBlasting messages...')\n publish_messages(messages_per_blast)\n print('\\nSleeping...')\n time.sleep(seconds_between_blasts)\n",
"<import token>\n\n\ndef publish_messages(count_of_messages_to_send):\n sns = aws.get_sns()\n topic = aws.SNS_JOB_REQUESTS_TOPIC\n message_data = {'job_id': 'job_for_test_msg', 'user_id':\n 'user_for_test_msg', 'user_name': 'john doe', 'user_email':\n '[email protected]', 'user_role': 'free_user', 'input_file_name':\n 'test.vcf', 's3_inputs_bucket': aws.S3_INPUTS_BUCKET,\n 's3_key_input_file': 'ramonlrodriguez/annotator_testing/test.vcf',\n 'submit_time': int(time.time()), 'job_status': 'TEST_JOB'}\n for message in range(count_of_messages_to_send):\n aws.publish_message(sns, topic, message_data)\n\n\n<assignment token>\n<code token>\n",
"<import token>\n<function token>\n<assignment token>\n<code token>\n"
] | false |
98,395 |
179961efff3c229a57b9d0fdcbd9875e9ed2173f
|
# _*_ coding: utf-8 _*_
"""
# @Time : 2020/7/24 12:55
# @Author : yls
# @Version:V 0.1
# @File : e_seaborn.py
# @desc : Seaborn 是一个基于 Matplotlib 的 Python 数据可视化库,
# 提供绘制更加高层和优美的图形接口。详情参考:
# http://seaborn.pydata.org/
# 如下,绘制模型拟合后的残差图,y 值添加一个正态分布的误差。
"""
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
if __name__ == '__main__':
"""
残差图看出,y 值误差符合均值 0、方差 0.1 的正态分布规律。
"""
sns.set(style='whitegrid')
rs = np.random.RandomState(1)
x = rs.normal(2,0.1,50)
y = 2 + 1.6*x+rs.normal(0,0.1,50)
sns.residplot(x,y,lowess=True,color='orange')
plt.show()
pass
|
[
"# _*_ coding: utf-8 _*_\n\"\"\"\n# @Time : 2020/7/24 12:55\n# @Author : yls\n# @Version:V 0.1\n# @File : e_seaborn.py\n# @desc : Seaborn 是一个基于 Matplotlib 的 Python 数据可视化库,\n# 提供绘制更加高层和优美的图形接口。详情参考:\n# http://seaborn.pydata.org/\n# 如下,绘制模型拟合后的残差图,y 值添加一个正态分布的误差。\n\"\"\"\n\nimport numpy as np\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\n\nif __name__ == '__main__':\n \"\"\"\n 残差图看出,y 值误差符合均值 0、方差 0.1 的正态分布规律。\n \"\"\"\n sns.set(style='whitegrid')\n rs = np.random.RandomState(1)\n x = rs.normal(2,0.1,50)\n y = 2 + 1.6*x+rs.normal(0,0.1,50)\n sns.residplot(x,y,lowess=True,color='orange')\n plt.show()\n pass\n",
"<docstring token>\nimport numpy as np\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nif __name__ == '__main__':\n \"\"\"\n 残差图看出,y 值误差符合均值 0、方差 0.1 的正态分布规律。\n \"\"\"\n sns.set(style='whitegrid')\n rs = np.random.RandomState(1)\n x = rs.normal(2, 0.1, 50)\n y = 2 + 1.6 * x + rs.normal(0, 0.1, 50)\n sns.residplot(x, y, lowess=True, color='orange')\n plt.show()\n pass\n",
"<docstring token>\n<import token>\nif __name__ == '__main__':\n \"\"\"\n 残差图看出,y 值误差符合均值 0、方差 0.1 的正态分布规律。\n \"\"\"\n sns.set(style='whitegrid')\n rs = np.random.RandomState(1)\n x = rs.normal(2, 0.1, 50)\n y = 2 + 1.6 * x + rs.normal(0, 0.1, 50)\n sns.residplot(x, y, lowess=True, color='orange')\n plt.show()\n pass\n",
"<docstring token>\n<import token>\n<code token>\n"
] | false |
98,396 |
0e47d3671144909d95211a3487ee00c30b3f0e9d
|
""" Todo Plugin Module
"""
import re
from ashaw_notes.plugins import base_plugin
class Plugin(base_plugin.Plugin):
"""Todo Plugin Class"""
bypass_today = True
regex = re.compile(r'^todo(ne\[[0-9]*\])?:')
def is_plugin_note(self, note):
"""Verifies note relates to plugin"""
return bool(self.regex.match(note))
def process_input(self, note):
"""Handle note input"""
return note
|
[
"\"\"\" Todo Plugin Module\n\"\"\"\nimport re\nfrom ashaw_notes.plugins import base_plugin\n\n\nclass Plugin(base_plugin.Plugin):\n \"\"\"Todo Plugin Class\"\"\"\n bypass_today = True\n regex = re.compile(r'^todo(ne\\[[0-9]*\\])?:')\n\n def is_plugin_note(self, note):\n \"\"\"Verifies note relates to plugin\"\"\"\n return bool(self.regex.match(note))\n\n def process_input(self, note):\n \"\"\"Handle note input\"\"\"\n return note\n",
"<docstring token>\nimport re\nfrom ashaw_notes.plugins import base_plugin\n\n\nclass Plugin(base_plugin.Plugin):\n \"\"\"Todo Plugin Class\"\"\"\n bypass_today = True\n regex = re.compile('^todo(ne\\\\[[0-9]*\\\\])?:')\n\n def is_plugin_note(self, note):\n \"\"\"Verifies note relates to plugin\"\"\"\n return bool(self.regex.match(note))\n\n def process_input(self, note):\n \"\"\"Handle note input\"\"\"\n return note\n",
"<docstring token>\n<import token>\n\n\nclass Plugin(base_plugin.Plugin):\n \"\"\"Todo Plugin Class\"\"\"\n bypass_today = True\n regex = re.compile('^todo(ne\\\\[[0-9]*\\\\])?:')\n\n def is_plugin_note(self, note):\n \"\"\"Verifies note relates to plugin\"\"\"\n return bool(self.regex.match(note))\n\n def process_input(self, note):\n \"\"\"Handle note input\"\"\"\n return note\n",
"<docstring token>\n<import token>\n\n\nclass Plugin(base_plugin.Plugin):\n <docstring token>\n bypass_today = True\n regex = re.compile('^todo(ne\\\\[[0-9]*\\\\])?:')\n\n def is_plugin_note(self, note):\n \"\"\"Verifies note relates to plugin\"\"\"\n return bool(self.regex.match(note))\n\n def process_input(self, note):\n \"\"\"Handle note input\"\"\"\n return note\n",
"<docstring token>\n<import token>\n\n\nclass Plugin(base_plugin.Plugin):\n <docstring token>\n <assignment token>\n <assignment token>\n\n def is_plugin_note(self, note):\n \"\"\"Verifies note relates to plugin\"\"\"\n return bool(self.regex.match(note))\n\n def process_input(self, note):\n \"\"\"Handle note input\"\"\"\n return note\n",
"<docstring token>\n<import token>\n\n\nclass Plugin(base_plugin.Plugin):\n <docstring token>\n <assignment token>\n <assignment token>\n <function token>\n\n def process_input(self, note):\n \"\"\"Handle note input\"\"\"\n return note\n",
"<docstring token>\n<import token>\n\n\nclass Plugin(base_plugin.Plugin):\n <docstring token>\n <assignment token>\n <assignment token>\n <function token>\n <function token>\n",
"<docstring token>\n<import token>\n<class token>\n"
] | false |
98,397 |
1bdf4552f0d02c44630c0e1b06da3128e9167174
|
from django.db import models
from django.contrib.auth.models import User
TYPE_CHOICES = (('0', 'Bug'),('1', 'Feature'),('2', 'Enhancement'))
SEVERITY_CHOICES = (('0', 'Critical'),('1', 'High'),
('2', 'Medium'),('3', 'Low'))
# A task
class Task(models.Model):
project = models.ForeignKey('project.Project')
title = models.CharField(max_length=150)
type = models.CharField(max_length=2, choices=TYPE_CHOICES, default='0')
severity = models.CharField(max_length=2, choices=SEVERITY_CHOICES, default='2')
progress = models.PositiveIntegerField(default=0)
description = models.TextField()
assignees = models.ManyToManyField(User, related_name='task_assignees')
created_by = models.ForeignKey(User, related_name='task_created_by')
created_on = models.DateTimeField(auto_now_add=True)
updated_on = models.DateTimeField(auto_now=True)
|
[
"from django.db import models\nfrom django.contrib.auth.models import User\n\nTYPE_CHOICES = (('0', 'Bug'),('1', 'Feature'),('2', 'Enhancement'))\nSEVERITY_CHOICES = (('0', 'Critical'),('1', 'High'),\n ('2', 'Medium'),('3', 'Low'))\n\n# A task\nclass Task(models.Model):\n\tproject = models.ForeignKey('project.Project')\n\ttitle = models.CharField(max_length=150)\n\ttype = models.CharField(max_length=2, choices=TYPE_CHOICES, default='0')\n\tseverity = models.CharField(max_length=2, choices=SEVERITY_CHOICES, default='2')\n\tprogress = models.PositiveIntegerField(default=0)\n\tdescription = models.TextField()\n\tassignees = models.ManyToManyField(User, related_name='task_assignees')\n\tcreated_by = models.ForeignKey(User, related_name='task_created_by')\n\tcreated_on = models.DateTimeField(auto_now_add=True)\n\tupdated_on = models.DateTimeField(auto_now=True)\n",
"from django.db import models\nfrom django.contrib.auth.models import User\nTYPE_CHOICES = ('0', 'Bug'), ('1', 'Feature'), ('2', 'Enhancement')\nSEVERITY_CHOICES = ('0', 'Critical'), ('1', 'High'), ('2', 'Medium'), ('3',\n 'Low')\n\n\nclass Task(models.Model):\n project = models.ForeignKey('project.Project')\n title = models.CharField(max_length=150)\n type = models.CharField(max_length=2, choices=TYPE_CHOICES, default='0')\n severity = models.CharField(max_length=2, choices=SEVERITY_CHOICES,\n default='2')\n progress = models.PositiveIntegerField(default=0)\n description = models.TextField()\n assignees = models.ManyToManyField(User, related_name='task_assignees')\n created_by = models.ForeignKey(User, related_name='task_created_by')\n created_on = models.DateTimeField(auto_now_add=True)\n updated_on = models.DateTimeField(auto_now=True)\n",
"<import token>\nTYPE_CHOICES = ('0', 'Bug'), ('1', 'Feature'), ('2', 'Enhancement')\nSEVERITY_CHOICES = ('0', 'Critical'), ('1', 'High'), ('2', 'Medium'), ('3',\n 'Low')\n\n\nclass Task(models.Model):\n project = models.ForeignKey('project.Project')\n title = models.CharField(max_length=150)\n type = models.CharField(max_length=2, choices=TYPE_CHOICES, default='0')\n severity = models.CharField(max_length=2, choices=SEVERITY_CHOICES,\n default='2')\n progress = models.PositiveIntegerField(default=0)\n description = models.TextField()\n assignees = models.ManyToManyField(User, related_name='task_assignees')\n created_by = models.ForeignKey(User, related_name='task_created_by')\n created_on = models.DateTimeField(auto_now_add=True)\n updated_on = models.DateTimeField(auto_now=True)\n",
"<import token>\n<assignment token>\n\n\nclass Task(models.Model):\n project = models.ForeignKey('project.Project')\n title = models.CharField(max_length=150)\n type = models.CharField(max_length=2, choices=TYPE_CHOICES, default='0')\n severity = models.CharField(max_length=2, choices=SEVERITY_CHOICES,\n default='2')\n progress = models.PositiveIntegerField(default=0)\n description = models.TextField()\n assignees = models.ManyToManyField(User, related_name='task_assignees')\n created_by = models.ForeignKey(User, related_name='task_created_by')\n created_on = models.DateTimeField(auto_now_add=True)\n updated_on = models.DateTimeField(auto_now=True)\n",
"<import token>\n<assignment token>\n\n\nclass Task(models.Model):\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n",
"<import token>\n<assignment token>\n<class token>\n"
] | false |
98,398 |
f8971c6e3499cc5d3b3b4b7fe5e7529a0d962ca3
|
"""Movie Ratings."""
from jinja2 import StrictUndefined
from flask import Flask, render_template, redirect, request, flash, session
from flask_debugtoolbar import DebugToolbarExtension
from sqlalchemy import func, update
from model import User, Rating, Movie, connect_to_db, db
app = Flask(__name__)
# Required to use Flask sessions and the debug toolbar
app.secret_key = "ABC"
# Normally, if you use an undefined variable in Jinja2, it fails silently.
# This is horrible. Fix this so that, instead, it raises an error.
app.jinja_env.undefined = StrictUndefined
@app.template_filter()
def datetimefilter(value, format='%b %d'):
"""Convert a datetime to a different format so it can be accessible in Jinja."""
return value.strftime(format)
app.jinja_env.filters['datetimefilter'] = datetimefilter
@app.route('/')
def index():
"""Homepage."""
# We want user profile link to show if user is logged in and clicks on homepage
# Check if logged in and get the value or else return None
# If there is a value, query to get user information so that user.user_id can be accessed in jinja
# Else, pass None value through so that if statement in jinja not executed
user_email = session.get("logged_in_user_email", None)
if user_email is not None:
user = User.query.filter(User.email == user_email).one()
return render_template("homepage.html", user=user)
else:
return render_template("homepage.html", user=None)
@app.route("/users")
def user_list():
"""Show list of users."""
users = User.query.all()
return render_template("user_list.html", users=users)
# This takes to each user's profile from user list
@app.route("/users/<int:user_id>")
def user_profile(user_id):
"""Show user information"""
# Query by user id to return that record in database about user info
user = User.query.filter(User.user_id == user_id).one()
# import pdb; pdb.set_trace()
# Query to get all movies and scores rated by this user
# Needed to join Rating and Movie tables and filter by user id
# Sort movie titles alphabetically
user_movies = db.session.query(Rating.user_id,
Rating.movie_id,
Rating.score,
Movie.title).join(Movie).filter(Rating.user_id == user_id).order_by(Movie.title).all()
# Passed user info into jinja and called on its attributes
# Passed user_movies into jinja and called on its attributes to get the info
return render_template("user_profile.html", user=user, user_movies = user_movies)
# # THIS WORKS, but we want to use /user/<int:user_id>, which we figured out above!!
# @app.route("/user-profile")
# def user_profile():
# """Show user information"""
# # import pdb; pdb.set_trace()
# # Get user email to query in User database and get all info about the user
# email = session["logged_in_user_email"]
# user = User.query.filter(User.email == email).one()
# # # Test code to see attributes of user object
# # user_id = user.user_id
# # age = user.age
# # zipcode = user.zipcode
# return render_template("user_profile.html", user=user)
@app.route("/signup-login", methods=["GET"])
def show_forms():
"""Show signup and login forms."""
return render_template("signup_login.html")
@app.route("/signup", methods=["POST"])
def signup():
"""Check if user exists in database, otherwise add user to database."""
# Get values from signup form
signup_email = request.form.get("signup_email")
signup_password = request.form.get("signup_password")
# If user exists, ask them to log in
# Otherwise, add user into database and log them in, redirecting to homepage
if db.session.query(User).filter(User.email == signup_email).first():
flash("You already have an account please use login!", "danger")
return redirect("/signup-login")
else:
new_user = User(email=signup_email, password=signup_password, age=None, zipcode=None)
db.session.add(new_user)
db.session.commit()
session["logged_in_user_email"] = signup_email
session["logged_in_user"] = new_user.user_id
flash("Your account has been created! You now are logged in!", "success")
return redirect("/")
@app.route("/login", methods=["POST"])
def login():
"""Check if user's email matches password, otherwise ask user to try again."""
# Get values from login form
login_email = request.form.get("login_email")
login_password = request.form.get("login_password")
# If user's email and password matches, log them in, redirecting them to homepage
# Otherwise, ask them to log in with the correct password
if db.session.query(User).filter(User.email == login_email,
User.password == login_password).first():
flash("Login SUCCESS.", "success")
# Query to get user's user id, in order to redirect user to their user profile
user = User.query.filter(User.email == login_email).one()
session["logged_in_user_email"] = login_email
session["logged_in_user"] = user.user_id
# Pass a variable through a string via string formatting
# so we can pass user_id into the redirected route, which is a string!!
return redirect("/users/%s" % user.user_id)
# return redirect("/")
else:
flash("Incorrect password. Please try again!", "danger")
return redirect("/signup-login")
@app.route("/logout")
def process_logout():
"""Log user out."""
del session["logged_in_user_email"]
del session["logged_in_user"]
flash("Logged out.", "success")
return redirect("/")
@app.route("/movies")
def movie_list():
"""Show list of movies."""
# sort movie titles alphbetically
movies = Movie.query.order_by(Movie.title).all()
return render_template("movie_list.html", movies=movies)
@app.route("/movies/<int:movie_id>", methods=['GET'])
def movie_profile(movie_id):
"""Show movie information.
If a user is logged in, let them add/edit a rating.
"""
if not session.get('logged_in_user_email'):
flash("Please login or signup to see the movie details and rate the movie!", "danger")
return redirect("/signup-login")
else:
# import pdb; pdb.set_trace();
# Query by movie id to return that record in database about movie info
# movie = Movie.query.filter(Movie.movie_id == movie_id).one()
movie = Movie.query.get(movie_id)
user = User.query.filter(User.email == session.get("logged_in_user_email")).one()
user_id = user.user_id
if user_id:
user_rating = Rating.query.filter_by(movie_id=movie_id, user_id=user_id).first()
else:
user_rating = None
# Prediction code: only predict if the user hasn't rated it
prediction = None
if (not user_rating) and user_id:
user = User.query.get(user_id)
if user:
prediction = user.predict_rating(movie)
# Either use the prediction or their real rating
if prediction:
# User hasn't scored; use our prediction if we made one
effective_rating = prediction
elif user_rating:
# User has already scored for real; use that
effective_rating = user_rating.score
else:
# User hasn't scored and we couldn't get a prediction
effective_rating = None
# Get the wizard's rating, either by predicting or using real rating
wizard = User.query.filter_by(email="[email protected]").one()
wizard_rating = Rating.query.filter_by(user_id=wizard.user_id, movie_id=movie.movie_id).first()
if wizard_rating is None:
wizard_rating = wizard.predict_rating(movie)
else:
wizard_rating = wizard_rating.score
if wizard_rating and effective_rating:
difference = abs(wizard_rating - effective_rating)
else:
# We couldn't get a wizard rating, so we'll skip difference
difference = None
# Depending on how different we are from the Wizard, choose a message
BERATEMENT_MESSAGES = [
"I suppose you don't have such bad taste after all.",
"I regret every decision that I've ever made that has brought me to listen to your opinion.",
"Words fail me, as your taste in movies has clearly failed you.",
"That movie is great. For a clown to watch. Idiot.",
"Words cannot express the awfulness of your taste."
]
if difference is not None:
beratement = BERATEMENT_MESSAGES[int(difference)]
else:
beratement = None
# Tallies score of each rating (how many people rated this score per rating)
# Returns list of tuples for count_score
unordered_ratings = db.session.query(Rating.score, func.count(Rating.score)).filter(Rating.movie_id == movie_id).group_by(Rating.score)
ordered_movies = unordered_ratings.order_by(Rating.score)
count_score = ordered_movies.all()
# Get average score, which returns a tuple-like object, so need to access index 0 to return the number and pass through jinja
avg_rating = db.session.query(func.avg(Rating.score)).filter(Rating.movie_id == movie_id).one()
# Query to get all ratings for a specific movie
# Needed to join Rating and Movie tables and filter by user id
# Sort movie titles alphabetically
ratings = db.session.query(Rating.movie_id,
Rating.score,
Movie.title).join(Movie).filter(Rating.movie_id == movie_id).all()
# # Pass user info into jinja and called on its attributes
# # Pass count_score, avg_rating, and ratings into jinja
# return render_template("movie_profile.html", movie=movie, count_score=count_score, avg_rating=avg_rating[0], ratings=ratings)
return render_template(
"movie_profile.html",
movie=movie,
user_rating=user_rating,
avg_rating=avg_rating[0],
count_score=count_score,
prediction=prediction,
ratings=ratings,
beratement=beratement)
@app.route("/movies/<int:movie_id>/rate-movie")
def rate_movie(movie_id):
"""Get user rating score for movie"""
user_rating = request.args.get("user_rating")
# get user id from log in email address
user_email = session["logged_in_user_email"]
user = User.query.filter(User.email == user_email).one()
user_id = user.user_id
# Check if user rating exists in database
# If user has rated this movie before, update value
# Else, add user rating to database by movie id and user id
if db.session.query(Rating.score).filter(Rating.movie_id == movie_id, Rating.user_id == user_id).all():
# When updating a value, we need to use the key-value pair in update()
db.session.query(Rating).filter(Rating.movie_id == movie_id, Rating.user_id == user_id).update({"score": user_rating})
# db.session.query(Rating).filter(Rating.movie_id == movie_id, Rating.user_id == user_id).update(Rating.score == user_rating)
db.session.commit()
flash("You have rated this movie before! It has now been updated to %s." % (user_rating), "warning")
return redirect("/users/%s" % user_id)
else:
db.session.add(Rating(movie_id=movie_id, user_id=user_id, score=user_rating))
db.session.commit()
flash("You have rated this movie a %s." % (user_rating), "info")
return redirect("/users/%s" % user_id)
# Get user rating routed correctly, as this was just test code
# Fix label format for movie profile page
return render_template("rate_movie.html", user_rating=user_rating)
if __name__ == "__main__":
# We have to set debug=True here, since it has to be True at the point
# that we invoke the DebugToolbarExtension
app.debug = True
connect_to_db(app)
# Use the DebugToolbar
# DebugToolbarExtension(app)
app.run()
|
[
"\"\"\"Movie Ratings.\"\"\"\n\nfrom jinja2 import StrictUndefined\n\nfrom flask import Flask, render_template, redirect, request, flash, session\nfrom flask_debugtoolbar import DebugToolbarExtension\nfrom sqlalchemy import func, update\n\n\nfrom model import User, Rating, Movie, connect_to_db, db\n\n\napp = Flask(__name__)\n\n# Required to use Flask sessions and the debug toolbar\napp.secret_key = \"ABC\"\n\n# Normally, if you use an undefined variable in Jinja2, it fails silently.\n# This is horrible. Fix this so that, instead, it raises an error.\napp.jinja_env.undefined = StrictUndefined\n\n\[email protected]_filter()\ndef datetimefilter(value, format='%b %d'):\n \"\"\"Convert a datetime to a different format so it can be accessible in Jinja.\"\"\"\n\n return value.strftime(format)\n\napp.jinja_env.filters['datetimefilter'] = datetimefilter\n\n\[email protected]('/')\ndef index():\n \"\"\"Homepage.\"\"\"\n\n # We want user profile link to show if user is logged in and clicks on homepage\n\n # Check if logged in and get the value or else return None\n # If there is a value, query to get user information so that user.user_id can be accessed in jinja\n # Else, pass None value through so that if statement in jinja not executed\n user_email = session.get(\"logged_in_user_email\", None)\n if user_email is not None:\n user = User.query.filter(User.email == user_email).one()\n return render_template(\"homepage.html\", user=user)\n\n else:\n return render_template(\"homepage.html\", user=None)\n\n\[email protected](\"/users\")\ndef user_list():\n \"\"\"Show list of users.\"\"\"\n\n users = User.query.all()\n\n return render_template(\"user_list.html\", users=users)\n\n\n# This takes to each user's profile from user list\[email protected](\"/users/<int:user_id>\")\ndef user_profile(user_id):\n \"\"\"Show user information\"\"\"\n\n # Query by user id to return that record in database about user info\n user = User.query.filter(User.user_id == user_id).one()\n\n # import pdb; pdb.set_trace()\n\n # Query to get all movies and scores rated by this user\n # Needed to join Rating and Movie tables and filter by user id\n # Sort movie titles alphabetically\n user_movies = db.session.query(Rating.user_id, \n Rating.movie_id, \n Rating.score,\n Movie.title).join(Movie).filter(Rating.user_id == user_id).order_by(Movie.title).all()\n\n # Passed user info into jinja and called on its attributes\n # Passed user_movies into jinja and called on its attributes to get the info\n return render_template(\"user_profile.html\", user=user, user_movies = user_movies)\n\n\n# # THIS WORKS, but we want to use /user/<int:user_id>, which we figured out above!!\n# @app.route(\"/user-profile\")\n# def user_profile():\n# \"\"\"Show user information\"\"\"\n\n# # import pdb; pdb.set_trace()\n\n# # Get user email to query in User database and get all info about the user\n# email = session[\"logged_in_user_email\"]\n# user = User.query.filter(User.email == email).one()\n\n# # # Test code to see attributes of user object\n# # user_id = user.user_id\n# # age = user.age\n# # zipcode = user.zipcode\n\n# return render_template(\"user_profile.html\", user=user)\n\n\[email protected](\"/signup-login\", methods=[\"GET\"])\ndef show_forms():\n \"\"\"Show signup and login forms.\"\"\"\n\n return render_template(\"signup_login.html\")\n\n\[email protected](\"/signup\", methods=[\"POST\"])\ndef signup():\n \"\"\"Check if user exists in database, otherwise add user to database.\"\"\"\n\n # Get values from signup form\n signup_email = request.form.get(\"signup_email\")\n signup_password = request.form.get(\"signup_password\")\n\n # If user exists, ask them to log in\n # Otherwise, add user into database and log them in, redirecting to homepage\n if db.session.query(User).filter(User.email == signup_email).first():\n flash(\"You already have an account please use login!\", \"danger\")\n return redirect(\"/signup-login\")\n\n else:\n new_user = User(email=signup_email, password=signup_password, age=None, zipcode=None)\n db.session.add(new_user)\n db.session.commit()\n \n session[\"logged_in_user_email\"] = signup_email\n session[\"logged_in_user\"] = new_user.user_id\n \n flash(\"Your account has been created! You now are logged in!\", \"success\")\n \n return redirect(\"/\")\n\n\[email protected](\"/login\", methods=[\"POST\"])\ndef login():\n \"\"\"Check if user's email matches password, otherwise ask user to try again.\"\"\"\n \n # Get values from login form\n login_email = request.form.get(\"login_email\")\n login_password = request.form.get(\"login_password\")\n\n # If user's email and password matches, log them in, redirecting them to homepage\n # Otherwise, ask them to log in with the correct password\n if db.session.query(User).filter(User.email == login_email, \n User.password == login_password).first():\n \n flash(\"Login SUCCESS.\", \"success\") \n\n # Query to get user's user id, in order to redirect user to their user profile\n user = User.query.filter(User.email == login_email).one()\n\n session[\"logged_in_user_email\"] = login_email\n session[\"logged_in_user\"] = user.user_id\n\n # Pass a variable through a string via string formatting\n # so we can pass user_id into the redirected route, which is a string!!\n return redirect(\"/users/%s\" % user.user_id)\n # return redirect(\"/\")\n\n else:\n flash(\"Incorrect password. Please try again!\", \"danger\")\n return redirect(\"/signup-login\")\n\n\[email protected](\"/logout\")\ndef process_logout():\n \"\"\"Log user out.\"\"\"\n\n del session[\"logged_in_user_email\"]\n del session[\"logged_in_user\"]\n \n flash(\"Logged out.\", \"success\")\n \n return redirect(\"/\")\n\n\[email protected](\"/movies\")\ndef movie_list():\n \"\"\"Show list of movies.\"\"\"\n\n # sort movie titles alphbetically\n movies = Movie.query.order_by(Movie.title).all()\n\n return render_template(\"movie_list.html\", movies=movies)\n\n\[email protected](\"/movies/<int:movie_id>\", methods=['GET'])\ndef movie_profile(movie_id):\n \"\"\"Show movie information.\n\n If a user is logged in, let them add/edit a rating.\n \"\"\"\n\n if not session.get('logged_in_user_email'):\n flash(\"Please login or signup to see the movie details and rate the movie!\", \"danger\")\n return redirect(\"/signup-login\")\n\n else:\n\n # import pdb; pdb.set_trace();\n\n # Query by movie id to return that record in database about movie info\n # movie = Movie.query.filter(Movie.movie_id == movie_id).one()\n movie = Movie.query.get(movie_id)\n\n user = User.query.filter(User.email == session.get(\"logged_in_user_email\")).one()\n user_id = user.user_id\n\n if user_id:\n user_rating = Rating.query.filter_by(movie_id=movie_id, user_id=user_id).first()\n else:\n user_rating = None\n\n # Prediction code: only predict if the user hasn't rated it\n prediction = None\n\n if (not user_rating) and user_id:\n user = User.query.get(user_id)\n if user:\n prediction = user.predict_rating(movie)\n\n # Either use the prediction or their real rating\n if prediction:\n # User hasn't scored; use our prediction if we made one\n effective_rating = prediction\n\n elif user_rating:\n # User has already scored for real; use that\n effective_rating = user_rating.score\n\n else:\n # User hasn't scored and we couldn't get a prediction\n effective_rating = None\n\n # Get the wizard's rating, either by predicting or using real rating\n wizard = User.query.filter_by(email=\"[email protected]\").one()\n wizard_rating = Rating.query.filter_by(user_id=wizard.user_id, movie_id=movie.movie_id).first()\n\n if wizard_rating is None:\n wizard_rating = wizard.predict_rating(movie)\n else:\n wizard_rating = wizard_rating.score\n\n if wizard_rating and effective_rating:\n difference = abs(wizard_rating - effective_rating)\n else:\n # We couldn't get a wizard rating, so we'll skip difference\n difference = None\n\n # Depending on how different we are from the Wizard, choose a message\n BERATEMENT_MESSAGES = [\n \"I suppose you don't have such bad taste after all.\",\n \"I regret every decision that I've ever made that has brought me to listen to your opinion.\",\n \"Words fail me, as your taste in movies has clearly failed you.\",\n \"That movie is great. For a clown to watch. Idiot.\",\n \"Words cannot express the awfulness of your taste.\"\n ]\n\n if difference is not None:\n beratement = BERATEMENT_MESSAGES[int(difference)]\n else:\n beratement = None\n\n # Tallies score of each rating (how many people rated this score per rating)\n # Returns list of tuples for count_score\n unordered_ratings = db.session.query(Rating.score, func.count(Rating.score)).filter(Rating.movie_id == movie_id).group_by(Rating.score)\n ordered_movies = unordered_ratings.order_by(Rating.score)\n count_score = ordered_movies.all()\n\n # Get average score, which returns a tuple-like object, so need to access index 0 to return the number and pass through jinja\n avg_rating = db.session.query(func.avg(Rating.score)).filter(Rating.movie_id == movie_id).one()\n\n # Query to get all ratings for a specific movie\n # Needed to join Rating and Movie tables and filter by user id\n # Sort movie titles alphabetically\n ratings = db.session.query(Rating.movie_id,\n Rating.score,\n Movie.title).join(Movie).filter(Rating.movie_id == movie_id).all()\n\n # # Pass user info into jinja and called on its attributes\n # # Pass count_score, avg_rating, and ratings into jinja\n # return render_template(\"movie_profile.html\", movie=movie, count_score=count_score, avg_rating=avg_rating[0], ratings=ratings)\n\n return render_template(\n \"movie_profile.html\",\n movie=movie,\n user_rating=user_rating,\n avg_rating=avg_rating[0],\n count_score=count_score,\n prediction=prediction,\n ratings=ratings,\n beratement=beratement)\n\n\[email protected](\"/movies/<int:movie_id>/rate-movie\")\ndef rate_movie(movie_id):\n \"\"\"Get user rating score for movie\"\"\"\n\n user_rating = request.args.get(\"user_rating\")\n # get user id from log in email address\n user_email = session[\"logged_in_user_email\"]\n\n user = User.query.filter(User.email == user_email).one()\n\n user_id = user.user_id\n\n # Check if user rating exists in database\n # If user has rated this movie before, update value\n # Else, add user rating to database by movie id and user id\n if db.session.query(Rating.score).filter(Rating.movie_id == movie_id, Rating.user_id == user_id).all():\n # When updating a value, we need to use the key-value pair in update()\n db.session.query(Rating).filter(Rating.movie_id == movie_id, Rating.user_id == user_id).update({\"score\": user_rating})\n\n # db.session.query(Rating).filter(Rating.movie_id == movie_id, Rating.user_id == user_id).update(Rating.score == user_rating)\n db.session.commit()\n\n flash(\"You have rated this movie before! It has now been updated to %s.\" % (user_rating), \"warning\")\n return redirect(\"/users/%s\" % user_id)\n\n else:\n db.session.add(Rating(movie_id=movie_id, user_id=user_id, score=user_rating))\n db.session.commit()\n \n flash(\"You have rated this movie a %s.\" % (user_rating), \"info\")\n \n return redirect(\"/users/%s\" % user_id)\n\n\n # Get user rating routed correctly, as this was just test code\n # Fix label format for movie profile page\n\n return render_template(\"rate_movie.html\", user_rating=user_rating)\n\n\n\nif __name__ == \"__main__\":\n # We have to set debug=True here, since it has to be True at the point\n # that we invoke the DebugToolbarExtension\n app.debug = True\n\n connect_to_db(app)\n\n # Use the DebugToolbar\n # DebugToolbarExtension(app)\n\n app.run()\n",
"<docstring token>\nfrom jinja2 import StrictUndefined\nfrom flask import Flask, render_template, redirect, request, flash, session\nfrom flask_debugtoolbar import DebugToolbarExtension\nfrom sqlalchemy import func, update\nfrom model import User, Rating, Movie, connect_to_db, db\napp = Flask(__name__)\napp.secret_key = 'ABC'\napp.jinja_env.undefined = StrictUndefined\n\n\[email protected]_filter()\ndef datetimefilter(value, format='%b %d'):\n \"\"\"Convert a datetime to a different format so it can be accessible in Jinja.\"\"\"\n return value.strftime(format)\n\n\napp.jinja_env.filters['datetimefilter'] = datetimefilter\n\n\[email protected]('/')\ndef index():\n \"\"\"Homepage.\"\"\"\n user_email = session.get('logged_in_user_email', None)\n if user_email is not None:\n user = User.query.filter(User.email == user_email).one()\n return render_template('homepage.html', user=user)\n else:\n return render_template('homepage.html', user=None)\n\n\[email protected]('/users')\ndef user_list():\n \"\"\"Show list of users.\"\"\"\n users = User.query.all()\n return render_template('user_list.html', users=users)\n\n\[email protected]('/users/<int:user_id>')\ndef user_profile(user_id):\n \"\"\"Show user information\"\"\"\n user = User.query.filter(User.user_id == user_id).one()\n user_movies = db.session.query(Rating.user_id, Rating.movie_id, Rating.\n score, Movie.title).join(Movie).filter(Rating.user_id == user_id\n ).order_by(Movie.title).all()\n return render_template('user_profile.html', user=user, user_movies=\n user_movies)\n\n\[email protected]('/signup-login', methods=['GET'])\ndef show_forms():\n \"\"\"Show signup and login forms.\"\"\"\n return render_template('signup_login.html')\n\n\[email protected]('/signup', methods=['POST'])\ndef signup():\n \"\"\"Check if user exists in database, otherwise add user to database.\"\"\"\n signup_email = request.form.get('signup_email')\n signup_password = request.form.get('signup_password')\n if db.session.query(User).filter(User.email == signup_email).first():\n flash('You already have an account please use login!', 'danger')\n return redirect('/signup-login')\n else:\n new_user = User(email=signup_email, password=signup_password, age=\n None, zipcode=None)\n db.session.add(new_user)\n db.session.commit()\n session['logged_in_user_email'] = signup_email\n session['logged_in_user'] = new_user.user_id\n flash('Your account has been created! You now are logged in!',\n 'success')\n return redirect('/')\n\n\[email protected]('/login', methods=['POST'])\ndef login():\n \"\"\"Check if user's email matches password, otherwise ask user to try again.\"\"\"\n login_email = request.form.get('login_email')\n login_password = request.form.get('login_password')\n if db.session.query(User).filter(User.email == login_email, User.\n password == login_password).first():\n flash('Login SUCCESS.', 'success')\n user = User.query.filter(User.email == login_email).one()\n session['logged_in_user_email'] = login_email\n session['logged_in_user'] = user.user_id\n return redirect('/users/%s' % user.user_id)\n else:\n flash('Incorrect password. Please try again!', 'danger')\n return redirect('/signup-login')\n\n\[email protected]('/logout')\ndef process_logout():\n \"\"\"Log user out.\"\"\"\n del session['logged_in_user_email']\n del session['logged_in_user']\n flash('Logged out.', 'success')\n return redirect('/')\n\n\[email protected]('/movies')\ndef movie_list():\n \"\"\"Show list of movies.\"\"\"\n movies = Movie.query.order_by(Movie.title).all()\n return render_template('movie_list.html', movies=movies)\n\n\[email protected]('/movies/<int:movie_id>', methods=['GET'])\ndef movie_profile(movie_id):\n \"\"\"Show movie information.\n\n If a user is logged in, let them add/edit a rating.\n \"\"\"\n if not session.get('logged_in_user_email'):\n flash(\n 'Please login or signup to see the movie details and rate the movie!'\n , 'danger')\n return redirect('/signup-login')\n else:\n movie = Movie.query.get(movie_id)\n user = User.query.filter(User.email == session.get(\n 'logged_in_user_email')).one()\n user_id = user.user_id\n if user_id:\n user_rating = Rating.query.filter_by(movie_id=movie_id, user_id\n =user_id).first()\n else:\n user_rating = None\n prediction = None\n if not user_rating and user_id:\n user = User.query.get(user_id)\n if user:\n prediction = user.predict_rating(movie)\n if prediction:\n effective_rating = prediction\n elif user_rating:\n effective_rating = user_rating.score\n else:\n effective_rating = None\n wizard = User.query.filter_by(email='[email protected]').one()\n wizard_rating = Rating.query.filter_by(user_id=wizard.user_id,\n movie_id=movie.movie_id).first()\n if wizard_rating is None:\n wizard_rating = wizard.predict_rating(movie)\n else:\n wizard_rating = wizard_rating.score\n if wizard_rating and effective_rating:\n difference = abs(wizard_rating - effective_rating)\n else:\n difference = None\n BERATEMENT_MESSAGES = [\n \"I suppose you don't have such bad taste after all.\",\n \"I regret every decision that I've ever made that has brought me to listen to your opinion.\"\n ,\n 'Words fail me, as your taste in movies has clearly failed you.',\n 'That movie is great. For a clown to watch. Idiot.',\n 'Words cannot express the awfulness of your taste.']\n if difference is not None:\n beratement = BERATEMENT_MESSAGES[int(difference)]\n else:\n beratement = None\n unordered_ratings = db.session.query(Rating.score, func.count(\n Rating.score)).filter(Rating.movie_id == movie_id).group_by(Rating\n .score)\n ordered_movies = unordered_ratings.order_by(Rating.score)\n count_score = ordered_movies.all()\n avg_rating = db.session.query(func.avg(Rating.score)).filter(Rating\n .movie_id == movie_id).one()\n ratings = db.session.query(Rating.movie_id, Rating.score, Movie.title\n ).join(Movie).filter(Rating.movie_id == movie_id).all()\n return render_template('movie_profile.html', movie=movie,\n user_rating=user_rating, avg_rating=avg_rating[0], count_score=\n count_score, prediction=prediction, ratings=ratings, beratement\n =beratement)\n\n\[email protected]('/movies/<int:movie_id>/rate-movie')\ndef rate_movie(movie_id):\n \"\"\"Get user rating score for movie\"\"\"\n user_rating = request.args.get('user_rating')\n user_email = session['logged_in_user_email']\n user = User.query.filter(User.email == user_email).one()\n user_id = user.user_id\n if db.session.query(Rating.score).filter(Rating.movie_id == movie_id, \n Rating.user_id == user_id).all():\n db.session.query(Rating).filter(Rating.movie_id == movie_id, Rating\n .user_id == user_id).update({'score': user_rating})\n db.session.commit()\n flash(\n 'You have rated this movie before! It has now been updated to %s.'\n % user_rating, 'warning')\n return redirect('/users/%s' % user_id)\n else:\n db.session.add(Rating(movie_id=movie_id, user_id=user_id, score=\n user_rating))\n db.session.commit()\n flash('You have rated this movie a %s.' % user_rating, 'info')\n return redirect('/users/%s' % user_id)\n return render_template('rate_movie.html', user_rating=user_rating)\n\n\nif __name__ == '__main__':\n app.debug = True\n connect_to_db(app)\n app.run()\n",
"<docstring token>\n<import token>\napp = Flask(__name__)\napp.secret_key = 'ABC'\napp.jinja_env.undefined = StrictUndefined\n\n\[email protected]_filter()\ndef datetimefilter(value, format='%b %d'):\n \"\"\"Convert a datetime to a different format so it can be accessible in Jinja.\"\"\"\n return value.strftime(format)\n\n\napp.jinja_env.filters['datetimefilter'] = datetimefilter\n\n\[email protected]('/')\ndef index():\n \"\"\"Homepage.\"\"\"\n user_email = session.get('logged_in_user_email', None)\n if user_email is not None:\n user = User.query.filter(User.email == user_email).one()\n return render_template('homepage.html', user=user)\n else:\n return render_template('homepage.html', user=None)\n\n\[email protected]('/users')\ndef user_list():\n \"\"\"Show list of users.\"\"\"\n users = User.query.all()\n return render_template('user_list.html', users=users)\n\n\[email protected]('/users/<int:user_id>')\ndef user_profile(user_id):\n \"\"\"Show user information\"\"\"\n user = User.query.filter(User.user_id == user_id).one()\n user_movies = db.session.query(Rating.user_id, Rating.movie_id, Rating.\n score, Movie.title).join(Movie).filter(Rating.user_id == user_id\n ).order_by(Movie.title).all()\n return render_template('user_profile.html', user=user, user_movies=\n user_movies)\n\n\[email protected]('/signup-login', methods=['GET'])\ndef show_forms():\n \"\"\"Show signup and login forms.\"\"\"\n return render_template('signup_login.html')\n\n\[email protected]('/signup', methods=['POST'])\ndef signup():\n \"\"\"Check if user exists in database, otherwise add user to database.\"\"\"\n signup_email = request.form.get('signup_email')\n signup_password = request.form.get('signup_password')\n if db.session.query(User).filter(User.email == signup_email).first():\n flash('You already have an account please use login!', 'danger')\n return redirect('/signup-login')\n else:\n new_user = User(email=signup_email, password=signup_password, age=\n None, zipcode=None)\n db.session.add(new_user)\n db.session.commit()\n session['logged_in_user_email'] = signup_email\n session['logged_in_user'] = new_user.user_id\n flash('Your account has been created! You now are logged in!',\n 'success')\n return redirect('/')\n\n\[email protected]('/login', methods=['POST'])\ndef login():\n \"\"\"Check if user's email matches password, otherwise ask user to try again.\"\"\"\n login_email = request.form.get('login_email')\n login_password = request.form.get('login_password')\n if db.session.query(User).filter(User.email == login_email, User.\n password == login_password).first():\n flash('Login SUCCESS.', 'success')\n user = User.query.filter(User.email == login_email).one()\n session['logged_in_user_email'] = login_email\n session['logged_in_user'] = user.user_id\n return redirect('/users/%s' % user.user_id)\n else:\n flash('Incorrect password. Please try again!', 'danger')\n return redirect('/signup-login')\n\n\[email protected]('/logout')\ndef process_logout():\n \"\"\"Log user out.\"\"\"\n del session['logged_in_user_email']\n del session['logged_in_user']\n flash('Logged out.', 'success')\n return redirect('/')\n\n\[email protected]('/movies')\ndef movie_list():\n \"\"\"Show list of movies.\"\"\"\n movies = Movie.query.order_by(Movie.title).all()\n return render_template('movie_list.html', movies=movies)\n\n\[email protected]('/movies/<int:movie_id>', methods=['GET'])\ndef movie_profile(movie_id):\n \"\"\"Show movie information.\n\n If a user is logged in, let them add/edit a rating.\n \"\"\"\n if not session.get('logged_in_user_email'):\n flash(\n 'Please login or signup to see the movie details and rate the movie!'\n , 'danger')\n return redirect('/signup-login')\n else:\n movie = Movie.query.get(movie_id)\n user = User.query.filter(User.email == session.get(\n 'logged_in_user_email')).one()\n user_id = user.user_id\n if user_id:\n user_rating = Rating.query.filter_by(movie_id=movie_id, user_id\n =user_id).first()\n else:\n user_rating = None\n prediction = None\n if not user_rating and user_id:\n user = User.query.get(user_id)\n if user:\n prediction = user.predict_rating(movie)\n if prediction:\n effective_rating = prediction\n elif user_rating:\n effective_rating = user_rating.score\n else:\n effective_rating = None\n wizard = User.query.filter_by(email='[email protected]').one()\n wizard_rating = Rating.query.filter_by(user_id=wizard.user_id,\n movie_id=movie.movie_id).first()\n if wizard_rating is None:\n wizard_rating = wizard.predict_rating(movie)\n else:\n wizard_rating = wizard_rating.score\n if wizard_rating and effective_rating:\n difference = abs(wizard_rating - effective_rating)\n else:\n difference = None\n BERATEMENT_MESSAGES = [\n \"I suppose you don't have such bad taste after all.\",\n \"I regret every decision that I've ever made that has brought me to listen to your opinion.\"\n ,\n 'Words fail me, as your taste in movies has clearly failed you.',\n 'That movie is great. For a clown to watch. Idiot.',\n 'Words cannot express the awfulness of your taste.']\n if difference is not None:\n beratement = BERATEMENT_MESSAGES[int(difference)]\n else:\n beratement = None\n unordered_ratings = db.session.query(Rating.score, func.count(\n Rating.score)).filter(Rating.movie_id == movie_id).group_by(Rating\n .score)\n ordered_movies = unordered_ratings.order_by(Rating.score)\n count_score = ordered_movies.all()\n avg_rating = db.session.query(func.avg(Rating.score)).filter(Rating\n .movie_id == movie_id).one()\n ratings = db.session.query(Rating.movie_id, Rating.score, Movie.title\n ).join(Movie).filter(Rating.movie_id == movie_id).all()\n return render_template('movie_profile.html', movie=movie,\n user_rating=user_rating, avg_rating=avg_rating[0], count_score=\n count_score, prediction=prediction, ratings=ratings, beratement\n =beratement)\n\n\[email protected]('/movies/<int:movie_id>/rate-movie')\ndef rate_movie(movie_id):\n \"\"\"Get user rating score for movie\"\"\"\n user_rating = request.args.get('user_rating')\n user_email = session['logged_in_user_email']\n user = User.query.filter(User.email == user_email).one()\n user_id = user.user_id\n if db.session.query(Rating.score).filter(Rating.movie_id == movie_id, \n Rating.user_id == user_id).all():\n db.session.query(Rating).filter(Rating.movie_id == movie_id, Rating\n .user_id == user_id).update({'score': user_rating})\n db.session.commit()\n flash(\n 'You have rated this movie before! It has now been updated to %s.'\n % user_rating, 'warning')\n return redirect('/users/%s' % user_id)\n else:\n db.session.add(Rating(movie_id=movie_id, user_id=user_id, score=\n user_rating))\n db.session.commit()\n flash('You have rated this movie a %s.' % user_rating, 'info')\n return redirect('/users/%s' % user_id)\n return render_template('rate_movie.html', user_rating=user_rating)\n\n\nif __name__ == '__main__':\n app.debug = True\n connect_to_db(app)\n app.run()\n",
"<docstring token>\n<import token>\n<assignment token>\n\n\[email protected]_filter()\ndef datetimefilter(value, format='%b %d'):\n \"\"\"Convert a datetime to a different format so it can be accessible in Jinja.\"\"\"\n return value.strftime(format)\n\n\n<assignment token>\n\n\[email protected]('/')\ndef index():\n \"\"\"Homepage.\"\"\"\n user_email = session.get('logged_in_user_email', None)\n if user_email is not None:\n user = User.query.filter(User.email == user_email).one()\n return render_template('homepage.html', user=user)\n else:\n return render_template('homepage.html', user=None)\n\n\[email protected]('/users')\ndef user_list():\n \"\"\"Show list of users.\"\"\"\n users = User.query.all()\n return render_template('user_list.html', users=users)\n\n\[email protected]('/users/<int:user_id>')\ndef user_profile(user_id):\n \"\"\"Show user information\"\"\"\n user = User.query.filter(User.user_id == user_id).one()\n user_movies = db.session.query(Rating.user_id, Rating.movie_id, Rating.\n score, Movie.title).join(Movie).filter(Rating.user_id == user_id\n ).order_by(Movie.title).all()\n return render_template('user_profile.html', user=user, user_movies=\n user_movies)\n\n\[email protected]('/signup-login', methods=['GET'])\ndef show_forms():\n \"\"\"Show signup and login forms.\"\"\"\n return render_template('signup_login.html')\n\n\[email protected]('/signup', methods=['POST'])\ndef signup():\n \"\"\"Check if user exists in database, otherwise add user to database.\"\"\"\n signup_email = request.form.get('signup_email')\n signup_password = request.form.get('signup_password')\n if db.session.query(User).filter(User.email == signup_email).first():\n flash('You already have an account please use login!', 'danger')\n return redirect('/signup-login')\n else:\n new_user = User(email=signup_email, password=signup_password, age=\n None, zipcode=None)\n db.session.add(new_user)\n db.session.commit()\n session['logged_in_user_email'] = signup_email\n session['logged_in_user'] = new_user.user_id\n flash('Your account has been created! You now are logged in!',\n 'success')\n return redirect('/')\n\n\[email protected]('/login', methods=['POST'])\ndef login():\n \"\"\"Check if user's email matches password, otherwise ask user to try again.\"\"\"\n login_email = request.form.get('login_email')\n login_password = request.form.get('login_password')\n if db.session.query(User).filter(User.email == login_email, User.\n password == login_password).first():\n flash('Login SUCCESS.', 'success')\n user = User.query.filter(User.email == login_email).one()\n session['logged_in_user_email'] = login_email\n session['logged_in_user'] = user.user_id\n return redirect('/users/%s' % user.user_id)\n else:\n flash('Incorrect password. Please try again!', 'danger')\n return redirect('/signup-login')\n\n\[email protected]('/logout')\ndef process_logout():\n \"\"\"Log user out.\"\"\"\n del session['logged_in_user_email']\n del session['logged_in_user']\n flash('Logged out.', 'success')\n return redirect('/')\n\n\[email protected]('/movies')\ndef movie_list():\n \"\"\"Show list of movies.\"\"\"\n movies = Movie.query.order_by(Movie.title).all()\n return render_template('movie_list.html', movies=movies)\n\n\[email protected]('/movies/<int:movie_id>', methods=['GET'])\ndef movie_profile(movie_id):\n \"\"\"Show movie information.\n\n If a user is logged in, let them add/edit a rating.\n \"\"\"\n if not session.get('logged_in_user_email'):\n flash(\n 'Please login or signup to see the movie details and rate the movie!'\n , 'danger')\n return redirect('/signup-login')\n else:\n movie = Movie.query.get(movie_id)\n user = User.query.filter(User.email == session.get(\n 'logged_in_user_email')).one()\n user_id = user.user_id\n if user_id:\n user_rating = Rating.query.filter_by(movie_id=movie_id, user_id\n =user_id).first()\n else:\n user_rating = None\n prediction = None\n if not user_rating and user_id:\n user = User.query.get(user_id)\n if user:\n prediction = user.predict_rating(movie)\n if prediction:\n effective_rating = prediction\n elif user_rating:\n effective_rating = user_rating.score\n else:\n effective_rating = None\n wizard = User.query.filter_by(email='[email protected]').one()\n wizard_rating = Rating.query.filter_by(user_id=wizard.user_id,\n movie_id=movie.movie_id).first()\n if wizard_rating is None:\n wizard_rating = wizard.predict_rating(movie)\n else:\n wizard_rating = wizard_rating.score\n if wizard_rating and effective_rating:\n difference = abs(wizard_rating - effective_rating)\n else:\n difference = None\n BERATEMENT_MESSAGES = [\n \"I suppose you don't have such bad taste after all.\",\n \"I regret every decision that I've ever made that has brought me to listen to your opinion.\"\n ,\n 'Words fail me, as your taste in movies has clearly failed you.',\n 'That movie is great. For a clown to watch. Idiot.',\n 'Words cannot express the awfulness of your taste.']\n if difference is not None:\n beratement = BERATEMENT_MESSAGES[int(difference)]\n else:\n beratement = None\n unordered_ratings = db.session.query(Rating.score, func.count(\n Rating.score)).filter(Rating.movie_id == movie_id).group_by(Rating\n .score)\n ordered_movies = unordered_ratings.order_by(Rating.score)\n count_score = ordered_movies.all()\n avg_rating = db.session.query(func.avg(Rating.score)).filter(Rating\n .movie_id == movie_id).one()\n ratings = db.session.query(Rating.movie_id, Rating.score, Movie.title\n ).join(Movie).filter(Rating.movie_id == movie_id).all()\n return render_template('movie_profile.html', movie=movie,\n user_rating=user_rating, avg_rating=avg_rating[0], count_score=\n count_score, prediction=prediction, ratings=ratings, beratement\n =beratement)\n\n\[email protected]('/movies/<int:movie_id>/rate-movie')\ndef rate_movie(movie_id):\n \"\"\"Get user rating score for movie\"\"\"\n user_rating = request.args.get('user_rating')\n user_email = session['logged_in_user_email']\n user = User.query.filter(User.email == user_email).one()\n user_id = user.user_id\n if db.session.query(Rating.score).filter(Rating.movie_id == movie_id, \n Rating.user_id == user_id).all():\n db.session.query(Rating).filter(Rating.movie_id == movie_id, Rating\n .user_id == user_id).update({'score': user_rating})\n db.session.commit()\n flash(\n 'You have rated this movie before! It has now been updated to %s.'\n % user_rating, 'warning')\n return redirect('/users/%s' % user_id)\n else:\n db.session.add(Rating(movie_id=movie_id, user_id=user_id, score=\n user_rating))\n db.session.commit()\n flash('You have rated this movie a %s.' % user_rating, 'info')\n return redirect('/users/%s' % user_id)\n return render_template('rate_movie.html', user_rating=user_rating)\n\n\nif __name__ == '__main__':\n app.debug = True\n connect_to_db(app)\n app.run()\n",
"<docstring token>\n<import token>\n<assignment token>\n\n\[email protected]_filter()\ndef datetimefilter(value, format='%b %d'):\n \"\"\"Convert a datetime to a different format so it can be accessible in Jinja.\"\"\"\n return value.strftime(format)\n\n\n<assignment token>\n\n\[email protected]('/')\ndef index():\n \"\"\"Homepage.\"\"\"\n user_email = session.get('logged_in_user_email', None)\n if user_email is not None:\n user = User.query.filter(User.email == user_email).one()\n return render_template('homepage.html', user=user)\n else:\n return render_template('homepage.html', user=None)\n\n\[email protected]('/users')\ndef user_list():\n \"\"\"Show list of users.\"\"\"\n users = User.query.all()\n return render_template('user_list.html', users=users)\n\n\[email protected]('/users/<int:user_id>')\ndef user_profile(user_id):\n \"\"\"Show user information\"\"\"\n user = User.query.filter(User.user_id == user_id).one()\n user_movies = db.session.query(Rating.user_id, Rating.movie_id, Rating.\n score, Movie.title).join(Movie).filter(Rating.user_id == user_id\n ).order_by(Movie.title).all()\n return render_template('user_profile.html', user=user, user_movies=\n user_movies)\n\n\[email protected]('/signup-login', methods=['GET'])\ndef show_forms():\n \"\"\"Show signup and login forms.\"\"\"\n return render_template('signup_login.html')\n\n\[email protected]('/signup', methods=['POST'])\ndef signup():\n \"\"\"Check if user exists in database, otherwise add user to database.\"\"\"\n signup_email = request.form.get('signup_email')\n signup_password = request.form.get('signup_password')\n if db.session.query(User).filter(User.email == signup_email).first():\n flash('You already have an account please use login!', 'danger')\n return redirect('/signup-login')\n else:\n new_user = User(email=signup_email, password=signup_password, age=\n None, zipcode=None)\n db.session.add(new_user)\n db.session.commit()\n session['logged_in_user_email'] = signup_email\n session['logged_in_user'] = new_user.user_id\n flash('Your account has been created! You now are logged in!',\n 'success')\n return redirect('/')\n\n\[email protected]('/login', methods=['POST'])\ndef login():\n \"\"\"Check if user's email matches password, otherwise ask user to try again.\"\"\"\n login_email = request.form.get('login_email')\n login_password = request.form.get('login_password')\n if db.session.query(User).filter(User.email == login_email, User.\n password == login_password).first():\n flash('Login SUCCESS.', 'success')\n user = User.query.filter(User.email == login_email).one()\n session['logged_in_user_email'] = login_email\n session['logged_in_user'] = user.user_id\n return redirect('/users/%s' % user.user_id)\n else:\n flash('Incorrect password. Please try again!', 'danger')\n return redirect('/signup-login')\n\n\[email protected]('/logout')\ndef process_logout():\n \"\"\"Log user out.\"\"\"\n del session['logged_in_user_email']\n del session['logged_in_user']\n flash('Logged out.', 'success')\n return redirect('/')\n\n\[email protected]('/movies')\ndef movie_list():\n \"\"\"Show list of movies.\"\"\"\n movies = Movie.query.order_by(Movie.title).all()\n return render_template('movie_list.html', movies=movies)\n\n\[email protected]('/movies/<int:movie_id>', methods=['GET'])\ndef movie_profile(movie_id):\n \"\"\"Show movie information.\n\n If a user is logged in, let them add/edit a rating.\n \"\"\"\n if not session.get('logged_in_user_email'):\n flash(\n 'Please login or signup to see the movie details and rate the movie!'\n , 'danger')\n return redirect('/signup-login')\n else:\n movie = Movie.query.get(movie_id)\n user = User.query.filter(User.email == session.get(\n 'logged_in_user_email')).one()\n user_id = user.user_id\n if user_id:\n user_rating = Rating.query.filter_by(movie_id=movie_id, user_id\n =user_id).first()\n else:\n user_rating = None\n prediction = None\n if not user_rating and user_id:\n user = User.query.get(user_id)\n if user:\n prediction = user.predict_rating(movie)\n if prediction:\n effective_rating = prediction\n elif user_rating:\n effective_rating = user_rating.score\n else:\n effective_rating = None\n wizard = User.query.filter_by(email='[email protected]').one()\n wizard_rating = Rating.query.filter_by(user_id=wizard.user_id,\n movie_id=movie.movie_id).first()\n if wizard_rating is None:\n wizard_rating = wizard.predict_rating(movie)\n else:\n wizard_rating = wizard_rating.score\n if wizard_rating and effective_rating:\n difference = abs(wizard_rating - effective_rating)\n else:\n difference = None\n BERATEMENT_MESSAGES = [\n \"I suppose you don't have such bad taste after all.\",\n \"I regret every decision that I've ever made that has brought me to listen to your opinion.\"\n ,\n 'Words fail me, as your taste in movies has clearly failed you.',\n 'That movie is great. For a clown to watch. Idiot.',\n 'Words cannot express the awfulness of your taste.']\n if difference is not None:\n beratement = BERATEMENT_MESSAGES[int(difference)]\n else:\n beratement = None\n unordered_ratings = db.session.query(Rating.score, func.count(\n Rating.score)).filter(Rating.movie_id == movie_id).group_by(Rating\n .score)\n ordered_movies = unordered_ratings.order_by(Rating.score)\n count_score = ordered_movies.all()\n avg_rating = db.session.query(func.avg(Rating.score)).filter(Rating\n .movie_id == movie_id).one()\n ratings = db.session.query(Rating.movie_id, Rating.score, Movie.title\n ).join(Movie).filter(Rating.movie_id == movie_id).all()\n return render_template('movie_profile.html', movie=movie,\n user_rating=user_rating, avg_rating=avg_rating[0], count_score=\n count_score, prediction=prediction, ratings=ratings, beratement\n =beratement)\n\n\[email protected]('/movies/<int:movie_id>/rate-movie')\ndef rate_movie(movie_id):\n \"\"\"Get user rating score for movie\"\"\"\n user_rating = request.args.get('user_rating')\n user_email = session['logged_in_user_email']\n user = User.query.filter(User.email == user_email).one()\n user_id = user.user_id\n if db.session.query(Rating.score).filter(Rating.movie_id == movie_id, \n Rating.user_id == user_id).all():\n db.session.query(Rating).filter(Rating.movie_id == movie_id, Rating\n .user_id == user_id).update({'score': user_rating})\n db.session.commit()\n flash(\n 'You have rated this movie before! It has now been updated to %s.'\n % user_rating, 'warning')\n return redirect('/users/%s' % user_id)\n else:\n db.session.add(Rating(movie_id=movie_id, user_id=user_id, score=\n user_rating))\n db.session.commit()\n flash('You have rated this movie a %s.' % user_rating, 'info')\n return redirect('/users/%s' % user_id)\n return render_template('rate_movie.html', user_rating=user_rating)\n\n\n<code token>\n",
"<docstring token>\n<import token>\n<assignment token>\n\n\[email protected]_filter()\ndef datetimefilter(value, format='%b %d'):\n \"\"\"Convert a datetime to a different format so it can be accessible in Jinja.\"\"\"\n return value.strftime(format)\n\n\n<assignment token>\n\n\[email protected]('/')\ndef index():\n \"\"\"Homepage.\"\"\"\n user_email = session.get('logged_in_user_email', None)\n if user_email is not None:\n user = User.query.filter(User.email == user_email).one()\n return render_template('homepage.html', user=user)\n else:\n return render_template('homepage.html', user=None)\n\n\[email protected]('/users')\ndef user_list():\n \"\"\"Show list of users.\"\"\"\n users = User.query.all()\n return render_template('user_list.html', users=users)\n\n\n<function token>\n\n\[email protected]('/signup-login', methods=['GET'])\ndef show_forms():\n \"\"\"Show signup and login forms.\"\"\"\n return render_template('signup_login.html')\n\n\[email protected]('/signup', methods=['POST'])\ndef signup():\n \"\"\"Check if user exists in database, otherwise add user to database.\"\"\"\n signup_email = request.form.get('signup_email')\n signup_password = request.form.get('signup_password')\n if db.session.query(User).filter(User.email == signup_email).first():\n flash('You already have an account please use login!', 'danger')\n return redirect('/signup-login')\n else:\n new_user = User(email=signup_email, password=signup_password, age=\n None, zipcode=None)\n db.session.add(new_user)\n db.session.commit()\n session['logged_in_user_email'] = signup_email\n session['logged_in_user'] = new_user.user_id\n flash('Your account has been created! You now are logged in!',\n 'success')\n return redirect('/')\n\n\[email protected]('/login', methods=['POST'])\ndef login():\n \"\"\"Check if user's email matches password, otherwise ask user to try again.\"\"\"\n login_email = request.form.get('login_email')\n login_password = request.form.get('login_password')\n if db.session.query(User).filter(User.email == login_email, User.\n password == login_password).first():\n flash('Login SUCCESS.', 'success')\n user = User.query.filter(User.email == login_email).one()\n session['logged_in_user_email'] = login_email\n session['logged_in_user'] = user.user_id\n return redirect('/users/%s' % user.user_id)\n else:\n flash('Incorrect password. Please try again!', 'danger')\n return redirect('/signup-login')\n\n\[email protected]('/logout')\ndef process_logout():\n \"\"\"Log user out.\"\"\"\n del session['logged_in_user_email']\n del session['logged_in_user']\n flash('Logged out.', 'success')\n return redirect('/')\n\n\[email protected]('/movies')\ndef movie_list():\n \"\"\"Show list of movies.\"\"\"\n movies = Movie.query.order_by(Movie.title).all()\n return render_template('movie_list.html', movies=movies)\n\n\[email protected]('/movies/<int:movie_id>', methods=['GET'])\ndef movie_profile(movie_id):\n \"\"\"Show movie information.\n\n If a user is logged in, let them add/edit a rating.\n \"\"\"\n if not session.get('logged_in_user_email'):\n flash(\n 'Please login or signup to see the movie details and rate the movie!'\n , 'danger')\n return redirect('/signup-login')\n else:\n movie = Movie.query.get(movie_id)\n user = User.query.filter(User.email == session.get(\n 'logged_in_user_email')).one()\n user_id = user.user_id\n if user_id:\n user_rating = Rating.query.filter_by(movie_id=movie_id, user_id\n =user_id).first()\n else:\n user_rating = None\n prediction = None\n if not user_rating and user_id:\n user = User.query.get(user_id)\n if user:\n prediction = user.predict_rating(movie)\n if prediction:\n effective_rating = prediction\n elif user_rating:\n effective_rating = user_rating.score\n else:\n effective_rating = None\n wizard = User.query.filter_by(email='[email protected]').one()\n wizard_rating = Rating.query.filter_by(user_id=wizard.user_id,\n movie_id=movie.movie_id).first()\n if wizard_rating is None:\n wizard_rating = wizard.predict_rating(movie)\n else:\n wizard_rating = wizard_rating.score\n if wizard_rating and effective_rating:\n difference = abs(wizard_rating - effective_rating)\n else:\n difference = None\n BERATEMENT_MESSAGES = [\n \"I suppose you don't have such bad taste after all.\",\n \"I regret every decision that I've ever made that has brought me to listen to your opinion.\"\n ,\n 'Words fail me, as your taste in movies has clearly failed you.',\n 'That movie is great. For a clown to watch. Idiot.',\n 'Words cannot express the awfulness of your taste.']\n if difference is not None:\n beratement = BERATEMENT_MESSAGES[int(difference)]\n else:\n beratement = None\n unordered_ratings = db.session.query(Rating.score, func.count(\n Rating.score)).filter(Rating.movie_id == movie_id).group_by(Rating\n .score)\n ordered_movies = unordered_ratings.order_by(Rating.score)\n count_score = ordered_movies.all()\n avg_rating = db.session.query(func.avg(Rating.score)).filter(Rating\n .movie_id == movie_id).one()\n ratings = db.session.query(Rating.movie_id, Rating.score, Movie.title\n ).join(Movie).filter(Rating.movie_id == movie_id).all()\n return render_template('movie_profile.html', movie=movie,\n user_rating=user_rating, avg_rating=avg_rating[0], count_score=\n count_score, prediction=prediction, ratings=ratings, beratement\n =beratement)\n\n\[email protected]('/movies/<int:movie_id>/rate-movie')\ndef rate_movie(movie_id):\n \"\"\"Get user rating score for movie\"\"\"\n user_rating = request.args.get('user_rating')\n user_email = session['logged_in_user_email']\n user = User.query.filter(User.email == user_email).one()\n user_id = user.user_id\n if db.session.query(Rating.score).filter(Rating.movie_id == movie_id, \n Rating.user_id == user_id).all():\n db.session.query(Rating).filter(Rating.movie_id == movie_id, Rating\n .user_id == user_id).update({'score': user_rating})\n db.session.commit()\n flash(\n 'You have rated this movie before! It has now been updated to %s.'\n % user_rating, 'warning')\n return redirect('/users/%s' % user_id)\n else:\n db.session.add(Rating(movie_id=movie_id, user_id=user_id, score=\n user_rating))\n db.session.commit()\n flash('You have rated this movie a %s.' % user_rating, 'info')\n return redirect('/users/%s' % user_id)\n return render_template('rate_movie.html', user_rating=user_rating)\n\n\n<code token>\n",
"<docstring token>\n<import token>\n<assignment token>\n\n\[email protected]_filter()\ndef datetimefilter(value, format='%b %d'):\n \"\"\"Convert a datetime to a different format so it can be accessible in Jinja.\"\"\"\n return value.strftime(format)\n\n\n<assignment token>\n\n\[email protected]('/')\ndef index():\n \"\"\"Homepage.\"\"\"\n user_email = session.get('logged_in_user_email', None)\n if user_email is not None:\n user = User.query.filter(User.email == user_email).one()\n return render_template('homepage.html', user=user)\n else:\n return render_template('homepage.html', user=None)\n\n\[email protected]('/users')\ndef user_list():\n \"\"\"Show list of users.\"\"\"\n users = User.query.all()\n return render_template('user_list.html', users=users)\n\n\n<function token>\n\n\[email protected]('/signup-login', methods=['GET'])\ndef show_forms():\n \"\"\"Show signup and login forms.\"\"\"\n return render_template('signup_login.html')\n\n\[email protected]('/signup', methods=['POST'])\ndef signup():\n \"\"\"Check if user exists in database, otherwise add user to database.\"\"\"\n signup_email = request.form.get('signup_email')\n signup_password = request.form.get('signup_password')\n if db.session.query(User).filter(User.email == signup_email).first():\n flash('You already have an account please use login!', 'danger')\n return redirect('/signup-login')\n else:\n new_user = User(email=signup_email, password=signup_password, age=\n None, zipcode=None)\n db.session.add(new_user)\n db.session.commit()\n session['logged_in_user_email'] = signup_email\n session['logged_in_user'] = new_user.user_id\n flash('Your account has been created! You now are logged in!',\n 'success')\n return redirect('/')\n\n\[email protected]('/login', methods=['POST'])\ndef login():\n \"\"\"Check if user's email matches password, otherwise ask user to try again.\"\"\"\n login_email = request.form.get('login_email')\n login_password = request.form.get('login_password')\n if db.session.query(User).filter(User.email == login_email, User.\n password == login_password).first():\n flash('Login SUCCESS.', 'success')\n user = User.query.filter(User.email == login_email).one()\n session['logged_in_user_email'] = login_email\n session['logged_in_user'] = user.user_id\n return redirect('/users/%s' % user.user_id)\n else:\n flash('Incorrect password. Please try again!', 'danger')\n return redirect('/signup-login')\n\n\[email protected]('/logout')\ndef process_logout():\n \"\"\"Log user out.\"\"\"\n del session['logged_in_user_email']\n del session['logged_in_user']\n flash('Logged out.', 'success')\n return redirect('/')\n\n\[email protected]('/movies')\ndef movie_list():\n \"\"\"Show list of movies.\"\"\"\n movies = Movie.query.order_by(Movie.title).all()\n return render_template('movie_list.html', movies=movies)\n\n\n<function token>\n\n\[email protected]('/movies/<int:movie_id>/rate-movie')\ndef rate_movie(movie_id):\n \"\"\"Get user rating score for movie\"\"\"\n user_rating = request.args.get('user_rating')\n user_email = session['logged_in_user_email']\n user = User.query.filter(User.email == user_email).one()\n user_id = user.user_id\n if db.session.query(Rating.score).filter(Rating.movie_id == movie_id, \n Rating.user_id == user_id).all():\n db.session.query(Rating).filter(Rating.movie_id == movie_id, Rating\n .user_id == user_id).update({'score': user_rating})\n db.session.commit()\n flash(\n 'You have rated this movie before! It has now been updated to %s.'\n % user_rating, 'warning')\n return redirect('/users/%s' % user_id)\n else:\n db.session.add(Rating(movie_id=movie_id, user_id=user_id, score=\n user_rating))\n db.session.commit()\n flash('You have rated this movie a %s.' % user_rating, 'info')\n return redirect('/users/%s' % user_id)\n return render_template('rate_movie.html', user_rating=user_rating)\n\n\n<code token>\n",
"<docstring token>\n<import token>\n<assignment token>\n\n\[email protected]_filter()\ndef datetimefilter(value, format='%b %d'):\n \"\"\"Convert a datetime to a different format so it can be accessible in Jinja.\"\"\"\n return value.strftime(format)\n\n\n<assignment token>\n\n\[email protected]('/')\ndef index():\n \"\"\"Homepage.\"\"\"\n user_email = session.get('logged_in_user_email', None)\n if user_email is not None:\n user = User.query.filter(User.email == user_email).one()\n return render_template('homepage.html', user=user)\n else:\n return render_template('homepage.html', user=None)\n\n\[email protected]('/users')\ndef user_list():\n \"\"\"Show list of users.\"\"\"\n users = User.query.all()\n return render_template('user_list.html', users=users)\n\n\n<function token>\n\n\[email protected]('/signup-login', methods=['GET'])\ndef show_forms():\n \"\"\"Show signup and login forms.\"\"\"\n return render_template('signup_login.html')\n\n\[email protected]('/signup', methods=['POST'])\ndef signup():\n \"\"\"Check if user exists in database, otherwise add user to database.\"\"\"\n signup_email = request.form.get('signup_email')\n signup_password = request.form.get('signup_password')\n if db.session.query(User).filter(User.email == signup_email).first():\n flash('You already have an account please use login!', 'danger')\n return redirect('/signup-login')\n else:\n new_user = User(email=signup_email, password=signup_password, age=\n None, zipcode=None)\n db.session.add(new_user)\n db.session.commit()\n session['logged_in_user_email'] = signup_email\n session['logged_in_user'] = new_user.user_id\n flash('Your account has been created! You now are logged in!',\n 'success')\n return redirect('/')\n\n\[email protected]('/login', methods=['POST'])\ndef login():\n \"\"\"Check if user's email matches password, otherwise ask user to try again.\"\"\"\n login_email = request.form.get('login_email')\n login_password = request.form.get('login_password')\n if db.session.query(User).filter(User.email == login_email, User.\n password == login_password).first():\n flash('Login SUCCESS.', 'success')\n user = User.query.filter(User.email == login_email).one()\n session['logged_in_user_email'] = login_email\n session['logged_in_user'] = user.user_id\n return redirect('/users/%s' % user.user_id)\n else:\n flash('Incorrect password. Please try again!', 'danger')\n return redirect('/signup-login')\n\n\[email protected]('/logout')\ndef process_logout():\n \"\"\"Log user out.\"\"\"\n del session['logged_in_user_email']\n del session['logged_in_user']\n flash('Logged out.', 'success')\n return redirect('/')\n\n\n<function token>\n<function token>\n\n\[email protected]('/movies/<int:movie_id>/rate-movie')\ndef rate_movie(movie_id):\n \"\"\"Get user rating score for movie\"\"\"\n user_rating = request.args.get('user_rating')\n user_email = session['logged_in_user_email']\n user = User.query.filter(User.email == user_email).one()\n user_id = user.user_id\n if db.session.query(Rating.score).filter(Rating.movie_id == movie_id, \n Rating.user_id == user_id).all():\n db.session.query(Rating).filter(Rating.movie_id == movie_id, Rating\n .user_id == user_id).update({'score': user_rating})\n db.session.commit()\n flash(\n 'You have rated this movie before! It has now been updated to %s.'\n % user_rating, 'warning')\n return redirect('/users/%s' % user_id)\n else:\n db.session.add(Rating(movie_id=movie_id, user_id=user_id, score=\n user_rating))\n db.session.commit()\n flash('You have rated this movie a %s.' % user_rating, 'info')\n return redirect('/users/%s' % user_id)\n return render_template('rate_movie.html', user_rating=user_rating)\n\n\n<code token>\n",
"<docstring token>\n<import token>\n<assignment token>\n\n\[email protected]_filter()\ndef datetimefilter(value, format='%b %d'):\n \"\"\"Convert a datetime to a different format so it can be accessible in Jinja.\"\"\"\n return value.strftime(format)\n\n\n<assignment token>\n\n\[email protected]('/')\ndef index():\n \"\"\"Homepage.\"\"\"\n user_email = session.get('logged_in_user_email', None)\n if user_email is not None:\n user = User.query.filter(User.email == user_email).one()\n return render_template('homepage.html', user=user)\n else:\n return render_template('homepage.html', user=None)\n\n\[email protected]('/users')\ndef user_list():\n \"\"\"Show list of users.\"\"\"\n users = User.query.all()\n return render_template('user_list.html', users=users)\n\n\n<function token>\n\n\[email protected]('/signup-login', methods=['GET'])\ndef show_forms():\n \"\"\"Show signup and login forms.\"\"\"\n return render_template('signup_login.html')\n\n\[email protected]('/signup', methods=['POST'])\ndef signup():\n \"\"\"Check if user exists in database, otherwise add user to database.\"\"\"\n signup_email = request.form.get('signup_email')\n signup_password = request.form.get('signup_password')\n if db.session.query(User).filter(User.email == signup_email).first():\n flash('You already have an account please use login!', 'danger')\n return redirect('/signup-login')\n else:\n new_user = User(email=signup_email, password=signup_password, age=\n None, zipcode=None)\n db.session.add(new_user)\n db.session.commit()\n session['logged_in_user_email'] = signup_email\n session['logged_in_user'] = new_user.user_id\n flash('Your account has been created! You now are logged in!',\n 'success')\n return redirect('/')\n\n\[email protected]('/login', methods=['POST'])\ndef login():\n \"\"\"Check if user's email matches password, otherwise ask user to try again.\"\"\"\n login_email = request.form.get('login_email')\n login_password = request.form.get('login_password')\n if db.session.query(User).filter(User.email == login_email, User.\n password == login_password).first():\n flash('Login SUCCESS.', 'success')\n user = User.query.filter(User.email == login_email).one()\n session['logged_in_user_email'] = login_email\n session['logged_in_user'] = user.user_id\n return redirect('/users/%s' % user.user_id)\n else:\n flash('Incorrect password. Please try again!', 'danger')\n return redirect('/signup-login')\n\n\n<function token>\n<function token>\n<function token>\n\n\[email protected]('/movies/<int:movie_id>/rate-movie')\ndef rate_movie(movie_id):\n \"\"\"Get user rating score for movie\"\"\"\n user_rating = request.args.get('user_rating')\n user_email = session['logged_in_user_email']\n user = User.query.filter(User.email == user_email).one()\n user_id = user.user_id\n if db.session.query(Rating.score).filter(Rating.movie_id == movie_id, \n Rating.user_id == user_id).all():\n db.session.query(Rating).filter(Rating.movie_id == movie_id, Rating\n .user_id == user_id).update({'score': user_rating})\n db.session.commit()\n flash(\n 'You have rated this movie before! It has now been updated to %s.'\n % user_rating, 'warning')\n return redirect('/users/%s' % user_id)\n else:\n db.session.add(Rating(movie_id=movie_id, user_id=user_id, score=\n user_rating))\n db.session.commit()\n flash('You have rated this movie a %s.' % user_rating, 'info')\n return redirect('/users/%s' % user_id)\n return render_template('rate_movie.html', user_rating=user_rating)\n\n\n<code token>\n",
"<docstring token>\n<import token>\n<assignment token>\n\n\[email protected]_filter()\ndef datetimefilter(value, format='%b %d'):\n \"\"\"Convert a datetime to a different format so it can be accessible in Jinja.\"\"\"\n return value.strftime(format)\n\n\n<assignment token>\n\n\[email protected]('/')\ndef index():\n \"\"\"Homepage.\"\"\"\n user_email = session.get('logged_in_user_email', None)\n if user_email is not None:\n user = User.query.filter(User.email == user_email).one()\n return render_template('homepage.html', user=user)\n else:\n return render_template('homepage.html', user=None)\n\n\n<function token>\n<function token>\n\n\[email protected]('/signup-login', methods=['GET'])\ndef show_forms():\n \"\"\"Show signup and login forms.\"\"\"\n return render_template('signup_login.html')\n\n\[email protected]('/signup', methods=['POST'])\ndef signup():\n \"\"\"Check if user exists in database, otherwise add user to database.\"\"\"\n signup_email = request.form.get('signup_email')\n signup_password = request.form.get('signup_password')\n if db.session.query(User).filter(User.email == signup_email).first():\n flash('You already have an account please use login!', 'danger')\n return redirect('/signup-login')\n else:\n new_user = User(email=signup_email, password=signup_password, age=\n None, zipcode=None)\n db.session.add(new_user)\n db.session.commit()\n session['logged_in_user_email'] = signup_email\n session['logged_in_user'] = new_user.user_id\n flash('Your account has been created! You now are logged in!',\n 'success')\n return redirect('/')\n\n\[email protected]('/login', methods=['POST'])\ndef login():\n \"\"\"Check if user's email matches password, otherwise ask user to try again.\"\"\"\n login_email = request.form.get('login_email')\n login_password = request.form.get('login_password')\n if db.session.query(User).filter(User.email == login_email, User.\n password == login_password).first():\n flash('Login SUCCESS.', 'success')\n user = User.query.filter(User.email == login_email).one()\n session['logged_in_user_email'] = login_email\n session['logged_in_user'] = user.user_id\n return redirect('/users/%s' % user.user_id)\n else:\n flash('Incorrect password. Please try again!', 'danger')\n return redirect('/signup-login')\n\n\n<function token>\n<function token>\n<function token>\n\n\[email protected]('/movies/<int:movie_id>/rate-movie')\ndef rate_movie(movie_id):\n \"\"\"Get user rating score for movie\"\"\"\n user_rating = request.args.get('user_rating')\n user_email = session['logged_in_user_email']\n user = User.query.filter(User.email == user_email).one()\n user_id = user.user_id\n if db.session.query(Rating.score).filter(Rating.movie_id == movie_id, \n Rating.user_id == user_id).all():\n db.session.query(Rating).filter(Rating.movie_id == movie_id, Rating\n .user_id == user_id).update({'score': user_rating})\n db.session.commit()\n flash(\n 'You have rated this movie before! It has now been updated to %s.'\n % user_rating, 'warning')\n return redirect('/users/%s' % user_id)\n else:\n db.session.add(Rating(movie_id=movie_id, user_id=user_id, score=\n user_rating))\n db.session.commit()\n flash('You have rated this movie a %s.' % user_rating, 'info')\n return redirect('/users/%s' % user_id)\n return render_template('rate_movie.html', user_rating=user_rating)\n\n\n<code token>\n",
"<docstring token>\n<import token>\n<assignment token>\n\n\[email protected]_filter()\ndef datetimefilter(value, format='%b %d'):\n \"\"\"Convert a datetime to a different format so it can be accessible in Jinja.\"\"\"\n return value.strftime(format)\n\n\n<assignment token>\n\n\[email protected]('/')\ndef index():\n \"\"\"Homepage.\"\"\"\n user_email = session.get('logged_in_user_email', None)\n if user_email is not None:\n user = User.query.filter(User.email == user_email).one()\n return render_template('homepage.html', user=user)\n else:\n return render_template('homepage.html', user=None)\n\n\n<function token>\n<function token>\n\n\[email protected]('/signup-login', methods=['GET'])\ndef show_forms():\n \"\"\"Show signup and login forms.\"\"\"\n return render_template('signup_login.html')\n\n\n<function token>\n\n\[email protected]('/login', methods=['POST'])\ndef login():\n \"\"\"Check if user's email matches password, otherwise ask user to try again.\"\"\"\n login_email = request.form.get('login_email')\n login_password = request.form.get('login_password')\n if db.session.query(User).filter(User.email == login_email, User.\n password == login_password).first():\n flash('Login SUCCESS.', 'success')\n user = User.query.filter(User.email == login_email).one()\n session['logged_in_user_email'] = login_email\n session['logged_in_user'] = user.user_id\n return redirect('/users/%s' % user.user_id)\n else:\n flash('Incorrect password. Please try again!', 'danger')\n return redirect('/signup-login')\n\n\n<function token>\n<function token>\n<function token>\n\n\[email protected]('/movies/<int:movie_id>/rate-movie')\ndef rate_movie(movie_id):\n \"\"\"Get user rating score for movie\"\"\"\n user_rating = request.args.get('user_rating')\n user_email = session['logged_in_user_email']\n user = User.query.filter(User.email == user_email).one()\n user_id = user.user_id\n if db.session.query(Rating.score).filter(Rating.movie_id == movie_id, \n Rating.user_id == user_id).all():\n db.session.query(Rating).filter(Rating.movie_id == movie_id, Rating\n .user_id == user_id).update({'score': user_rating})\n db.session.commit()\n flash(\n 'You have rated this movie before! It has now been updated to %s.'\n % user_rating, 'warning')\n return redirect('/users/%s' % user_id)\n else:\n db.session.add(Rating(movie_id=movie_id, user_id=user_id, score=\n user_rating))\n db.session.commit()\n flash('You have rated this movie a %s.' % user_rating, 'info')\n return redirect('/users/%s' % user_id)\n return render_template('rate_movie.html', user_rating=user_rating)\n\n\n<code token>\n",
"<docstring token>\n<import token>\n<assignment token>\n\n\[email protected]_filter()\ndef datetimefilter(value, format='%b %d'):\n \"\"\"Convert a datetime to a different format so it can be accessible in Jinja.\"\"\"\n return value.strftime(format)\n\n\n<assignment token>\n\n\[email protected]('/')\ndef index():\n \"\"\"Homepage.\"\"\"\n user_email = session.get('logged_in_user_email', None)\n if user_email is not None:\n user = User.query.filter(User.email == user_email).one()\n return render_template('homepage.html', user=user)\n else:\n return render_template('homepage.html', user=None)\n\n\n<function token>\n<function token>\n\n\[email protected]('/signup-login', methods=['GET'])\ndef show_forms():\n \"\"\"Show signup and login forms.\"\"\"\n return render_template('signup_login.html')\n\n\n<function token>\n\n\[email protected]('/login', methods=['POST'])\ndef login():\n \"\"\"Check if user's email matches password, otherwise ask user to try again.\"\"\"\n login_email = request.form.get('login_email')\n login_password = request.form.get('login_password')\n if db.session.query(User).filter(User.email == login_email, User.\n password == login_password).first():\n flash('Login SUCCESS.', 'success')\n user = User.query.filter(User.email == login_email).one()\n session['logged_in_user_email'] = login_email\n session['logged_in_user'] = user.user_id\n return redirect('/users/%s' % user.user_id)\n else:\n flash('Incorrect password. Please try again!', 'danger')\n return redirect('/signup-login')\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<code token>\n",
"<docstring token>\n<import token>\n<assignment token>\n\n\[email protected]_filter()\ndef datetimefilter(value, format='%b %d'):\n \"\"\"Convert a datetime to a different format so it can be accessible in Jinja.\"\"\"\n return value.strftime(format)\n\n\n<assignment token>\n<function token>\n<function token>\n<function token>\n\n\[email protected]('/signup-login', methods=['GET'])\ndef show_forms():\n \"\"\"Show signup and login forms.\"\"\"\n return render_template('signup_login.html')\n\n\n<function token>\n\n\[email protected]('/login', methods=['POST'])\ndef login():\n \"\"\"Check if user's email matches password, otherwise ask user to try again.\"\"\"\n login_email = request.form.get('login_email')\n login_password = request.form.get('login_password')\n if db.session.query(User).filter(User.email == login_email, User.\n password == login_password).first():\n flash('Login SUCCESS.', 'success')\n user = User.query.filter(User.email == login_email).one()\n session['logged_in_user_email'] = login_email\n session['logged_in_user'] = user.user_id\n return redirect('/users/%s' % user.user_id)\n else:\n flash('Incorrect password. Please try again!', 'danger')\n return redirect('/signup-login')\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<code token>\n",
"<docstring token>\n<import token>\n<assignment token>\n<function token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n\n\[email protected]('/signup-login', methods=['GET'])\ndef show_forms():\n \"\"\"Show signup and login forms.\"\"\"\n return render_template('signup_login.html')\n\n\n<function token>\n\n\[email protected]('/login', methods=['POST'])\ndef login():\n \"\"\"Check if user's email matches password, otherwise ask user to try again.\"\"\"\n login_email = request.form.get('login_email')\n login_password = request.form.get('login_password')\n if db.session.query(User).filter(User.email == login_email, User.\n password == login_password).first():\n flash('Login SUCCESS.', 'success')\n user = User.query.filter(User.email == login_email).one()\n session['logged_in_user_email'] = login_email\n session['logged_in_user'] = user.user_id\n return redirect('/users/%s' % user.user_id)\n else:\n flash('Incorrect password. Please try again!', 'danger')\n return redirect('/signup-login')\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<code token>\n",
"<docstring token>\n<import token>\n<assignment token>\n<function token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\[email protected]('/login', methods=['POST'])\ndef login():\n \"\"\"Check if user's email matches password, otherwise ask user to try again.\"\"\"\n login_email = request.form.get('login_email')\n login_password = request.form.get('login_password')\n if db.session.query(User).filter(User.email == login_email, User.\n password == login_password).first():\n flash('Login SUCCESS.', 'success')\n user = User.query.filter(User.email == login_email).one()\n session['logged_in_user_email'] = login_email\n session['logged_in_user'] = user.user_id\n return redirect('/users/%s' % user.user_id)\n else:\n flash('Incorrect password. Please try again!', 'danger')\n return redirect('/signup-login')\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<code token>\n",
"<docstring token>\n<import token>\n<assignment token>\n<function token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<code token>\n"
] | false |
98,399 |
c81a9f51b59765477c4fdc2f811afef54dd61f8f
|
import requests
from requests.compat import urljoin
def api_post(base_url, api, parameters):
api_endpoint = urljoin(base_url, api)
headers = {'Content-Type': 'application/json',
'X-Device-Name':'Arduino Furnace Monitor'}
post = requests.post(url=api_endpoint, headers=headers, json=parameters)
return post
base_url = 'http://192.168.0.10:8080'
api = 'iot-redirect/data/add/'
data = {'tag': 'AcComp001',
'value': 'On',
}
r = api_post(base_url, api, data)
print('Status:', r.status_code)
print('Reason:', r.reason)
print('Response text:', r.text)
|
[
"import requests\nfrom requests.compat import urljoin\n\n\ndef api_post(base_url, api, parameters):\n api_endpoint = urljoin(base_url, api)\n headers = {'Content-Type': 'application/json',\n 'X-Device-Name':'Arduino Furnace Monitor'}\n post = requests.post(url=api_endpoint, headers=headers, json=parameters)\n return post\n\n\nbase_url = 'http://192.168.0.10:8080'\napi = 'iot-redirect/data/add/'\n\ndata = {'tag': 'AcComp001',\n 'value': 'On',\n }\n\nr = api_post(base_url, api, data)\nprint('Status:', r.status_code)\nprint('Reason:', r.reason)\nprint('Response text:', r.text)\n",
"import requests\nfrom requests.compat import urljoin\n\n\ndef api_post(base_url, api, parameters):\n api_endpoint = urljoin(base_url, api)\n headers = {'Content-Type': 'application/json', 'X-Device-Name':\n 'Arduino Furnace Monitor'}\n post = requests.post(url=api_endpoint, headers=headers, json=parameters)\n return post\n\n\nbase_url = 'http://192.168.0.10:8080'\napi = 'iot-redirect/data/add/'\ndata = {'tag': 'AcComp001', 'value': 'On'}\nr = api_post(base_url, api, data)\nprint('Status:', r.status_code)\nprint('Reason:', r.reason)\nprint('Response text:', r.text)\n",
"<import token>\n\n\ndef api_post(base_url, api, parameters):\n api_endpoint = urljoin(base_url, api)\n headers = {'Content-Type': 'application/json', 'X-Device-Name':\n 'Arduino Furnace Monitor'}\n post = requests.post(url=api_endpoint, headers=headers, json=parameters)\n return post\n\n\nbase_url = 'http://192.168.0.10:8080'\napi = 'iot-redirect/data/add/'\ndata = {'tag': 'AcComp001', 'value': 'On'}\nr = api_post(base_url, api, data)\nprint('Status:', r.status_code)\nprint('Reason:', r.reason)\nprint('Response text:', r.text)\n",
"<import token>\n\n\ndef api_post(base_url, api, parameters):\n api_endpoint = urljoin(base_url, api)\n headers = {'Content-Type': 'application/json', 'X-Device-Name':\n 'Arduino Furnace Monitor'}\n post = requests.post(url=api_endpoint, headers=headers, json=parameters)\n return post\n\n\n<assignment token>\nprint('Status:', r.status_code)\nprint('Reason:', r.reason)\nprint('Response text:', r.text)\n",
"<import token>\n\n\ndef api_post(base_url, api, parameters):\n api_endpoint = urljoin(base_url, api)\n headers = {'Content-Type': 'application/json', 'X-Device-Name':\n 'Arduino Furnace Monitor'}\n post = requests.post(url=api_endpoint, headers=headers, json=parameters)\n return post\n\n\n<assignment token>\n<code token>\n",
"<import token>\n<function token>\n<assignment token>\n<code token>\n"
] | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.